author     root <root@artemis.panaceas.org>    2015-12-25 04:40:36 +0000
committer  root <root@artemis.panaceas.org>    2015-12-25 04:40:36 +0000
commit     849369d6c66d3054688672f97d31fceb8e8230fb (patch)
tree       6135abc790ca67dedbe07c39806591e70eda81ce /drivers/message
download   linux-3.0.35-kobo-849369d6c66d3054688672f97d31fceb8e8230fb.tar.gz
           linux-3.0.35-kobo-849369d6c66d3054688672f97d31fceb8e8230fb.tar.bz2
           linux-3.0.35-kobo-849369d6c66d3054688672f97d31fceb8e8230fb.zip
initial_commit
Diffstat (limited to 'drivers/message')
-rw-r--r--  drivers/message/Makefile | 6
-rw-r--r--  drivers/message/fusion/Kconfig | 123
-rw-r--r--  drivers/message/fusion/Makefile | 14
-rw-r--r--  drivers/message/fusion/lsi/mpi.h | 799
-rw-r--r--  drivers/message/fusion/lsi/mpi_cnfg.h | 3115
-rw-r--r--  drivers/message/fusion/lsi/mpi_fc.h | 366
-rw-r--r--  drivers/message/fusion/lsi/mpi_history.txt | 868
-rw-r--r--  drivers/message/fusion/lsi/mpi_init.h | 580
-rw-r--r--  drivers/message/fusion/lsi/mpi_ioc.h | 1207
-rw-r--r--  drivers/message/fusion/lsi/mpi_lan.h | 214
-rw-r--r--  drivers/message/fusion/lsi/mpi_log_fc.h | 89
-rw-r--r--  drivers/message/fusion/lsi/mpi_log_sas.h | 322
-rw-r--r--  drivers/message/fusion/lsi/mpi_raid.h | 259
-rw-r--r--  drivers/message/fusion/lsi/mpi_sas.h | 278
-rw-r--r--  drivers/message/fusion/lsi/mpi_targ.h | 650
-rw-r--r--  drivers/message/fusion/lsi/mpi_tool.h | 354
-rw-r--r--  drivers/message/fusion/lsi/mpi_type.h | 83
-rw-r--r--  drivers/message/fusion/mptbase.c | 8483
-rw-r--r--  drivers/message/fusion/mptbase.h | 995
-rw-r--r--  drivers/message/fusion/mptctl.c | 3087
-rw-r--r--  drivers/message/fusion/mptctl.h | 467
-rw-r--r--  drivers/message/fusion/mptdebug.h | 291
-rw-r--r--  drivers/message/fusion/mptfc.c | 1557
-rw-r--r--  drivers/message/fusion/mptlan.c | 1544
-rw-r--r--  drivers/message/fusion/mptlan.h | 131
-rw-r--r--  drivers/message/fusion/mptsas.c | 5416
-rw-r--r--  drivers/message/fusion/mptsas.h | 192
-rw-r--r--  drivers/message/fusion/mptscsih.c | 3357
-rw-r--r--  drivers/message/fusion/mptscsih.h | 137
-rw-r--r--  drivers/message/fusion/mptspi.c | 1615
-rw-r--r--  drivers/message/i2o/Kconfig | 121
-rw-r--r--  drivers/message/i2o/Makefile | 16
-rw-r--r--  drivers/message/i2o/README | 98
-rw-r--r--  drivers/message/i2o/README.ioctl | 394
-rw-r--r--  drivers/message/i2o/bus-osm.c | 176
-rw-r--r--  drivers/message/i2o/config-osm.c | 90
-rw-r--r--  drivers/message/i2o/core.h | 69
-rw-r--r--  drivers/message/i2o/debug.c | 472
-rw-r--r--  drivers/message/i2o/device.c | 584
-rw-r--r--  drivers/message/i2o/driver.c | 378
-rw-r--r--  drivers/message/i2o/exec-osm.c | 612
-rw-r--r--  drivers/message/i2o/i2o_block.c | 1232
-rw-r--r--  drivers/message/i2o/i2o_block.h | 103
-rw-r--r--  drivers/message/i2o/i2o_config.c | 1146
-rw-r--r--  drivers/message/i2o/i2o_proc.c | 2104
-rw-r--r--  drivers/message/i2o/i2o_scsi.c | 816
-rw-r--r--  drivers/message/i2o/iop.c | 1248
-rw-r--r--  drivers/message/i2o/memory.c | 313
-rw-r--r--  drivers/message/i2o/pci.c | 497
49 files changed, 47068 insertions, 0 deletions
diff --git a/drivers/message/Makefile b/drivers/message/Makefile
new file mode 100644
index 00000000..97ef5a01
--- /dev/null
+++ b/drivers/message/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for MPT based block devices
+#
+
+obj-$(CONFIG_I2O) += i2o/
+obj-$(CONFIG_FUSION) += fusion/
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
new file mode 100644
index 00000000..a34a11d2
--- /dev/null
+++ b/drivers/message/fusion/Kconfig
@@ -0,0 +1,123 @@
+
+menuconfig FUSION
+ bool "Fusion MPT device support"
+ depends on PCI
+ ---help---
+ Say Y here to get to see options for Fusion Message
+ Passing Technology (MPT) drivers.
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if FUSION
+
+config FUSION_SPI
+ tristate "Fusion MPT ScsiHost drivers for SPI"
+ depends on PCI && SCSI
+ select SCSI_SPI_ATTRS
+ ---help---
+ SCSI HOST support for parallel SCSI host adapters.
+
+ List of supported controllers:
+
+ LSI53C1020
+ LSI53C1020A
+ LSI53C1030
+ LSI53C1035
+ ATTO UL4D
+
+config FUSION_FC
+ tristate "Fusion MPT ScsiHost drivers for FC"
+ depends on PCI && SCSI
+ select SCSI_FC_ATTRS
+ ---help---
+ SCSI HOST support for Fibre Channel host adapters.
+
+ List of supported controllers:
+
+ LSIFC909
+ LSIFC919
+ LSIFC919X
+ LSIFC929
+ LSIFC929X
+ LSIFC929XL
+ LSIFC949X
+ LSIFC949E
+ Brocade FC 410/420
+
+config FUSION_SAS
+ tristate "Fusion MPT ScsiHost drivers for SAS"
+ depends on PCI && SCSI
+ select SCSI_SAS_ATTRS
+ ---help---
+ SCSI HOST support for SAS host adapters.
+
+ List of supported controllers:
+
+ LSISAS1064
+ LSISAS1068
+ LSISAS1064E
+ LSISAS1068E
+ LSISAS1078
+
+config FUSION_MAX_SGE
+ int "Maximum number of scatter gather entries (16 - 128)"
+ default "128"
+ range 16 128
+ help
+ This option allows you to specify the maximum number of scatter-
+ gather entries per I/O. The driver default is 128, which matches
+ SCSI_MAX_PHYS_SEGMENTS. However, it may be decreased to as few
+ as 16. Decreasing this parameter reduces memory requirements
+ on a per-controller basis.
+
+config FUSION_CTL
+ tristate "Fusion MPT misc device (ioctl) driver"
+ depends on FUSION_SPI || FUSION_FC || FUSION_SAS
+ ---help---
+ The Fusion MPT misc device driver provides specialized control
+ of MPT adapters via system ioctl calls. Use of ioctl calls to
+ the MPT driver requires that you create and use a misc device
+ node, for example:
+ mknod /dev/mptctl c 10 240
+
+ One use of this ioctl interface is to perform an upgrade (reflash)
+ of the MPT adapter firmware. Refer to readme file(s) distributed
+ with the Fusion MPT Linux driver for additional details.
+
+ If enabled by saying M here, a module named mptctl
+ will be compiled.
+
+ If unsure whether you really want or need this, say N.
+
+config FUSION_LAN
+ tristate "Fusion MPT LAN driver"
+ depends on FUSION_FC && NET_FC
+ ---help---
+ This module supports LAN IP traffic over Fibre Channel port(s)
+ on Fusion MPT compatible hardware (LSIFC9xx chips).
+ The physical interface used is defined in RFC 2625.
+ Please refer to that document for details.
+
+ Installing this driver requires that you know how to configure
+ and activate a new network interface, "fc0", using standard Linux tools.
+
+ If enabled by saying M here, a module named mptlan
+ will be compiled.
+
+ If unsure whether you really want or need this, say N.
+
+config FUSION_LOGGING
+ bool "Fusion MPT logging facility"
+ ---help---
+ This turns on a logging facility that can be used to debug a number
+ of Fusion MPT-related problems.
+
+ The debug level can be programmed on the fly via sysfs (hex values):
+
+ echo [level] > /sys/class/scsi_host/host#/debug_level
+
+ The available debug levels are listed in the source file
+ drivers/message/fusion/mptdebug.h.
+
+endif # FUSION
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
new file mode 100644
index 00000000..d182a24b
--- /dev/null
+++ b/drivers/message/fusion/Makefile
@@ -0,0 +1,14 @@
+# Fusion MPT drivers; recognized debug defines...
+
+# enable verbose logging
+# CONFIG_FUSION_LOGGING needs to be enabled in Kconfig
+#ccflags-y := -DMPT_DEBUG_VERBOSE
+
+
+#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-} LSI_LOGIC
+
+obj-$(CONFIG_FUSION_SPI) += mptbase.o mptscsih.o mptspi.o
+obj-$(CONFIG_FUSION_FC) += mptbase.o mptscsih.o mptfc.o
+obj-$(CONFIG_FUSION_SAS) += mptbase.o mptscsih.o mptsas.o
+obj-$(CONFIG_FUSION_CTL) += mptctl.o
+obj-$(CONFIG_FUSION_LAN) += mptlan.o
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
new file mode 100644
index 00000000..11c0f461
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -0,0 +1,799 @@
+/*
+ * Copyright (c) 2000-2008 LSI Corporation.
+ *
+ *
+ * Name: mpi.h
+ * Title: MPI Message independent structures and definitions
+ * Creation Date: July 27, 2000
+ *
+ * mpi.h Version: 01.05.16
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 05-24-00 00.10.02 Added MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH definition.
+ * 06-06-00 01.00.01 Update MPI_VERSION_MAJOR and MPI_VERSION_MINOR.
+ * 06-22-00 01.00.02 Added MPI_IOCSTATUS_LAN_ definitions.
+ * Removed LAN_SUSPEND function definition.
+ * Added MPI_MSGFLAGS_CONTINUATION_REPLY definition.
+ * 06-30-00 01.00.03 Added MPI_CONTEXT_REPLY_TYPE_LAN definition.
+ * Added MPI_GET/SET_CONTEXT_REPLY_TYPE macros.
+ * 07-27-00 01.00.04 Added MPI_FAULT_ definitions.
+ * Removed MPI_IOCSTATUS_MSG/DATA_XFER_ERROR definitions.
+ * Added MPI_IOCSTATUS_INTERNAL_ERROR definition.
+ * Added MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH.
+ * 11-02-00 01.01.01 Original release for post 1.0 work.
+ * 12-04-00 01.01.02 Added new function codes.
+ * 01-09-01 01.01.03 Added more definitions to the system interface section
+ * Added MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT.
+ * 01-25-01 01.01.04 Changed MPI_VERSION_MINOR from 0x00 to 0x01.
+ * 02-20-01 01.01.05 Started using MPI_POINTER.
+ * Fixed value for MPI_DIAG_RW_ENABLE.
+ * Added defines for MPI_DIAG_PREVENT_IOC_BOOT and
+ * MPI_DIAG_CLEAR_FLASH_BAD_SIG.
+ * Obsoleted MPI_IOCSTATUS_TARGET_FC_ defines.
+ * 02-27-01 01.01.06 Removed MPI_HOST_INDEX_REGISTER define.
+ * Added function codes for RAID.
+ * 04-09-01 01.01.07 Added alternate define for MPI_DOORBELL_ACTIVE,
+ * MPI_DOORBELL_USED, to better match the spec.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * Changed MPI_VERSION_MINOR from 0x01 to 0x02.
+ * Added define MPI_FUNCTION_TOOLBOX.
+ * 09-28-01 01.02.02 New function code MPI_SCSI_ENCLOSURE_PROCESSOR.
+ * 11-01-01 01.02.03 Changed name to MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR.
+ * 03-14-02 01.02.04 Added MPI_HEADER_VERSION_ defines.
+ * 05-31-02 01.02.05 Bumped MPI_HEADER_VERSION_UNIT.
+ * 07-12-02 01.02.06 Added define for MPI_FUNCTION_MAILBOX.
+ * 09-16-02 01.02.07 Bumped value for MPI_HEADER_VERSION_UNIT.
+ * 11-15-02 01.02.08 Added define MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX and
+ * obsoleted define MPI_IOCSTATUS_TARGET_INVALID_IOCINDEX.
+ * 04-01-03 01.02.09 New IOCStatus code: MPI_IOCSTATUS_FC_EXCHANGE_CANCELED
+ * 06-26-03 01.02.10 Bumped MPI_HEADER_VERSION_UNIT value.
+ * 01-16-04 01.02.11 Added define for MPI_IOCLOGINFO_TYPE_SHIFT.
+ * 04-29-04 01.02.12 Added function codes for MPI_FUNCTION_DIAG_BUFFER_POST
+ * and MPI_FUNCTION_DIAG_RELEASE.
+ * Added MPI_IOCSTATUS_DIAGNOSTIC_RELEASED define.
+ * Bumped MPI_HEADER_VERSION_UNIT value.
+ * 05-11-04 01.03.01 Bumped MPI_VERSION_MINOR for MPI v1.3.
+ * Added codes for Inband.
+ * 08-19-04 01.05.01 Added defines for Host Buffer Access Control doorbell.
+ * Added define for offset of High Priority Request Queue.
+ * Added new function codes and new IOCStatus codes.
+ * Added an IOCLogInfo type of SAS.
+ * 12-07-04 01.05.02 Bumped MPI_HEADER_VERSION_UNIT.
+ * 12-09-04 01.05.03 Bumped MPI_HEADER_VERSION_UNIT.
+ * 01-15-05 01.05.04 Bumped MPI_HEADER_VERSION_UNIT.
+ * 02-09-05 01.05.05 Bumped MPI_HEADER_VERSION_UNIT.
+ * 02-22-05 01.05.06 Bumped MPI_HEADER_VERSION_UNIT.
+ * 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and
+ * TargetAssistExtended requests.
+ * Removed EEDP IOCStatus codes.
+ * 06-24-05 01.05.08 Added function codes for SCSI IO 32 and
+ * TargetAssistExtended requests.
+ * Added EEDP IOCStatus codes.
+ * 08-03-05 01.05.09 Bumped MPI_HEADER_VERSION_UNIT.
+ * 08-30-05 01.05.10 Added 2 new IOCStatus codes for Target.
+ * 03-27-06 01.05.11 Bumped MPI_HEADER_VERSION_UNIT.
+ * 10-11-06 01.05.12 Bumped MPI_HEADER_VERSION_UNIT.
+ * 05-24-07 01.05.13 Bumped MPI_HEADER_VERSION_UNIT.
+ * 08-07-07 01.05.14 Bumped MPI_HEADER_VERSION_UNIT.
+ * 01-15-08 01.05.15 Bumped MPI_HEADER_VERSION_UNIT.
+ * 03-28-08 01.05.16 Bumped MPI_HEADER_VERSION_UNIT.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_H
+#define MPI_H
+
+
+/*****************************************************************************
+*
+* M P I V e r s i o n D e f i n i t i o n s
+*
+*****************************************************************************/
+
+#define MPI_VERSION_MAJOR (0x01)
+#define MPI_VERSION_MINOR (0x05)
+#define MPI_VERSION_MAJOR_MASK (0xFF00)
+#define MPI_VERSION_MAJOR_SHIFT (8)
+#define MPI_VERSION_MINOR_MASK (0x00FF)
+#define MPI_VERSION_MINOR_SHIFT (0)
+#define MPI_VERSION ((MPI_VERSION_MAJOR << MPI_VERSION_MAJOR_SHIFT) | \
+ MPI_VERSION_MINOR)
+
+#define MPI_VERSION_01_00 (0x0100)
+#define MPI_VERSION_01_01 (0x0101)
+#define MPI_VERSION_01_02 (0x0102)
+#define MPI_VERSION_01_03 (0x0103)
+#define MPI_VERSION_01_05 (0x0105)
+/* Note: The major versions of 0xe0 through 0xff are reserved */
+
+/* versioning for this MPI header set */
+#define MPI_HEADER_VERSION_UNIT (0x13)
+#define MPI_HEADER_VERSION_DEV (0x00)
+#define MPI_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI_HEADER_VERSION ((MPI_HEADER_VERSION_UNIT << 8) | MPI_HEADER_VERSION_DEV)
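
As a quick sketch (not part of the patch itself), the masks and shifts above unpack the packed 16-bit version word like so:

    U16 version = MPI_VERSION;                                                   /* 0x0105 */
    U8  major = (version & MPI_VERSION_MAJOR_MASK) >> MPI_VERSION_MAJOR_SHIFT;   /* 0x01 */
    U8  minor = (version & MPI_VERSION_MINOR_MASK) >> MPI_VERSION_MINOR_SHIFT;   /* 0x05 */
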
+
+/*****************************************************************************
+*
+* I O C S t a t e D e f i n i t i o n s
+*
+*****************************************************************************/
+
+#define MPI_IOC_STATE_RESET (0x00000000)
+#define MPI_IOC_STATE_READY (0x10000000)
+#define MPI_IOC_STATE_OPERATIONAL (0x20000000)
+#define MPI_IOC_STATE_FAULT (0x40000000)
+
+#define MPI_IOC_STATE_MASK (0xF0000000)
+#define MPI_IOC_STATE_SHIFT (28)
+
+/* Fault state codes (product independent range 0x8000-0xFFFF) */
+
+#define MPI_FAULT_REQUEST_MESSAGE_PCI_PARITY_ERROR (0x8111)
+#define MPI_FAULT_REQUEST_MESSAGE_PCI_BUS_FAULT (0x8112)
+#define MPI_FAULT_REPLY_MESSAGE_PCI_PARITY_ERROR (0x8113)
+#define MPI_FAULT_REPLY_MESSAGE_PCI_BUS_FAULT (0x8114)
+#define MPI_FAULT_DATA_SEND_PCI_PARITY_ERROR (0x8115)
+#define MPI_FAULT_DATA_SEND_PCI_BUS_FAULT (0x8116)
+#define MPI_FAULT_DATA_RECEIVE_PCI_PARITY_ERROR (0x8117)
+#define MPI_FAULT_DATA_RECEIVE_PCI_BUS_FAULT (0x8118)
+
+
+/*****************************************************************************
+*
+* P C I S y s t e m I n t e r f a c e R e g i s t e r s
+*
+*****************************************************************************/
+
+/*
+ * Defines for working with the System Doorbell register.
+ * Values for doorbell function codes are included in the section that defines
+ * all the function codes (further on in this file).
+ */
+#define MPI_DOORBELL_OFFSET (0x00000000)
+#define MPI_DOORBELL_ACTIVE (0x08000000) /* DoorbellUsed */
+#define MPI_DOORBELL_USED (MPI_DOORBELL_ACTIVE)
+#define MPI_DOORBELL_ACTIVE_SHIFT (27)
+#define MPI_DOORBELL_WHO_INIT_MASK (0x07000000)
+#define MPI_DOORBELL_WHO_INIT_SHIFT (24)
+#define MPI_DOORBELL_FUNCTION_MASK (0xFF000000)
+#define MPI_DOORBELL_FUNCTION_SHIFT (24)
+#define MPI_DOORBELL_ADD_DWORDS_MASK (0x00FF0000)
+#define MPI_DOORBELL_ADD_DWORDS_SHIFT (16)
+#define MPI_DOORBELL_DATA_MASK (0x0000FFFF)
+#define MPI_DOORBELL_FUNCTION_SPECIFIC_MASK (0x0000FFFF)
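
A minimal sketch of decoding the System Doorbell with these masks; "regs" is a hypothetical ioremap'd register base rather than the driver's own accessor:

    U32 doorbell  = readl(regs + MPI_DOORBELL_OFFSET);
    U32 ioc_state = doorbell & MPI_IOC_STATE_MASK;

    if (ioc_state == MPI_IOC_STATE_FAULT) {
        /* in the FAULT state the low 16 bits carry a fault code
         * (product-independent codes live in 0x8000-0xFFFF, see above) */
        U16 fault_code = doorbell & MPI_DOORBELL_DATA_MASK;
    }
    if (doorbell & MPI_DOORBELL_ACTIVE) {
        /* a doorbell handshake is in progress (DoorbellUsed) */
    }
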
+
+/* values for Host Buffer Access Control doorbell function */
+#define MPI_DB_HPBAC_VALUE_MASK (0x0000F000)
+#define MPI_DB_HPBAC_ENABLE_ACCESS (0x01)
+#define MPI_DB_HPBAC_DISABLE_ACCESS (0x02)
+#define MPI_DB_HPBAC_FREE_BUFFER (0x03)
+
+
+#define MPI_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI_WRSEQ_KEY_VALUE_MASK (0x0000000F)
+#define MPI_WRSEQ_1ST_KEY_VALUE (0x04)
+#define MPI_WRSEQ_2ND_KEY_VALUE (0x0B)
+#define MPI_WRSEQ_3RD_KEY_VALUE (0x02)
+#define MPI_WRSEQ_4TH_KEY_VALUE (0x07)
+#define MPI_WRSEQ_5TH_KEY_VALUE (0x0D)
+
+#define MPI_DIAGNOSTIC_OFFSET (0x00000008)
+#define MPI_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
+#define MPI_DIAG_PREVENT_IOC_BOOT (0x00000200)
+#define MPI_DIAG_DRWE (0x00000080)
+#define MPI_DIAG_FLASH_BAD_SIG (0x00000040)
+#define MPI_DIAG_RESET_HISTORY (0x00000020)
+#define MPI_DIAG_RW_ENABLE (0x00000010)
+#define MPI_DIAG_RESET_ADAPTER (0x00000004)
+#define MPI_DIAG_DISABLE_ARM (0x00000002)
+#define MPI_DIAG_MEM_ENABLE (0x00000001)
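
A hedged sketch of how the write-sequence keys gate the Diagnostic register (the driver wraps these accesses in its own helpers; "regs" is again a hypothetical register base):

    /* writing the five keys in order to the WriteSequence register grants
     * write access to the Diagnostic register (DRWE becomes set) */
    writel(MPI_WRSEQ_1ST_KEY_VALUE, regs + MPI_WRITE_SEQUENCE_OFFSET);
    writel(MPI_WRSEQ_2ND_KEY_VALUE, regs + MPI_WRITE_SEQUENCE_OFFSET);
    writel(MPI_WRSEQ_3RD_KEY_VALUE, regs + MPI_WRITE_SEQUENCE_OFFSET);
    writel(MPI_WRSEQ_4TH_KEY_VALUE, regs + MPI_WRITE_SEQUENCE_OFFSET);
    writel(MPI_WRSEQ_5TH_KEY_VALUE, regs + MPI_WRITE_SEQUENCE_OFFSET);

    if (readl(regs + MPI_DIAGNOSTIC_OFFSET) & MPI_DIAG_DRWE) {
        /* diagnostic writes are enabled; e.g. set the adapter reset bit */
        U32 diag = readl(regs + MPI_DIAGNOSTIC_OFFSET);
        writel(diag | MPI_DIAG_RESET_ADAPTER, regs + MPI_DIAGNOSTIC_OFFSET);
    }
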
+
+#define MPI_TEST_BASE_ADDRESS_OFFSET (0x0000000C)
+
+#define MPI_DIAG_RW_DATA_OFFSET (0x00000010)
+
+#define MPI_DIAG_RW_ADDRESS_OFFSET (0x00000014)
+
+#define MPI_HOST_INTERRUPT_STATUS_OFFSET (0x00000030)
+#define MPI_HIS_IOP_DOORBELL_STATUS (0x80000000)
+#define MPI_HIS_REPLY_MESSAGE_INTERRUPT (0x00000008)
+#define MPI_HIS_DOORBELL_INTERRUPT (0x00000001)
+
+#define MPI_HOST_INTERRUPT_MASK_OFFSET (0x00000034)
+#define MPI_HIM_RIM (0x00000008)
+#define MPI_HIM_DIM (0x00000001)
+
+#define MPI_REQUEST_QUEUE_OFFSET (0x00000040)
+#define MPI_REQUEST_POST_FIFO_OFFSET (0x00000040)
+
+#define MPI_REPLY_QUEUE_OFFSET (0x00000044)
+#define MPI_REPLY_POST_FIFO_OFFSET (0x00000044)
+#define MPI_REPLY_FREE_FIFO_OFFSET (0x00000044)
+
+#define MPI_HI_PRI_REQUEST_QUEUE_OFFSET (0x00000048)
+
+
+
+/*****************************************************************************
+*
+* M e s s a g e F r a m e D e s c r i p t o r s
+*
+*****************************************************************************/
+
+#define MPI_REQ_MF_DESCRIPTOR_NB_MASK (0x00000003)
+#define MPI_REQ_MF_DESCRIPTOR_F_BIT (0x00000004)
+#define MPI_REQ_MF_DESCRIPTOR_ADDRESS_MASK (0xFFFFFFF8)
+
+#define MPI_ADDRESS_REPLY_A_BIT (0x80000000)
+#define MPI_ADDRESS_REPLY_ADDRESS_MASK (0x7FFFFFFF)
+
+#define MPI_CONTEXT_REPLY_A_BIT (0x80000000)
+#define MPI_CONTEXT_REPLY_TYPE_MASK (0x60000000)
+#define MPI_CONTEXT_REPLY_TYPE_SCSI_INIT (0x00)
+#define MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET (0x01)
+#define MPI_CONTEXT_REPLY_TYPE_LAN (0x02)
+#define MPI_CONTEXT_REPLY_TYPE_SHIFT (29)
+#define MPI_CONTEXT_REPLY_CONTEXT_MASK (0x1FFFFFFF)
+
+
+/****************************************************************************/
+/* Context Reply macros */
+/****************************************************************************/
+
+#define MPI_GET_CONTEXT_REPLY_TYPE(x) (((x) & MPI_CONTEXT_REPLY_TYPE_MASK) \
+ >> MPI_CONTEXT_REPLY_TYPE_SHIFT)
+
+#define MPI_SET_CONTEXT_REPLY_TYPE(x, typ) \
+ ((x) = ((x) & ~MPI_CONTEXT_REPLY_TYPE_MASK) | \
+ (((typ) << MPI_CONTEXT_REPLY_TYPE_SHIFT) & \
+ MPI_CONTEXT_REPLY_TYPE_MASK))
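
A sketch of classifying a reply word popped from the Reply Post FIFO with these definitions; the read below and the variable names are illustrative, not the driver's code:

    U32 reply_word = readl(regs + MPI_REPLY_POST_FIFO_OFFSET);

    if (reply_word & MPI_ADDRESS_REPLY_A_BIT) {
        /* address reply: the remaining bits locate a host reply frame */
        U32 frame_bits = reply_word & MPI_ADDRESS_REPLY_ADDRESS_MASK;
    } else {
        /* context reply: the type field selects the consumer */
        U32 msg_context = reply_word & MPI_CONTEXT_REPLY_CONTEXT_MASK;

        switch (MPI_GET_CONTEXT_REPLY_TYPE(reply_word)) {
        case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
        case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
        case MPI_CONTEXT_REPLY_TYPE_LAN:
            break;
        }
    }
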
+
+
+/*****************************************************************************
+*
+* M e s s a g e F u n c t i o n s
+* 0x80 -> 0x8F reserved for private message use per product
+*
+*
+*****************************************************************************/
+
+#define MPI_FUNCTION_SCSI_IO_REQUEST (0x00)
+#define MPI_FUNCTION_SCSI_TASK_MGMT (0x01)
+#define MPI_FUNCTION_IOC_INIT (0x02)
+#define MPI_FUNCTION_IOC_FACTS (0x03)
+#define MPI_FUNCTION_CONFIG (0x04)
+#define MPI_FUNCTION_PORT_FACTS (0x05)
+#define MPI_FUNCTION_PORT_ENABLE (0x06)
+#define MPI_FUNCTION_EVENT_NOTIFICATION (0x07)
+#define MPI_FUNCTION_EVENT_ACK (0x08)
+#define MPI_FUNCTION_FW_DOWNLOAD (0x09)
+#define MPI_FUNCTION_TARGET_CMD_BUFFER_POST (0x0A)
+#define MPI_FUNCTION_TARGET_ASSIST (0x0B)
+#define MPI_FUNCTION_TARGET_STATUS_SEND (0x0C)
+#define MPI_FUNCTION_TARGET_MODE_ABORT (0x0D)
+#define MPI_FUNCTION_FC_LINK_SRVC_BUF_POST (0x0E)
+#define MPI_FUNCTION_FC_LINK_SRVC_RSP (0x0F)
+#define MPI_FUNCTION_FC_EX_LINK_SRVC_SEND (0x10)
+#define MPI_FUNCTION_FC_ABORT (0x11)
+#define MPI_FUNCTION_FW_UPLOAD (0x12)
+#define MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND (0x13)
+#define MPI_FUNCTION_FC_PRIMITIVE_SEND (0x14)
+
+#define MPI_FUNCTION_RAID_ACTION (0x15)
+#define MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16)
+
+#define MPI_FUNCTION_TOOLBOX (0x17)
+
+#define MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18)
+
+#define MPI_FUNCTION_MAILBOX (0x19)
+
+#define MPI_FUNCTION_SMP_PASSTHROUGH (0x1A)
+#define MPI_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B)
+#define MPI_FUNCTION_SATA_PASSTHROUGH (0x1C)
+
+#define MPI_FUNCTION_DIAG_BUFFER_POST (0x1D)
+#define MPI_FUNCTION_DIAG_RELEASE (0x1E)
+
+#define MPI_FUNCTION_SCSI_IO_32 (0x1F)
+
+#define MPI_FUNCTION_LAN_SEND (0x20)
+#define MPI_FUNCTION_LAN_RECEIVE (0x21)
+#define MPI_FUNCTION_LAN_RESET (0x22)
+
+#define MPI_FUNCTION_TARGET_ASSIST_EXTENDED (0x23)
+#define MPI_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24)
+#define MPI_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25)
+
+#define MPI_FUNCTION_INBAND_BUFFER_POST (0x28)
+#define MPI_FUNCTION_INBAND_SEND (0x29)
+#define MPI_FUNCTION_INBAND_RSP (0x2A)
+#define MPI_FUNCTION_INBAND_ABORT (0x2B)
+
+#define MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
+#define MPI_FUNCTION_IO_UNIT_RESET (0x41)
+#define MPI_FUNCTION_HANDSHAKE (0x42)
+#define MPI_FUNCTION_REPLY_FRAME_REMOVAL (0x43)
+#define MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL (0x44)
+
+
+/* standard version format */
+typedef struct _MPI_VERSION_STRUCT
+{
+ U8 Dev; /* 00h */
+ U8 Unit; /* 01h */
+ U8 Minor; /* 02h */
+ U8 Major; /* 03h */
+} MPI_VERSION_STRUCT, MPI_POINTER PTR_MPI_VERSION_STRUCT,
+ MpiVersionStruct_t, MPI_POINTER pMpiVersionStruct;
+
+typedef union _MPI_VERSION_FORMAT
+{
+ MPI_VERSION_STRUCT Struct;
+ U32 Word;
+} MPI_VERSION_FORMAT, MPI_POINTER PTR_MPI_VERSION_FORMAT,
+ MpiVersionFormat_t, MPI_POINTER pMpiVersionFormat_t;
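
The union lets a 32-bit version word (for example, one reported by the IOC) be read field by field; a small sketch with the source of the word left hypothetical:

    MPI_VERSION_FORMAT fw_ver;

    fw_ver.Word = version_word_from_ioc;    /* hypothetical input */
    printk(KERN_INFO "MPI %d.%d (unit %d, dev %d)\n",
           fw_ver.Struct.Major, fw_ver.Struct.Minor,
           fw_ver.Struct.Unit, fw_ver.Struct.Dev);
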
+
+
+/*****************************************************************************
+*
+* S c a t t e r G a t h e r E l e m e n t s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* Simple element structures */
+/****************************************************************************/
+
+typedef struct _SGE_SIMPLE32
+{
+ U32 FlagsLength;
+ U32 Address;
+} SGE_SIMPLE32, MPI_POINTER PTR_SGE_SIMPLE32,
+ SGESimple32_t, MPI_POINTER pSGESimple32_t;
+
+typedef struct _SGE_SIMPLE64
+{
+ U32 FlagsLength;
+ U64 Address;
+} SGE_SIMPLE64, MPI_POINTER PTR_SGE_SIMPLE64,
+ SGESimple64_t, MPI_POINTER pSGESimple64_t;
+
+typedef struct _SGE_SIMPLE_UNION
+{
+ U32 FlagsLength;
+ union
+ {
+ U32 Address32;
+ U64 Address64;
+ }u;
+} SGE_SIMPLE_UNION, MPI_POINTER PTR_SGE_SIMPLE_UNION,
+ SGESimpleUnion_t, MPI_POINTER pSGESimpleUnion_t;
+
+/****************************************************************************/
+/* Chain element structures */
+/****************************************************************************/
+
+typedef struct _SGE_CHAIN32
+{
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U32 Address;
+} SGE_CHAIN32, MPI_POINTER PTR_SGE_CHAIN32,
+ SGEChain32_t, MPI_POINTER pSGEChain32_t;
+
+typedef struct _SGE_CHAIN64
+{
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U64 Address;
+} SGE_CHAIN64, MPI_POINTER PTR_SGE_CHAIN64,
+ SGEChain64_t, MPI_POINTER pSGEChain64_t;
+
+typedef struct _SGE_CHAIN_UNION
+{
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ union
+ {
+ U32 Address32;
+ U64 Address64;
+ }u;
+} SGE_CHAIN_UNION, MPI_POINTER PTR_SGE_CHAIN_UNION,
+ SGEChainUnion_t, MPI_POINTER pSGEChainUnion_t;
+
+/****************************************************************************/
+/* Transaction Context element */
+/****************************************************************************/
+
+typedef struct _SGE_TRANSACTION32
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[1];
+ U32 TransactionDetails[1];
+} SGE_TRANSACTION32, MPI_POINTER PTR_SGE_TRANSACTION32,
+ SGETransaction32_t, MPI_POINTER pSGETransaction32_t;
+
+typedef struct _SGE_TRANSACTION64
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[2];
+ U32 TransactionDetails[1];
+} SGE_TRANSACTION64, MPI_POINTER PTR_SGE_TRANSACTION64,
+ SGETransaction64_t, MPI_POINTER pSGETransaction64_t;
+
+typedef struct _SGE_TRANSACTION96
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[3];
+ U32 TransactionDetails[1];
+} SGE_TRANSACTION96, MPI_POINTER PTR_SGE_TRANSACTION96,
+ SGETransaction96_t, MPI_POINTER pSGETransaction96_t;
+
+typedef struct _SGE_TRANSACTION128
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[4];
+ U32 TransactionDetails[1];
+} SGE_TRANSACTION128, MPI_POINTER PTR_SGE_TRANSACTION128,
+ SGETransaction_t128, MPI_POINTER pSGETransaction_t128;
+
+typedef struct _SGE_TRANSACTION_UNION
+{
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ union
+ {
+ U32 TransactionContext32[1];
+ U32 TransactionContext64[2];
+ U32 TransactionContext96[3];
+ U32 TransactionContext128[4];
+ }u;
+ U32 TransactionDetails[1];
+} SGE_TRANSACTION_UNION, MPI_POINTER PTR_SGE_TRANSACTION_UNION,
+ SGETransactionUnion_t, MPI_POINTER pSGETransactionUnion_t;
+
+
+/****************************************************************************/
+/* SGE IO types union for IO SGL's */
+/****************************************************************************/
+
+typedef struct _SGE_IO_UNION
+{
+ union
+ {
+ SGE_SIMPLE_UNION Simple;
+ SGE_CHAIN_UNION Chain;
+ } u;
+} SGE_IO_UNION, MPI_POINTER PTR_SGE_IO_UNION,
+ SGEIOUnion_t, MPI_POINTER pSGEIOUnion_t;
+
+/****************************************************************************/
+/* SGE union for SGL's with Simple and Transaction elements */
+/****************************************************************************/
+
+typedef struct _SGE_TRANS_SIMPLE_UNION
+{
+ union
+ {
+ SGE_SIMPLE_UNION Simple;
+ SGE_TRANSACTION_UNION Transaction;
+ } u;
+} SGE_TRANS_SIMPLE_UNION, MPI_POINTER PTR_SGE_TRANS_SIMPLE_UNION,
+ SGETransSimpleUnion_t, MPI_POINTER pSGETransSimpleUnion_t;
+
+/****************************************************************************/
+/* All SGE types union */
+/****************************************************************************/
+
+typedef struct _SGE_MPI_UNION
+{
+ union
+ {
+ SGE_SIMPLE_UNION Simple;
+ SGE_CHAIN_UNION Chain;
+ SGE_TRANSACTION_UNION Transaction;
+ } u;
+} SGE_MPI_UNION, MPI_POINTER PTR_SGE_MPI_UNION,
+ MPI_SGE_UNION_t, MPI_POINTER pMPI_SGE_UNION_t,
+ SGEAllUnion_t, MPI_POINTER pSGEAllUnion_t;
+
+
+/****************************************************************************/
+/* SGE field definition and masks */
+/****************************************************************************/
+
+/* Flags field bit definitions */
+
+#define MPI_SGE_FLAGS_LAST_ELEMENT (0x80)
+#define MPI_SGE_FLAGS_END_OF_BUFFER (0x40)
+#define MPI_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30)
+#define MPI_SGE_FLAGS_LOCAL_ADDRESS (0x08)
+#define MPI_SGE_FLAGS_DIRECTION (0x04)
+#define MPI_SGE_FLAGS_ADDRESS_SIZE (0x02)
+#define MPI_SGE_FLAGS_END_OF_LIST (0x01)
+
+#define MPI_SGE_FLAGS_SHIFT (24)
+
+#define MPI_SGE_LENGTH_MASK (0x00FFFFFF)
+#define MPI_SGE_CHAIN_LENGTH_MASK (0x0000FFFF)
+
+/* Element Type */
+
+#define MPI_SGE_FLAGS_TRANSACTION_ELEMENT (0x00)
+#define MPI_SGE_FLAGS_SIMPLE_ELEMENT (0x10)
+#define MPI_SGE_FLAGS_CHAIN_ELEMENT (0x30)
+#define MPI_SGE_FLAGS_ELEMENT_MASK (0x30)
+
+/* Address location */
+
+#define MPI_SGE_FLAGS_SYSTEM_ADDRESS (0x00)
+
+/* Direction */
+
+#define MPI_SGE_FLAGS_IOC_TO_HOST (0x00)
+#define MPI_SGE_FLAGS_HOST_TO_IOC (0x04)
+
+/* Address Size */
+
+#define MPI_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
+#define MPI_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+
+/* Context Size */
+
+#define MPI_SGE_FLAGS_32_BIT_CONTEXT (0x00)
+#define MPI_SGE_FLAGS_64_BIT_CONTEXT (0x02)
+#define MPI_SGE_FLAGS_96_BIT_CONTEXT (0x04)
+#define MPI_SGE_FLAGS_128_BIT_CONTEXT (0x06)
+
+#define MPI_SGE_CHAIN_OFFSET_MASK (0x00FF0000)
+#define MPI_SGE_CHAIN_OFFSET_SHIFT (16)
+
+
+/****************************************************************************/
+/* SGE operation Macros */
+/****************************************************************************/
+
+ /* SIMPLE FlagsLength manipulations... */
+#define MPI_SGE_SET_FLAGS(f) ((U32)(f) << MPI_SGE_FLAGS_SHIFT)
+#define MPI_SGE_GET_FLAGS(fl) (((fl) & ~MPI_SGE_LENGTH_MASK) >> MPI_SGE_FLAGS_SHIFT)
+#define MPI_SGE_LENGTH(fl) ((fl) & MPI_SGE_LENGTH_MASK)
+#define MPI_SGE_CHAIN_LENGTH(fl) ((fl) & MPI_SGE_CHAIN_LENGTH_MASK)
+
+#define MPI_SGE_SET_FLAGS_LENGTH(f,l) (MPI_SGE_SET_FLAGS(f) | MPI_SGE_LENGTH(l))
+
+#define MPI_pSGE_GET_FLAGS(psg) MPI_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI_pSGE_GET_LENGTH(psg) MPI_SGE_LENGTH((psg)->FlagsLength)
+#define MPI_pSGE_SET_FLAGS_LENGTH(psg,f,l) (psg)->FlagsLength = MPI_SGE_SET_FLAGS_LENGTH(f,l)
+ /* CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI_pSGE_SET_FLAGS(psg,f) (psg)->FlagsLength |= MPI_SGE_SET_FLAGS(f)
+#define MPI_pSGE_SET_LENGTH(psg,l) (psg)->FlagsLength |= MPI_SGE_LENGTH(l)
+
+#define MPI_GET_CHAIN_OFFSET(x) ((x&MPI_SGE_CHAIN_OFFSET_MASK)>>MPI_SGE_CHAIN_OFFSET_SHIFT)
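
A hedged sketch of building one 32-bit simple SGE with these macros; buf_dma and len are assumed to come from an earlier DMA mapping:

    SGE_SIMPLE32 sge;
    U32 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                MPI_SGE_FLAGS_LAST_ELEMENT  |
                MPI_SGE_FLAGS_END_OF_BUFFER |
                MPI_SGE_FLAGS_END_OF_LIST   |
                MPI_SGE_FLAGS_HOST_TO_IOC   |
                MPI_SGE_FLAGS_32_BIT_ADDRESSING;

    /* MPI_SGE_SET_FLAGS_LENGTH() shifts the flags into bits 31:24 and
     * keeps the transfer length in the low 24 bits */
    sge.FlagsLength = cpu_to_le32(MPI_SGE_SET_FLAGS_LENGTH(flags, len));
    sge.Address     = cpu_to_le32(buf_dma);
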
+
+
+
+/*****************************************************************************
+*
+* S t a n d a r d M e s s a g e S t r u c t u r e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* Standard message request header for all request messages */
+/****************************************************************************/
+
+typedef struct _MSG_REQUEST_HEADER
+{
+ U8 Reserved[2]; /* function specific */
+ U8 ChainOffset;
+ U8 Function;
+ U8 Reserved1[3]; /* function specific */
+ U8 MsgFlags;
+ U32 MsgContext;
+} MSG_REQUEST_HEADER, MPI_POINTER PTR_MSG_REQUEST_HEADER,
+ MPIHeader_t, MPI_POINTER pMPIHeader_t;
+
+
+/****************************************************************************/
+/* Default Reply */
+/****************************************************************************/
+
+typedef struct _MSG_DEFAULT_REPLY
+{
+ U8 Reserved[2]; /* function specific */
+ U8 MsgLength;
+ U8 Function;
+ U8 Reserved1[3]; /* function specific */
+ U8 MsgFlags;
+ U32 MsgContext;
+ U8 Reserved2[2]; /* function specific */
+ U16 IOCStatus;
+ U32 IOCLogInfo;
+} MSG_DEFAULT_REPLY, MPI_POINTER PTR_MSG_DEFAULT_REPLY,
+ MPIDefaultReply_t, MPI_POINTER pMPIDefaultReply_t;
+
+
+/* MsgFlags definition for all replies */
+
+#define MPI_MSGFLAGS_CONTINUATION_REPLY (0x80)
+
+
+/*****************************************************************************
+*
+* I O C S t a t u s V a l u e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* Common IOCStatus values for all replies */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_SUCCESS (0x0000)
+#define MPI_IOCSTATUS_INVALID_FUNCTION (0x0001)
+#define MPI_IOCSTATUS_BUSY (0x0002)
+#define MPI_IOCSTATUS_INVALID_SGL (0x0003)
+#define MPI_IOCSTATUS_INTERNAL_ERROR (0x0004)
+#define MPI_IOCSTATUS_RESERVED (0x0005)
+#define MPI_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
+#define MPI_IOCSTATUS_INVALID_FIELD (0x0007)
+#define MPI_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
+
+/****************************************************************************/
+/* Config IOCStatus values */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
+#define MPI_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
+#define MPI_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
+#define MPI_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
+#define MPI_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
+#define MPI_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
+
+/****************************************************************************/
+/* SCSIIO Reply (SPI & FCP) initiator values */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
+#define MPI_IOCSTATUS_SCSI_INVALID_BUS (0x0041)
+#define MPI_IOCSTATUS_SCSI_INVALID_TARGETID (0x0042)
+#define MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
+#define MPI_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
+#define MPI_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
+#define MPI_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
+#define MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
+#define MPI_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
+#define MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
+#define MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A)
+#define MPI_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
+#define MPI_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
+
+/****************************************************************************/
+/* For use by SCSI Initiator and SCSI Target end-to-end data protection */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
+#define MPI_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
+#define MPI_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
+
+
+/****************************************************************************/
+/* SCSI Target values */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_TARGET_PRIORITY_IO (0x0060)
+#define MPI_IOCSTATUS_TARGET_INVALID_PORT (0x0061)
+#define MPI_IOCSTATUS_TARGET_INVALID_IOCINDEX (0x0062) /* obsolete name */
+#define MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
+#define MPI_IOCSTATUS_TARGET_ABORTED (0x0063)
+#define MPI_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
+#define MPI_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
+#define MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A)
+#define MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT (0x006B)
+#define MPI_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D)
+#define MPI_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E)
+#define MPI_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F)
+#define MPI_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
+#define MPI_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
+
+/****************************************************************************/
+/* Additional FCP target values (obsolete) */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_TARGET_FC_ABORTED (0x0066) /* obsolete */
+#define MPI_IOCSTATUS_TARGET_FC_RX_ID_INVALID (0x0067) /* obsolete */
+#define MPI_IOCSTATUS_TARGET_FC_DID_INVALID (0x0068) /* obsolete */
+#define MPI_IOCSTATUS_TARGET_FC_NODE_LOGGED_OUT (0x0069) /* obsolete */
+
+/****************************************************************************/
+/* Fibre Channel Direct Access values */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_FC_ABORTED (0x0066)
+#define MPI_IOCSTATUS_FC_RX_ID_INVALID (0x0067)
+#define MPI_IOCSTATUS_FC_DID_INVALID (0x0068)
+#define MPI_IOCSTATUS_FC_NODE_LOGGED_OUT (0x0069)
+#define MPI_IOCSTATUS_FC_EXCHANGE_CANCELED (0x006C)
+
+/****************************************************************************/
+/* LAN values */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_LAN_DEVICE_NOT_FOUND (0x0080)
+#define MPI_IOCSTATUS_LAN_DEVICE_FAILURE (0x0081)
+#define MPI_IOCSTATUS_LAN_TRANSMIT_ERROR (0x0082)
+#define MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED (0x0083)
+#define MPI_IOCSTATUS_LAN_RECEIVE_ERROR (0x0084)
+#define MPI_IOCSTATUS_LAN_RECEIVE_ABORTED (0x0085)
+#define MPI_IOCSTATUS_LAN_PARTIAL_PACKET (0x0086)
+#define MPI_IOCSTATUS_LAN_CANCELED (0x0087)
+
+/****************************************************************************/
+/* Serial Attached SCSI values */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
+#define MPI_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
+
+/****************************************************************************/
+/* Inband values */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_INBAND_ABORTED (0x0098)
+#define MPI_IOCSTATUS_INBAND_NO_CONNECTION (0x0099)
+
+/****************************************************************************/
+/* Diagnostic Tools values */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0)
+
+
+/****************************************************************************/
+/* IOCStatus flag to indicate that log info is available */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
+#define MPI_IOCSTATUS_MASK (0x7FFF)
+
+/****************************************************************************/
+/* LogInfo Types */
+/****************************************************************************/
+
+#define MPI_IOCLOGINFO_TYPE_MASK (0xF0000000)
+#define MPI_IOCLOGINFO_TYPE_SHIFT (28)
+#define MPI_IOCLOGINFO_TYPE_NONE (0x0)
+#define MPI_IOCLOGINFO_TYPE_SCSI (0x1)
+#define MPI_IOCLOGINFO_TYPE_FC (0x2)
+#define MPI_IOCLOGINFO_TYPE_SAS (0x3)
+#define MPI_IOCLOGINFO_TYPE_ISCSI (0x4)
+#define MPI_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF)
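
How a reply's IOCStatus and IOCLogInfo might be decoded with the masks above; a sketch that assumes "reply" points at an MSG_DEFAULT_REPLY in the usual little-endian wire format:

    U16 ioc_status = le16_to_cpu(reply->IOCStatus);

    if (ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
        U32 log_info = le32_to_cpu(reply->IOCLogInfo);
        U32 log_type = (log_info & MPI_IOCLOGINFO_TYPE_MASK)
                            >> MPI_IOCLOGINFO_TYPE_SHIFT;    /* e.g. MPI_IOCLOGINFO_TYPE_SAS */
        U32 log_data = log_info & MPI_IOCLOGINFO_LOG_DATA_MASK;
    }

    ioc_status &= MPI_IOCSTATUS_MASK;
    if (ioc_status != MPI_IOCSTATUS_SUCCESS) {
        /* handle the error class, e.g. MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE */
    }
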
+
+
+#endif
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
new file mode 100644
index 00000000..22027e79
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -0,0 +1,3115 @@
+/*
+ * Copyright (c) 2000-2008 LSI Corporation.
+ *
+ *
+ * Name: mpi_cnfg.h
+ * Title: MPI Config message, structures, and Pages
+ * Creation Date: July 27, 2000
+ *
+ * mpi_cnfg.h Version: 01.05.18
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 06-08-00 01.00.02 Added _PAGEVERSION definitions for all pages.
+ * Added FcPhLowestVersion, FcPhHighestVersion, Reserved2
+ * fields to FC_DEVICE_0 page, updated the page version.
+ * Changed _FREE_RUNNING_CLOCK to _PACING_TRANSFERS in
+ * SCSI_PORT_0, SCSI_DEVICE_0 and SCSI_DEVICE_1 pages
+ * and updated the page versions.
+ * Added _RESPONSE_ID_MASK definition to SCSI_PORT_1
+ * page and updated the page version.
+ * Added Information field and _INFO_PARAMS_NEGOTIATED
+ * definition to SCSI_DEVICE_0 page.
+ * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the
+ * page version.
+ * Added BucketsRemaining to LAN_1 page, redefined the
+ * state values, and updated the page version.
+ * Revised bus width definitions in SCSI_PORT_0,
+ * SCSI_DEVICE_0 and SCSI_DEVICE_1 pages.
+ * 06-30-00 01.00.04 Added MaxReplySize to LAN_1 page and updated the page
+ * version.
+ * Moved FC_DEVICE_0 PageAddress description to spec.
+ * 07-27-00 01.00.05 Corrected the SubsystemVendorID and SubsystemID field
+ * widths in IOC_0 page and updated the page version.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * Added Manufacturing pages, IO Unit Page 2, SCSI SPI
+ * Port Page 2, FC Port Page 4, FC Port Page 5
+ * 11-15-00 01.01.02 Interim changes to match proposals
+ * 12-04-00 01.01.03 Config page changes to match MPI rev 1.00.01.
+ * 12-05-00 01.01.04 Modified config page actions.
+ * 01-09-01 01.01.05 Added defines for page address formats.
+ * Data size for Manufacturing pages 2 and 3 no longer
+ * defined here.
+ * Io Unit Page 2 size is fixed at 4 adapters and some
+ * flags were changed.
+ * SCSI Port Page 2 Device Settings modified.
+ * New fields added to FC Port Page 0 and some flags
+ * cleaned up.
+ * Removed impedance flash from FC Port Page 1.
+ * Added FC Port pages 6 and 7.
+ * 01-25-01 01.01.06 Added MaxInitiators field to FcPortPage0.
+ * 01-29-01 01.01.07 Changed some defines to make them 32 character unique.
+ * Added some LinkType defines for FcPortPage0.
+ * 02-20-01 01.01.08 Started using MPI_POINTER.
+ * 02-27-01 01.01.09 Replaced MPI_CONFIG_PAGETYPE_SCSI_LUN with
+ * MPI_CONFIG_PAGETYPE_RAID_VOLUME.
+ * Added definitions and structures for IOC Page 2 and
+ * RAID Volume Page 2.
+ * 03-27-01 01.01.10 Added CONFIG_PAGE_FC_PORT_8 and CONFIG_PAGE_FC_PORT_9.
+ * CONFIG_PAGE_FC_PORT_3 now supports persistent by DID.
+ * Added VendorId and ProductRevLevel fields to
+ * RAIDVOL2_IM_PHYS_ID struct.
+ * Modified values for MPI_FCPORTPAGE0_FLAGS_ATTACH_
+ * defines to make them compatible to MPI version 1.0.
+ * Added structure offset comments.
+ * 04-09-01 01.01.11 Added some new defines for the PageAddress field and
+ * removed some obsolete ones.
+ * Added IO Unit Page 3.
+ * Modified defines for Scsi Port Page 2.
+ * Modified RAID Volume Pages.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * Added SepID and SepBus to RVP2 IMPhysicalDisk struct.
+ * Added defines for the SEP bits in RVP2 VolumeSettings.
+ * Modified the DeviceSettings field in RVP2 to use the
+ * proper structure.
+ * Added defines for SES, SAF-TE, and cross channel for
+ * IOCPage2 CapabilitiesFlags.
+ * Removed define for MPI_IOUNITPAGE2_FLAGS_RAID_DISABLE.
+ * Removed define for
+ * MPI_SCSIPORTPAGE2_PORT_FLAGS_PARITY_ENABLE.
+ * Added define for MPI_CONFIG_PAGEATTR_RO_PERSISTENT.
+ * 08-29-01 01.02.02 Fixed value for MPI_MANUFACTPAGE_DEVID_53C1035.
+ * Added defines for MPI_FCPORTPAGE1_FLAGS_HARD_ALPA_ONLY
+ * and MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY.
+ * Removed MPI_SCSIPORTPAGE0_CAP_PACING_TRANSFERS,
+ * MPI_SCSIDEVPAGE0_NP_PACING_TRANSFERS, and
+ * MPI_SCSIDEVPAGE1_RP_PACING_TRANSFERS, and
+ * MPI_SCSIDEVPAGE1_CONF_PPR_ALLOWED.
+ * Added defines for MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED
+ * and MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED.
+ * Added OnBusTimerValue to CONFIG_PAGE_SCSI_PORT_1.
+ * Added rejected bits to SCSI Device Page 0 Information.
+ * Increased size of ALPA array in FC Port Page 2 by one
+ * and removed a one byte reserved field.
+ * 09-28-01 01.02.03 Swapped NegWireSpeedLow and NegWireSpeedLow in
+ * CONFIG_PAGE_LAN_1 to match preferred 64-bit ordering.
+ * Added structures for Manufacturing Page 4, IO Unit
+ * Page 3, IOC Page 3, IOC Page 4, RAID Volume Page 0, and
+ * RAID PhysDisk Page 0.
+ * 10-04-01 01.02.04 Added define for MPI_CONFIG_PAGETYPE_RAID_PHYSDISK.
+ * Modified some of the new defines to make them 32
+ * character unique.
+ * Modified how variable length pages (arrays) are defined.
+ * Added generic defines for hot spare pools and RAID
+ * volume types.
+ * 11-01-01 01.02.05 Added define for MPI_IOUNITPAGE1_DISABLE_IR.
+ * 03-14-02 01.02.06 Added PCISlotNum field to CONFIG_PAGE_IOC_1 along with
+ * related define, and bumped the page version define.
+ * 05-31-02 01.02.07 Added a Flags field to CONFIG_PAGE_IOC_2_RAID_VOL in a
+ * reserved byte and added a define.
+ * Added define for
+ * MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE.
+ * Added new config page: CONFIG_PAGE_IOC_5.
+ * Added MaxAliases, MaxHardAliases, and NumCurrentAliases
+ * fields to CONFIG_PAGE_FC_PORT_0.
+ * Added AltConnector and NumRequestedAliases fields to
+ * CONFIG_PAGE_FC_PORT_1.
+ * Added new config page: CONFIG_PAGE_FC_PORT_10.
+ * 07-12-02 01.02.08 Added more MPI_MANUFACTPAGE_DEVID_ defines.
+ * Added additional MPI_SCSIDEVPAGE0_NP_ defines.
+ * Added more MPI_SCSIDEVPAGE1_RP_ defines.
+ * Added define for
+ * MPI_SCSIDEVPAGE1_CONF_EXTENDED_PARAMS_ENABLE.
+ * Added new config page: CONFIG_PAGE_SCSI_DEVICE_3.
+ * Modified MPI_FCPORTPAGE5_FLAGS_ defines.
+ * 09-16-02 01.02.09 Added MPI_SCSIDEVPAGE1_CONF_FORCE_PPR_MSG define.
+ * 11-15-02 01.02.10 Added ConnectedID defines for CONFIG_PAGE_SCSI_PORT_0.
+ * Added more Flags defines for CONFIG_PAGE_FC_PORT_1.
+ * Added more Flags defines for CONFIG_PAGE_FC_DEVICE_0.
+ * 04-01-03 01.02.11 Added RR_TOV field and additional Flags defines for
+ * CONFIG_PAGE_FC_PORT_1.
+ * Added define MPI_FCPORTPAGE5_FLAGS_DISABLE to disable
+ * an alias.
+ * Added more device id defines.
+ * 06-26-03 01.02.12 Added MPI_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID define.
+ * Added TargetConfig and IDConfig fields to
+ * CONFIG_PAGE_SCSI_PORT_1.
+ * Added more PortFlags defines for CONFIG_PAGE_SCSI_PORT_2
+ * to control DV.
+ * Added more Flags defines for CONFIG_PAGE_FC_PORT_1.
+ * In CONFIG_PAGE_FC_DEVICE_0, replaced Reserved1 field
+ * with ADISCHardALPA.
+ * Added MPI_FC_DEVICE_PAGE0_PROT_FCP_RETRY define.
+ * 01-16-04 01.02.13 Added InitiatorDeviceTimeout and InitiatorIoPendTimeout
+ * fields and related defines to CONFIG_PAGE_FC_PORT_1.
+ * Added define for
+ * MPI_FCPORTPAGE1_FLAGS_SOFT_ALPA_FALLBACK.
+ * Added new fields to the substructures of
+ * CONFIG_PAGE_FC_PORT_10.
+ * 04-29-04 01.02.14 Added define for IDP bit for CONFIG_PAGE_SCSI_PORT_0,
+ * CONFIG_PAGE_SCSI_DEVICE_0, and
+ * CONFIG_PAGE_SCSI_DEVICE_1. Also bumped Page Version for
+ * these pages.
+ * 05-11-04 01.03.01 Added structure for CONFIG_PAGE_INBAND_0.
+ * 08-19-04 01.05.01 Modified MSG_CONFIG request to support extended config
+ * pages.
+ * Added a new structure for extended config page header.
+ * Added new extended config pages types and structures for
+ * SAS IO Unit, SAS Expander, SAS Device, and SAS PHY.
+ * Replaced a reserved byte in CONFIG_PAGE_MANUFACTURING_4
+ * to add a Flags field.
+ * Two new Manufacturing config pages (5 and 6).
+ * Two new bits defined for IO Unit Page 1 Flags field.
+ * Modified CONFIG_PAGE_IO_UNIT_2 to add three new fields
+ * to specify the BIOS boot device.
+ * Four new Flags bits defined for IO Unit Page 2.
+ * Added IO Unit Page 4.
+ * Added EEDP Flags settings to IOC Page 1.
+ * Added new BIOS Page 1 config page.
+ * 10-05-04 01.05.02 Added define for
+ * MPI_IOCPAGE1_INITIATOR_CONTEXT_REPLY_DISABLE.
+ * Added new Flags field to CONFIG_PAGE_MANUFACTURING_5 and
+ * associated defines.
+ * Added more defines for SAS IO Unit Page 0
+ * DiscoveryStatus field.
+ * Added define for MPI_SAS_IOUNIT0_DS_SUBTRACTIVE_LINK
+ * and MPI_SAS_IOUNIT0_DS_TABLE_LINK.
+ * Added defines for Physical Mapping Modes to SAS IO Unit
+ * Page 2.
+ * Added define for
+ * MPI_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH.
+ * 10-27-04 01.05.03 Added defines for new SAS PHY page addressing mode.
+ * Added defines for MaxTargetSpinUp to BIOS Page 1.
+ * Added 5 new ControlFlags defines for SAS IO Unit
+ * Page 1.
+ * Added MaxNumPhysicalMappedIDs field to SAS IO Unit
+ * Page 2.
+ * Added AccessStatus field to SAS Device Page 0 and added
+ * new Flags bits for supported SATA features.
+ * 12-07-04 01.05.04 Added config page structures for BIOS Page 2, RAID
+ * Volume Page 1, and RAID Physical Disk Page 1.
+ * Replaced IO Unit Page 1 BootTargetID,BootBus, and
+ * BootAdapterNum with reserved field.
+ * Added DataScrubRate and ResyncRate to RAID Volume
+ * Page 0.
+ * Added MPI_SAS_IOUNIT2_FLAGS_RESERVE_ID_0_FOR_BOOT
+ * define.
+ * 12-09-04 01.05.05 Added Target Mode Large CDB Enable to FC Port Page 1
+ * Flags field.
+ * Added Auto Port Config flag define for SAS IOUNIT
+ * Page 1 ControlFlags.
+ * Added Disabled bad Phy define to Expander Page 1
+ * Discovery Info field.
+ * Added SAS/SATA device support to SAS IOUnit Page 1
+ * ControlFlags.
+ * Added Unsupported device to SAS Dev Page 0 Flags field
+ * Added disable use SATA Hash Address for SAS IOUNIT
+ * page 1 in ControlFields.
+ * 01-15-05 01.05.06 Added defaults for data scrub rate and resync rate to
+ * Manufacturing Page 4.
+ * Added new defines for BIOS Page 1 IOCSettings field.
+ * Added ExtDiskIdentifier field to RAID Physical Disk
+ * Page 0.
+ * Added new defines for SAS IO Unit Page 1 ControlFlags
+ * and to SAS Device Page 0 Flags to control SATA devices.
+ * Added defines and structures for the new Log Page 0, a
+ * new type of configuration page.
+ * 02-09-05 01.05.07 Added InactiveStatus field to RAID Volume Page 0.
+ * Added WWID field to RAID Volume Page 1.
+ * Added PhysicalPort field to SAS Expander pages 0 and 1.
+ * 03-11-05 01.05.08 Removed the EEDP flags from IOC Page 1.
+ * Added Enclosure/Slot boot device format to BIOS Page 2.
+ * New status value for RAID Volume Page 0 VolumeStatus
+ * (VolumeState subfield).
+ * New value for RAID Physical Page 0 InactiveStatus.
+ * Added Inactive Volume Member flag RAID Physical Disk
+ * Page 0 PhysDiskStatus field.
+ * New physical mapping mode in SAS IO Unit Page 2.
+ * Added CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * Added Slot and Enclosure fields to SAS Device Page 0.
+ * 06-24-05 01.05.09 Added EEDP defines to IOC Page 1.
+ * Added more RAID type defines to IOC Page 2.
+ * Added Port Enable Delay settings to BIOS Page 1.
+ * Added Bad Block Table Full define to RAID Volume Page 0.
+ * Added Previous State defines to RAID Physical Disk
+ * Page 0.
+ * Added Max Sata Targets define for DiscoveryStatus field
+ * of SAS IO Unit Page 0.
+ * Added Device Self Test to Control Flags of SAS IO Unit
+ * Page 1.
+ * Added Direct Attach Starting Slot Number define for SAS
+ * IO Unit Page 2.
+ * Added new fields in SAS Device Page 2 for enclosure
+ * mapping.
+ * Added OwnerDevHandle and Flags field to SAS PHY Page 0.
+ * Added IOC GPIO Flags define to SAS Enclosure Page 0.
+ * Fixed the value for MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT.
+ * 08-03-05 01.05.10 Removed ISDataScrubRate and ISResyncRate from
+ * Manufacturing Page 4.
+ * Added MPI_IOUNITPAGE1_SATA_WRITE_CACHE_DISABLE bit.
+ * Added NumDevsPerEnclosure field to SAS IO Unit page 2.
+ * Added MPI_SAS_IOUNIT2_FLAGS_HOST_ASSIGNED_PHYS_MAP
+ * define.
+ * Added EnclosureHandle field to SAS Expander page 0.
+ * Removed redundant NumTableEntriesProg field from SAS
+ * Expander Page 1.
+ * 08-30-05 01.05.11 Added DeviceID for FC949E and changed the DeviceID for
+ * SAS1078.
+ * Added more defines for Manufacturing Page 4 Flags field.
+ * Added more defines for IOCSettings and added
+ * ExpanderSpinup field to Bios Page 1.
+ * Added postpone SATA Init bit to SAS IO Unit Page 1
+ * ControlFlags.
+ * Changed LogEntry format for Log Page 0.
+ * 03-27-06 01.05.12 Added two new Flags defines for Manufacturing Page 4.
+ * Added Manufacturing Page 7.
+ * Added MPI_IOCPAGE2_CAP_FLAGS_RAID_64_BIT_ADDRESSING.
+ * Added IOC Page 6.
+ * Added PrevBootDeviceForm field to CONFIG_PAGE_BIOS_2.
+ * Added MaxLBAHigh field to RAID Volume Page 0.
+ * Added Nvdata version fields to SAS IO Unit Page 0.
+ * Added AdditionalControlFlags, MaxTargetPortConnectTime,
+ * ReportDeviceMissingDelay, and IODeviceMissingDelay
+ * fields to SAS IO Unit Page 1.
+ * 10-11-06 01.05.13 Added NumForceWWID field and ForceWWID array to
+ * Manufacturing Page 5.
+ * Added Manufacturing pages 8 through 10.
+ * Added defines for supported metadata size bits in
+ * CapabilitiesFlags field of IOC Page 6.
+ * Added defines for metadata size bits in VolumeSettings
+ * field of RAID Volume Page 0.
+ * Added SATA Link Reset settings, Enable SATA Asynchronous
+ * Notification bit, and HideNonZeroAttachedPhyIdentifiers
+ * bit to AdditionalControlFlags field of SAS IO Unit
+ * Page 1.
+ * Added defines for Enclosure Devices Unmapped and
+ * Device Limit Exceeded bits in Status field of SAS IO
+ * Unit Page 2.
+ * Added more AccessStatus values for SAS Device Page 0.
+ * Added bit for SATA Asynchronous Notification Support in
+ * Flags field of SAS Device Page 0.
+ * 02-28-07 01.05.14 Added ExtFlags field to Manufacturing Page 4.
+ * Added Disable SMART Polling for CapabilitiesFlags of
+ * IOC Page 6.
+ * Added Disable SMART Polling to DeviceSettings of BIOS
+ * Page 1.
+ * Added Multi-Port Domain bit for DiscoveryStatus field
+ * of SAS IO Unit Page.
+ * Added Multi-Port Domain Illegal flag for SAS IO Unit
+ * Page 1 AdditionalControlFlags field.
+ * 05-24-07 01.05.15 Added Hide Physical Disks with Non-Integrated RAID
+ * Metadata bit to Manufacturing Page 4 ExtFlags field.
+ * Added Internal Connector to End Device Present bit to
+ * Expander Page 0 Flags field.
+ * Fixed define for
+ * MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED.
+ * 08-07-07 01.05.16 Added MPI_IOCPAGE6_CAP_FLAGS_MULTIPORT_DRIVE_SUPPORT
+ * define.
+ * Added BIOS Page 4 structure.
+ * Added MPI_RAID_PHYS_DISK1_PATH_MAX define for RAID
+ * Physical Disk Page 1.
+ * 01-15-08 01.05.17 Added additional bit defines for ExtFlags field of
+ * Manufacturing Page 4.
+ * Added Solid State Drives Supported bit to IOC Page 6
+ * Capabilities Flags.
+ * Added new value for AccessStatus field of SAS Device
+ * Page 0 (_SATA_NEEDS_INITIALIZATION).
+ * 03-28-08 01.05.18 Defined new bits in Manufacturing Page 4 ExtFlags field
+ * to control coercion size and the mixing of SAS and SATA
+ * SSD drives.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_CNFG_H
+#define MPI_CNFG_H
+
+
+/*****************************************************************************
+*
+* C o n f i g M e s s a g e a n d S t r u c t u r e s
+*
+*****************************************************************************/
+
+typedef struct _CONFIG_PAGE_HEADER
+{
+ U8 PageVersion; /* 00h */
+ U8 PageLength; /* 01h */
+ U8 PageNumber; /* 02h */
+ U8 PageType; /* 03h */
+} CONFIG_PAGE_HEADER, MPI_POINTER PTR_CONFIG_PAGE_HEADER,
+ ConfigPageHeader_t, MPI_POINTER pConfigPageHeader_t;
+
+typedef union _CONFIG_PAGE_HEADER_UNION
+{
+ ConfigPageHeader_t Struct;
+ U8 Bytes[4];
+ U16 Word16[2];
+ U32 Word32;
+} ConfigPageHeaderUnion, MPI_POINTER pConfigPageHeaderUnion,
+ CONFIG_PAGE_HEADER_UNION, MPI_POINTER PTR_CONFIG_PAGE_HEADER_UNION;
+
+typedef struct _CONFIG_EXTENDED_PAGE_HEADER
+{
+ U8 PageVersion; /* 00h */
+ U8 Reserved1; /* 01h */
+ U8 PageNumber; /* 02h */
+ U8 PageType; /* 03h */
+ U16 ExtPageLength; /* 04h */
+ U8 ExtPageType; /* 06h */
+ U8 Reserved2; /* 07h */
+} CONFIG_EXTENDED_PAGE_HEADER, MPI_POINTER PTR_CONFIG_EXTENDED_PAGE_HEADER,
+ ConfigExtendedPageHeader_t, MPI_POINTER pConfigExtendedPageHeader_t;
+
+
+
+/****************************************************************************
+* PageType field values
+****************************************************************************/
+#define MPI_CONFIG_PAGEATTR_READ_ONLY (0x00)
+#define MPI_CONFIG_PAGEATTR_CHANGEABLE (0x10)
+#define MPI_CONFIG_PAGEATTR_PERSISTENT (0x20)
+#define MPI_CONFIG_PAGEATTR_RO_PERSISTENT (0x30)
+#define MPI_CONFIG_PAGEATTR_MASK (0xF0)
+
+#define MPI_CONFIG_PAGETYPE_IO_UNIT (0x00)
+#define MPI_CONFIG_PAGETYPE_IOC (0x01)
+#define MPI_CONFIG_PAGETYPE_BIOS (0x02)
+#define MPI_CONFIG_PAGETYPE_SCSI_PORT (0x03)
+#define MPI_CONFIG_PAGETYPE_SCSI_DEVICE (0x04)
+#define MPI_CONFIG_PAGETYPE_FC_PORT (0x05)
+#define MPI_CONFIG_PAGETYPE_FC_DEVICE (0x06)
+#define MPI_CONFIG_PAGETYPE_LAN (0x07)
+#define MPI_CONFIG_PAGETYPE_RAID_VOLUME (0x08)
+#define MPI_CONFIG_PAGETYPE_MANUFACTURING (0x09)
+#define MPI_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A)
+#define MPI_CONFIG_PAGETYPE_INBAND (0x0B)
+#define MPI_CONFIG_PAGETYPE_EXTENDED (0x0F)
+#define MPI_CONFIG_PAGETYPE_MASK (0x0F)
+
+#define MPI_CONFIG_TYPENUM_MASK (0x0FFF)
+
+
+/****************************************************************************
+* ExtPageType field values
+****************************************************************************/
+#define MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10)
+#define MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11)
+#define MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12)
+#define MPI_CONFIG_EXTPAGETYPE_SAS_PHY (0x13)
+#define MPI_CONFIG_EXTPAGETYPE_LOG (0x14)
+#define MPI_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15)
+
+
+/****************************************************************************
+* PageAddress field values
+****************************************************************************/
+#define MPI_SCSI_PORT_PGAD_PORT_MASK (0x000000FF)
+
+#define MPI_SCSI_DEVICE_FORM_MASK (0xF0000000)
+#define MPI_SCSI_DEVICE_FORM_BUS_TID (0x00000000)
+#define MPI_SCSI_DEVICE_TARGET_ID_MASK (0x000000FF)
+#define MPI_SCSI_DEVICE_TARGET_ID_SHIFT (0)
+#define MPI_SCSI_DEVICE_BUS_MASK (0x0000FF00)
+#define MPI_SCSI_DEVICE_BUS_SHIFT (8)
+#define MPI_SCSI_DEVICE_FORM_TARGET_MODE (0x10000000)
+#define MPI_SCSI_DEVICE_TM_RESPOND_ID_MASK (0x000000FF)
+#define MPI_SCSI_DEVICE_TM_RESPOND_ID_SHIFT (0)
+#define MPI_SCSI_DEVICE_TM_BUS_MASK (0x0000FF00)
+#define MPI_SCSI_DEVICE_TM_BUS_SHIFT (8)
+#define MPI_SCSI_DEVICE_TM_INIT_ID_MASK (0x00FF0000)
+#define MPI_SCSI_DEVICE_TM_INIT_ID_SHIFT (16)
+
+#define MPI_FC_PORT_PGAD_PORT_MASK (0xF0000000)
+#define MPI_FC_PORT_PGAD_PORT_SHIFT (28)
+#define MPI_FC_PORT_PGAD_FORM_MASK (0x0F000000)
+#define MPI_FC_PORT_PGAD_FORM_INDEX (0x01000000)
+#define MPI_FC_PORT_PGAD_INDEX_MASK (0x0000FFFF)
+#define MPI_FC_PORT_PGAD_INDEX_SHIFT (0)
+
+#define MPI_FC_DEVICE_PGAD_PORT_MASK (0xF0000000)
+#define MPI_FC_DEVICE_PGAD_PORT_SHIFT (28)
+#define MPI_FC_DEVICE_PGAD_FORM_MASK (0x0F000000)
+#define MPI_FC_DEVICE_PGAD_FORM_NEXT_DID (0x00000000)
+#define MPI_FC_DEVICE_PGAD_ND_PORT_MASK (0xF0000000)
+#define MPI_FC_DEVICE_PGAD_ND_PORT_SHIFT (28)
+#define MPI_FC_DEVICE_PGAD_ND_DID_MASK (0x00FFFFFF)
+#define MPI_FC_DEVICE_PGAD_ND_DID_SHIFT (0)
+#define MPI_FC_DEVICE_PGAD_FORM_BUS_TID (0x01000000)
+#define MPI_FC_DEVICE_PGAD_BT_BUS_MASK (0x0000FF00)
+#define MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT (8)
+#define MPI_FC_DEVICE_PGAD_BT_TID_MASK (0x000000FF)
+#define MPI_FC_DEVICE_PGAD_BT_TID_SHIFT (0)
+
+#define MPI_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF)
+#define MPI_PHYSDISK_PGAD_PHYSDISKNUM_SHIFT (0)
+
+#define MPI_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000)
+#define MPI_SAS_EXPAND_PGAD_FORM_SHIFT (28)
+#define MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x00000001)
+#define MPI_SAS_EXPAND_PGAD_FORM_HANDLE (0x00000002)
+#define MPI_SAS_EXPAND_PGAD_GNH_MASK_HANDLE (0x0000FFFF)
+#define MPI_SAS_EXPAND_PGAD_GNH_SHIFT_HANDLE (0)
+#define MPI_SAS_EXPAND_PGAD_HPN_MASK_PHY (0x00FF0000)
+#define MPI_SAS_EXPAND_PGAD_HPN_SHIFT_PHY (16)
+#define MPI_SAS_EXPAND_PGAD_HPN_MASK_HANDLE (0x0000FFFF)
+#define MPI_SAS_EXPAND_PGAD_HPN_SHIFT_HANDLE (0)
+#define MPI_SAS_EXPAND_PGAD_H_MASK_HANDLE (0x0000FFFF)
+#define MPI_SAS_EXPAND_PGAD_H_SHIFT_HANDLE (0)
+
+#define MPI_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000)
+#define MPI_SAS_DEVICE_PGAD_FORM_SHIFT (28)
+#define MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID (0x00000001)
+#define MPI_SAS_DEVICE_PGAD_FORM_HANDLE (0x00000002)
+#define MPI_SAS_DEVICE_PGAD_GNH_HANDLE_MASK (0x0000FFFF)
+#define MPI_SAS_DEVICE_PGAD_GNH_HANDLE_SHIFT (0)
+#define MPI_SAS_DEVICE_PGAD_BT_BUS_MASK (0x0000FF00)
+#define MPI_SAS_DEVICE_PGAD_BT_BUS_SHIFT (8)
+#define MPI_SAS_DEVICE_PGAD_BT_TID_MASK (0x000000FF)
+#define MPI_SAS_DEVICE_PGAD_BT_TID_SHIFT (0)
+#define MPI_SAS_DEVICE_PGAD_H_HANDLE_MASK (0x0000FFFF)
+#define MPI_SAS_DEVICE_PGAD_H_HANDLE_SHIFT (0)
+
+#define MPI_SAS_PHY_PGAD_FORM_MASK (0xF0000000)
+#define MPI_SAS_PHY_PGAD_FORM_SHIFT (28)
+#define MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x0)
+#define MPI_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x1)
+#define MPI_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF)
+#define MPI_SAS_PHY_PGAD_PHY_NUMBER_SHIFT (0)
+#define MPI_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF)
+#define MPI_SAS_PHY_PGAD_PHY_TBL_INDEX_SHIFT (0)
+
+#define MPI_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000)
+#define MPI_SAS_ENCLOS_PGAD_FORM_SHIFT (28)
+#define MPI_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI_SAS_ENCLOS_PGAD_FORM_HANDLE (0x00000001)
+#define MPI_SAS_ENCLOS_PGAD_GNH_HANDLE_MASK (0x0000FFFF)
+#define MPI_SAS_ENCLOS_PGAD_GNH_HANDLE_SHIFT (0)
+#define MPI_SAS_ENCLOS_PGAD_H_HANDLE_MASK (0x0000FFFF)
+#define MPI_SAS_ENCLOS_PGAD_H_HANDLE_SHIFT (0)
+
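+/*
+ * Illustrative sketch (hypothetical host-side helpers, not part of the MPI
+ * specification): PageAddress packs a FORM code plus form-specific fields
+ * into one 32-bit word using the masks and shifts above.  Note that the
+ * SAS/FC FORM codes must be shifted into position with the *_FORM_SHIFT
+ * defines, while the SCSI_DEVICE FORM values are already defined in their
+ * final bit positions.
+ */
+static inline U32
+example_scsi_device_page_address(U8 bus, U8 target_id)
+{
+    return MPI_SCSI_DEVICE_FORM_BUS_TID |
+           (((U32)bus << MPI_SCSI_DEVICE_BUS_SHIFT) &
+            MPI_SCSI_DEVICE_BUS_MASK) |
+           (((U32)target_id << MPI_SCSI_DEVICE_TARGET_ID_SHIFT) &
+            MPI_SCSI_DEVICE_TARGET_ID_MASK);
+}
+
+static inline U32
+example_sas_device_page_address(U16 device_handle)
+{
+    return ((U32)MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+            MPI_SAS_DEVICE_PGAD_FORM_SHIFT) |
+           (((U32)device_handle << MPI_SAS_DEVICE_PGAD_H_HANDLE_SHIFT) &
+            MPI_SAS_DEVICE_PGAD_H_HANDLE_MASK);
+}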
+
+
+/****************************************************************************
+* Config Request Message
+****************************************************************************/
+typedef struct _MSG_CONFIG
+{
+ U8 Action; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 ExtPageLength; /* 04h */
+ U8 ExtPageType; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 Reserved2[8]; /* 0Ch */
+ CONFIG_PAGE_HEADER Header; /* 14h */
+ U32 PageAddress; /* 18h */
+ SGE_IO_UNION PageBufferSGE; /* 1Ch */
+} MSG_CONFIG, MPI_POINTER PTR_MSG_CONFIG,
+ Config_t, MPI_POINTER pConfig_t;
+
+
+/****************************************************************************
+* Action field values
+****************************************************************************/
+#define MPI_CONFIG_ACTION_PAGE_HEADER (0x00)
+#define MPI_CONFIG_ACTION_PAGE_READ_CURRENT (0x01)
+#define MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02)
+#define MPI_CONFIG_ACTION_PAGE_DEFAULT (0x03)
+#define MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04)
+#define MPI_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05)
+#define MPI_CONFIG_ACTION_PAGE_READ_NVRAM (0x06)
+
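+/*
+ * Illustrative sketch (hypothetical host code, not part of this header):
+ * pages are usually fetched in two steps -- a PAGE_HEADER action to learn
+ * PageLength and PageVersion, then a READ_CURRENT action with a reply
+ * buffer of Header.PageLength 32-bit words.  Only the fields relevant to
+ * that pattern are shown; MsgContext, the PageBufferSGE and posting of the
+ * request frame are adapter-specific and omitted.  MPI_FUNCTION_CONFIG is
+ * defined in mpi.h.
+ */
+static inline void
+example_build_config_header_request(MSG_CONFIG *req, U8 page_type,
+                                    U8 page_number, U32 page_address)
+{
+    req->Function          = MPI_FUNCTION_CONFIG;
+    req->Action            = MPI_CONFIG_ACTION_PAGE_HEADER;
+    req->Header.PageType   = page_type;
+    req->Header.PageNumber = page_number;
+    req->PageAddress       = page_address;
+}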
+
+/* Config Reply Message */
+typedef struct _MSG_CONFIG_REPLY
+{
+ U8 Action; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 ExtPageLength; /* 04h */
+ U8 ExtPageType; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 Reserved2[2]; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ CONFIG_PAGE_HEADER Header; /* 14h */
+} MSG_CONFIG_REPLY, MPI_POINTER PTR_MSG_CONFIG_REPLY,
+ ConfigReply_t, MPI_POINTER pConfigReply_t;
+
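+/*
+ * Illustrative sketch (hypothetical host code): before trusting the header
+ * returned in a Config reply, the IOCStatus field should be checked.  The
+ * MPI_IOCSTATUS_* codes and MPI_IOCSTATUS_MASK come from mpi.h; the top bit
+ * only flags that IOCLogInfo carries additional information.
+ */
+static inline int
+example_config_reply_ok(MSG_CONFIG_REPLY *reply)
+{
+    return ((reply->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS);
+}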
+
+
+/*****************************************************************************
+*
+* C o n f i g u r a t i o n P a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Manufacturing Config pages
+****************************************************************************/
+#define MPI_MANUFACTPAGE_VENDORID_LSILOGIC (0x1000)
+/* Fibre Channel */
+#define MPI_MANUFACTPAGE_DEVICEID_FC909 (0x0621)
+#define MPI_MANUFACTPAGE_DEVICEID_FC919 (0x0624)
+#define MPI_MANUFACTPAGE_DEVICEID_FC929 (0x0622)
+#define MPI_MANUFACTPAGE_DEVICEID_FC919X (0x0628)
+#define MPI_MANUFACTPAGE_DEVICEID_FC929X (0x0626)
+#define MPI_MANUFACTPAGE_DEVICEID_FC939X (0x0642)
+#define MPI_MANUFACTPAGE_DEVICEID_FC949X (0x0640)
+#define MPI_MANUFACTPAGE_DEVICEID_FC949E (0x0646)
+/* SCSI */
+#define MPI_MANUFACTPAGE_DEVID_53C1030 (0x0030)
+#define MPI_MANUFACTPAGE_DEVID_53C1030ZC (0x0031)
+#define MPI_MANUFACTPAGE_DEVID_1030_53C1035 (0x0032)
+#define MPI_MANUFACTPAGE_DEVID_1030ZC_53C1035 (0x0033)
+#define MPI_MANUFACTPAGE_DEVID_53C1035 (0x0040)
+#define MPI_MANUFACTPAGE_DEVID_53C1035ZC (0x0041)
+/* SAS */
+#define MPI_MANUFACTPAGE_DEVID_SAS1064 (0x0050)
+#define MPI_MANUFACTPAGE_DEVID_SAS1064A (0x005C)
+#define MPI_MANUFACTPAGE_DEVID_SAS1064E (0x0056)
+#define MPI_MANUFACTPAGE_DEVID_SAS1066 (0x005E)
+#define MPI_MANUFACTPAGE_DEVID_SAS1066E (0x005A)
+#define MPI_MANUFACTPAGE_DEVID_SAS1068 (0x0054)
+#define MPI_MANUFACTPAGE_DEVID_SAS1068E (0x0058)
+#define MPI_MANUFACTPAGE_DEVID_SAS1078 (0x0062)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_0
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 ChipName[16]; /* 04h */
+ U8 ChipRevision[8]; /* 14h */
+ U8 BoardName[16]; /* 1Ch */
+ U8 BoardAssembly[16]; /* 2Ch */
+ U8 BoardTracerNumber[16]; /* 3Ch */
+
+} CONFIG_PAGE_MANUFACTURING_0, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_0,
+ ManufacturingPage0_t, MPI_POINTER pManufacturingPage0_t;
+
+#define MPI_MANUFACTURING0_PAGEVERSION (0x00)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_1
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 VPD[256]; /* 04h */
+} CONFIG_PAGE_MANUFACTURING_1, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_1,
+ ManufacturingPage1_t, MPI_POINTER pManufacturingPage1_t;
+
+#define MPI_MANUFACTURING1_PAGEVERSION (0x00)
+
+
+typedef struct _MPI_CHIP_REVISION_ID
+{
+ U16 DeviceID; /* 00h */
+ U8 PCIRevisionID; /* 02h */
+ U8 Reserved; /* 03h */
+} MPI_CHIP_REVISION_ID, MPI_POINTER PTR_MPI_CHIP_REVISION_ID,
+ MpiChipRevisionId_t, MPI_POINTER pMpiChipRevisionId_t;
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_MAN_PAGE_2_HW_SETTINGS_WORDS
+#define MPI_MAN_PAGE_2_HW_SETTINGS_WORDS (1)
+#endif
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_2
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ MPI_CHIP_REVISION_ID ChipId; /* 04h */
+ U32 HwSettings[MPI_MAN_PAGE_2_HW_SETTINGS_WORDS];/* 08h */
+} CONFIG_PAGE_MANUFACTURING_2, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_2,
+ ManufacturingPage2_t, MPI_POINTER pManufacturingPage2_t;
+
+#define MPI_MANUFACTURING2_PAGEVERSION (0x00)
+
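+/*
+ * Illustrative sketch (hypothetical host code): because HwSettings[] is
+ * declared with a single element, the real number of words is discovered at
+ * runtime.  Header.PageLength counts 32-bit words for the whole page, so the
+ * read buffer is Header.PageLength * 4 bytes and the array length is the
+ * page length minus the two words used by the Header and ChipId fields.
+ */
+static inline int
+example_man_page_2_hw_settings_words(CONFIG_PAGE_MANUFACTURING_2 *page2)
+{
+    return (int)page2->Header.PageLength - 2;
+}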
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_MAN_PAGE_3_INFO_WORDS
+#define MPI_MAN_PAGE_3_INFO_WORDS (1)
+#endif
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_3
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ MPI_CHIP_REVISION_ID ChipId; /* 04h */
+ U32 Info[MPI_MAN_PAGE_3_INFO_WORDS];/* 08h */
+} CONFIG_PAGE_MANUFACTURING_3, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_3,
+ ManufacturingPage3_t, MPI_POINTER pManufacturingPage3_t;
+
+#define MPI_MANUFACTURING3_PAGEVERSION (0x00)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_4
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Reserved1; /* 04h */
+ U8 InfoOffset0; /* 08h */
+ U8 InfoSize0; /* 09h */
+ U8 InfoOffset1; /* 0Ah */
+ U8 InfoSize1; /* 0Bh */
+ U8 InquirySize; /* 0Ch */
+ U8 Flags; /* 0Dh */
+ U16 ExtFlags; /* 0Eh */
+ U8 InquiryData[56]; /* 10h */
+ U32 ISVolumeSettings; /* 48h */
+ U32 IMEVolumeSettings; /* 4Ch */
+ U32 IMVolumeSettings; /* 50h */
+ U32 Reserved3; /* 54h */
+ U32 Reserved4; /* 58h */
+ U32 Reserved5; /* 5Ch */
+ U8 IMEDataScrubRate; /* 60h */
+ U8 IMEResyncRate; /* 61h */
+ U16 Reserved6; /* 62h */
+ U8 IMDataScrubRate; /* 64h */
+ U8 IMResyncRate; /* 65h */
+ U16 Reserved7; /* 66h */
+ U32 Reserved8; /* 68h */
+ U32 Reserved9; /* 6Ch */
+} CONFIG_PAGE_MANUFACTURING_4, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_4,
+ ManufacturingPage4_t, MPI_POINTER pManufacturingPage4_t;
+
+#define MPI_MANUFACTURING4_PAGEVERSION (0x05)
+
+/* defines for the Flags field */
+#define MPI_MANPAGE4_FORCE_BAD_BLOCK_TABLE (0x80)
+#define MPI_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x40)
+#define MPI_MANPAGE4_IME_DISABLE (0x20)
+#define MPI_MANPAGE4_IM_DISABLE (0x10)
+#define MPI_MANPAGE4_IS_DISABLE (0x08)
+#define MPI_MANPAGE4_IR_MODEPAGE8_DISABLE (0x04)
+#define MPI_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x02)
+#define MPI_MANPAGE4_IR_NO_MIX_SAS_SATA (0x01)
+
+/* defines for the ExtFlags field */
+#define MPI_MANPAGE4_EXTFLAGS_MASK_COERCION_SIZE (0x0180)
+#define MPI_MANPAGE4_EXTFLAGS_SHIFT_COERCION_SIZE (7)
+#define MPI_MANPAGE4_EXTFLAGS_1GB_COERCION_SIZE (0)
+#define MPI_MANPAGE4_EXTFLAGS_128MB_COERCION_SIZE (1)
+
+#define MPI_MANPAGE4_EXTFLAGS_NO_MIX_SSD_SAS_SATA (0x0040)
+#define MPI_MANPAGE4_EXTFLAGS_MIX_SSD_AND_NON_SSD (0x0020)
+#define MPI_MANPAGE4_EXTFLAGS_DUAL_PORT_SUPPORT (0x0010)
+#define MPI_MANPAGE4_EXTFLAGS_HIDE_NON_IR_METADATA (0x0008)
+#define MPI_MANPAGE4_EXTFLAGS_SAS_CACHE_DISABLE (0x0004)
+#define MPI_MANPAGE4_EXTFLAGS_SATA_CACHE_DISABLE (0x0002)
+#define MPI_MANPAGE4_EXTFLAGS_LEGACY_MODE (0x0001)
+
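+/*
+ * Illustrative sketch (hypothetical host code): decoding the coercion-size
+ * field from ExtFlags.  Per the defines above, the encoded value 0 selects
+ * 1 GB coercion and 1 selects 128 MB; any other encoding is treated as the
+ * 1 GB default in this sketch.
+ */
+static inline U32
+example_man_page_4_coercion_size_mb(U16 ext_flags)
+{
+    U16 code = (U16)((ext_flags & MPI_MANPAGE4_EXTFLAGS_MASK_COERCION_SIZE) >>
+                     MPI_MANPAGE4_EXTFLAGS_SHIFT_COERCION_SIZE);
+
+    return (code == MPI_MANPAGE4_EXTFLAGS_128MB_COERCION_SIZE) ? 128 : 1024;
+}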
+
+#ifndef MPI_MANPAGE5_NUM_FORCEWWID
+#define MPI_MANPAGE5_NUM_FORCEWWID (1)
+#endif
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_5
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U64 BaseWWID; /* 04h */
+ U8 Flags; /* 0Ch */
+ U8 NumForceWWID; /* 0Dh */
+ U16 Reserved2; /* 0Eh */
+ U32 Reserved3; /* 10h */
+ U32 Reserved4; /* 14h */
+ U64 ForceWWID[MPI_MANPAGE5_NUM_FORCEWWID]; /* 18h */
+} CONFIG_PAGE_MANUFACTURING_5, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_5,
+ ManufacturingPage5_t, MPI_POINTER pManufacturingPage5_t;
+
+#define MPI_MANUFACTURING5_PAGEVERSION (0x02)
+
+/* defines for the Flags field */
+#define MPI_MANPAGE5_TWO_WWID_PER_PHY (0x01)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_6
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 ProductSpecificInfo;/* 04h */
+} CONFIG_PAGE_MANUFACTURING_6, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_6,
+ ManufacturingPage6_t, MPI_POINTER pManufacturingPage6_t;
+
+#define MPI_MANUFACTURING6_PAGEVERSION (0x00)
+
+
+typedef struct _MPI_MANPAGE7_CONNECTOR_INFO
+{
+ U32 Pinout; /* 00h */
+ U8 Connector[16]; /* 04h */
+ U8 Location; /* 14h */
+ U8 Reserved1; /* 15h */
+ U16 Slot; /* 16h */
+ U32 Reserved2; /* 18h */
+} MPI_MANPAGE7_CONNECTOR_INFO, MPI_POINTER PTR_MPI_MANPAGE7_CONNECTOR_INFO,
+ MpiManPage7ConnectorInfo_t, MPI_POINTER pMpiManPage7ConnectorInfo_t;
+
+/* defines for the Pinout field */
+#define MPI_MANPAGE7_PINOUT_SFF_8484_L4 (0x00080000)
+#define MPI_MANPAGE7_PINOUT_SFF_8484_L3 (0x00040000)
+#define MPI_MANPAGE7_PINOUT_SFF_8484_L2 (0x00020000)
+#define MPI_MANPAGE7_PINOUT_SFF_8484_L1 (0x00010000)
+#define MPI_MANPAGE7_PINOUT_SFF_8470_L4 (0x00000800)
+#define MPI_MANPAGE7_PINOUT_SFF_8470_L3 (0x00000400)
+#define MPI_MANPAGE7_PINOUT_SFF_8470_L2 (0x00000200)
+#define MPI_MANPAGE7_PINOUT_SFF_8470_L1 (0x00000100)
+#define MPI_MANPAGE7_PINOUT_SFF_8482 (0x00000002)
+#define MPI_MANPAGE7_PINOUT_CONNECTION_UNKNOWN (0x00000001)
+
+/* defines for the Location field */
+#define MPI_MANPAGE7_LOCATION_UNKNOWN (0x01)
+#define MPI_MANPAGE7_LOCATION_INTERNAL (0x02)
+#define MPI_MANPAGE7_LOCATION_EXTERNAL (0x04)
+#define MPI_MANPAGE7_LOCATION_SWITCHABLE (0x08)
+#define MPI_MANPAGE7_LOCATION_AUTO (0x10)
+#define MPI_MANPAGE7_LOCATION_NOT_PRESENT (0x20)
+#define MPI_MANPAGE7_LOCATION_NOT_CONNECTED (0x80)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumPhys at runtime.
+ */
+#ifndef MPI_MANPAGE7_CONNECTOR_INFO_MAX
+#define MPI_MANPAGE7_CONNECTOR_INFO_MAX (1)
+#endif
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_7
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Reserved1; /* 04h */
+ U32 Reserved2; /* 08h */
+ U32 Flags; /* 0Ch */
+ U8 EnclosureName[16]; /* 10h */
+ U8 NumPhys; /* 20h */
+ U8 Reserved3; /* 21h */
+ U16 Reserved4; /* 22h */
+ MPI_MANPAGE7_CONNECTOR_INFO ConnectorInfo[MPI_MANPAGE7_CONNECTOR_INFO_MAX]; /* 24h */
+} CONFIG_PAGE_MANUFACTURING_7, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_7,
+ ManufacturingPage7_t, MPI_POINTER pManufacturingPage7_t;
+
+#define MPI_MANUFACTURING7_PAGEVERSION (0x00)
+
+/* defines for the Flags field */
+#define MPI_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_8
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 ProductSpecificInfo;/* 04h */
+} CONFIG_PAGE_MANUFACTURING_8, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_8,
+ ManufacturingPage8_t, MPI_POINTER pManufacturingPage8_t;
+
+#define MPI_MANUFACTURING8_PAGEVERSION (0x00)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_9
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 ProductSpecificInfo;/* 04h */
+} CONFIG_PAGE_MANUFACTURING_9, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_9,
+ ManufacturingPage9_t, MPI_POINTER pManufacturingPage9_t;
+
+#define MPI_MANUFACTURING9_PAGEVERSION (0x00)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_10
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 ProductSpecificInfo;/* 04h */
+} CONFIG_PAGE_MANUFACTURING_10, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_10,
+ ManufacturingPage10_t, MPI_POINTER pManufacturingPage10_t;
+
+#define MPI_MANUFACTURING10_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* IO Unit Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_IO_UNIT_0
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U64 UniqueValue; /* 04h */
+} CONFIG_PAGE_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_0,
+ IOUnitPage0_t, MPI_POINTER pIOUnitPage0_t;
+
+#define MPI_IOUNITPAGE0_PAGEVERSION (0x00)
+
+
+typedef struct _CONFIG_PAGE_IO_UNIT_1
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Flags; /* 04h */
+} CONFIG_PAGE_IO_UNIT_1, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_1,
+ IOUnitPage1_t, MPI_POINTER pIOUnitPage1_t;
+
+#define MPI_IOUNITPAGE1_PAGEVERSION (0x02)
+
+/* IO Unit Page 1 Flags defines */
+#define MPI_IOUNITPAGE1_MULTI_FUNCTION (0x00000000)
+#define MPI_IOUNITPAGE1_SINGLE_FUNCTION (0x00000001)
+#define MPI_IOUNITPAGE1_MULTI_PATHING (0x00000002)
+#define MPI_IOUNITPAGE1_SINGLE_PATHING (0x00000000)
+#define MPI_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
+#define MPI_IOUNITPAGE1_DISABLE_QUEUE_FULL_HANDLING (0x00000020)
+#define MPI_IOUNITPAGE1_DISABLE_IR (0x00000040)
+#define MPI_IOUNITPAGE1_FORCE_32 (0x00000080)
+#define MPI_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100)
+#define MPI_IOUNITPAGE1_SATA_WRITE_CACHE_DISABLE (0x00000200)
+
+typedef struct _MPI_ADAPTER_INFO
+{
+ U8 PciBusNumber; /* 00h */
+ U8 PciDeviceAndFunctionNumber; /* 01h */
+ U16 AdapterFlags; /* 02h */
+} MPI_ADAPTER_INFO, MPI_POINTER PTR_MPI_ADAPTER_INFO,
+ MpiAdapterInfo_t, MPI_POINTER pMpiAdapterInfo_t;
+
+#define MPI_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
+#define MPI_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
+
+typedef struct _CONFIG_PAGE_IO_UNIT_2
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Flags; /* 04h */
+ U32 BiosVersion; /* 08h */
+ MPI_ADAPTER_INFO AdapterOrder[4]; /* 0Ch */
+ U32 Reserved1; /* 1Ch */
+} CONFIG_PAGE_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_2,
+ IOUnitPage2_t, MPI_POINTER pIOUnitPage2_t;
+
+#define MPI_IOUNITPAGE2_PAGEVERSION (0x02)
+
+#define MPI_IOUNITPAGE2_FLAGS_PAUSE_ON_ERROR (0x00000002)
+#define MPI_IOUNITPAGE2_FLAGS_VERBOSE_ENABLE (0x00000004)
+#define MPI_IOUNITPAGE2_FLAGS_COLOR_VIDEO_DISABLE (0x00000008)
+#define MPI_IOUNITPAGE2_FLAGS_DONT_HOOK_INT_40 (0x00000010)
+
+#define MPI_IOUNITPAGE2_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0)
+#define MPI_IOUNITPAGE2_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000)
+#define MPI_IOUNITPAGE2_FLAGS_ADAPTER_DISPLAY (0x00000020)
+#define MPI_IOUNITPAGE2_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040)
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX
+#define MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1)
+#endif
+
+typedef struct _CONFIG_PAGE_IO_UNIT_3
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 GPIOCount; /* 04h */
+ U8 Reserved1; /* 05h */
+ U16 Reserved2; /* 06h */
+ U16 GPIOVal[MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX]; /* 08h */
+} CONFIG_PAGE_IO_UNIT_3, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_3,
+ IOUnitPage3_t, MPI_POINTER pIOUnitPage3_t;
+
+#define MPI_IOUNITPAGE3_PAGEVERSION (0x01)
+
+#define MPI_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFC)
+#define MPI_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2)
+#define MPI_IOUNITPAGE3_GPIO_SETTING_OFF (0x00)
+#define MPI_IOUNITPAGE3_GPIO_SETTING_ON (0x01)
+
+
+typedef struct _CONFIG_PAGE_IO_UNIT_4
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Reserved1; /* 04h */
+ SGE_SIMPLE_UNION FWImageSGE; /* 08h */
+} CONFIG_PAGE_IO_UNIT_4, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_4,
+ IOUnitPage4_t, MPI_POINTER pIOUnitPage4_t;
+
+#define MPI_IOUNITPAGE4_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* IOC Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_IOC_0
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 TotalNVStore; /* 04h */
+ U32 FreeNVStore; /* 08h */
+ U16 VendorID; /* 0Ch */
+ U16 DeviceID; /* 0Eh */
+ U8 RevisionID; /* 10h */
+ U8 Reserved[3]; /* 11h */
+ U32 ClassCode; /* 14h */
+ U16 SubsystemVendorID; /* 18h */
+ U16 SubsystemID; /* 1Ah */
+} CONFIG_PAGE_IOC_0, MPI_POINTER PTR_CONFIG_PAGE_IOC_0,
+ IOCPage0_t, MPI_POINTER pIOCPage0_t;
+
+#define MPI_IOCPAGE0_PAGEVERSION (0x01)
+
+
+typedef struct _CONFIG_PAGE_IOC_1
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Flags; /* 04h */
+ U32 CoalescingTimeout; /* 08h */
+ U8 CoalescingDepth; /* 0Ch */
+ U8 PCISlotNum; /* 0Dh */
+ U8 Reserved[2]; /* 0Eh */
+} CONFIG_PAGE_IOC_1, MPI_POINTER PTR_CONFIG_PAGE_IOC_1,
+ IOCPage1_t, MPI_POINTER pIOCPage1_t;
+
+#define MPI_IOCPAGE1_PAGEVERSION (0x03)
+
+/* defines for the Flags field */
+#define MPI_IOCPAGE1_EEDP_MODE_MASK (0x07000000)
+#define MPI_IOCPAGE1_EEDP_MODE_OFF (0x00000000)
+#define MPI_IOCPAGE1_EEDP_MODE_T10 (0x01000000)
+#define MPI_IOCPAGE1_EEDP_MODE_LSI_1 (0x02000000)
+#define MPI_IOCPAGE1_INITIATOR_CONTEXT_REPLY_DISABLE (0x00000010)
+#define MPI_IOCPAGE1_REPLY_COALESCING (0x00000001)
+
+#define MPI_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF)
+
+
+typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL
+{
+ U8 VolumeID; /* 00h */
+ U8 VolumeBus; /* 01h */
+ U8 VolumeIOC; /* 02h */
+ U8 VolumePageNumber; /* 03h */
+ U8 VolumeType; /* 04h */
+ U8 Flags; /* 05h */
+ U16 Reserved3; /* 06h */
+} CONFIG_PAGE_IOC_2_RAID_VOL, MPI_POINTER PTR_CONFIG_PAGE_IOC_2_RAID_VOL,
+ ConfigPageIoc2RaidVol_t, MPI_POINTER pConfigPageIoc2RaidVol_t;
+
+/* IOC Page 2 Volume RAID Type values, also used in RAID Volume pages */
+
+#define MPI_RAID_VOL_TYPE_IS (0x00)
+#define MPI_RAID_VOL_TYPE_IME (0x01)
+#define MPI_RAID_VOL_TYPE_IM (0x02)
+#define MPI_RAID_VOL_TYPE_RAID_5 (0x03)
+#define MPI_RAID_VOL_TYPE_RAID_6 (0x04)
+#define MPI_RAID_VOL_TYPE_RAID_10 (0x05)
+#define MPI_RAID_VOL_TYPE_RAID_50 (0x06)
+#define MPI_RAID_VOL_TYPE_UNKNOWN (0xFF)
+
+/* IOC Page 2 Volume Flags values */
+
+#define MPI_IOCPAGE2_FLAG_VOLUME_INACTIVE (0x08)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_IOC_PAGE_2_RAID_VOLUME_MAX
+#define MPI_IOC_PAGE_2_RAID_VOLUME_MAX (1)
+#endif
+
+typedef struct _CONFIG_PAGE_IOC_2
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 CapabilitiesFlags; /* 04h */
+ U8 NumActiveVolumes; /* 08h */
+ U8 MaxVolumes; /* 09h */
+ U8 NumActivePhysDisks; /* 0Ah */
+ U8 MaxPhysDisks; /* 0Bh */
+ CONFIG_PAGE_IOC_2_RAID_VOL RaidVolume[MPI_IOC_PAGE_2_RAID_VOLUME_MAX];/* 0Ch */
+} CONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2,
+ IOCPage2_t, MPI_POINTER pIOCPage2_t;
+
+#define MPI_IOCPAGE2_PAGEVERSION (0x04)
+
+/* IOC Page 2 Capabilities flags */
+
+#define MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT (0x00000001)
+#define MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT (0x00000002)
+#define MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT (0x00000004)
+#define MPI_IOCPAGE2_CAP_FLAGS_RAID_5_SUPPORT (0x00000008)
+#define MPI_IOCPAGE2_CAP_FLAGS_RAID_6_SUPPORT (0x00000010)
+#define MPI_IOCPAGE2_CAP_FLAGS_RAID_10_SUPPORT (0x00000020)
+#define MPI_IOCPAGE2_CAP_FLAGS_RAID_50_SUPPORT (0x00000040)
+#define MPI_IOCPAGE2_CAP_FLAGS_RAID_64_BIT_ADDRESSING (0x10000000)
+#define MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT (0x20000000)
+#define MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT (0x40000000)
+#define MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT (0x80000000)
+
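+/*
+ * Illustrative sketch (hypothetical host code): RaidVolume[] is declared with
+ * one element, but a full read of IOC Page 2 returns NumActiveVolumes entries
+ * in the buffer sized from Header.PageLength, so lookups simply walk that
+ * buffer.
+ */
+static inline int
+example_ioc_page_2_find_volume(CONFIG_PAGE_IOC_2 *page2, U8 volume_id)
+{
+    int i;
+
+    for (i = 0; i < page2->NumActiveVolumes; i++)
+        if (page2->RaidVolume[i].VolumeID == volume_id)
+            return i;       /* index into the caller's full-length buffer */
+    return -1;              /* no active volume with that ID */
+}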
+
+typedef struct _IOC_3_PHYS_DISK
+{
+ U8 PhysDiskID; /* 00h */
+ U8 PhysDiskBus; /* 01h */
+ U8 PhysDiskIOC; /* 02h */
+ U8 PhysDiskNum; /* 03h */
+} IOC_3_PHYS_DISK, MPI_POINTER PTR_IOC_3_PHYS_DISK,
+ Ioc3PhysDisk_t, MPI_POINTER pIoc3PhysDisk_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_IOC_PAGE_3_PHYSDISK_MAX
+#define MPI_IOC_PAGE_3_PHYSDISK_MAX (1)
+#endif
+
+typedef struct _CONFIG_PAGE_IOC_3
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 NumPhysDisks; /* 04h */
+ U8 Reserved1; /* 05h */
+ U16 Reserved2; /* 06h */
+ IOC_3_PHYS_DISK PhysDisk[MPI_IOC_PAGE_3_PHYSDISK_MAX]; /* 08h */
+} CONFIG_PAGE_IOC_3, MPI_POINTER PTR_CONFIG_PAGE_IOC_3,
+ IOCPage3_t, MPI_POINTER pIOCPage3_t;
+
+#define MPI_IOCPAGE3_PAGEVERSION (0x00)
+
+
+typedef struct _IOC_4_SEP
+{
+ U8 SEPTargetID; /* 00h */
+ U8 SEPBus; /* 01h */
+ U16 Reserved; /* 02h */
+} IOC_4_SEP, MPI_POINTER PTR_IOC_4_SEP,
+ Ioc4Sep_t, MPI_POINTER pIoc4Sep_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_IOC_PAGE_4_SEP_MAX
+#define MPI_IOC_PAGE_4_SEP_MAX (1)
+#endif
+
+typedef struct _CONFIG_PAGE_IOC_4
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 ActiveSEP; /* 04h */
+ U8 MaxSEP; /* 05h */
+ U16 Reserved1; /* 06h */
+ IOC_4_SEP SEP[MPI_IOC_PAGE_4_SEP_MAX]; /* 08h */
+} CONFIG_PAGE_IOC_4, MPI_POINTER PTR_CONFIG_PAGE_IOC_4,
+ IOCPage4_t, MPI_POINTER pIOCPage4_t;
+
+#define MPI_IOCPAGE4_PAGEVERSION (0x00)
+
+
+typedef struct _IOC_5_HOT_SPARE
+{
+ U8 PhysDiskNum; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 HotSparePool; /* 02h */
+ U8 Flags; /* 03h */
+} IOC_5_HOT_SPARE, MPI_POINTER PTR_IOC_5_HOT_SPARE,
+ Ioc5HotSpare_t, MPI_POINTER pIoc5HotSpare_t;
+
+/* IOC Page 5 HotSpare Flags */
+#define MPI_IOC_PAGE_5_HOT_SPARE_ACTIVE (0x01)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_IOC_PAGE_5_HOT_SPARE_MAX
+#define MPI_IOC_PAGE_5_HOT_SPARE_MAX (1)
+#endif
+
+typedef struct _CONFIG_PAGE_IOC_5
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Reserved1; /* 04h */
+ U8 NumHotSpares; /* 08h */
+ U8 Reserved2; /* 09h */
+ U16 Reserved3; /* 0Ah */
+ IOC_5_HOT_SPARE HotSpare[MPI_IOC_PAGE_5_HOT_SPARE_MAX]; /* 0Ch */
+} CONFIG_PAGE_IOC_5, MPI_POINTER PTR_CONFIG_PAGE_IOC_5,
+ IOCPage5_t, MPI_POINTER pIOCPage5_t;
+
+#define MPI_IOCPAGE5_PAGEVERSION (0x00)
+
+typedef struct _CONFIG_PAGE_IOC_6
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 CapabilitiesFlags; /* 04h */
+ U8 MaxDrivesIS; /* 08h */
+ U8 MaxDrivesIM; /* 09h */
+ U8 MaxDrivesIME; /* 0Ah */
+ U8 Reserved1; /* 0Bh */
+ U8 MinDrivesIS; /* 0Ch */
+ U8 MinDrivesIM; /* 0Dh */
+ U8 MinDrivesIME; /* 0Eh */
+ U8 Reserved2; /* 0Fh */
+ U8 MaxGlobalHotSpares; /* 10h */
+ U8 Reserved3; /* 11h */
+ U16 Reserved4; /* 12h */
+ U32 Reserved5; /* 14h */
+ U32 SupportedStripeSizeMapIS; /* 18h */
+ U32 SupportedStripeSizeMapIME; /* 1Ch */
+ U32 Reserved6; /* 20h */
+ U8 MetadataSize; /* 24h */
+ U8 Reserved7; /* 25h */
+ U16 Reserved8; /* 26h */
+ U16 MaxBadBlockTableEntries; /* 28h */
+ U16 Reserved9; /* 2Ah */
+ U16 IRNvsramUsage; /* 2Ch */
+ U16 Reserved10; /* 2Eh */
+ U32 IRNvsramVersion; /* 30h */
+ U32 Reserved11; /* 34h */
+ U32 Reserved12; /* 38h */
+} CONFIG_PAGE_IOC_6, MPI_POINTER PTR_CONFIG_PAGE_IOC_6,
+ IOCPage6_t, MPI_POINTER pIOCPage6_t;
+
+#define MPI_IOCPAGE6_PAGEVERSION (0x01)
+
+/* IOC Page 6 Capabilities Flags */
+
+#define MPI_IOCPAGE6_CAP_FLAGS_SSD_SUPPORT (0x00000020)
+#define MPI_IOCPAGE6_CAP_FLAGS_MULTIPORT_DRIVE_SUPPORT (0x00000010)
+#define MPI_IOCPAGE6_CAP_FLAGS_DISABLE_SMART_POLLING (0x00000008)
+
+#define MPI_IOCPAGE6_CAP_FLAGS_MASK_METADATA_SIZE (0x00000006)
+#define MPI_IOCPAGE6_CAP_FLAGS_64MB_METADATA_SIZE (0x00000000)
+#define MPI_IOCPAGE6_CAP_FLAGS_512MB_METADATA_SIZE (0x00000002)
+
+#define MPI_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001)
+
+
+/****************************************************************************
+* BIOS Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_BIOS_1
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 BiosOptions; /* 04h */
+ U32 IOCSettings; /* 08h */
+ U32 Reserved1; /* 0Ch */
+ U32 DeviceSettings; /* 10h */
+ U16 NumberOfDevices; /* 14h */
+ U8 ExpanderSpinup; /* 16h */
+ U8 Reserved2; /* 17h */
+ U16 IOTimeoutBlockDevicesNonRM; /* 18h */
+ U16 IOTimeoutSequential; /* 1Ah */
+ U16 IOTimeoutOther; /* 1Ch */
+ U16 IOTimeoutBlockDevicesRM; /* 1Eh */
+} CONFIG_PAGE_BIOS_1, MPI_POINTER PTR_CONFIG_PAGE_BIOS_1,
+ BIOSPage1_t, MPI_POINTER pBIOSPage1_t;
+
+#define MPI_BIOSPAGE1_PAGEVERSION (0x03)
+
+/* values for the BiosOptions field */
+#define MPI_BIOSPAGE1_OPTIONS_SPI_ENABLE (0x00000400)
+#define MPI_BIOSPAGE1_OPTIONS_FC_ENABLE (0x00000200)
+#define MPI_BIOSPAGE1_OPTIONS_SAS_ENABLE (0x00000100)
+#define MPI_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
+
+/* values for the IOCSettings field */
+#define MPI_BIOSPAGE1_IOCSET_MASK_INITIAL_SPINUP_DELAY (0x0F000000)
+#define MPI_BIOSPAGE1_IOCSET_SHIFT_INITIAL_SPINUP_DELAY (24)
+
+#define MPI_BIOSPAGE1_IOCSET_MASK_PORT_ENABLE_DELAY (0x00F00000)
+#define MPI_BIOSPAGE1_IOCSET_SHIFT_PORT_ENABLE_DELAY (20)
+
+#define MPI_BIOSPAGE1_IOCSET_AUTO_PORT_ENABLE (0x00080000)
+#define MPI_BIOSPAGE1_IOCSET_DIRECT_ATTACH_SPINUP_MODE (0x00040000)
+
+#define MPI_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
+#define MPI_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000)
+#define MPI_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000)
+
+#define MPI_BIOSPAGE1_IOCSET_MASK_MAX_TARGET_SPIN_UP (0x0000F000)
+#define MPI_BIOSPAGE1_IOCSET_SHIFT_MAX_TARGET_SPIN_UP (12)
+
+#define MPI_BIOSPAGE1_IOCSET_MASK_SPINUP_DELAY (0x00000F00)
+#define MPI_BIOSPAGE1_IOCSET_SHIFT_SPINUP_DELAY (8)
+
+#define MPI_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0)
+#define MPI_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000)
+#define MPI_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040)
+#define MPI_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080)
+
+#define MPI_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030)
+#define MPI_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000)
+#define MPI_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010)
+#define MPI_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020)
+#define MPI_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030)
+
+#define MPI_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008)
+
+/* values for the DeviceSettings field */
+#define MPI_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010)
+#define MPI_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008)
+#define MPI_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004)
+#define MPI_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
+#define MPI_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001)
+
+/* defines for the ExpanderSpinup field */
+#define MPI_BIOSPAGE1_EXPSPINUP_MASK_MAX_TARGET (0xF0)
+#define MPI_BIOSPAGE1_EXPSPINUP_SHIFT_MAX_TARGET (4)
+#define MPI_BIOSPAGE1_EXPSPINUP_MASK_DELAY (0x0F)
+
+typedef struct _MPI_BOOT_DEVICE_ADAPTER_ORDER
+{
+ U32 Reserved1; /* 00h */
+ U32 Reserved2; /* 04h */
+ U32 Reserved3; /* 08h */
+ U32 Reserved4; /* 0Ch */
+ U32 Reserved5; /* 10h */
+ U32 Reserved6; /* 14h */
+ U32 Reserved7; /* 18h */
+ U32 Reserved8; /* 1Ch */
+ U32 Reserved9; /* 20h */
+ U32 Reserved10; /* 24h */
+ U32 Reserved11; /* 28h */
+ U32 Reserved12; /* 2Ch */
+ U32 Reserved13; /* 30h */
+ U32 Reserved14; /* 34h */
+ U32 Reserved15; /* 38h */
+ U32 Reserved16; /* 3Ch */
+ U32 Reserved17; /* 40h */
+} MPI_BOOT_DEVICE_ADAPTER_ORDER, MPI_POINTER PTR_MPI_BOOT_DEVICE_ADAPTER_ORDER;
+
+typedef struct _MPI_BOOT_DEVICE_ADAPTER_NUMBER
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 AdapterNumber; /* 02h */
+ U8 Reserved1; /* 03h */
+ U32 Reserved2; /* 04h */
+ U32 Reserved3; /* 08h */
+ U32 Reserved4; /* 0Ch */
+ U8 LUN[8]; /* 10h */
+ U32 Reserved5; /* 18h */
+ U32 Reserved6; /* 1Ch */
+ U32 Reserved7; /* 20h */
+ U32 Reserved8; /* 24h */
+ U32 Reserved9; /* 28h */
+ U32 Reserved10; /* 2Ch */
+ U32 Reserved11; /* 30h */
+ U32 Reserved12; /* 34h */
+ U32 Reserved13; /* 38h */
+ U32 Reserved14; /* 3Ch */
+ U32 Reserved15; /* 40h */
+} MPI_BOOT_DEVICE_ADAPTER_NUMBER, MPI_POINTER PTR_MPI_BOOT_DEVICE_ADAPTER_NUMBER;
+
+typedef struct _MPI_BOOT_DEVICE_PCI_ADDRESS
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U16 PCIAddress; /* 02h */
+ U32 Reserved1; /* 04h */
+ U32 Reserved2; /* 08h */
+ U32 Reserved3; /* 0Ch */
+ U8 LUN[8]; /* 10h */
+ U32 Reserved4; /* 18h */
+ U32 Reserved5; /* 1Ch */
+ U32 Reserved6; /* 20h */
+ U32 Reserved7; /* 24h */
+ U32 Reserved8; /* 28h */
+ U32 Reserved9; /* 2Ch */
+ U32 Reserved10; /* 30h */
+ U32 Reserved11; /* 34h */
+ U32 Reserved12; /* 38h */
+ U32 Reserved13; /* 3Ch */
+ U32 Reserved14; /* 40h */
+} MPI_BOOT_DEVICE_PCI_ADDRESS, MPI_POINTER PTR_MPI_BOOT_DEVICE_PCI_ADDRESS;
+
+typedef struct _MPI_BOOT_DEVICE_SLOT_NUMBER
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 PCISlotNumber; /* 02h */
+ U8 Reserved1; /* 03h */
+ U32 Reserved2; /* 04h */
+ U32 Reserved3; /* 08h */
+ U32 Reserved4; /* 0Ch */
+ U8 LUN[8]; /* 10h */
+ U32 Reserved5; /* 18h */
+ U32 Reserved6; /* 1Ch */
+ U32 Reserved7; /* 20h */
+ U32 Reserved8; /* 24h */
+ U32 Reserved9; /* 28h */
+ U32 Reserved10; /* 2Ch */
+ U32 Reserved11; /* 30h */
+ U32 Reserved12; /* 34h */
+ U32 Reserved13; /* 38h */
+ U32 Reserved14; /* 3Ch */
+ U32 Reserved15; /* 40h */
+} MPI_BOOT_DEVICE_PCI_SLOT_NUMBER, MPI_POINTER PTR_MPI_BOOT_DEVICE_PCI_SLOT_NUMBER;
+
+typedef struct _MPI_BOOT_DEVICE_FC_WWN
+{
+ U64 WWPN; /* 00h */
+ U32 Reserved1; /* 08h */
+ U32 Reserved2; /* 0Ch */
+ U8 LUN[8]; /* 10h */
+ U32 Reserved3; /* 18h */
+ U32 Reserved4; /* 1Ch */
+ U32 Reserved5; /* 20h */
+ U32 Reserved6; /* 24h */
+ U32 Reserved7; /* 28h */
+ U32 Reserved8; /* 2Ch */
+ U32 Reserved9; /* 30h */
+ U32 Reserved10; /* 34h */
+ U32 Reserved11; /* 38h */
+ U32 Reserved12; /* 3Ch */
+ U32 Reserved13; /* 40h */
+} MPI_BOOT_DEVICE_FC_WWN, MPI_POINTER PTR_MPI_BOOT_DEVICE_FC_WWN;
+
+typedef struct _MPI_BOOT_DEVICE_SAS_WWN
+{
+ U64 SASAddress; /* 00h */
+ U32 Reserved1; /* 08h */
+ U32 Reserved2; /* 0Ch */
+ U8 LUN[8]; /* 10h */
+ U32 Reserved3; /* 18h */
+ U32 Reserved4; /* 1Ch */
+ U32 Reserved5; /* 20h */
+ U32 Reserved6; /* 24h */
+ U32 Reserved7; /* 28h */
+ U32 Reserved8; /* 2Ch */
+ U32 Reserved9; /* 30h */
+ U32 Reserved10; /* 34h */
+ U32 Reserved11; /* 38h */
+ U32 Reserved12; /* 3Ch */
+ U32 Reserved13; /* 40h */
+} MPI_BOOT_DEVICE_SAS_WWN, MPI_POINTER PTR_MPI_BOOT_DEVICE_SAS_WWN;
+
+typedef struct _MPI_BOOT_DEVICE_ENCLOSURE_SLOT
+{
+ U64 EnclosureLogicalID; /* 00h */
+ U32 Reserved1; /* 08h */
+ U32 Reserved2; /* 0Ch */
+ U8 LUN[8]; /* 10h */
+ U16 SlotNumber; /* 18h */
+ U16 Reserved3; /* 1Ah */
+ U32 Reserved4; /* 1Ch */
+ U32 Reserved5; /* 20h */
+ U32 Reserved6; /* 24h */
+ U32 Reserved7; /* 28h */
+ U32 Reserved8; /* 2Ch */
+ U32 Reserved9; /* 30h */
+ U32 Reserved10; /* 34h */
+ U32 Reserved11; /* 38h */
+ U32 Reserved12; /* 3Ch */
+ U32 Reserved13; /* 40h */
+} MPI_BOOT_DEVICE_ENCLOSURE_SLOT,
+ MPI_POINTER PTR_MPI_BOOT_DEVICE_ENCLOSURE_SLOT;
+
+typedef union _MPI_BIOSPAGE2_BOOT_DEVICE
+{
+ MPI_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder;
+ MPI_BOOT_DEVICE_ADAPTER_NUMBER AdapterNumber;
+ MPI_BOOT_DEVICE_PCI_ADDRESS PCIAddress;
+ MPI_BOOT_DEVICE_PCI_SLOT_NUMBER PCISlotNumber;
+ MPI_BOOT_DEVICE_FC_WWN FcWwn;
+ MPI_BOOT_DEVICE_SAS_WWN SasWwn;
+ MPI_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
+} MPI_BIOSPAGE2_BOOT_DEVICE, MPI_POINTER PTR_MPI_BIOSPAGE2_BOOT_DEVICE;
+
+typedef struct _CONFIG_PAGE_BIOS_2
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Reserved1; /* 04h */
+ U32 Reserved2; /* 08h */
+ U32 Reserved3; /* 0Ch */
+ U32 Reserved4; /* 10h */
+ U32 Reserved5; /* 14h */
+ U32 Reserved6; /* 18h */
+ U8 BootDeviceForm; /* 1Ch */
+ U8 PrevBootDeviceForm; /* 1Dh */
+ U16 Reserved8; /* 1Eh */
+ MPI_BIOSPAGE2_BOOT_DEVICE BootDevice; /* 20h */
+} CONFIG_PAGE_BIOS_2, MPI_POINTER PTR_CONFIG_PAGE_BIOS_2,
+ BIOSPage2_t, MPI_POINTER pBIOSPage2_t;
+
+#define MPI_BIOSPAGE2_PAGEVERSION (0x02)
+
+#define MPI_BIOSPAGE2_FORM_MASK (0x0F)
+#define MPI_BIOSPAGE2_FORM_ADAPTER_ORDER (0x00)
+#define MPI_BIOSPAGE2_FORM_ADAPTER_NUMBER (0x01)
+#define MPI_BIOSPAGE2_FORM_PCI_ADDRESS (0x02)
+#define MPI_BIOSPAGE2_FORM_PCI_SLOT_NUMBER (0x03)
+#define MPI_BIOSPAGE2_FORM_FC_WWN (0x04)
+#define MPI_BIOSPAGE2_FORM_SAS_WWN (0x05)
+#define MPI_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
+
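+/*
+ * Illustrative sketch (hypothetical host code): BootDeviceForm (masked with
+ * MPI_BIOSPAGE2_FORM_MASK) selects which member of the boot-device union is
+ * valid.  Only the SAS WWN form is decoded here; the other forms follow the
+ * same pattern.
+ */
+static inline MPI_BOOT_DEVICE_SAS_WWN *
+example_bios_page_2_sas_boot_device(CONFIG_PAGE_BIOS_2 *page2)
+{
+    if ((page2->BootDeviceForm & MPI_BIOSPAGE2_FORM_MASK) ==
+        MPI_BIOSPAGE2_FORM_SAS_WWN)
+        return &page2->BootDevice.SasWwn;
+    return (MPI_BOOT_DEVICE_SAS_WWN *)0;    /* boot device is not SAS */
+}
+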
+typedef struct _CONFIG_PAGE_BIOS_4
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U64 ReassignmentBaseWWID; /* 04h */
+} CONFIG_PAGE_BIOS_4, MPI_POINTER PTR_CONFIG_PAGE_BIOS_4,
+ BIOSPage4_t, MPI_POINTER pBIOSPage4_t;
+
+#define MPI_BIOSPAGE4_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* SCSI Port Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_SCSI_PORT_0
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Capabilities; /* 04h */
+ U32 PhysicalInterface; /* 08h */
+} CONFIG_PAGE_SCSI_PORT_0, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_0,
+ SCSIPortPage0_t, MPI_POINTER pSCSIPortPage0_t;
+
+#define MPI_SCSIPORTPAGE0_PAGEVERSION (0x02)
+
+#define MPI_SCSIPORTPAGE0_CAP_IU (0x00000001)
+#define MPI_SCSIPORTPAGE0_CAP_DT (0x00000002)
+#define MPI_SCSIPORTPAGE0_CAP_QAS (0x00000004)
+#define MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK (0x0000FF00)
+#define MPI_SCSIPORTPAGE0_SYNC_ASYNC (0x00)
+#define MPI_SCSIPORTPAGE0_SYNC_5 (0x32)
+#define MPI_SCSIPORTPAGE0_SYNC_10 (0x19)
+#define MPI_SCSIPORTPAGE0_SYNC_20 (0x0C)
+#define MPI_SCSIPORTPAGE0_SYNC_33_33 (0x0B)
+#define MPI_SCSIPORTPAGE0_SYNC_40 (0x0A)
+#define MPI_SCSIPORTPAGE0_SYNC_80 (0x09)
+#define MPI_SCSIPORTPAGE0_SYNC_160 (0x08)
+#define MPI_SCSIPORTPAGE0_SYNC_UNKNOWN (0xFF)
+
+#define MPI_SCSIPORTPAGE0_CAP_SHIFT_MIN_SYNC_PERIOD (8)
+#define MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(Cap) \
+ ( ((Cap) & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK) \
+ >> MPI_SCSIPORTPAGE0_CAP_SHIFT_MIN_SYNC_PERIOD \
+ )
+#define MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK (0x00FF0000)
+#define MPI_SCSIPORTPAGE0_CAP_SHIFT_MAX_SYNC_OFFSET (16)
+#define MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(Cap) \
+ ( ((Cap) & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK) \
+ >> MPI_SCSIPORTPAGE0_CAP_SHIFT_MAX_SYNC_OFFSET \
+ )
+#define MPI_SCSIPORTPAGE0_CAP_IDP (0x08000000)
+#define MPI_SCSIPORTPAGE0_CAP_WIDE (0x20000000)
+#define MPI_SCSIPORTPAGE0_CAP_AIP (0x80000000)
+
+#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK (0x00000003)
+#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD (0x01)
+#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE (0x02)
+#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_LVD (0x03)
+#define MPI_SCSIPORTPAGE0_PHY_MASK_CONNECTED_ID (0xFF000000)
+#define MPI_SCSIPORTPAGE0_PHY_SHIFT_CONNECTED_ID (24)
+#define MPI_SCSIPORTPAGE0_PHY_BUS_FREE_CONNECTED_ID (0xFE)
+#define MPI_SCSIPORTPAGE0_PHY_UNKNOWN_CONNECTED_ID (0xFF)
+
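+/*
+ * Illustrative sketch (hypothetical host code): the GET macros above extract
+ * the negotiated-capability fields from the Capabilities word.  The period is
+ * the SPI sync period factor used by the MPI_SCSIPORTPAGE0_SYNC_* defines,
+ * not a value in nanoseconds, and a zero offset means the port only performs
+ * asynchronous transfers.
+ */
+static inline int
+example_scsi_port_supports_sync(U32 capabilities)
+{
+    U8 period = (U8)MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(capabilities);
+    U8 offset = (U8)MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(capabilities);
+
+    return (offset != 0) && (period != MPI_SCSIPORTPAGE0_SYNC_UNKNOWN);
+}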
+
+typedef struct _CONFIG_PAGE_SCSI_PORT_1
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Configuration; /* 04h */
+ U32 OnBusTimerValue; /* 08h */
+ U8 TargetConfig; /* 0Ch */
+ U8 Reserved1; /* 0Dh */
+ U16 IDConfig; /* 0Eh */
+} CONFIG_PAGE_SCSI_PORT_1, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_1,
+ SCSIPortPage1_t, MPI_POINTER pSCSIPortPage1_t;
+
+#define MPI_SCSIPORTPAGE1_PAGEVERSION (0x03)
+
+/* Configuration values */
+#define MPI_SCSIPORTPAGE1_CFG_PORT_SCSI_ID_MASK (0x000000FF)
+#define MPI_SCSIPORTPAGE1_CFG_PORT_RESPONSE_ID_MASK (0xFFFF0000)
+#define MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID (16)
+
+/* TargetConfig values */
+#define MPI_SCSIPORTPAGE1_TARGCONFIG_TARG_ONLY (0x01)
+#define MPI_SCSIPORTPAGE1_TARGCONFIG_INIT_TARG (0x02)
+
+
+typedef struct _MPI_DEVICE_INFO
+{
+ U8 Timeout; /* 00h */
+ U8 SyncFactor; /* 01h */
+ U16 DeviceFlags; /* 02h */
+} MPI_DEVICE_INFO, MPI_POINTER PTR_MPI_DEVICE_INFO,
+ MpiDeviceInfo_t, MPI_POINTER pMpiDeviceInfo_t;
+
+typedef struct _CONFIG_PAGE_SCSI_PORT_2
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 PortFlags; /* 04h */
+ U32 PortSettings; /* 08h */
+ MPI_DEVICE_INFO DeviceSettings[16]; /* 0Ch */
+} CONFIG_PAGE_SCSI_PORT_2, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_2,
+ SCSIPortPage2_t, MPI_POINTER pSCSIPortPage2_t;
+
+#define MPI_SCSIPORTPAGE2_PAGEVERSION (0x02)
+
+/* PortFlags values */
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_SCAN_HIGH_TO_LOW (0x00000001)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET (0x00000004)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_ALTERNATE_CHS (0x00000008)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_TERMINATION_DISABLE (0x00000010)
+
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK (0x00000060)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_FULL_DV (0x00000000)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_BASIC_DV_ONLY (0x00000020)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV (0x00000060)
+
+
+/* PortSettings values */
+#define MPI_SCSIPORTPAGE2_PORT_HOST_ID_MASK (0x0000000F)
+#define MPI_SCSIPORTPAGE2_PORT_MASK_INIT_HBA (0x00000030)
+#define MPI_SCSIPORTPAGE2_PORT_DISABLE_INIT_HBA (0x00000000)
+#define MPI_SCSIPORTPAGE2_PORT_BIOS_INIT_HBA (0x00000010)
+#define MPI_SCSIPORTPAGE2_PORT_OS_INIT_HBA (0x00000020)
+#define MPI_SCSIPORTPAGE2_PORT_BIOS_OS_INIT_HBA (0x00000030)
+#define MPI_SCSIPORTPAGE2_PORT_REMOVABLE_MEDIA (0x000000C0)
+#define MPI_SCSIPORTPAGE2_PORT_RM_NONE (0x00000000)
+#define MPI_SCSIPORTPAGE2_PORT_RM_BOOT_ONLY (0x00000040)
+#define MPI_SCSIPORTPAGE2_PORT_RM_WITH_MEDIA (0x00000080)
+#define MPI_SCSIPORTPAGE2_PORT_SPINUP_DELAY_MASK (0x00000F00)
+#define MPI_SCSIPORTPAGE2_PORT_SHIFT_SPINUP_DELAY (8)
+#define MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS (0x00003000)
+#define MPI_SCSIPORTPAGE2_PORT_NEGO_MASTER_SETTINGS (0x00000000)
+#define MPI_SCSIPORTPAGE2_PORT_NONE_MASTER_SETTINGS (0x00001000)
+#define MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS (0x00003000)
+
+#define MPI_SCSIPORTPAGE2_DEVICE_DISCONNECT_ENABLE (0x0001)
+#define MPI_SCSIPORTPAGE2_DEVICE_ID_SCAN_ENABLE (0x0002)
+#define MPI_SCSIPORTPAGE2_DEVICE_LUN_SCAN_ENABLE (0x0004)
+#define MPI_SCSIPORTPAGE2_DEVICE_TAG_QUEUE_ENABLE (0x0008)
+#define MPI_SCSIPORTPAGE2_DEVICE_WIDE_DISABLE (0x0010)
+#define MPI_SCSIPORTPAGE2_DEVICE_BOOT_CHOICE (0x0020)
+
+
+/****************************************************************************
+* SCSI Target Device Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_SCSI_DEVICE_0
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 NegotiatedParameters; /* 04h */
+ U32 Information; /* 08h */
+} CONFIG_PAGE_SCSI_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_0,
+ SCSIDevicePage0_t, MPI_POINTER pSCSIDevicePage0_t;
+
+#define MPI_SCSIDEVPAGE0_PAGEVERSION (0x04)
+
+#define MPI_SCSIDEVPAGE0_NP_IU (0x00000001)
+#define MPI_SCSIDEVPAGE0_NP_DT (0x00000002)
+#define MPI_SCSIDEVPAGE0_NP_QAS (0x00000004)
+#define MPI_SCSIDEVPAGE0_NP_HOLD_MCS (0x00000008)
+#define MPI_SCSIDEVPAGE0_NP_WR_FLOW (0x00000010)
+#define MPI_SCSIDEVPAGE0_NP_RD_STRM (0x00000020)
+#define MPI_SCSIDEVPAGE0_NP_RTI (0x00000040)
+#define MPI_SCSIDEVPAGE0_NP_PCOMP_EN (0x00000080)
+#define MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK (0x0000FF00)
+#define MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD (8)
+#define MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK (0x00FF0000)
+#define MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET (16)
+#define MPI_SCSIDEVPAGE0_NP_IDP (0x08000000)
+#define MPI_SCSIDEVPAGE0_NP_WIDE (0x20000000)
+#define MPI_SCSIDEVPAGE0_NP_AIP (0x80000000)
+
+#define MPI_SCSIDEVPAGE0_INFO_PARAMS_NEGOTIATED (0x00000001)
+#define MPI_SCSIDEVPAGE0_INFO_SDTR_REJECTED (0x00000002)
+#define MPI_SCSIDEVPAGE0_INFO_WDTR_REJECTED (0x00000004)
+#define MPI_SCSIDEVPAGE0_INFO_PPR_REJECTED (0x00000008)
+
+
+typedef struct _CONFIG_PAGE_SCSI_DEVICE_1
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 RequestedParameters; /* 04h */
+ U32 Reserved; /* 08h */
+ U32 Configuration; /* 0Ch */
+} CONFIG_PAGE_SCSI_DEVICE_1, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_1,
+ SCSIDevicePage1_t, MPI_POINTER pSCSIDevicePage1_t;
+
+#define MPI_SCSIDEVPAGE1_PAGEVERSION (0x05)
+
+#define MPI_SCSIDEVPAGE1_RP_IU (0x00000001)
+#define MPI_SCSIDEVPAGE1_RP_DT (0x00000002)
+#define MPI_SCSIDEVPAGE1_RP_QAS (0x00000004)
+#define MPI_SCSIDEVPAGE1_RP_HOLD_MCS (0x00000008)
+#define MPI_SCSIDEVPAGE1_RP_WR_FLOW (0x00000010)
+#define MPI_SCSIDEVPAGE1_RP_RD_STRM (0x00000020)
+#define MPI_SCSIDEVPAGE1_RP_RTI (0x00000040)
+#define MPI_SCSIDEVPAGE1_RP_PCOMP_EN (0x00000080)
+#define MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK (0x0000FF00)
+#define MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD (8)
+#define MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK (0x00FF0000)
+#define MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET (16)
+#define MPI_SCSIDEVPAGE1_RP_IDP (0x08000000)
+#define MPI_SCSIDEVPAGE1_RP_WIDE (0x20000000)
+#define MPI_SCSIDEVPAGE1_RP_AIP (0x80000000)
+
+#define MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED (0x00000002)
+#define MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED (0x00000004)
+#define MPI_SCSIDEVPAGE1_CONF_EXTENDED_PARAMS_ENABLE (0x00000008)
+#define MPI_SCSIDEVPAGE1_CONF_FORCE_PPR_MSG (0x00000010)
+
+
+typedef struct _CONFIG_PAGE_SCSI_DEVICE_2
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 DomainValidation; /* 04h */
+ U32 ParityPipeSelect; /* 08h */
+ U32 DataPipeSelect; /* 0Ch */
+} CONFIG_PAGE_SCSI_DEVICE_2, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_2,
+ SCSIDevicePage2_t, MPI_POINTER pSCSIDevicePage2_t;
+
+#define MPI_SCSIDEVPAGE2_PAGEVERSION (0x01)
+
+#define MPI_SCSIDEVPAGE2_DV_ISI_ENABLE (0x00000010)
+#define MPI_SCSIDEVPAGE2_DV_SECONDARY_DRIVER_ENABLE (0x00000020)
+#define MPI_SCSIDEVPAGE2_DV_SLEW_RATE_CTRL (0x00000380)
+#define MPI_SCSIDEVPAGE2_DV_PRIM_DRIVE_STR_CTRL (0x00001C00)
+#define MPI_SCSIDEVPAGE2_DV_SECOND_DRIVE_STR_CTRL (0x0000E000)
+#define MPI_SCSIDEVPAGE2_DV_XCLKH_ST (0x10000000)
+#define MPI_SCSIDEVPAGE2_DV_XCLKS_ST (0x20000000)
+#define MPI_SCSIDEVPAGE2_DV_XCLKH_DT (0x40000000)
+#define MPI_SCSIDEVPAGE2_DV_XCLKS_DT (0x80000000)
+
+#define MPI_SCSIDEVPAGE2_PPS_PPS_MASK (0x00000003)
+
+#define MPI_SCSIDEVPAGE2_DPS_BIT_0_PL_SELECT_MASK (0x00000003)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_1_PL_SELECT_MASK (0x0000000C)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_2_PL_SELECT_MASK (0x00000030)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_3_PL_SELECT_MASK (0x000000C0)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_4_PL_SELECT_MASK (0x00000300)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_5_PL_SELECT_MASK (0x00000C00)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_6_PL_SELECT_MASK (0x00003000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_7_PL_SELECT_MASK (0x0000C000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_8_PL_SELECT_MASK (0x00030000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_9_PL_SELECT_MASK (0x000C0000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_10_PL_SELECT_MASK (0x00300000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_11_PL_SELECT_MASK (0x00C00000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_12_PL_SELECT_MASK (0x03000000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_13_PL_SELECT_MASK (0x0C000000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_14_PL_SELECT_MASK (0x30000000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_15_PL_SELECT_MASK (0xC0000000)
+
+
+typedef struct _CONFIG_PAGE_SCSI_DEVICE_3
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U16 MsgRejectCount; /* 04h */
+ U16 PhaseErrorCount; /* 06h */
+ U16 ParityErrorCount; /* 08h */
+ U16 Reserved; /* 0Ah */
+} CONFIG_PAGE_SCSI_DEVICE_3, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_3,
+ SCSIDevicePage3_t, MPI_POINTER pSCSIDevicePage3_t;
+
+#define MPI_SCSIDEVPAGE3_PAGEVERSION (0x00)
+
+#define MPI_SCSIDEVPAGE3_MAX_COUNTER (0xFFFE)
+#define MPI_SCSIDEVPAGE3_UNSUPPORTED_COUNTER (0xFFFF)
+
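+/*
+ * Illustrative sketch (hypothetical host code): a counter value of
+ * UNSUPPORTED_COUNTER (0xFFFF) indicates the IOC does not maintain that
+ * counter, while MAX_COUNTER (0xFFFE) is the largest value a supported
+ * counter can report.
+ */
+static inline int
+example_scsi_dev_page_3_counter_valid(U16 counter)
+{
+    return (counter != MPI_SCSIDEVPAGE3_UNSUPPORTED_COUNTER);
+}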
+
+/****************************************************************************
+* FC Port Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_FC_PORT_0
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Flags; /* 04h */
+ U8 MPIPortNumber; /* 08h */
+ U8 LinkType; /* 09h */
+ U8 PortState; /* 0Ah */
+ U8 Reserved; /* 0Bh */
+ U32 PortIdentifier; /* 0Ch */
+ U64 WWNN; /* 10h */
+ U64 WWPN; /* 18h */
+ U32 SupportedServiceClass; /* 20h */
+ U32 SupportedSpeeds; /* 24h */
+ U32 CurrentSpeed; /* 28h */
+ U32 MaxFrameSize; /* 2Ch */
+ U64 FabricWWNN; /* 30h */
+ U64 FabricWWPN; /* 38h */
+ U32 DiscoveredPortsCount; /* 40h */
+ U32 MaxInitiators; /* 44h */
+ U8 MaxAliasesSupported; /* 48h */
+ U8 MaxHardAliasesSupported; /* 49h */
+ U8 NumCurrentAliases; /* 4Ah */
+ U8 Reserved1; /* 4Bh */
+} CONFIG_PAGE_FC_PORT_0, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_0,
+ FCPortPage0_t, MPI_POINTER pFCPortPage0_t;
+
+#define MPI_FCPORTPAGE0_PAGEVERSION (0x02)
+
+#define MPI_FCPORTPAGE0_FLAGS_PROT_MASK (0x0000000F)
+#define MPI_FCPORTPAGE0_FLAGS_PROT_FCP_INIT (MPI_PORTFACTS_PROTOCOL_INITIATOR)
+#define MPI_FCPORTPAGE0_FLAGS_PROT_FCP_TARG (MPI_PORTFACTS_PROTOCOL_TARGET)
+#define MPI_FCPORTPAGE0_FLAGS_PROT_LAN (MPI_PORTFACTS_PROTOCOL_LAN)
+#define MPI_FCPORTPAGE0_FLAGS_PROT_LOGBUSADDR (MPI_PORTFACTS_PROTOCOL_LOGBUSADDR)
+
+#define MPI_FCPORTPAGE0_FLAGS_ALIAS_ALPA_SUPPORTED (0x00000010)
+#define MPI_FCPORTPAGE0_FLAGS_ALIAS_WWN_SUPPORTED (0x00000020)
+#define MPI_FCPORTPAGE0_FLAGS_FABRIC_WWN_VALID (0x00000040)
+
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK (0x00000F00)
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT (0x00000000)
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT (0x00000100)
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP (0x00000200)
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT (0x00000400)
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP (0x00000800)
+
+#define MPI_FCPORTPAGE0_LTYPE_RESERVED (0x00)
+#define MPI_FCPORTPAGE0_LTYPE_OTHER (0x01)
+#define MPI_FCPORTPAGE0_LTYPE_UNKNOWN (0x02)
+#define MPI_FCPORTPAGE0_LTYPE_COPPER (0x03)
+#define MPI_FCPORTPAGE0_LTYPE_SINGLE_1300 (0x04)
+#define MPI_FCPORTPAGE0_LTYPE_SINGLE_1500 (0x05)
+#define MPI_FCPORTPAGE0_LTYPE_50_LASER_MULTI (0x06)
+#define MPI_FCPORTPAGE0_LTYPE_50_LED_MULTI (0x07)
+#define MPI_FCPORTPAGE0_LTYPE_62_LASER_MULTI (0x08)
+#define MPI_FCPORTPAGE0_LTYPE_62_LED_MULTI (0x09)
+#define MPI_FCPORTPAGE0_LTYPE_MULTI_LONG_WAVE (0x0A)
+#define MPI_FCPORTPAGE0_LTYPE_MULTI_SHORT_WAVE (0x0B)
+#define MPI_FCPORTPAGE0_LTYPE_LASER_SHORT_WAVE (0x0C)
+#define MPI_FCPORTPAGE0_LTYPE_LED_SHORT_WAVE (0x0D)
+#define MPI_FCPORTPAGE0_LTYPE_1300_LONG_WAVE (0x0E)
+#define MPI_FCPORTPAGE0_LTYPE_1500_LONG_WAVE (0x0F)
+
+#define MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN (0x01) /*(SNIA)HBA_PORTSTATE_UNKNOWN 1 Unknown */
+#define MPI_FCPORTPAGE0_PORTSTATE_ONLINE (0x02) /*(SNIA)HBA_PORTSTATE_ONLINE 2 Operational */
+#define MPI_FCPORTPAGE0_PORTSTATE_OFFLINE (0x03) /*(SNIA)HBA_PORTSTATE_OFFLINE 3 User Offline */
+#define MPI_FCPORTPAGE0_PORTSTATE_BYPASSED (0x04) /*(SNIA)HBA_PORTSTATE_BYPASSED 4 Bypassed */
+#define MPI_FCPORTPAGE0_PORTSTATE_DIAGNOST (0x05) /*(SNIA)HBA_PORTSTATE_DIAGNOSTICS 5 In diagnostics mode */
+#define MPI_FCPORTPAGE0_PORTSTATE_LINKDOWN (0x06) /*(SNIA)HBA_PORTSTATE_LINKDOWN 6 Link Down */
+#define MPI_FCPORTPAGE0_PORTSTATE_ERROR (0x07) /*(SNIA)HBA_PORTSTATE_ERROR 7 Port Error */
+#define MPI_FCPORTPAGE0_PORTSTATE_LOOPBACK (0x08) /*(SNIA)HBA_PORTSTATE_LOOPBACK 8 Loopback */
+
+#define MPI_FCPORTPAGE0_SUPPORT_CLASS_1 (0x00000001)
+#define MPI_FCPORTPAGE0_SUPPORT_CLASS_2 (0x00000002)
+#define MPI_FCPORTPAGE0_SUPPORT_CLASS_3 (0x00000004)
+
+#define MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN (0x00000000) /* (SNIA)HBA_PORTSPEED_UNKNOWN 0 Unknown - transceiver incapable of reporting */
+#define MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED (0x00000001) /* (SNIA)HBA_PORTSPEED_1GBIT 1 1 GBit/sec */
+#define MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED (0x00000002) /* (SNIA)HBA_PORTSPEED_2GBIT 2 2 GBit/sec */
+#define MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED (0x00000004) /* (SNIA)HBA_PORTSPEED_10GBIT 4 10 GBit/sec */
+#define MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED (0x00000008) /* (SNIA)HBA_PORTSPEED_4GBIT 8 4 GBit/sec */
+
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_NOT_NEGOTIATED (0x00008000) /* (SNIA)HBA_PORTSPEED_NOT_NEGOTIATED (1<<15) Speed not established */
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_1
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Flags; /* 04h */
+ U64 NoSEEPROMWWNN; /* 08h */
+ U64 NoSEEPROMWWPN; /* 10h */
+ U8 HardALPA; /* 18h */
+ U8 LinkConfig; /* 19h */
+ U8 TopologyConfig; /* 1Ah */
+ U8 AltConnector; /* 1Bh */
+ U8 NumRequestedAliases; /* 1Ch */
+ U8 RR_TOV; /* 1Dh */
+ U8 InitiatorDeviceTimeout; /* 1Eh */
+ U8 InitiatorIoPendTimeout; /* 1Fh */
+} CONFIG_PAGE_FC_PORT_1, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_1,
+ FCPortPage1_t, MPI_POINTER pFCPortPage1_t;
+
+#define MPI_FCPORTPAGE1_PAGEVERSION (0x06)
+
+#define MPI_FCPORTPAGE1_FLAGS_EXT_FCP_STATUS_EN (0x08000000)
+#define MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY (0x04000000)
+#define MPI_FCPORTPAGE1_FLAGS_FORCE_USE_NOSEEPROM_WWNS (0x02000000)
+#define MPI_FCPORTPAGE1_FLAGS_VERBOSE_RESCAN_EVENTS (0x01000000)
+#define MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID (0x00800000)
+#define MPI_FCPORTPAGE1_FLAGS_PORT_OFFLINE (0x00400000)
+#define MPI_FCPORTPAGE1_FLAGS_SOFT_ALPA_FALLBACK (0x00200000)
+#define MPI_FCPORTPAGE1_FLAGS_TARGET_LARGE_CDB_ENABLE (0x00000080)
+#define MPI_FCPORTPAGE1_FLAGS_MASK_RR_TOV_UNITS (0x00000070)
+#define MPI_FCPORTPAGE1_FLAGS_SUPPRESS_PROT_REG (0x00000008)
+#define MPI_FCPORTPAGE1_FLAGS_PLOGI_ON_LOGO (0x00000004)
+#define MPI_FCPORTPAGE1_FLAGS_MAINTAIN_LOGINS (0x00000002)
+#define MPI_FCPORTPAGE1_FLAGS_SORT_BY_DID (0x00000001)
+#define MPI_FCPORTPAGE1_FLAGS_SORT_BY_WWN (0x00000000)
+
+#define MPI_FCPORTPAGE1_FLAGS_PROT_MASK (0xF0000000)
+#define MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT (28)
+#define MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT ((U32)MPI_PORTFACTS_PROTOCOL_INITIATOR << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
+#define MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG ((U32)MPI_PORTFACTS_PROTOCOL_TARGET << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
+#define MPI_FCPORTPAGE1_FLAGS_PROT_LAN ((U32)MPI_PORTFACTS_PROTOCOL_LAN << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
+#define MPI_FCPORTPAGE1_FLAGS_PROT_LOGBUSADDR ((U32)MPI_PORTFACTS_PROTOCOL_LOGBUSADDR << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
+
+#define MPI_FCPORTPAGE1_FLAGS_NONE_RR_TOV_UNITS (0x00000000)
+#define MPI_FCPORTPAGE1_FLAGS_THOUSANDTH_RR_TOV_UNITS (0x00000010)
+#define MPI_FCPORTPAGE1_FLAGS_TENTH_RR_TOV_UNITS (0x00000030)
+#define MPI_FCPORTPAGE1_FLAGS_TEN_RR_TOV_UNITS (0x00000050)
+
+#define MPI_FCPORTPAGE1_HARD_ALPA_NOT_USED (0xFF)
+
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_MASK (0x0F)
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_1GIG (0x00)
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_2GIG (0x01)
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_4GIG (0x02)
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_10GIG (0x03)
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_AUTO (0x0F)
+
+#define MPI_FCPORTPAGE1_TOPOLOGY_MASK (0x0F)
+#define MPI_FCPORTPAGE1_TOPOLOGY_NLPORT (0x01)
+#define MPI_FCPORTPAGE1_TOPOLOGY_NPORT (0x02)
+#define MPI_FCPORTPAGE1_TOPOLOGY_AUTO (0x0F)
+
+#define MPI_FCPORTPAGE1_ALT_CONN_UNKNOWN (0x00)
+
+#define MPI_FCPORTPAGE1_INITIATOR_DEV_TIMEOUT_MASK (0x7F)
+#define MPI_FCPORTPAGE1_INITIATOR_DEV_UNIT_16 (0x80)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_2
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 NumberActive; /* 04h */
+ U8 ALPA[127]; /* 05h */
+} CONFIG_PAGE_FC_PORT_2, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_2,
+ FCPortPage2_t, MPI_POINTER pFCPortPage2_t;
+
+#define MPI_FCPORTPAGE2_PAGEVERSION (0x01)
+
+
+typedef struct _WWN_FORMAT
+{
+ U64 WWNN; /* 00h */
+ U64 WWPN; /* 08h */
+} WWN_FORMAT, MPI_POINTER PTR_WWN_FORMAT,
+ WWNFormat, MPI_POINTER pWWNFormat;
+
+typedef union _FC_PORT_PERSISTENT_PHYSICAL_ID
+{
+ WWN_FORMAT WWN;
+ U32 Did;
+} FC_PORT_PERSISTENT_PHYSICAL_ID, MPI_POINTER PTR_FC_PORT_PERSISTENT_PHYSICAL_ID,
+ PersistentPhysicalId_t, MPI_POINTER pPersistentPhysicalId_t;
+
+typedef struct _FC_PORT_PERSISTENT
+{
+ FC_PORT_PERSISTENT_PHYSICAL_ID PhysicalIdentifier; /* 00h */
+ U8 TargetID; /* 10h */
+ U8 Bus; /* 11h */
+ U16 Flags; /* 12h */
+} FC_PORT_PERSISTENT, MPI_POINTER PTR_FC_PORT_PERSISTENT,
+ PersistentData_t, MPI_POINTER pPersistentData_t;
+
+#define MPI_PERSISTENT_FLAGS_SHIFT (16)
+#define MPI_PERSISTENT_FLAGS_ENTRY_VALID (0x0001)
+#define MPI_PERSISTENT_FLAGS_SCAN_ID (0x0002)
+#define MPI_PERSISTENT_FLAGS_SCAN_LUNS (0x0004)
+#define MPI_PERSISTENT_FLAGS_BOOT_DEVICE (0x0008)
+#define MPI_PERSISTENT_FLAGS_BY_DID (0x0080)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_FC_PORT_PAGE_3_ENTRY_MAX
+#define MPI_FC_PORT_PAGE_3_ENTRY_MAX (1)
+#endif
+
+typedef struct _CONFIG_PAGE_FC_PORT_3
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ FC_PORT_PERSISTENT Entry[MPI_FC_PORT_PAGE_3_ENTRY_MAX]; /* 04h */
+} CONFIG_PAGE_FC_PORT_3, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_3,
+ FCPortPage3_t, MPI_POINTER pFCPortPage3_t;
+
+#define MPI_FCPORTPAGE3_PAGEVERSION (0x01)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_4
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 PortFlags; /* 04h */
+ U32 PortSettings; /* 08h */
+} CONFIG_PAGE_FC_PORT_4, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_4,
+ FCPortPage4_t, MPI_POINTER pFCPortPage4_t;
+
+#define MPI_FCPORTPAGE4_PAGEVERSION (0x00)
+
+#define MPI_FCPORTPAGE4_PORT_FLAGS_ALTERNATE_CHS (0x00000008)
+
+#define MPI_FCPORTPAGE4_PORT_MASK_INIT_HBA (0x00000030)
+#define MPI_FCPORTPAGE4_PORT_DISABLE_INIT_HBA (0x00000000)
+#define MPI_FCPORTPAGE4_PORT_BIOS_INIT_HBA (0x00000010)
+#define MPI_FCPORTPAGE4_PORT_OS_INIT_HBA (0x00000020)
+#define MPI_FCPORTPAGE4_PORT_BIOS_OS_INIT_HBA (0x00000030)
+#define MPI_FCPORTPAGE4_PORT_REMOVABLE_MEDIA (0x000000C0)
+#define MPI_FCPORTPAGE4_PORT_SPINUP_DELAY_MASK (0x00000F00)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_5_ALIAS_INFO
+{
+ U8 Flags; /* 00h */
+ U8 AliasAlpa; /* 01h */
+ U16 Reserved; /* 02h */
+ U64 AliasWWNN; /* 04h */
+ U64 AliasWWPN; /* 0Ch */
+} CONFIG_PAGE_FC_PORT_5_ALIAS_INFO,
+ MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5_ALIAS_INFO,
+ FcPortPage5AliasInfo_t, MPI_POINTER pFcPortPage5AliasInfo_t;
+
+typedef struct _CONFIG_PAGE_FC_PORT_5
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ CONFIG_PAGE_FC_PORT_5_ALIAS_INFO AliasInfo; /* 04h */
+} CONFIG_PAGE_FC_PORT_5, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5,
+ FCPortPage5_t, MPI_POINTER pFCPortPage5_t;
+
+#define MPI_FCPORTPAGE5_PAGEVERSION (0x02)
+
+#define MPI_FCPORTPAGE5_FLAGS_ALPA_ACQUIRED (0x01)
+#define MPI_FCPORTPAGE5_FLAGS_HARD_ALPA (0x02)
+#define MPI_FCPORTPAGE5_FLAGS_HARD_WWNN (0x04)
+#define MPI_FCPORTPAGE5_FLAGS_HARD_WWPN (0x08)
+#define MPI_FCPORTPAGE5_FLAGS_DISABLE (0x10)
+
+typedef struct _CONFIG_PAGE_FC_PORT_6
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Reserved; /* 04h */
+ U64 TimeSinceReset; /* 08h */
+ U64 TxFrames; /* 10h */
+ U64 RxFrames; /* 18h */
+ U64 TxWords; /* 20h */
+ U64 RxWords; /* 28h */
+ U64 LipCount; /* 30h */
+ U64 NosCount; /* 38h */
+ U64 ErrorFrames; /* 40h */
+ U64 DumpedFrames; /* 48h */
+ U64 LinkFailureCount; /* 50h */
+ U64 LossOfSyncCount; /* 58h */
+ U64 LossOfSignalCount; /* 60h */
+ U64 PrimativeSeqErrCount; /* 68h */
+ U64 InvalidTxWordCount; /* 70h */
+ U64 InvalidCrcCount; /* 78h */
+ U64 FcpInitiatorIoCount; /* 80h */
+} CONFIG_PAGE_FC_PORT_6, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_6,
+ FCPortPage6_t, MPI_POINTER pFCPortPage6_t;
+
+#define MPI_FCPORTPAGE6_PAGEVERSION (0x00)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_7
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Reserved; /* 04h */
+ U8 PortSymbolicName[256]; /* 08h */
+} CONFIG_PAGE_FC_PORT_7, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_7,
+ FCPortPage7_t, MPI_POINTER pFCPortPage7_t;
+
+#define MPI_FCPORTPAGE7_PAGEVERSION (0x00)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_8
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 BitVector[8]; /* 04h */
+} CONFIG_PAGE_FC_PORT_8, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_8,
+ FCPortPage8_t, MPI_POINTER pFCPortPage8_t;
+
+#define MPI_FCPORTPAGE8_PAGEVERSION (0x00)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_9
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U32 Reserved; /* 04h */
+ U64 GlobalWWPN; /* 08h */
+ U64 GlobalWWNN; /* 10h */
+ U32 UnitType; /* 18h */
+ U32 PhysicalPortNumber; /* 1Ch */
+ U32 NumAttachedNodes; /* 20h */
+ U16 IPVersion; /* 24h */
+ U16 UDPPortNumber; /* 26h */
+ U8 IPAddress[16]; /* 28h */
+ U16 Reserved1; /* 38h */
+ U16 TopologyDiscoveryFlags; /* 3Ah */
+} CONFIG_PAGE_FC_PORT_9, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_9,
+ FCPortPage9_t, MPI_POINTER pFCPortPage9_t;
+
+#define MPI_FCPORTPAGE9_PAGEVERSION (0x00)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA
+{
+ U8 Id; /* 10h */
+ U8 ExtId; /* 11h */
+ U8 Connector; /* 12h */
+ U8 Transceiver[8]; /* 13h */
+ U8 Encoding; /* 1Bh */
+ U8 BitRate_100mbs; /* 1Ch */
+ U8 Reserved1; /* 1Dh */
+ U8 Length9u_km; /* 1Eh */
+ U8 Length9u_100m; /* 1Fh */
+ U8 Length50u_10m; /* 20h */
+ U8 Length62p5u_10m; /* 21h */
+ U8 LengthCopper_m; /* 22h */
+ U8 Reseverved2; /* 23h */
+ U8 VendorName[16]; /* 24h */
+ U8 Reserved3; /* 34h */
+ U8 VendorOUI[3]; /* 35h */
+ U8 VendorPN[16]; /* 38h */
+ U8 VendorRev[4]; /* 48h */
+ U16 Wavelength; /* 4Ch */
+ U8 Reserved4; /* 4Eh */
+ U8 CC_BASE; /* 4Fh */
+} CONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA,
+ MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA,
+ FCPortPage10BaseSfpData_t, MPI_POINTER pFCPortPage10BaseSfpData_t;
+
+#define MPI_FCPORT10_BASE_ID_UNKNOWN (0x00)
+#define MPI_FCPORT10_BASE_ID_GBIC (0x01)
+#define MPI_FCPORT10_BASE_ID_FIXED (0x02)
+#define MPI_FCPORT10_BASE_ID_SFP (0x03)
+#define MPI_FCPORT10_BASE_ID_SFP_MIN (0x04)
+#define MPI_FCPORT10_BASE_ID_SFP_MAX (0x7F)
+#define MPI_FCPORT10_BASE_ID_VEND_SPEC_MASK (0x80)
+
+#define MPI_FCPORT10_BASE_EXTID_UNKNOWN (0x00)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF1 (0x01)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF2 (0x02)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF3 (0x03)
+#define MPI_FCPORT10_BASE_EXTID_SEEPROM (0x04)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF5 (0x05)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF6 (0x06)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF7 (0x07)
+#define MPI_FCPORT10_BASE_EXTID_VNDSPC_MASK (0x80)
+
+#define MPI_FCPORT10_BASE_CONN_UNKNOWN (0x00)
+#define MPI_FCPORT10_BASE_CONN_SC (0x01)
+#define MPI_FCPORT10_BASE_CONN_COPPER1 (0x02)
+#define MPI_FCPORT10_BASE_CONN_COPPER2 (0x03)
+#define MPI_FCPORT10_BASE_CONN_BNC_TNC (0x04)
+#define MPI_FCPORT10_BASE_CONN_COAXIAL (0x05)
+#define MPI_FCPORT10_BASE_CONN_FIBERJACK (0x06)
+#define MPI_FCPORT10_BASE_CONN_LC (0x07)
+#define MPI_FCPORT10_BASE_CONN_MT_RJ (0x08)
+#define MPI_FCPORT10_BASE_CONN_MU (0x09)
+#define MPI_FCPORT10_BASE_CONN_SG (0x0A)
+#define MPI_FCPORT10_BASE_CONN_OPT_PIGT (0x0B)
+#define MPI_FCPORT10_BASE_CONN_RSV1_MIN (0x0C)
+#define MPI_FCPORT10_BASE_CONN_RSV1_MAX (0x1F)
+#define MPI_FCPORT10_BASE_CONN_HSSDC_II (0x20)
+#define MPI_FCPORT10_BASE_CONN_CPR_PIGT (0x21)
+#define MPI_FCPORT10_BASE_CONN_RSV2_MIN (0x22)
+#define MPI_FCPORT10_BASE_CONN_RSV2_MAX (0x7F)
+#define MPI_FCPORT10_BASE_CONN_VNDSPC_MASK (0x80)
+
+#define MPI_FCPORT10_BASE_ENCODE_UNSPEC (0x00)
+#define MPI_FCPORT10_BASE_ENCODE_8B10B (0x01)
+#define MPI_FCPORT10_BASE_ENCODE_4B5B (0x02)
+#define MPI_FCPORT10_BASE_ENCODE_NRZ (0x03)
+#define MPI_FCPORT10_BASE_ENCODE_MANCHESTER (0x04)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA
+{
+ U8 Options[2]; /* 50h */
+ U8 BitRateMax; /* 52h */
+ U8 BitRateMin; /* 53h */
+ U8 VendorSN[16]; /* 54h */
+ U8 DateCode[8]; /* 64h */
+ U8 DiagMonitoringType; /* 6Ch */
+ U8 EnhancedOptions; /* 6Dh */
+ U8 SFF8472Compliance; /* 6Eh */
+ U8 CC_EXT; /* 6Fh */
+} CONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA,
+ MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA,
+ FCPortPage10ExtendedSfpData_t, MPI_POINTER pFCPortPage10ExtendedSfpData_t;
+
+#define MPI_FCPORT10_EXT_OPTION1_RATESEL (0x20)
+#define MPI_FCPORT10_EXT_OPTION1_TX_DISABLE (0x10)
+#define MPI_FCPORT10_EXT_OPTION1_TX_FAULT (0x08)
+#define MPI_FCPORT10_EXT_OPTION1_LOS_INVERT (0x04)
+#define MPI_FCPORT10_EXT_OPTION1_LOS (0x02)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_10
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 Flags; /* 04h */
+ U8 Reserved1; /* 05h */
+ U16 Reserved2; /* 06h */
+ U32 HwConfig1; /* 08h */
+ U32 HwConfig2; /* 0Ch */
+ CONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA Base; /* 10h */
+ CONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA Extended; /* 50h */
+ U8 VendorSpecific[32]; /* 70h */
+} CONFIG_PAGE_FC_PORT_10, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_10,
+ FCPortPage10_t, MPI_POINTER pFCPortPage10_t;
+
+#define MPI_FCPORTPAGE10_PAGEVERSION (0x01)
+
+/* standard MODDEF pin definitions (from GBIC spec.) */
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_MASK (0x00000007)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF2 (0x00000001)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF1 (0x00000002)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF0 (0x00000004)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_NOGBIC (0x00000007)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_CPR_IEEE_CX (0x00000006)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_COPPER (0x00000005)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_OPTICAL_LW (0x00000004)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_SEEPROM (0x00000003)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_SW_OPTICAL (0x00000002)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_LX_IEEE_OPT_LW (0x00000001)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_SX_IEEE_OPT_SW (0x00000000)
+
+#define MPI_FCPORTPAGE10_FLAGS_CC_BASE_OK (0x00000010)
+#define MPI_FCPORTPAGE10_FLAGS_CC_EXT_OK (0x00000020)
+
+
+/****************************************************************************
+* FC Device Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_FC_DEVICE_0
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U64 WWNN; /* 04h */
+ U64 WWPN; /* 0Ch */
+ U32 PortIdentifier; /* 14h */
+ U8 Protocol; /* 18h */
+ U8 Flags; /* 19h */
+ U16 BBCredit; /* 1Ah */
+ U16 MaxRxFrameSize; /* 1Ch */
+ U8 ADISCHardALPA; /* 1Eh */
+ U8 PortNumber; /* 1Fh */
+ U8 FcPhLowestVersion; /* 20h */
+ U8 FcPhHighestVersion; /* 21h */
+ U8 CurrentTargetID; /* 22h */
+ U8 CurrentBus; /* 23h */
+} CONFIG_PAGE_FC_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_FC_DEVICE_0,
+ FCDevicePage0_t, MPI_POINTER pFCDevicePage0_t;
+
+#define MPI_FC_DEVICE_PAGE0_PAGEVERSION (0x03)
+
+#define MPI_FC_DEVICE_PAGE0_FLAGS_TARGETID_BUS_VALID (0x01)
+#define MPI_FC_DEVICE_PAGE0_FLAGS_PLOGI_INVALID (0x02)
+#define MPI_FC_DEVICE_PAGE0_FLAGS_PRLI_INVALID (0x04)
+
+#define MPI_FC_DEVICE_PAGE0_PROT_IP (0x01)
+#define MPI_FC_DEVICE_PAGE0_PROT_FCP_TARGET (0x02)
+#define MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR (0x04)
+#define MPI_FC_DEVICE_PAGE0_PROT_FCP_RETRY (0x08)
+
+#define MPI_FC_DEVICE_PAGE0_PGAD_PORT_MASK (MPI_FC_DEVICE_PGAD_PORT_MASK)
+#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_MASK (MPI_FC_DEVICE_PGAD_FORM_MASK)
+#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_NEXT_DID (MPI_FC_DEVICE_PGAD_FORM_NEXT_DID)
+#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_BUS_TID (MPI_FC_DEVICE_PGAD_FORM_BUS_TID)
+#define MPI_FC_DEVICE_PAGE0_PGAD_DID_MASK (MPI_FC_DEVICE_PGAD_ND_DID_MASK)
+#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_MASK (MPI_FC_DEVICE_PGAD_BT_BUS_MASK)
+#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_SHIFT (MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT)
+#define MPI_FC_DEVICE_PAGE0_PGAD_TID_MASK (MPI_FC_DEVICE_PGAD_BT_TID_MASK)
+
+#define MPI_FC_DEVICE_PAGE0_HARD_ALPA_UNKNOWN (0xFF)
+
+/****************************************************************************
+* RAID Volume Config Pages
+****************************************************************************/
+
+typedef struct _RAID_VOL0_PHYS_DISK
+{
+ U16 Reserved; /* 00h */
+ U8 PhysDiskMap; /* 02h */
+ U8 PhysDiskNum; /* 03h */
+} RAID_VOL0_PHYS_DISK, MPI_POINTER PTR_RAID_VOL0_PHYS_DISK,
+ RaidVol0PhysDisk_t, MPI_POINTER pRaidVol0PhysDisk_t;
+
+#define MPI_RAIDVOL0_PHYSDISK_PRIMARY (0x01)
+#define MPI_RAIDVOL0_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _RAID_VOL0_STATUS
+{
+ U8 Flags; /* 00h */
+ U8 State; /* 01h */
+ U16 Reserved; /* 02h */
+} RAID_VOL0_STATUS, MPI_POINTER PTR_RAID_VOL0_STATUS,
+ RaidVol0Status_t, MPI_POINTER pRaidVol0Status_t;
+
+/* RAID Volume Page 0 VolumeStatus defines */
+#define MPI_RAIDVOL0_STATUS_FLAG_ENABLED (0x01)
+#define MPI_RAIDVOL0_STATUS_FLAG_QUIESCED (0x02)
+#define MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x04)
+#define MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x08)
+#define MPI_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x10)
+
+#define MPI_RAIDVOL0_STATUS_STATE_OPTIMAL (0x00)
+#define MPI_RAIDVOL0_STATUS_STATE_DEGRADED (0x01)
+#define MPI_RAIDVOL0_STATUS_STATE_FAILED (0x02)
+#define MPI_RAIDVOL0_STATUS_STATE_MISSING (0x03)
+
+typedef struct _RAID_VOL0_SETTINGS
+{
+ U16 Settings; /* 00h */
+ U8 HotSparePool; /* 01h */ /* MPI_RAID_HOT_SPARE_POOL_ */
+ U8 Reserved; /* 02h */
+} RAID_VOL0_SETTINGS, MPI_POINTER PTR_RAID_VOL0_SETTINGS,
+ RaidVol0Settings, MPI_POINTER pRaidVol0Settings;
+
+/* RAID Volume Page 0 VolumeSettings defines */
+#define MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE (0x0001)
+#define MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART (0x0002)
+#define MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE (0x0004)
+#define MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC (0x0008)
+#define MPI_RAIDVOL0_SETTING_FAST_DATA_SCRUBBING_0102 (0x0020) /* obsolete */
+
+#define MPI_RAIDVOL0_SETTING_MASK_METADATA_SIZE (0x00C0)
+#define MPI_RAIDVOL0_SETTING_64MB_METADATA_SIZE (0x0000)
+#define MPI_RAIDVOL0_SETTING_512MB_METADATA_SIZE (0x0040)
+
+#define MPI_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0010)
+#define MPI_RAIDVOL0_SETTING_USE_DEFAULTS (0x8000)
+
+/* RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
+#define MPI_RAID_HOT_SPARE_POOL_0 (0x01)
+#define MPI_RAID_HOT_SPARE_POOL_1 (0x02)
+#define MPI_RAID_HOT_SPARE_POOL_2 (0x04)
+#define MPI_RAID_HOT_SPARE_POOL_3 (0x08)
+#define MPI_RAID_HOT_SPARE_POOL_4 (0x10)
+#define MPI_RAID_HOT_SPARE_POOL_5 (0x20)
+#define MPI_RAID_HOT_SPARE_POOL_6 (0x40)
+#define MPI_RAID_HOT_SPARE_POOL_7 (0x80)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX
+#define MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
+#endif
+
+typedef struct _CONFIG_PAGE_RAID_VOL_0
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 VolumeID; /* 04h */
+ U8 VolumeBus; /* 05h */
+ U8 VolumeIOC; /* 06h */
+ U8 VolumeType; /* 07h */ /* MPI_RAID_VOL_TYPE_ */
+ RAID_VOL0_STATUS VolumeStatus; /* 08h */
+ RAID_VOL0_SETTINGS VolumeSettings; /* 0Ch */
+ U32 MaxLBA; /* 10h */
+ U32 MaxLBAHigh; /* 14h */
+ U32 StripeSize; /* 18h */
+ U32 Reserved2; /* 1Ch */
+ U32 Reserved3; /* 20h */
+ U8 NumPhysDisks; /* 24h */
+ U8 DataScrubRate; /* 25h */
+ U8 ResyncRate; /* 26h */
+ U8 InactiveStatus; /* 27h */
+ RAID_VOL0_PHYS_DISK PhysDisk[MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX];/* 28h */
+} CONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
+ RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
+
+#define MPI_RAIDVOLPAGE0_PAGEVERSION (0x07)
+
+/* values for RAID Volume Page 0 InactiveStatus field */
+#define MPI_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00)
+#define MPI_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01)
+#define MPI_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02)
+#define MPI_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03)
+#define MPI_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04)
+#define MPI_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05)
+#define MPI_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06)
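+
+/*
+ * Editor-added illustrative sketch (not part of the original LSI header):
+ * the volume capacity is split across the MaxLBA (low 32 bits) and
+ * MaxLBAHigh (high 32 bits) fields of RAID Volume Page 0; host code is
+ * assumed to combine them into one 64-bit block count.
+ */
+#if 0   /* example only -- never compiled */
+static unsigned long long example_raid_vol0_max_lba(CONFIG_PAGE_RAID_VOL_0 *page)
+{
+    return ((unsigned long long)page->MaxLBAHigh << 32) |
+            (unsigned long long)page->MaxLBA;
+}
+#endif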
+
+
+typedef struct _CONFIG_PAGE_RAID_VOL_1
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 VolumeID; /* 04h */
+ U8 VolumeBus; /* 05h */
+ U8 VolumeIOC; /* 06h */
+ U8 Reserved0; /* 07h */
+ U8 GUID[24]; /* 08h */
+ U8 Name[32]; /* 20h */
+ U64 WWID; /* 40h */
+ U32 Reserved1; /* 48h */
+ U32 Reserved2; /* 4Ch */
+} CONFIG_PAGE_RAID_VOL_1, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_1,
+ RaidVolumePage1_t, MPI_POINTER pRaidVolumePage1_t;
+
+#define MPI_RAIDVOLPAGE1_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* RAID Physical Disk Config Pages
+****************************************************************************/
+
+typedef struct _RAID_PHYS_DISK0_ERROR_DATA
+{
+ U8 ErrorCdbByte; /* 00h */
+ U8 ErrorSenseKey; /* 01h */
+ U16 Reserved; /* 02h */
+ U16 ErrorCount; /* 04h */
+ U8 ErrorASC; /* 06h */
+ U8 ErrorASCQ; /* 07h */
+ U16 SmartCount; /* 08h */
+ U8 SmartASC; /* 0Ah */
+ U8 SmartASCQ; /* 0Bh */
+} RAID_PHYS_DISK0_ERROR_DATA, MPI_POINTER PTR_RAID_PHYS_DISK0_ERROR_DATA,
+ RaidPhysDisk0ErrorData_t, MPI_POINTER pRaidPhysDisk0ErrorData_t;
+
+typedef struct _RAID_PHYS_DISK_INQUIRY_DATA
+{
+ U8 VendorID[8]; /* 00h */
+ U8 ProductID[16]; /* 08h */
+ U8 ProductRevLevel[4]; /* 18h */
+ U8 Info[32]; /* 1Ch */
+} RAID_PHYS_DISK0_INQUIRY_DATA, MPI_POINTER PTR_RAID_PHYS_DISK0_INQUIRY_DATA,
+ RaidPhysDisk0InquiryData, MPI_POINTER pRaidPhysDisk0InquiryData;
+
+typedef struct _RAID_PHYS_DISK0_SETTINGS
+{
+ U8 SepID; /* 00h */
+ U8 SepBus; /* 01h */
+ U8 HotSparePool; /* 02h */ /* MPI_RAID_HOT_SPARE_POOL_ */
+ U8 PhysDiskSettings; /* 03h */
+} RAID_PHYS_DISK0_SETTINGS, MPI_POINTER PTR_RAID_PHYS_DISK0_SETTINGS,
+ RaidPhysDiskSettings_t, MPI_POINTER pRaidPhysDiskSettings_t;
+
+typedef struct _RAID_PHYS_DISK0_STATUS
+{
+ U8 Flags; /* 00h */
+ U8 State; /* 01h */
+ U16 Reserved; /* 02h */
+} RAID_PHYS_DISK0_STATUS, MPI_POINTER PTR_RAID_PHYS_DISK0_STATUS,
+ RaidPhysDiskStatus_t, MPI_POINTER pRaidPhysDiskStatus_t;
+
+/* RAID Physical Disk PhysDiskStatus flags */
+
+#define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x01)
+#define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED (0x02)
+#define MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x04)
+#define MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00)
+#define MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x08)
+
+#define MPI_PHYSDISK0_STATUS_ONLINE (0x00)
+#define MPI_PHYSDISK0_STATUS_MISSING (0x01)
+#define MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE (0x02)
+#define MPI_PHYSDISK0_STATUS_FAILED (0x03)
+#define MPI_PHYSDISK0_STATUS_INITIALIZING (0x04)
+#define MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED (0x05)
+#define MPI_PHYSDISK0_STATUS_FAILED_REQUESTED (0x06)
+#define MPI_PHYSDISK0_STATUS_OTHER_OFFLINE (0xFF)
+
+typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_0
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 PhysDiskID; /* 04h */
+ U8 PhysDiskBus; /* 05h */
+ U8 PhysDiskIOC; /* 06h */
+ U8 PhysDiskNum; /* 07h */
+ RAID_PHYS_DISK0_SETTINGS PhysDiskSettings; /* 08h */
+ U32 Reserved1; /* 0Ch */
+ U8 ExtDiskIdentifier[8]; /* 10h */
+ U8 DiskIdentifier[16]; /* 18h */
+ RAID_PHYS_DISK0_INQUIRY_DATA InquiryData; /* 28h */
+ RAID_PHYS_DISK0_STATUS PhysDiskStatus; /* 64h */
+ U32 MaxLBA; /* 68h */
+ RAID_PHYS_DISK0_ERROR_DATA ErrorData; /* 6Ch */
+} CONFIG_PAGE_RAID_PHYS_DISK_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_0,
+ RaidPhysDiskPage0_t, MPI_POINTER pRaidPhysDiskPage0_t;
+
+#define MPI_RAIDPHYSDISKPAGE0_PAGEVERSION (0x02)
+
+
+typedef struct _RAID_PHYS_DISK1_PATH
+{
+ U8 PhysDiskID; /* 00h */
+ U8 PhysDiskBus; /* 01h */
+ U16 Reserved1; /* 02h */
+ U64 WWID; /* 04h */
+ U64 OwnerWWID; /* 0Ch */
+ U8 OwnerIdentifier; /* 14h */
+ U8 Reserved2; /* 15h */
+ U16 Flags; /* 16h */
+} RAID_PHYS_DISK1_PATH, MPI_POINTER PTR_RAID_PHYS_DISK1_PATH,
+ RaidPhysDisk1Path_t, MPI_POINTER pRaidPhysDisk1Path_t;
+
+/* RAID Physical Disk Page 1 Flags field defines */
+#define MPI_RAID_PHYSDISK1_FLAG_BROKEN (0x0002)
+#define MPI_RAID_PHYSDISK1_FLAG_INVALID (0x0001)
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength or NumPhysDiskPaths at runtime.
+ */
+#ifndef MPI_RAID_PHYS_DISK1_PATH_MAX
+#define MPI_RAID_PHYS_DISK1_PATH_MAX (1)
+#endif
+
+typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_1
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 NumPhysDiskPaths; /* 04h */
+ U8 PhysDiskNum; /* 05h */
+ U16 Reserved2; /* 06h */
+ U32 Reserved1; /* 08h */
+ RAID_PHYS_DISK1_PATH Path[MPI_RAID_PHYS_DISK1_PATH_MAX];/* 0Ch */
+} CONFIG_PAGE_RAID_PHYS_DISK_1, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_1,
+ RaidPhysDiskPage1_t, MPI_POINTER pRaidPhysDiskPage1_t;
+
+#define MPI_RAIDPHYSDISKPAGE1_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* LAN Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_LAN_0
+{
+ ConfigPageHeader_t Header; /* 00h */
+ U16 TxRxModes; /* 04h */
+ U16 Reserved; /* 06h */
+ U32 PacketPrePad; /* 08h */
+} CONFIG_PAGE_LAN_0, MPI_POINTER PTR_CONFIG_PAGE_LAN_0,
+ LANPage0_t, MPI_POINTER pLANPage0_t;
+
+#define MPI_LAN_PAGE0_PAGEVERSION (0x01)
+
+#define MPI_LAN_PAGE0_RETURN_LOOPBACK (0x0000)
+#define MPI_LAN_PAGE0_SUPPRESS_LOOPBACK (0x0001)
+#define MPI_LAN_PAGE0_LOOPBACK_MASK (0x0001)
+
+typedef struct _CONFIG_PAGE_LAN_1
+{
+ ConfigPageHeader_t Header; /* 00h */
+ U16 Reserved; /* 04h */
+ U8 CurrentDeviceState; /* 06h */
+ U8 Reserved1; /* 07h */
+ U32 MinPacketSize; /* 08h */
+ U32 MaxPacketSize; /* 0Ch */
+ U32 HardwareAddressLow; /* 10h */
+ U32 HardwareAddressHigh; /* 14h */
+ U32 MaxWireSpeedLow; /* 18h */
+ U32 MaxWireSpeedHigh; /* 1Ch */
+ U32 BucketsRemaining; /* 20h */
+ U32 MaxReplySize; /* 24h */
+ U32 NegWireSpeedLow; /* 28h */
+ U32 NegWireSpeedHigh; /* 2Ch */
+} CONFIG_PAGE_LAN_1, MPI_POINTER PTR_CONFIG_PAGE_LAN_1,
+ LANPage1_t, MPI_POINTER pLANPage1_t;
+
+#define MPI_LAN_PAGE1_PAGEVERSION (0x03)
+
+#define MPI_LAN_PAGE1_DEV_STATE_RESET (0x00)
+#define MPI_LAN_PAGE1_DEV_STATE_OPERATIONAL (0x01)
+
+
+/****************************************************************************
+* Inband Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_INBAND_0
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ MPI_VERSION_FORMAT InbandVersion; /* 04h */
+ U16 MaximumBuffers; /* 08h */
+ U16 Reserved1; /* 0Ah */
+} CONFIG_PAGE_INBAND_0, MPI_POINTER PTR_CONFIG_PAGE_INBAND_0,
+ InbandPage0_t, MPI_POINTER pInbandPage0_t;
+
+#define MPI_INBAND_PAGEVERSION (0x00)
+
+
+
+/****************************************************************************
+* SAS IO Unit Config Pages
+****************************************************************************/
+
+typedef struct _MPI_SAS_IO_UNIT0_PHY_DATA
+{
+ U8 Port; /* 00h */
+ U8 PortFlags; /* 01h */
+ U8 PhyFlags; /* 02h */
+ U8 NegotiatedLinkRate; /* 03h */
+ U32 ControllerPhyDeviceInfo;/* 04h */
+ U16 AttachedDeviceHandle; /* 08h */
+ U16 ControllerDevHandle; /* 0Ah */
+ U32 DiscoveryStatus; /* 0Ch */
+} MPI_SAS_IO_UNIT0_PHY_DATA, MPI_POINTER PTR_MPI_SAS_IO_UNIT0_PHY_DATA,
+ SasIOUnit0PhyData, MPI_POINTER pSasIOUnit0PhyData;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_SAS_IOUNIT0_PHY_MAX
+#define MPI_SAS_IOUNIT0_PHY_MAX (1)
+#endif
+
+typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U16 NvdataVersionDefault; /* 08h */
+ U16 NvdataVersionPersistent; /* 0Ah */
+ U8 NumPhys; /* 0Ch */
+ U8 Reserved2; /* 0Dh */
+ U16 Reserved3; /* 0Eh */
+ MPI_SAS_IO_UNIT0_PHY_DATA PhyData[MPI_SAS_IOUNIT0_PHY_MAX]; /* 10h */
+} CONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0,
+ SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t;
+
+#define MPI_SASIOUNITPAGE0_PAGEVERSION (0x04)
+
+/* values for SAS IO Unit Page 0 PortFlags */
+#define MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS (0x08)
+#define MPI_SAS_IOUNIT0_PORT_FLAGS_0_TARGET_IOC_NUM (0x00)
+#define MPI_SAS_IOUNIT0_PORT_FLAGS_1_TARGET_IOC_NUM (0x04)
+#define MPI_SAS_IOUNIT0_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+
+/* values for SAS IO Unit Page 0 PhyFlags */
+#define MPI_SAS_IOUNIT0_PHY_FLAGS_PHY_DISABLED (0x04)
+#define MPI_SAS_IOUNIT0_PHY_FLAGS_TX_INVERT (0x02)
+#define MPI_SAS_IOUNIT0_PHY_FLAGS_RX_INVERT (0x01)
+
+/* values for SAS IO Unit Page 0 NegotiatedLinkRate */
+#define MPI_SAS_IOUNIT0_RATE_UNKNOWN (0x00)
+#define MPI_SAS_IOUNIT0_RATE_PHY_DISABLED (0x01)
+#define MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION (0x02)
+#define MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI_SAS_IOUNIT0_RATE_1_5 (0x08)
+#define MPI_SAS_IOUNIT0_RATE_3_0 (0x09)
+#define MPI_SAS_IOUNIT0_RATE_6_0 (0x0A)
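+
+/*
+ * Editor-added illustrative sketch (not part of the original LSI header):
+ * a minimal mapping of the NegotiatedLinkRate values above to readable
+ * strings, assuming the field carries exactly one of those values.
+ */
+#if 0   /* example only -- never compiled */
+static const char *example_sas_iounit0_rate_str(U8 NegotiatedLinkRate)
+{
+    switch (NegotiatedLinkRate) {
+    case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED:              return "phy disabled";
+    case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION:  return "negotiation failed";
+    case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:         return "SATA OOB complete";
+    case MPI_SAS_IOUNIT0_RATE_1_5:                       return "1.5 Gbps";
+    case MPI_SAS_IOUNIT0_RATE_3_0:                       return "3.0 Gbps";
+    case MPI_SAS_IOUNIT0_RATE_6_0:                       return "6.0 Gbps";
+    default:                                             return "unknown";
+    }
+}
+#endif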
+
+/* see mpi_sas.h for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
+
+/* values for SAS IO Unit Page 0 DiscoveryStatus */
+#define MPI_SAS_IOUNIT0_DS_LOOP_DETECTED (0x00000001)
+#define MPI_SAS_IOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI_SAS_IOUNIT0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI_SAS_IOUNIT0_DS_EXPANDER_ERR (0x00000008)
+#define MPI_SAS_IOUNIT0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI_SAS_IOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI_SAS_IOUNIT0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI_SAS_IOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI_SAS_IOUNIT0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI_SAS_IOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI_SAS_IOUNIT0_DS_TABLE_LINK (0x00000400)
+#define MPI_SAS_IOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI_SAS_IOUNIT0_DS_MAX_SATA_TARGETS (0x00001000)
+#define MPI_SAS_IOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000)
+
+
+typedef struct _MPI_SAS_IO_UNIT1_PHY_DATA
+{
+ U8 Port; /* 00h */
+ U8 PortFlags; /* 01h */
+ U8 PhyFlags; /* 02h */
+ U8 MaxMinLinkRate; /* 03h */
+ U32 ControllerPhyDeviceInfo; /* 04h */
+ U16 MaxTargetPortConnectTime; /* 08h */
+ U16 Reserved1; /* 0Ah */
+} MPI_SAS_IO_UNIT1_PHY_DATA, MPI_POINTER PTR_MPI_SAS_IO_UNIT1_PHY_DATA,
+ SasIOUnit1PhyData, MPI_POINTER pSasIOUnit1PhyData;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_SAS_IOUNIT1_PHY_MAX
+#define MPI_SAS_IOUNIT1_PHY_MAX (1)
+#endif
+
+typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U16 ControlFlags; /* 08h */
+ U16 MaxNumSATATargets; /* 0Ah */
+ U16 AdditionalControlFlags; /* 0Ch */
+ U16 Reserved1; /* 0Eh */
+ U8 NumPhys; /* 10h */
+ U8 SATAMaxQDepth; /* 11h */
+ U8 ReportDeviceMissingDelay; /* 12h */
+ U8 IODeviceMissingDelay; /* 13h */
+ MPI_SAS_IO_UNIT1_PHY_DATA PhyData[MPI_SAS_IOUNIT1_PHY_MAX]; /* 14h */
+} CONFIG_PAGE_SAS_IO_UNIT_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_1,
+ SasIOUnitPage1_t, MPI_POINTER pSasIOUnitPage1_t;
+
+#define MPI_SASIOUNITPAGE1_PAGEVERSION (0x07)
+
+/* values for SAS IO Unit Page 1 ControlFlags */
+#define MPI_SAS_IOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000)
+#define MPI_SAS_IOUNIT1_CONTROL_SATA_3_0_MAX (0x4000)
+#define MPI_SAS_IOUNIT1_CONTROL_SATA_1_5_MAX (0x2000)
+#define MPI_SAS_IOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
+#define MPI_SAS_IOUNIT1_CONTROL_DISABLE_SAS_HASH (0x0800)
+
+#define MPI_SAS_IOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600)
+#define MPI_SAS_IOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9)
+#define MPI_SAS_IOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x00)
+#define MPI_SAS_IOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x01)
+#define MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x02)
+
+#define MPI_SAS_IOUNIT1_CONTROL_POSTPONE_SATA_INIT (0x0100)
+#define MPI_SAS_IOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
+#define MPI_SAS_IOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
+#define MPI_SAS_IOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
+#define MPI_SAS_IOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010)
+#define MPI_SAS_IOUNIT1_CONTROL_PHY_ENABLE_ORDER_HIGH (0x0008)
+#define MPI_SAS_IOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
+#define MPI_SAS_IOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
+#define MPI_SAS_IOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
+
+/* values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI_SAS_IOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI_SAS_IOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
+#define MPI_SAS_IOUNIT1_ACONTROL_HIDE_NONZERO_ATTACHED_PHY_IDENT (0x0020)
+#define MPI_SAS_IOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI_SAS_IOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI_SAS_IOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI_SAS_IOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI_SAS_IOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+
+/* defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
+#define MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F)
+#define MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16 (0x80)
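+
+/*
+ * Editor-added illustrative sketch (not part of the original LSI header):
+ * decoding ReportDeviceMissingDelay with the two defines above. Assumption:
+ * the masked count is in seconds, and the UNIT_16 flag scales it by 16.
+ */
+#if 0   /* example only -- never compiled */
+static unsigned int example_device_missing_delay_secs(U8 ReportDeviceMissingDelay)
+{
+    unsigned int delay = ReportDeviceMissingDelay &
+                         MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+
+    if (ReportDeviceMissingDelay & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16)
+        delay *= 16;
+
+    return delay;
+}
+#endif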
+
+/* values for SAS IO Unit Page 1 PortFlags */
+#define MPI_SAS_IOUNIT1_PORT_FLAGS_0_TARGET_IOC_NUM (0x00)
+#define MPI_SAS_IOUNIT1_PORT_FLAGS_1_TARGET_IOC_NUM (0x04)
+#define MPI_SAS_IOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+
+/* values for SAS IO Unit Page 1 PhyFlags */
+#define MPI_SAS_IOUNIT1_PHY_FLAGS_PHY_DISABLE (0x04)
+#define MPI_SAS_IOUNIT1_PHY_FLAGS_TX_INVERT (0x02)
+#define MPI_SAS_IOUNIT1_PHY_FLAGS_RX_INVERT (0x01)
+
+/* values for SAS IO Unit Page 1 MaxMinLinkRate */
+#define MPI_SAS_IOUNIT1_MAX_RATE_MASK (0xF0)
+#define MPI_SAS_IOUNIT1_MAX_RATE_1_5 (0x80)
+#define MPI_SAS_IOUNIT1_MAX_RATE_3_0 (0x90)
+#define MPI_SAS_IOUNIT1_MIN_RATE_MASK (0x0F)
+#define MPI_SAS_IOUNIT1_MIN_RATE_1_5 (0x08)
+#define MPI_SAS_IOUNIT1_MIN_RATE_3_0 (0x09)
+
+/* see mpi_sas.h for SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
+
+
+typedef struct _CONFIG_PAGE_SAS_IO_UNIT_2
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U8 NumDevsPerEnclosure; /* 08h */
+ U8 Reserved1; /* 09h */
+ U16 Reserved2; /* 0Ah */
+ U16 MaxPersistentIDs; /* 0Ch */
+ U16 NumPersistentIDsUsed; /* 0Eh */
+ U8 Status; /* 10h */
+ U8 Flags; /* 11h */
+ U16 MaxNumPhysicalMappedIDs;/* 12h */
+} CONFIG_PAGE_SAS_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_2,
+ SasIOUnitPage2_t, MPI_POINTER pSasIOUnitPage2_t;
+
+#define MPI_SASIOUNITPAGE2_PAGEVERSION (0x06)
+
+/* values for SAS IO Unit Page 2 Status field */
+#define MPI_SAS_IOUNIT2_STATUS_DEVICE_LIMIT_EXCEEDED (0x08)
+#define MPI_SAS_IOUNIT2_STATUS_ENCLOSURE_DEVICES_UNMAPPED (0x04)
+#define MPI_SAS_IOUNIT2_STATUS_DISABLED_PERSISTENT_MAPPINGS (0x02)
+#define MPI_SAS_IOUNIT2_STATUS_FULL_PERSISTENT_MAPPINGS (0x01)
+
+/* values for SAS IO Unit Page 2 Flags field */
+#define MPI_SAS_IOUNIT2_FLAGS_DISABLE_PERSISTENT_MAPPINGS (0x01)
+/* Physical Mapping Modes */
+#define MPI_SAS_IOUNIT2_FLAGS_MASK_PHYS_MAP_MODE (0x0E)
+#define MPI_SAS_IOUNIT2_FLAGS_SHIFT_PHYS_MAP_MODE (1)
+#define MPI_SAS_IOUNIT2_FLAGS_NO_PHYS_MAP (0x00)
+#define MPI_SAS_IOUNIT2_FLAGS_DIRECT_ATTACH_PHYS_MAP (0x01)
+#define MPI_SAS_IOUNIT2_FLAGS_ENCLOSURE_SLOT_PHYS_MAP (0x02)
+#define MPI_SAS_IOUNIT2_FLAGS_HOST_ASSIGNED_PHYS_MAP (0x07)
+
+#define MPI_SAS_IOUNIT2_FLAGS_RESERVE_ID_0_FOR_BOOT (0x10)
+#define MPI_SAS_IOUNIT2_FLAGS_DA_STARTING_SLOT (0x20)
+
+
+typedef struct _CONFIG_PAGE_SAS_IO_UNIT_3
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U32 Reserved1; /* 08h */
+ U32 MaxInvalidDwordCount; /* 0Ch */
+ U32 InvalidDwordCountTime; /* 10h */
+ U32 MaxRunningDisparityErrorCount; /* 14h */
+ U32 RunningDisparityErrorTime; /* 18h */
+ U32 MaxLossDwordSynchCount; /* 1Ch */
+ U32 LossDwordSynchCountTime; /* 20h */
+ U32 MaxPhyResetProblemCount; /* 24h */
+ U32 PhyResetProblemTime; /* 28h */
+} CONFIG_PAGE_SAS_IO_UNIT_3, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_3,
+ SasIOUnitPage3_t, MPI_POINTER pSasIOUnitPage3_t;
+
+#define MPI_SASIOUNITPAGE3_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* SAS Expander Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_SAS_EXPANDER_0
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U8 PhysicalPort; /* 08h */
+ U8 Reserved1; /* 09h */
+ U16 EnclosureHandle; /* 0Ah */
+ U64 SASAddress; /* 0Ch */
+ U32 DiscoveryStatus; /* 14h */
+ U16 DevHandle; /* 18h */
+ U16 ParentDevHandle; /* 1Ah */
+ U16 ExpanderChangeCount; /* 1Ch */
+ U16 ExpanderRouteIndexes; /* 1Eh */
+ U8 NumPhys; /* 20h */
+ U8 SASLevel; /* 21h */
+ U8 Flags; /* 22h */
+ U8 Reserved3; /* 23h */
+} CONFIG_PAGE_SAS_EXPANDER_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_EXPANDER_0,
+ SasExpanderPage0_t, MPI_POINTER pSasExpanderPage0_t;
+
+#define MPI_SASEXPANDER0_PAGEVERSION (0x03)
+
+/* values for SAS Expander Page 0 DiscoveryStatus field */
+#define MPI_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001)
+#define MPI_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI_SAS_EXPANDER0_DS_EXPANDER_ERR (0x00000008)
+#define MPI_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400)
+#define MPI_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800)
+
+/* values for SAS Expander Page 0 Flags field */
+#define MPI_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x04)
+#define MPI_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x02)
+#define MPI_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x01)
+
+
+typedef struct _CONFIG_PAGE_SAS_EXPANDER_1
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U8 PhysicalPort; /* 08h */
+ U8 Reserved1; /* 09h */
+ U16 Reserved2; /* 0Ah */
+ U8 NumPhys; /* 0Ch */
+ U8 Phy; /* 0Dh */
+ U16 NumTableEntriesProgrammed; /* 0Eh */
+ U8 ProgrammedLinkRate; /* 10h */
+ U8 HwLinkRate; /* 11h */
+ U16 AttachedDevHandle; /* 12h */
+ U32 PhyInfo; /* 14h */
+ U32 AttachedDeviceInfo; /* 18h */
+ U16 OwnerDevHandle; /* 1Ch */
+ U8 ChangeCount; /* 1Eh */
+ U8 NegotiatedLinkRate; /* 1Fh */
+ U8 PhyIdentifier; /* 20h */
+ U8 AttachedPhyIdentifier; /* 21h */
+ U8 Reserved3; /* 22h */
+ U8 DiscoveryInfo; /* 23h */
+ U32 Reserved4; /* 24h */
+} CONFIG_PAGE_SAS_EXPANDER_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_EXPANDER_1,
+ SasExpanderPage1_t, MPI_POINTER pSasExpanderPage1_t;
+
+#define MPI_SASEXPANDER1_PAGEVERSION (0x01)
+
+/* use MPI_SAS_PHY0_PRATE_ defines for ProgrammedLinkRate */
+
+/* use MPI_SAS_PHY0_HWRATE_ defines for HwLinkRate */
+
+/* use MPI_SAS_PHY0_PHYINFO_ defines for PhyInfo */
+
+/* see mpi_sas.h for SAS Expander Page 1 AttachedDeviceInfo values */
+
+/* values for SAS Expander Page 1 DiscoveryInfo field */
+#define MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+
+/* values for SAS Expander Page 1 NegotiatedLinkRate field */
+#define MPI_SAS_EXPANDER1_NEG_RATE_UNKNOWN (0x00)
+#define MPI_SAS_EXPANDER1_NEG_RATE_PHY_DISABLED (0x01)
+#define MPI_SAS_EXPANDER1_NEG_RATE_FAILED_NEGOTIATION (0x02)
+#define MPI_SAS_EXPANDER1_NEG_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI_SAS_EXPANDER1_NEG_RATE_1_5 (0x08)
+#define MPI_SAS_EXPANDER1_NEG_RATE_3_0 (0x09)
+
+
+/****************************************************************************
+* SAS Device Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_SAS_DEVICE_0
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U16 Slot; /* 08h */
+ U16 EnclosureHandle; /* 0Ah */
+ U64 SASAddress; /* 0Ch */
+ U16 ParentDevHandle; /* 14h */
+ U8 PhyNum; /* 16h */
+ U8 AccessStatus; /* 17h */
+ U16 DevHandle; /* 18h */
+ U8 TargetID; /* 1Ah */
+ U8 Bus; /* 1Bh */
+ U32 DeviceInfo; /* 1Ch */
+ U16 Flags; /* 20h */
+ U8 PhysicalPort; /* 22h */
+ U8 Reserved2; /* 23h */
+} CONFIG_PAGE_SAS_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_0,
+ SasDevicePage0_t, MPI_POINTER pSasDevicePage0_t;
+
+#define MPI_SASDEVICE0_PAGEVERSION (0x05)
+
+/* values for SAS Device Page 0 AccessStatus field */
+#define MPI_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01)
+#define MPI_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02)
+#define MPI_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03)
+#define MPI_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04)
+/* specific values for SATA Init failures */
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19)
+#define MPI_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F)
+
+/* values for SAS Device Page 0 Flags field */
+#define MPI_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200)
+#define MPI_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
+#define MPI_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
+#define MPI_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
+#define MPI_SAS_DEVICE0_FLAGS_MAPPING_PERSISTENT (0x0004)
+#define MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED (0x0002)
+#define MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
+
+/* see mpi_sas.h for SAS Device Page 0 DeviceInfo values */
+
+
+typedef struct _CONFIG_PAGE_SAS_DEVICE_1
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U32 Reserved1; /* 08h */
+ U64 SASAddress; /* 0Ch */
+ U32 Reserved2; /* 14h */
+ U16 DevHandle; /* 18h */
+ U8 TargetID; /* 1Ah */
+ U8 Bus; /* 1Bh */
+ U8 InitialRegDeviceFIS[20];/* 1Ch */
+} CONFIG_PAGE_SAS_DEVICE_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_1,
+ SasDevicePage1_t, MPI_POINTER pSasDevicePage1_t;
+
+#define MPI_SASDEVICE1_PAGEVERSION (0x00)
+
+
+typedef struct _CONFIG_PAGE_SAS_DEVICE_2
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U64 PhysicalIdentifier; /* 08h */
+ U32 EnclosureMapping; /* 10h */
+} CONFIG_PAGE_SAS_DEVICE_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_2,
+ SasDevicePage2_t, MPI_POINTER pSasDevicePage2_t;
+
+#define MPI_SASDEVICE2_PAGEVERSION (0x01)
+
+/* defines for SAS Device Page 2 EnclosureMapping field */
+#define MPI_SASDEVICE2_ENC_MAP_MASK_MISSING_COUNT (0x0000000F)
+#define MPI_SASDEVICE2_ENC_MAP_SHIFT_MISSING_COUNT (0)
+#define MPI_SASDEVICE2_ENC_MAP_MASK_NUM_SLOTS (0x000007F0)
+#define MPI_SASDEVICE2_ENC_MAP_SHIFT_NUM_SLOTS (4)
+#define MPI_SASDEVICE2_ENC_MAP_MASK_START_INDEX (0x001FF800)
+#define MPI_SASDEVICE2_ENC_MAP_SHIFT_START_INDEX (11)
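+
+/*
+ * Editor-added illustrative sketch (not part of the original LSI header):
+ * extracting one sub-field of EnclosureMapping with the mask/shift pairs
+ * above; the other sub-fields follow the same pattern.
+ */
+#if 0   /* example only -- never compiled */
+static U32 example_sas_dev2_num_slots(CONFIG_PAGE_SAS_DEVICE_2 *page)
+{
+    return (page->EnclosureMapping & MPI_SASDEVICE2_ENC_MAP_MASK_NUM_SLOTS) >>
+            MPI_SASDEVICE2_ENC_MAP_SHIFT_NUM_SLOTS;
+}
+#endif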
+
+
+/****************************************************************************
+* SAS PHY Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_SAS_PHY_0
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U16 OwnerDevHandle; /* 08h */
+ U16 Reserved1; /* 0Ah */
+ U64 SASAddress; /* 0Ch */
+ U16 AttachedDevHandle; /* 14h */
+ U8 AttachedPhyIdentifier; /* 16h */
+ U8 Reserved2; /* 17h */
+ U32 AttachedDeviceInfo; /* 18h */
+ U8 ProgrammedLinkRate; /* 1Ch */
+ U8 HwLinkRate; /* 1Dh */
+ U8 ChangeCount; /* 1Eh */
+ U8 Flags; /* 1Fh */
+ U32 PhyInfo; /* 20h */
+} CONFIG_PAGE_SAS_PHY_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_0,
+ SasPhyPage0_t, MPI_POINTER pSasPhyPage0_t;
+
+#define MPI_SASPHY0_PAGEVERSION (0x01)
+
+/* values for SAS PHY Page 0 ProgrammedLinkRate field */
+#define MPI_SAS_PHY0_PRATE_MAX_RATE_MASK (0xF0)
+#define MPI_SAS_PHY0_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI_SAS_PHY0_PRATE_MAX_RATE_1_5 (0x80)
+#define MPI_SAS_PHY0_PRATE_MAX_RATE_3_0 (0x90)
+#define MPI_SAS_PHY0_PRATE_MIN_RATE_MASK (0x0F)
+#define MPI_SAS_PHY0_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI_SAS_PHY0_PRATE_MIN_RATE_1_5 (0x08)
+#define MPI_SAS_PHY0_PRATE_MIN_RATE_3_0 (0x09)
+
+/* values for SAS PHY Page 0 HwLinkRate field */
+#define MPI_SAS_PHY0_HWRATE_MAX_RATE_MASK (0xF0)
+#define MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5 (0x80)
+#define MPI_SAS_PHY0_HWRATE_MAX_RATE_3_0 (0x90)
+#define MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK (0x0F)
+#define MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5 (0x08)
+#define MPI_SAS_PHY0_HWRATE_MIN_RATE_3_0 (0x09)
+
+/* values for SAS PHY Page 0 Flags field */
+#define MPI_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
+
+/* values for SAS PHY Page 0 PhyInfo field */
+#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
+#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_SELECTOR (0x00002000)
+#define MPI_SAS_PHY0_PHYINFO_VIRTUAL_PHY (0x00001000)
+
+#define MPI_SAS_PHY0_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00)
+#define MPI_SAS_PHY0_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8)
+
+#define MPI_SAS_PHY0_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0)
+#define MPI_SAS_PHY0_PHYINFO_DIRECT_ROUTING (0x00000000)
+#define MPI_SAS_PHY0_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010)
+#define MPI_SAS_PHY0_PHYINFO_TABLE_ROUTING (0x00000020)
+
+#define MPI_SAS_PHY0_PHYINFO_MASK_LINK_RATE (0x0000000F)
+#define MPI_SAS_PHY0_PHYINFO_UNKNOWN_LINK_RATE (0x00000000)
+#define MPI_SAS_PHY0_PHYINFO_PHY_DISABLED (0x00000001)
+#define MPI_SAS_PHY0_PHYINFO_NEGOTIATION_FAILED (0x00000002)
+#define MPI_SAS_PHY0_PHYINFO_SATA_OOB_COMPLETE (0x00000003)
+#define MPI_SAS_PHY0_PHYINFO_RATE_1_5 (0x00000008)
+#define MPI_SAS_PHY0_PHYINFO_RATE_3_0 (0x00000009)
+
+
+typedef struct _CONFIG_PAGE_SAS_PHY_1
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U32 Reserved1; /* 08h */
+ U32 InvalidDwordCount; /* 0Ch */
+ U32 RunningDisparityErrorCount; /* 10h */
+ U32 LossDwordSynchCount; /* 14h */
+ U32 PhyResetProblemCount; /* 18h */
+} CONFIG_PAGE_SAS_PHY_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_1,
+ SasPhyPage1_t, MPI_POINTER pSasPhyPage1_t;
+
+#define MPI_SASPHY1_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* SAS Enclosure Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_SAS_ENCLOSURE_0
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U32 Reserved1; /* 08h */
+ U64 EnclosureLogicalID; /* 0Ch */
+ U16 Flags; /* 14h */
+ U16 EnclosureHandle; /* 16h */
+ U16 NumSlots; /* 18h */
+ U16 StartSlot; /* 1Ah */
+ U8 StartTargetID; /* 1Ch */
+ U8 StartBus; /* 1Dh */
+ U8 SEPTargetID; /* 1Eh */
+ U8 SEPBus; /* 1Fh */
+ U32 Reserved2; /* 20h */
+ U32 Reserved3; /* 24h */
+} CONFIG_PAGE_SAS_ENCLOSURE_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ SasEnclosurePage0_t, MPI_POINTER pSasEnclosurePage0_t;
+
+#define MPI_SASENCLOSURE0_PAGEVERSION (0x01)
+
+/* values for SAS Enclosure Page 0 Flags field */
+#define MPI_SAS_ENCLS0_FLAGS_SEP_BUS_ID_VALID (0x0020)
+#define MPI_SAS_ENCLS0_FLAGS_START_BUS_ID_VALID (0x0010)
+
+#define MPI_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
+#define MPI_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
+#define MPI_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
+#define MPI_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
+#define MPI_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
+
+
+/****************************************************************************
+* Log Config Pages
+****************************************************************************/
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check NumLogEntries at runtime.
+ */
+#ifndef MPI_LOG_0_NUM_LOG_ENTRIES
+#define MPI_LOG_0_NUM_LOG_ENTRIES (1)
+#endif
+
+#define MPI_LOG_0_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI_LOG_0_ENTRY
+{
+ U32 TimeStamp; /* 00h */
+ U32 Reserved1; /* 04h */
+ U16 LogSequence; /* 08h */
+ U16 LogEntryQualifier; /* 0Ah */
+ U8 LogData[MPI_LOG_0_LOG_DATA_LENGTH]; /* 0Ch */
+} MPI_LOG_0_ENTRY, MPI_POINTER PTR_MPI_LOG_0_ENTRY,
+ MpiLog0Entry_t, MPI_POINTER pMpiLog0Entry_t;
+
+/* values for Log Page 0 LogEntry LogEntryQualifier field */
+#define MPI_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000)
+#define MPI_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001)
+
+typedef struct _CONFIG_PAGE_LOG_0
+{
+ CONFIG_EXTENDED_PAGE_HEADER Header; /* 00h */
+ U32 Reserved1; /* 08h */
+ U32 Reserved2; /* 0Ch */
+ U16 NumLogEntries; /* 10h */
+ U16 Reserved3; /* 12h */
+ MPI_LOG_0_ENTRY LogEntry[MPI_LOG_0_NUM_LOG_ENTRIES]; /* 14h */
+} CONFIG_PAGE_LOG_0, MPI_POINTER PTR_CONFIG_PAGE_LOG_0,
+ LogPage0_t, MPI_POINTER pLogPage0_t;
+
+#define MPI_LOG_0_PAGEVERSION (0x01)
+
+
+#endif
+
diff --git a/drivers/message/fusion/lsi/mpi_fc.h b/drivers/message/fusion/lsi/mpi_fc.h
new file mode 100644
index 00000000..7d663ce7
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_fc.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2000-2008 LSI Corporation.
+ *
+ *
+ * Name: mpi_fc.h
+ * Title: MPI Fibre Channel messages and structures
+ * Creation Date: June 12, 2000
+ *
+ * mpi_fc.h Version: 01.05.01
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 06-12-00 01.00.02 Added _MSG_FC_ABORT_REPLY structure.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * 12-04-00 01.01.02 Added messages for Common Transport Send and
+ * Primitive Send.
+ * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
+ * and modified the FcPrimitiveSend flags.
+ * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
+ * field.
+ * Added FC_ABORT_TYPE_CT_SEND_REQUEST and
+ * FC_ABORT_TYPE_EXLINKSEND_REQUEST for FcAbort request.
+ * Added MPI_FC_PRIM_SEND_FLAGS_STOP_SEND.
+ * 02-20-01 01.01.05 Started using MPI_POINTER.
+ * 03-27-01 01.01.06 Added Flags field to MSG_LINK_SERVICE_BUFFER_POST_REPLY
+ * and defined MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED.
+ * Added MPI_FC_PRIM_SEND_FLAGS_RESET_LINK define.
+ * Added structure offset comments.
+ * 04-09-01 01.01.07 Added RspLength field to MSG_LINK_SERVICE_RSP_REQUEST.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 09-28-01 01.02.02 Change name of reserved field in
+ * MSG_LINK_SERVICE_RSP_REPLY.
+ * 05-31-02 01.02.03 Adding AliasIndex to FC Direct Access requests.
+ * 01-16-04 01.02.04 Added define for MPI_FC_PRIM_SEND_FLAGS_ML_RESET_LINK.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Original release for MPI v1.5.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_FC_H
+#define MPI_FC_H
+
+
+/*****************************************************************************
+*
+* F C D i r e c t A c c e s s M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* Link Service Buffer Post messages */
+/****************************************************************************/
+
+typedef struct _MSG_LINK_SERVICE_BUFFER_POST_REQUEST
+{
+ U8 BufferPostFlags; /* 00h */
+ U8 BufferCount; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved; /* 04h */
+ U8 Reserved1; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ SGE_TRANS_SIMPLE_UNION SGL; /* 0Ch */
+} MSG_LINK_SERVICE_BUFFER_POST_REQUEST,
+ MPI_POINTER PTR_MSG_LINK_SERVICE_BUFFER_POST_REQUEST,
+ LinkServiceBufferPostRequest_t, MPI_POINTER pLinkServiceBufferPostRequest_t;
+
+#define LINK_SERVICE_BUFFER_POST_FLAGS_PORT_MASK (0x01)
+
+typedef struct _WWNFORMAT
+{
+ U32 PortNameHigh; /* 00h */
+ U32 PortNameLow; /* 04h */
+ U32 NodeNameHigh; /* 08h */
+ U32 NodeNameLow; /* 0Ch */
+} WWNFORMAT,
+ WwnFormat_t;
+
+/* Link Service Buffer Post Reply */
+typedef struct _MSG_LINK_SERVICE_BUFFER_POST_REPLY
+{
+ U8 Flags; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 PortNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved2; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 TransferLength; /* 14h */
+ U32 TransactionContext; /* 18h */
+ U32 Rctl_Did; /* 1Ch */
+ U32 Csctl_Sid; /* 20h */
+ U32 Type_Fctl; /* 24h */
+ U16 SeqCnt; /* 28h */
+ U8 Dfctl; /* 2Ah */
+ U8 SeqId; /* 2Bh */
+ U16 Rxid; /* 2Ch */
+ U16 Oxid; /* 2Eh */
+ U32 Parameter; /* 30h */
+ WWNFORMAT Wwn; /* 34h */
+} MSG_LINK_SERVICE_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY,
+ LinkServiceBufferPostReply_t, MPI_POINTER pLinkServiceBufferPostReply_t;
+
+#define MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED (0x80)
+
+#define MPI_FC_DID_MASK (0x00FFFFFF)
+#define MPI_FC_DID_SHIFT (0)
+#define MPI_FC_RCTL_MASK (0xFF000000)
+#define MPI_FC_RCTL_SHIFT (24)
+#define MPI_FC_SID_MASK (0x00FFFFFF)
+#define MPI_FC_SID_SHIFT (0)
+#define MPI_FC_CSCTL_MASK (0xFF000000)
+#define MPI_FC_CSCTL_SHIFT (24)
+#define MPI_FC_FCTL_MASK (0x00FFFFFF)
+#define MPI_FC_FCTL_SHIFT (0)
+#define MPI_FC_TYPE_MASK (0xFF000000)
+#define MPI_FC_TYPE_SHIFT (24)
+
+/* obsolete name for the above */
+#define FCP_TARGET_DID_MASK (0x00FFFFFF)
+#define FCP_TARGET_DID_SHIFT (0)
+#define FCP_TARGET_RCTL_MASK (0xFF000000)
+#define FCP_TARGET_RCTL_SHIFT (24)
+#define FCP_TARGET_SID_MASK (0x00FFFFFF)
+#define FCP_TARGET_SID_SHIFT (0)
+#define FCP_TARGET_CSCTL_MASK (0xFF000000)
+#define FCP_TARGET_CSCTL_SHIFT (24)
+#define FCP_TARGET_FCTL_MASK (0x00FFFFFF)
+#define FCP_TARGET_FCTL_SHIFT (0)
+#define FCP_TARGET_TYPE_MASK (0xFF000000)
+#define FCP_TARGET_TYPE_SHIFT (24)
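+
+/*
+ * Editor-added illustrative sketch (not part of the original LSI header):
+ * splitting the packed Rctl_Did word of the buffer post reply into its
+ * R_CTL and D_ID components with the MPI_FC_ masks and shifts above.
+ */
+#if 0   /* example only -- never compiled */
+static void example_split_rctl_did(U32 Rctl_Did, U8 *rctl, U32 *did)
+{
+    *rctl = (U8)((Rctl_Did & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT);
+    *did  = (Rctl_Did & MPI_FC_DID_MASK) >> MPI_FC_DID_SHIFT;
+}
+#endif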
+
+
+/****************************************************************************/
+/* Link Service Response messages */
+/****************************************************************************/
+
+typedef struct _MSG_LINK_SERVICE_RSP_REQUEST
+{
+ U8 RspFlags; /* 00h */
+ U8 RspLength; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 Rctl_Did; /* 0Ch */
+ U32 Csctl_Sid; /* 10h */
+ U32 Type_Fctl; /* 14h */
+ U16 SeqCnt; /* 18h */
+ U8 Dfctl; /* 1Ah */
+ U8 SeqId; /* 1Bh */
+ U16 Rxid; /* 1Ch */
+ U16 Oxid; /* 1Eh */
+ U32 Parameter; /* 20h */
+ SGE_SIMPLE_UNION SGL; /* 24h */
+} MSG_LINK_SERVICE_RSP_REQUEST, MPI_POINTER PTR_MSG_LINK_SERVICE_RSP_REQUEST,
+ LinkServiceRspRequest_t, MPI_POINTER pLinkServiceRspRequest_t;
+
+#define LINK_SERVICE_RSP_FLAGS_IMMEDIATE (0x80)
+#define LINK_SERVICE_RSP_FLAGS_PORT_MASK (0x01)
+
+
+/* Link Service Response Reply */
+typedef struct _MSG_LINK_SERVICE_RSP_REPLY
+{
+ U16 Reserved; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved_0100_InitiatorIndex; /* 06h */ /* obsolete InitiatorIndex */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 InitiatorIndex; /* 14h */
+} MSG_LINK_SERVICE_RSP_REPLY, MPI_POINTER PTR_MSG_LINK_SERVICE_RSP_REPLY,
+ LinkServiceRspReply_t, MPI_POINTER pLinkServiceRspReply_t;
+
+
+/****************************************************************************/
+/* Extended Link Service Send messages */
+/****************************************************************************/
+
+typedef struct _MSG_EXLINK_SERVICE_SEND_REQUEST
+{
+ U8 SendFlags; /* 00h */
+ U8 AliasIndex; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U32 MsgFlags_Did; /* 04h */
+ U32 MsgContext; /* 08h */
+ U32 ElsCommandCode; /* 0Ch */
+ SGE_SIMPLE_UNION SGL; /* 10h */
+} MSG_EXLINK_SERVICE_SEND_REQUEST, MPI_POINTER PTR_MSG_EXLINK_SERVICE_SEND_REQUEST,
+ ExLinkServiceSendRequest_t, MPI_POINTER pExLinkServiceSendRequest_t;
+
+#define EX_LINK_SERVICE_SEND_DID_MASK (0x00FFFFFF)
+#define EX_LINK_SERVICE_SEND_DID_SHIFT (0)
+#define EX_LINK_SERVICE_SEND_MSGFLAGS_MASK (0xFF000000)
+#define EX_LINK_SERVICE_SEND_MSGFLAGS_SHIFT (24)
+
+
+/* Extended Link Service Send Reply */
+typedef struct _MSG_EXLINK_SERVICE_SEND_REPLY
+{
+ U8 Reserved; /* 00h */
+ U8 AliasIndex; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 ResponseLength; /* 14h */
+} MSG_EXLINK_SERVICE_SEND_REPLY, MPI_POINTER PTR_MSG_EXLINK_SERVICE_SEND_REPLY,
+ ExLinkServiceSendReply_t, MPI_POINTER pExLinkServiceSendReply_t;
+
+/****************************************************************************/
+/* FC Abort messages */
+/****************************************************************************/
+
+typedef struct _MSG_FC_ABORT_REQUEST
+{
+ U8 AbortFlags; /* 00h */
+ U8 AbortType; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 TransactionContextToAbort; /* 0Ch */
+} MSG_FC_ABORT_REQUEST, MPI_POINTER PTR_MSG_FC_ABORT_REQUEST,
+ FcAbortRequest_t, MPI_POINTER pFcAbortRequest_t;
+
+#define FC_ABORT_FLAG_PORT_MASK (0x01)
+
+#define FC_ABORT_TYPE_ALL_FC_BUFFERS (0x00)
+#define FC_ABORT_TYPE_EXACT_FC_BUFFER (0x01)
+#define FC_ABORT_TYPE_CT_SEND_REQUEST (0x02)
+#define FC_ABORT_TYPE_EXLINKSEND_REQUEST (0x03)
+
+/* FC Abort Reply */
+typedef struct _MSG_FC_ABORT_REPLY
+{
+ U16 Reserved; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_FC_ABORT_REPLY, MPI_POINTER PTR_MSG_FC_ABORT_REPLY,
+ FcAbortReply_t, MPI_POINTER pFcAbortReply_t;
+
+
+/****************************************************************************/
+/* FC Common Transport Send messages */
+/****************************************************************************/
+
+typedef struct _MSG_FC_COMMON_TRANSPORT_SEND_REQUEST
+{
+ U8 SendFlags; /* 00h */
+ U8 AliasIndex; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U32 MsgFlags_Did; /* 04h */
+ U32 MsgContext; /* 08h */
+ U16 CTCommandCode; /* 0Ch */
+ U8 FsType; /* 0Eh */
+ U8 Reserved1; /* 0Fh */
+ SGE_SIMPLE_UNION SGL; /* 10h */
+} MSG_FC_COMMON_TRANSPORT_SEND_REQUEST,
+ MPI_POINTER PTR_MSG_FC_COMMON_TRANSPORT_SEND_REQUEST,
+ FcCommonTransportSendRequest_t, MPI_POINTER pFcCommonTransportSendRequest_t;
+
+#define MPI_FC_CT_SEND_DID_MASK (0x00FFFFFF)
+#define MPI_FC_CT_SEND_DID_SHIFT (0)
+#define MPI_FC_CT_SEND_MSGFLAGS_MASK (0xFF000000)
+#define MPI_FC_CT_SEND_MSGFLAGS_SHIFT (24)
+
+
+/* FC Common Transport Send Reply */
+typedef struct _MSG_FC_COMMON_TRANSPORT_SEND_REPLY
+{
+ U8 Reserved; /* 00h */
+ U8 AliasIndex; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 ResponseLength; /* 14h */
+} MSG_FC_COMMON_TRANSPORT_SEND_REPLY, MPI_POINTER PTR_MSG_FC_COMMON_TRANSPORT_SEND_REPLY,
+ FcCommonTransportSendReply_t, MPI_POINTER pFcCommonTransportSendReply_t;
+
+
+/****************************************************************************/
+/* FC Primitive Send messages */
+/****************************************************************************/
+
+typedef struct _MSG_FC_PRIMITIVE_SEND_REQUEST
+{
+ U8 SendFlags; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 FcPrimitive[4]; /* 0Ch */
+} MSG_FC_PRIMITIVE_SEND_REQUEST, MPI_POINTER PTR_MSG_FC_PRIMITIVE_SEND_REQUEST,
+ FcPrimitiveSendRequest_t, MPI_POINTER pFcPrimitiveSendRequest_t;
+
+#define MPI_FC_PRIM_SEND_FLAGS_PORT_MASK (0x01)
+#define MPI_FC_PRIM_SEND_FLAGS_ML_RESET_LINK (0x02)
+#define MPI_FC_PRIM_SEND_FLAGS_RESET_LINK (0x04)
+#define MPI_FC_PRIM_SEND_FLAGS_STOP_SEND (0x08)
+#define MPI_FC_PRIM_SEND_FLAGS_SEND_ONCE (0x10)
+#define MPI_FC_PRIM_SEND_FLAGS_SEND_AROUND (0x20)
+#define MPI_FC_PRIM_SEND_FLAGS_UNTIL_FULL (0x40)
+#define MPI_FC_PRIM_SEND_FLAGS_FOREVER (0x80)
+
+/* FC Primitive Send Reply */
+typedef struct _MSG_FC_PRIMITIVE_SEND_REPLY
+{
+ U8 SendFlags; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_FC_PRIMITIVE_SEND_REPLY, MPI_POINTER PTR_MSG_FC_PRIMITIVE_SEND_REPLY,
+ FcPrimitiveSendReply_t, MPI_POINTER pFcPrimitiveSendReply_t;
+
+#endif
+
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
new file mode 100644
index 00000000..fa9249b4
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -0,0 +1,868 @@
+
+ ==============================
+ MPI Header File Change History
+ ==============================
+
+ Copyright (c) 2000-2008 LSI Corporation.
+
+ ---------------------------------------
+ Header Set Release Version: 01.05.19
+ Header Set Release Date: 03-28-08
+ ---------------------------------------
+
+ Filename Current version Prior version
+ ---------- --------------- -------------
+ mpi.h 01.05.16 01.05.15
+ mpi_ioc.h 01.05.16 01.05.15
+ mpi_cnfg.h 01.05.18 01.05.17
+ mpi_init.h 01.05.09 01.05.09
+ mpi_targ.h 01.05.06 01.05.06
+ mpi_fc.h 01.05.01 01.05.01
+ mpi_lan.h 01.05.01 01.05.01
+ mpi_raid.h 01.05.05 01.05.05
+ mpi_tool.h 01.05.03 01.05.03
+ mpi_inb.h 01.05.01 01.05.01
+ mpi_sas.h 01.05.05 01.05.05
+ mpi_type.h 01.05.02 01.05.02
+ mpi_history.txt 01.05.19 01.05.18
+
+
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+
+mpi.h
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 05-24-00 00.10.02 Added MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH definition.
+ * 06-06-00 01.00.01 Update MPI_VERSION_MAJOR and MPI_VERSION_MINOR.
+ * 06-22-00 01.00.02 Added MPI_IOCSTATUS_LAN_ definitions.
+ * Removed LAN_SUSPEND function definition.
+ * Added MPI_MSGFLAGS_CONTINUATION_REPLY definition.
+ * 06-30-00 01.00.03 Added MPI_CONTEXT_REPLY_TYPE_LAN definition.
+ * Added MPI_GET/SET_CONTEXT_REPLY_TYPE macros.
+ * 07-27-00 01.00.04 Added MPI_FAULT_ definitions.
+ * Removed MPI_IOCSTATUS_MSG/DATA_XFER_ERROR definitions.
+ * Added MPI_IOCSTATUS_INTERNAL_ERROR definition.
+ * Added MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * 12-04-00 01.01.02 Added new function codes.
+ * 01-09-01 01.01.03 Added more definitions to the system interface section
+ * Added MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT.
+ * 01-25-01 01.01.04 Changed MPI_VERSION_MINOR from 0x00 to 0x01.
+ * 02-20-01 01.01.05 Started using MPI_POINTER.
+ * Added defines for MPI_DIAG_PREVENT_IOC_BOOT and
+ * MPI_DIAG_CLEAR_FLASH_BAD_SIG.
+ * Obsoleted MPI_IOCSTATUS_TARGET_FC_ defines.
+ * 02-27-01 01.01.06 Removed MPI_HOST_INDEX_REGISTER define.
+ * Added function codes for RAID.
+ * 04-09-01 01.01.07 Added alternate define for MPI_DOORBELL_ACTIVE,
+ * MPI_DOORBELL_USED, to better match the spec.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * Changed MPI_VERSION_MINOR from 0x01 to 0x02.
+ * Added define MPI_FUNCTION_TOOLBOX.
+ * 09-28-01 01.02.02 New function code MPI_SCSI_ENCLOSURE_PROCESSOR.
+ * 11-01-01 01.02.03 Changed name to MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR.
+ * 03-14-02 01.02.04 Added MPI_HEADER_VERSION_ defines.
+ * 05-31-02 01.02.05 Bumped MPI_HEADER_VERSION_UNIT.
+ * 07-12-02 01.02.06 Added define for MPI_FUNCTION_MAILBOX.
+ * 09-16-02 01.02.07 Bumped value for MPI_HEADER_VERSION_UNIT.
+ * 11-15-02 01.02.08 Added define MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX and
+ * obsoleted define MPI_IOCSTATUS_TARGET_INVALID_IOCINDEX.
+ * 04-01-03 01.02.09 New IOCStatus code: MPI_IOCSTATUS_FC_EXCHANGE_CANCELED
+ * 06-26-03 01.02.10 Bumped MPI_HEADER_VERSION_UNIT value.
+ * 01-16-04 01.02.11 Added define for MPI_IOCLOGINFO_TYPE_SHIFT.
+ * 04-29-04 01.02.12 Added function codes for MPI_FUNCTION_DIAG_BUFFER_POST
+ * and MPI_FUNCTION_DIAG_RELEASE.
+ * Added MPI_IOCSTATUS_DIAGNOSTIC_RELEASED define.
+ * Bumped MPI_HEADER_VERSION_UNIT value.
+ * 05-11-04 01.03.01 Bumped MPI_VERSION_MINOR for MPI v1.3.
+ * Added codes for Inband.
+ * 08-19-04 01.05.01 Added defines for Host Buffer Access Control doorbell.
+ * Added define for offset of High Priority Request Queue.
+ * Added new function codes and new IOCStatus codes.
+ *                     Added an IOCLogInfo type of SAS.
+ * 12-07-04 01.05.02 Bumped MPI_HEADER_VERSION_UNIT.
+ * 12-09-04 01.05.03 Bumped MPI_HEADER_VERSION_UNIT.
+ * 01-15-05 01.05.04 Bumped MPI_HEADER_VERSION_UNIT.
+ * 02-09-05 01.05.05 Bumped MPI_HEADER_VERSION_UNIT.
+ * 02-22-05 01.05.06 Bumped MPI_HEADER_VERSION_UNIT.
+ * 03-11-05 01.05.07 Removed function codes for SCSI IO 32 and
+ * TargetAssistExtended requests.
+ * Removed EEDP IOCStatus codes.
+ * 06-24-05 01.05.08 Added function codes for SCSI IO 32 and
+ * TargetAssistExtended requests.
+ * Added EEDP IOCStatus codes.
+ * 08-03-05 01.05.09 Bumped MPI_HEADER_VERSION_UNIT.
+ * 08-30-05 01.05.10 Added 2 new IOCStatus codes for Target.
+ * 03-27-06 01.05.11 Bumped MPI_HEADER_VERSION_UNIT.
+ * 10-11-06 01.05.12 Bumped MPI_HEADER_VERSION_UNIT.
+ * 05-24-07 01.05.13 Bumped MPI_HEADER_VERSION_UNIT.
+ * 08-07-07 01.05.14 Bumped MPI_HEADER_VERSION_UNIT.
+ * 01-15-08 01.05.15 Bumped MPI_HEADER_VERSION_UNIT.
+ * 03-28-08 01.05.16 Bumped MPI_HEADER_VERSION_UNIT.
+ * --------------------------------------------------------------------------
+
+mpi_ioc.h
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 05-24-00 00.10.02 Added _MSG_IOC_INIT_REPLY structure.
+ * 06-06-00 01.00.01 Added CurReplyFrameSize field to _MSG_IOC_FACTS_REPLY.
+ * 06-12-00 01.00.02 Added _MSG_PORT_ENABLE_REPLY structure.
+ * Added _MSG_EVENT_ACK_REPLY structure.
+ * Added _MSG_FW_DOWNLOAD_REPLY structure.
+ * Added _MSG_TOOLBOX_REPLY structure.
+ * 06-30-00 01.00.03 Added MaxLanBuckets to _PORT_FACT_REPLY structure.
+ * 07-27-00 01.00.04 Added _EVENT_DATA structure definitions for _SCSI,
+ * _LINK_STATUS, _LOOP_STATE and _LOGOUT.
+ * 08-11-00 01.00.05 Switched positions of MsgLength and Function fields in
+ * _MSG_EVENT_ACK_REPLY structure to match specification.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * Added a value for Manufacturer to WhoInit
+ * 12-04-00 01.01.02 Modified IOCFacts reply, added FWUpload messages, and
+ * removed toolbox message.
+ * 01-09-01 01.01.03 Added event enabled and disabled defines.
+ * Added structures for FwHeader and DataHeader.
+ * Added ImageType to FwUpload reply.
+ * 02-20-01 01.01.04 Started using MPI_POINTER.
+ * 02-27-01 01.01.05 Added event for RAID status change and its event data.
+ * Added IocNumber field to MSG_IOC_FACTS_REPLY.
+ * 03-27-01 01.01.06 Added defines for ProductId field of MPI_FW_HEADER.
+ * Added structure offset comments.
+ * 04-09-01 01.01.07 Added structure EVENT_DATA_EVENT_CHANGE.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * New format for FWVersion and ProductId in
+ * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER.
+ * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
+ * related structure and defines.
+ * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED.
+ * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE.
+ * Replaced a reserved field in MSG_IOC_FACTS_REPLY with
+ * IOCExceptions and changed DataImageSize to reserved.
+ * Added MPI_FW_DOWNLOAD_ITYPE_NVSTORE_DATA and
+ * MPI_FW_UPLOAD_ITYPE_NVDATA.
+ * 09-28-01 01.02.03 Modified Event Data for Integrated RAID.
+ * 11-01-01 01.02.04 Added defines for MPI_EXT_IMAGE_HEADER ImageType field.
+ * 03-14-02 01.02.05 Added HeaderVersion field to MSG_IOC_FACTS_REPLY.
+ * 05-31-02 01.02.06 Added define for
+ * MPI_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID.
+ * Added AliasIndex to EVENT_DATA_LOGOUT structure.
+ * 04-01-03 01.02.07 Added defines for MPI_FW_HEADER_SIGNATURE_.
+ * 06-26-03 01.02.08 Added new values to the product family defines.
+ * 04-29-04 01.02.09 Added IOCCapabilities field to MSG_IOC_FACTS_REPLY and
+ * added related defines.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Added four new fields to MSG_IOC_INIT.
+ * Added three new fields to MSG_IOC_FACTS_REPLY.
+ * Defined four new bits for the IOCCapabilities field of
+ * the IOCFacts reply.
+ * Added two new PortTypes for the PortFacts reply.
+ * Added six new events along with their EventData
+ * structures.
+ * Added a new MsgFlag to the FwDownload request to
+ * indicate last segment.
+ * Defined a new image type of boot loader.
+ * Added FW family codes for SAS product families.
+ * 10-05-04 01.05.02 Added ReplyFifoHostSignalingAddr field to
+ * MSG_IOC_FACTS_REPLY.
+ * 12-07-04 01.05.03 Added more defines for SAS Discovery Error event.
+ * 12-09-04 01.05.04 Added Unsupported device to SAS Device event.
+ * 01-15-05 01.05.05 Added event data for SAS SES Event.
+ * 02-09-05 01.05.06 Added MPI_FW_UPLOAD_ITYPE_FW_BACKUP define.
+ * 02-22-05 01.05.07 Added Host Page Buffer Persistent flag to IOC Facts
+ * Reply and IOC Init Request.
+ * 03-11-05 01.05.08 Added family code for 1068E family.
+ * Removed IOCFacts Reply EEDP Capability bit.
+ * 06-24-05 01.05.09 Added 5 new IOCFacts Reply IOCCapabilities bits.
+ * Added Max SATA Targets to SAS Discovery Error event.
+ * 08-30-05 01.05.10 Added 4 new events and their event data structures.
+ * Added new ReasonCode value for SAS Device Status Change
+ * event.
+ * Added new family code for FC949E.
+ * 03-27-06 01.05.11 Added MPI_IOCFACTS_CAPABILITY_TLR.
+ * Added additional Reason Codes and more event data fields
+ * to EVENT_DATA_SAS_DEVICE_STATUS_CHANGE.
+ * Added EVENT_DATA_SAS_BROADCAST_PRIMITIVE structure and
+ * new event.
+ * Added MPI_EVENT_SAS_SMP_ERROR and event data structure.
+ * Added MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE and event
+ * data structure.
+ * Added MPI_EVENT_SAS_INIT_TABLE_OVERFLOW and event
+ * data structure.
+ * Added MPI_EXT_IMAGE_TYPE_INITIALIZATION.
+ * 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
+ * Added MaxInitiators field to PortFacts reply.
+ * Added SAS Device Status Change ReasonCode for
+ * asynchronous notification.
+ * Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
+ * data structure.
+ * Added new ImageType values for FWDownload and FWUpload
+ * requests.
+ * 02-28-07 01.05.13 Added MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT for SAS
+ * Broadcast Event Data (replacing _RESERVED2).
+ * For Discovery Error Event Data DiscoveryStatus field,
+ * replaced _MULTPL_PATHS with _UNSUPPORTED_DEVICE and
+ * added _MULTI_PORT_DOMAIN.
+ * 05-24-07 01.05.14 Added Common Boot Block type to FWDownload Request.
+ * Added Common Boot Block type to FWUpload Request.
+ * 08-07-07 01.05.15 Added MPI_EVENT_SAS_INIT_RC_REMOVED define.
+ * Added MPI_EVENT_IR2_RC_DUAL_PORT_ADDED and
+ * MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED for IR2 event data.
+ * Added SASAddress field to SAS Initiator Device Table
+ * Overflow event data structure.
+ * 03-28-08 01.05.16 Added two new ReasonCode values to SAS Device Status
+ * Change Event data to indicate completion of internally
+ * generated task management.
+ * Added MPI_EVENT_DSCVRY_ERR_DS_SATA_INIT_FAILURE define.
+ * Added MPI_EVENT_SAS_INIT_RC_INACCESSIBLE define.
+ * --------------------------------------------------------------------------
+
+mpi_cnfg.h
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 06-08-00 01.00.02 Added _PAGEVERSION definitions for all pages.
+ * Added FcPhLowestVersion, FcPhHighestVersion, Reserved2
+ * fields to FC_DEVICE_0 page, updated the page version.
+ * Changed _FREE_RUNNING_CLOCK to _PACING_TRANSFERS in
+ * SCSI_PORT_0, SCSI_DEVICE_0 and SCSI_DEVICE_1 pages
+ * and updated the page versions.
+ * Added _RESPONSE_ID_MASK definition to SCSI_PORT_1
+ * page and updated the page version.
+ * Added Information field and _INFO_PARAMS_NEGOTIATED
+ *                      definition to SCSI_DEVICE_0 page.
+ * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the
+ * page version.
+ * Added BucketsRemaining to LAN_1 page, redefined the
+ * state values, and updated the page version.
+ * Revised bus width definitions in SCSI_PORT_0,
+ * SCSI_DEVICE_0 and SCSI_DEVICE_1 pages.
+ * 06-30-00 01.00.04 Added MaxReplySize to LAN_1 page and updated the page
+ * version.
+ * Moved FC_DEVICE_0 PageAddress description to spec.
+ * 07-27-00 01.00.05 Corrected the SubsystemVendorID and SubsystemID field
+ * widths in IOC_0 page and updated the page version.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * Added Manufacturing pages, IO Unit Page 2, SCSI SPI
+ * Port Page 2, FC Port Page 4, FC Port Page 5
+ * 12-04-00 01.01.03 Config page changes to match MPI rev 1.00.01.
+ * 12-05-00 01.01.04 Modified config page actions.
+ * 01-09-01 01.01.05 Added defines for page address formats.
+ * Data size for Manufacturing pages 2 and 3 no longer
+ * defined here.
+ * Io Unit Page 2 size is fixed at 4 adapters and some
+ * flags were changed.
+ * SCSI Port Page 2 Device Settings modified.
+ * New fields added to FC Port Page 0 and some flags
+ * cleaned up.
+ * Removed impedance flash from FC Port Page 1.
+ * Added FC Port pages 6 and 7.
+ * 01-25-01 01.01.06 Added MaxInitiators field to FcPortPage0.
+ * 01-29-01 01.01.07 Changed some defines to make them 32 character unique.
+ * Added some LinkType defines for FcPortPage0.
+ * 02-20-01 01.01.08 Started using MPI_POINTER.
+ * 02-27-01 01.01.09 Replaced MPI_CONFIG_PAGETYPE_SCSI_LUN with
+ * MPI_CONFIG_PAGETYPE_RAID_VOLUME.
+ * Added definitions and structures for IOC Page 2 and
+ * RAID Volume Page 2.
+ * 03-27-01 01.01.10 Added CONFIG_PAGE_FC_PORT_8 and CONFIG_PAGE_FC_PORT_9.
+ * CONFIG_PAGE_FC_PORT_3 now supports persistent by DID.
+ * Added VendorId and ProductRevLevel fields to
+ * RAIDVOL2_IM_PHYS_ID struct.
+ * Modified values for MPI_FCPORTPAGE0_FLAGS_ATTACH_
+ * defines to make them compatible to MPI version 1.0.
+ * Added structure offset comments.
+ * 04-09-01 01.01.11 Added some new defines for the PageAddress field and
+ * removed some obsolete ones.
+ * Added IO Unit Page 3.
+ * Modified defines for Scsi Port Page 2.
+ * Modified RAID Volume Pages.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * Added SepID and SepBus to RVP2 IMPhysicalDisk struct.
+ * Added defines for the SEP bits in RVP2 VolumeSettings.
+ * Modified the DeviceSettings field in RVP2 to use the
+ * proper structure.
+ * Added defines for SES, SAF-TE, and cross channel for
+ * IOCPage2 CapabilitiesFlags.
+ * Removed define for MPI_IOUNITPAGE2_FLAGS_RAID_DISABLE.
+ * Removed define for
+ * MPI_SCSIPORTPAGE2_PORT_FLAGS_PARITY_ENABLE.
+ * Added define for MPI_CONFIG_PAGEATTR_RO_PERSISTENT.
+ * 08-29-01 01.02.02 Fixed value for MPI_MANUFACTPAGE_DEVID_53C1035.
+ * Added defines for MPI_FCPORTPAGE1_FLAGS_HARD_ALPA_ONLY
+ * and MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY.
+ * Removed MPI_SCSIPORTPAGE0_CAP_PACING_TRANSFERS,
+ *                      MPI_SCSIDEVPAGE0_NP_PACING_TRANSFERS,
+ * MPI_SCSIDEVPAGE1_RP_PACING_TRANSFERS, and
+ * MPI_SCSIDEVPAGE1_CONF_PPR_ALLOWED.
+ * Added defines for MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED
+ * and MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED.
+ * Added OnBusTimerValue to CONFIG_PAGE_SCSI_PORT_1.
+ * Added rejected bits to SCSI Device Page 0 Information.
+ * Increased size of ALPA array in FC Port Page 2 by one
+ * and removed a one byte reserved field.
+ * 09-28-01 01.02.03 Swapped NegWireSpeedLow and NegWireSpeedLow in
+ * CONFIG_PAGE_LAN_1 to match preferred 64-bit ordering.
+ * Added structures for Manufacturing Page 4, IO Unit
+ * Page 3, IOC Page 3, IOC Page 4, RAID Volume Page 0, and
+ * RAID PhysDisk Page 0.
+ * 10-04-01 01.02.04 Added define for MPI_CONFIG_PAGETYPE_RAID_PHYSDISK.
+ * Modified some of the new defines to make them 32
+ * character unique.
+ * Modified how variable length pages (arrays) are defined.
+ * Added generic defines for hot spare pools and RAID
+ * volume types.
+ * 11-01-01 01.02.05 Added define for MPI_IOUNITPAGE1_DISABLE_IR.
+ * 03-14-02 01.02.06 Added PCISlotNum field to CONFIG_PAGE_IOC_1 along with
+ * related define, and bumped the page version define.
+ * 05-31-02 01.02.07 Added a Flags field to CONFIG_PAGE_IOC_2_RAID_VOL in a
+ * reserved byte and added a define.
+ * Added define for
+ * MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE.
+ * Added new config page: CONFIG_PAGE_IOC_5.
+ * Added MaxAliases, MaxHardAliases, and NumCurrentAliases
+ * fields to CONFIG_PAGE_FC_PORT_0.
+ * Added AltConnector and NumRequestedAliases fields to
+ * CONFIG_PAGE_FC_PORT_1.
+ * Added new config page: CONFIG_PAGE_FC_PORT_10.
+ * 07-12-02 01.02.08 Added more MPI_MANUFACTPAGE_DEVID_ defines.
+ * Added additional MPI_SCSIDEVPAGE0_NP_ defines.
+ * Added more MPI_SCSIDEVPAGE1_RP_ defines.
+ * Added define for
+ * MPI_SCSIDEVPAGE1_CONF_EXTENDED_PARAMS_ENABLE.
+ * Added new config page: CONFIG_PAGE_SCSI_DEVICE_3.
+ * Modified MPI_FCPORTPAGE5_FLAGS_ defines.
+ * 09-16-02 01.02.09 Added MPI_SCSIDEVPAGE1_CONF_FORCE_PPR_MSG define.
+ * 11-15-02 01.02.10 Added ConnectedID defines for CONFIG_PAGE_SCSI_PORT_0.
+ * Added more Flags defines for CONFIG_PAGE_FC_PORT_1.
+ * Added more Flags defines for CONFIG_PAGE_FC_DEVICE_0.
+ * 04-01-03 01.02.11 Added RR_TOV field and additional Flags defines for
+ * CONFIG_PAGE_FC_PORT_1.
+ * Added define MPI_FCPORTPAGE5_FLAGS_DISABLE to disable
+ * an alias.
+ * Added more device id defines.
+ * 06-26-03 01.02.12 Added MPI_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID define.
+ * Added TargetConfig and IDConfig fields to
+ * CONFIG_PAGE_SCSI_PORT_1.
+ * Added more PortFlags defines for CONFIG_PAGE_SCSI_PORT_2
+ * to control DV.
+ * Added more Flags defines for CONFIG_PAGE_FC_PORT_1.
+ * In CONFIG_PAGE_FC_DEVICE_0, replaced Reserved1 field
+ * with ADISCHardALPA.
+ * Added MPI_FC_DEVICE_PAGE0_PROT_FCP_RETRY define.
+ * 01-16-04 01.02.13 Added InitiatorDeviceTimeout and InitiatorIoPendTimeout
+ * fields and related defines to CONFIG_PAGE_FC_PORT_1.
+ * Added define for
+ * MPI_FCPORTPAGE1_FLAGS_SOFT_ALPA_FALLBACK.
+ * Added new fields to the substructures of
+ * CONFIG_PAGE_FC_PORT_10.
+ * 04-29-04 01.02.14 Added define for IDP bit for CONFIG_PAGE_SCSI_PORT_0,
+ * CONFIG_PAGE_SCSI_DEVICE_0, and
+ * CONFIG_PAGE_SCSI_DEVICE_1. Also bumped Page Version for
+ * these pages.
+ * 05-11-04 01.03.01 Added structure for CONFIG_PAGE_INBAND_0.
+ * 08-19-04 01.05.01 Modified MSG_CONFIG request to support extended config
+ * pages.
+ * Added a new structure for extended config page header.
+ * Added new extended config pages types and structures for
+ * SAS IO Unit, SAS Expander, SAS Device, and SAS PHY.
+ * Replaced a reserved byte in CONFIG_PAGE_MANUFACTURING_4
+ * to add a Flags field.
+ * Two new Manufacturing config pages (5 and 6).
+ * Two new bits defined for IO Unit Page 1 Flags field.
+ * Modified CONFIG_PAGE_IO_UNIT_2 to add three new fields
+ * to specify the BIOS boot device.
+ * Four new Flags bits defined for IO Unit Page 2.
+ * Added IO Unit Page 4.
+ * Added EEDP Flags settings to IOC Page 1.
+ * Added new BIOS Page 1 config page.
+ * 10-05-04 01.05.02 Added define for
+ * MPI_IOCPAGE1_INITIATOR_CONTEXT_REPLY_DISABLE.
+ * Added new Flags field to CONFIG_PAGE_MANUFACTURING_5 and
+ * associated defines.
+ * Added more defines for SAS IO Unit Page 0
+ * DiscoveryStatus field.
+ * Added define for MPI_SAS_IOUNIT0_DS_SUBTRACTIVE_LINK
+ * and MPI_SAS_IOUNIT0_DS_TABLE_LINK.
+ * Added defines for Physical Mapping Modes to SAS IO Unit
+ * Page 2.
+ * Added define for
+ * MPI_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH.
+ * 10-27-04 01.05.03 Added defines for new SAS PHY page addressing mode.
+ * Added defines for MaxTargetSpinUp to BIOS Page 1.
+ * Added 5 new ControlFlags defines for SAS IO Unit
+ * Page 1.
+ * Added MaxNumPhysicalMappedIDs field to SAS IO Unit
+ * Page 2.
+ * Added AccessStatus field to SAS Device Page 0 and added
+ * new Flags bits for supported SATA features.
+ * 12-07-04 01.05.04 Added config page structures for BIOS Page 2, RAID
+ * Volume Page 1, and RAID Physical Disk Page 1.
+ * Replaced IO Unit Page 1 BootTargetID,BootBus, and
+ * BootAdapterNum with reserved field.
+ * Added DataScrubRate and ResyncRate to RAID Volume
+ * Page 0.
+ * Added MPI_SAS_IOUNIT2_FLAGS_RESERVE_ID_0_FOR_BOOT
+ * define.
+ * 12-09-04 01.05.05 Added Target Mode Large CDB Enable to FC Port Page 1
+ * Flags field.
+ * Added Auto Port Config flag define for SAS IOUNIT
+ * Page 1 ControlFlags.
+ * Added Disabled bad Phy define to Expander Page 1
+ * Discovery Info field.
+ * Added SAS/SATA device support to SAS IOUnit Page 1
+ * ControlFlags.
+ *                      Added Unsupported device to SAS Dev Page 0 Flags field.
+ *                      Added a define to disable use of the SATA Hash
+ *                      Address in SAS IO Unit Page 1 ControlFlags.
+ * 01-15-05 01.05.06 Added defaults for data scrub rate and resync rate to
+ * Manufacturing Page 4.
+ * Added new defines for BIOS Page 1 IOCSettings field.
+ * Added ExtDiskIdentifier field to RAID Physical Disk
+ * Page 0.
+ * Added new defines for SAS IO Unit Page 1 ControlFlags
+ * and to SAS Device Page 0 Flags to control SATA devices.
+ * Added defines and structures for the new Log Page 0, a
+ * new type of configuration page.
+ * 02-09-05 01.05.07 Added InactiveStatus field to RAID Volume Page 0.
+ * Added WWID field to RAID Volume Page 1.
+ * Added PhysicalPort field to SAS Expander pages 0 and 1.
+ * 03-11-05 01.05.08 Removed the EEDP flags from IOC Page 1.
+ * Added Enclosure/Slot boot device format to BIOS Page 2.
+ * New status value for RAID Volume Page 0 VolumeStatus
+ * (VolumeState subfield).
+ * New value for RAID Physical Page 0 InactiveStatus.
+ * Added Inactive Volume Member flag RAID Physical Disk
+ * Page 0 PhysDiskStatus field.
+ * New physical mapping mode in SAS IO Unit Page 2.
+ * Added CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * Added Slot and Enclosure fields to SAS Device Page 0.
+ * 06-24-05 01.05.09 Added EEDP defines to IOC Page 1.
+ * Added more RAID type defines to IOC Page 2.
+ * Added Port Enable Delay settings to BIOS Page 1.
+ * Added Bad Block Table Full define to RAID Volume Page 0.
+ * Added Previous State defines to RAID Physical Disk
+ * Page 0.
+ * Added Max Sata Targets define for DiscoveryStatus field
+ * of SAS IO Unit Page 0.
+ * Added Device Self Test to Control Flags of SAS IO Unit
+ * Page 1.
+ * Added Direct Attach Starting Slot Number define for SAS
+ * IO Unit Page 2.
+ * Added new fields in SAS Device Page 2 for enclosure
+ * mapping.
+ * Added OwnerDevHandle and Flags field to SAS PHY Page 0.
+ * Added IOC GPIO Flags define to SAS Enclosure Page 0.
+ * Fixed the value for MPI_SAS_IOUNIT1_CONTROL_DEV_SATA_SUPPORT.
+ * 08-03-05 01.05.10 Removed ISDataScrubRate and ISResyncRate from
+ * Manufacturing Page 4.
+ * Added MPI_IOUNITPAGE1_SATA_WRITE_CACHE_DISABLE bit.
+ * Added NumDevsPerEnclosure field to SAS IO Unit page 2.
+ * Added MPI_SAS_IOUNIT2_FLAGS_HOST_ASSIGNED_PHYS_MAP
+ * define.
+ * Added EnclosureHandle field to SAS Expander page 0.
+ * Removed redundant NumTableEntriesProg field from SAS
+ * Expander Page 1.
+ * 08-30-05 01.05.11 Added DeviceID for FC949E and changed the DeviceID for
+ * SAS1078.
+ * Added more defines for Manufacturing Page 4 Flags field.
+ * Added more defines for IOCSettings and added
+ * ExpanderSpinup field to Bios Page 1.
+ * Added postpone SATA Init bit to SAS IO Unit Page 1
+ * ControlFlags.
+ * Changed LogEntry format for Log Page 0.
+ * 03-27-06 01.05.12 Added two new Flags defines for Manufacturing Page 4.
+ * Added Manufacturing Page 7.
+ * Added MPI_IOCPAGE2_CAP_FLAGS_RAID_64_BIT_ADDRESSING.
+ * Added IOC Page 6.
+ * Added PrevBootDeviceForm field to CONFIG_PAGE_BIOS_2.
+ * Added MaxLBAHigh field to RAID Volume Page 0.
+ * Added Nvdata version fields to SAS IO Unit Page 0.
+ * Added AdditionalControlFlags, MaxTargetPortConnectTime,
+ * ReportDeviceMissingDelay, and IODeviceMissingDelay
+ * fields to SAS IO Unit Page 1.
+ * 10-11-06 01.05.13 Added NumForceWWID field and ForceWWID array to
+ * Manufacturing Page 5.
+ * Added Manufacturing pages 8 through 10.
+ * Added defines for supported metadata size bits in
+ * CapabilitiesFlags field of IOC Page 6.
+ * Added defines for metadata size bits in VolumeSettings
+ * field of RAID Volume Page 0.
+ * Added SATA Link Reset settings, Enable SATA Asynchronous
+ * Notification bit, and HideNonZeroAttachedPhyIdentifiers
+ * bit to AdditionalControlFlags field of SAS IO Unit
+ * Page 1.
+ * Added defines for Enclosure Devices Unmapped and
+ * Device Limit Exceeded bits in Status field of SAS IO
+ * Unit Page 2.
+ * Added more AccessStatus values for SAS Device Page 0.
+ * Added bit for SATA Asynchronous Notification Support in
+ * Flags field of SAS Device Page 0.
+ * 02-28-07 01.05.14 Added ExtFlags field to Manufacturing Page 4.
+ * Added Disable SMART Polling for CapabilitiesFlags of
+ * IOC Page 6.
+ * Added Disable SMART Polling to DeviceSettings of BIOS
+ * Page 1.
+ * Added Multi-Port Domain bit for DiscoveryStatus field
+ *                      of SAS IO Unit Page 0.
+ * Added Multi-Port Domain Illegal flag for SAS IO Unit
+ * Page 1 AdditionalControlFlags field.
+ * 05-24-07 01.05.15 Added Hide Physical Disks with Non-Integrated RAID
+ * Metadata bit to Manufacturing Page 4 ExtFlags field.
+ * Added Internal Connector to End Device Present bit to
+ * Expander Page 0 Flags field.
+ * Fixed define for
+ * MPI_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED.
+ * 08-07-07 01.05.16 Added MPI_IOCPAGE6_CAP_FLAGS_MULTIPORT_DRIVE_SUPPORT
+ * define.
+ * Added BIOS Page 4 structure.
+ * Added MPI_RAID_PHYS_DISK1_PATH_MAX define for RAID
+ *                      Physical Disk Page 1.
+ * 01-15-08 01.05.17 Added additional bit defines for ExtFlags field of
+ * Manufacturing Page 4.
+ * Added Solid State Drives Supported bit to IOC Page 6
+ * Capabilities Flags.
+ * Added new value for AccessStatus field of SAS Device
+ * Page 0 (_SATA_NEEDS_INITIALIZATION).
+ * 03-28-08 01.05.18 Defined new bits in Manufacturing Page 4 ExtFlags field
+ * to control coercion size and the mixing of SAS and SATA
+ * SSD drives.
+ * --------------------------------------------------------------------------
+
+mpi_init.h
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 05-24-00 00.10.02 Added SenseBufferLength to _MSG_SCSI_IO_REPLY.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 06-08-00 01.00.02 Added MPI_SCSI_RSP_INFO_ definitions.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * 12-04-00 01.01.02 Added MPI_SCSIIO_CONTROL_NO_DISCONNECT.
+ * 02-20-01 01.01.03 Started using MPI_POINTER.
+ * 03-27-01 01.01.04 Added structure offset comments.
+ * 04-10-01 01.01.05 Added new MsgFlag for MSG_SCSI_TASK_MGMT.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 08-29-01 01.02.02 Added MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET.
+ * Added MPI_SCSI_STATE_QUEUE_TAG_REJECTED for
+ * MSG_SCSI_IO_REPLY.
+ * 09-28-01 01.02.03 Added structures and defines for SCSI Enclosure
+ * Processor messages.
+ * 10-04-01 01.02.04 Added defines for SEP request Action field.
+ * 05-31-02 01.02.05 Added MPI_SCSIIO_MSGFLGS_CMD_DETERMINES_DATA_DIR define
+ * for SCSI IO requests.
+ * 11-15-02 01.02.06 Added special extended SCSI Status defines for FCP.
+ * 06-26-03 01.02.07 Added MPI_SCSI_STATUS_FCPEXT_UNASSIGNED define.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Added MsgFlags defines for EEDP to SCSI IO request.
+ * Added new word to MSG_SCSI_IO_REPLY to add TaskTag field
+ * and a reserved U16.
+ * Added new MSG_SCSI_IO32_REQUEST structure.
+ * Added a TaskType of Clear Task Set to SCSI
+ * Task Management request.
+ * 12-07-04 01.05.02 Added support for Task Management Query Task.
+ * 01-15-05 01.05.03 Modified SCSI Enclosure Processor Request to support
+ * WWID addressing.
+ * 03-11-05 01.05.04 Removed EEDP flags from SCSI IO Request.
+ * Removed SCSI IO 32 Request.
+ * Modified SCSI Enclosure Processor Request and Reply to
+ * support Enclosure/Slot addressing rather than WWID
+ * addressing.
+ * 06-24-05 01.05.05 Added SCSI IO 32 structures and defines.
+ * Added four new defines for SEP SlotStatus.
+ * 08-03-05 01.05.06 Fixed some MPI_SCSIIO32_MSGFLGS_ defines to make them
+ * unique in the first 32 characters.
+ * 03-27-06 01.05.07 Added Task Management type of Clear ACA.
+ * 10-11-06 01.05.08 Shortened define for Task Management type of Clear ACA.
+ * 02-28-07 01.05.09 Defined two new MsgFlags bits for SCSI Task Management
+ * Request: Do Not Send Task IU and Soft Reset Option.
+ * --------------------------------------------------------------------------
+
+mpi_targ.h
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 06-22-00 01.00.02 Added _MSG_TARGET_CMD_BUFFER_POST_REPLY structure.
+ * Corrected DECSRIPTOR typo to DESCRIPTOR.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * Modified target mode to use IoIndex instead of
+ * HostIndex and IocIndex. Added Alias.
+ * 01-09-01 01.01.02 Added defines for TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER
+ * and TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER.
+ * 02-20-01 01.01.03 Started using MPI_POINTER.
+ * Added structures for MPI_TARGET_SCSI_SPI_CMD_BUFFER and
+ * MPI_TARGET_FCP_CMD_BUFFER.
+ * 03-27-01 01.01.04 Added structure offset comments.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 09-28-01 01.02.02 Added structure for MPI_TARGET_SCSI_SPI_STATUS_IU.
+ * Added PriorityReason field to some replies and
+ * defined more PriorityReason codes.
+ *                      Added some defines to support the previous version
+ * of MPI.
+ * 10-04-01 01.02.03 Added PriorityReason to MSG_TARGET_ERROR_REPLY.
+ * 11-01-01 01.02.04 Added define for TARGET_STATUS_SEND_FLAGS_HIGH_PRIORITY.
+ * 03-14-02 01.02.05 Modified MPI_TARGET_FCP_RSP_BUFFER to get the proper
+ * byte ordering.
+ * 05-31-02 01.02.06 Modified TARGET_MODE_REPLY_ALIAS_MASK to only include
+ * one bit.
+ * Added AliasIndex field to MPI_TARGET_FCP_CMD_BUFFER.
+ * 09-16-02 01.02.07 Added flags for confirmed completion.
+ * Added PRIORITY_REASON_TARGET_BUSY.
+ * 11-15-02 01.02.08 Added AliasID field to MPI_TARGET_SCSI_SPI_CMD_BUFFER.
+ * 04-01-03 01.02.09 Added OptionalOxid field to MPI_TARGET_FCP_CMD_BUFFER.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Added new request message structures for
+ * MSG_TARGET_CMD_BUF_POST_BASE_REQUEST,
+ * MSG_TARGET_CMD_BUF_POST_LIST_REQUEST, and
+ * MSG_TARGET_ASSIST_EXT_REQUEST.
+ * Added new structures for SAS SSP Command buffer, SSP
+ * Task buffer, and SSP Status IU.
+ * 10-05-04 01.05.02 MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY added.
+ * 02-22-05 01.05.03 Changed a comment.
+ * 03-11-05 01.05.04 Removed TargetAssistExtended Request.
+ * 06-24-05 01.05.05 Added TargetAssistExtended structures and defines.
+ * 03-27-06 01.05.06 Added a comment.
+ * --------------------------------------------------------------------------
+
+mpi_fc.h
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 06-12-00 01.00.02 Added _MSG_FC_ABORT_REPLY structure.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * 12-04-00 01.01.02 Added messages for Common Transport Send and
+ * Primitive Send.
+ * 01-09-01 01.01.03 Modified some of the new flags to have an MPI prefix
+ * and modified the FcPrimitiveSend flags.
+ * 01-25-01 01.01.04 Move InitiatorIndex in LinkServiceRsp reply to a larger
+ * field.
+ * Added FC_ABORT_TYPE_CT_SEND_REQUEST and
+ * FC_ABORT_TYPE_EXLINKSEND_REQUEST for FcAbort request.
+ * Added MPI_FC_PRIM_SEND_FLAGS_STOP_SEND.
+ * 02-20-01 01.01.05 Started using MPI_POINTER.
+ * 03-27-01 01.01.06 Added Flags field to MSG_LINK_SERVICE_BUFFER_POST_REPLY
+ * and defined MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED.
+ * Added MPI_FC_PRIM_SEND_FLAGS_RESET_LINK define.
+ * Added structure offset comments.
+ * 04-09-01 01.01.07 Added RspLength field to MSG_LINK_SERVICE_RSP_REQUEST.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 09-28-01 01.02.02 Change name of reserved field in
+ * MSG_LINK_SERVICE_RSP_REPLY.
+ * 05-31-02 01.02.03 Added AliasIndex to FC Direct Access requests.
+ * 01-16-04 01.02.04 Added define for MPI_FC_PRIM_SEND_FLAGS_ML_RESET_LINK.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Original release for MPI v1.5.
+ * --------------------------------------------------------------------------
+
+mpi_lan.h
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 05-24-00 00.10.02 Added LANStatus field to _MSG_LAN_SEND_REPLY.
+ * Added LANStatus field to _MSG_LAN_RECEIVE_POST_REPLY.
+ * Moved ListCount field in _MSG_LAN_RECEIVE_POST_REPLY.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 06-12-00 01.00.02 Added MPI_ to BUCKETSTATUS_ definitions.
+ * 06-22-00 01.00.03 Major changes to match new LAN definition in 1.0 spec.
+ * 06-30-00 01.00.04 Added Context Reply definitions per revised proposal.
+ * Changed transaction context usage to bucket/buffer.
+ * 07-05-00 01.00.05 Moved LAN_RECEIVE_POST_BUCKET_CONTEXT_MASK definition
+ *                      to the LAN private header file.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * 02-20-01 01.01.02 Started using MPI_POINTER.
+ * 03-27-01 01.01.03 Added structure offset comments.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Original release for MPI v1.5.
+ * --------------------------------------------------------------------------
+
+mpi_raid.h
+ * 02-27-01 01.01.01 Original release for this file.
+ * 03-27-01 01.01.02 Added structure offset comments.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 08-29-01 01.02.02 Added DIAG_DATA_UPLOAD_HEADER and related defines.
+ * 09-28-01 01.02.02 Major rework for MPI v1.2 Integrated RAID changes.
+ * 10-04-01 01.02.03 Added ActionData defines for
+ * MPI_RAID_ACTION_DELETE_VOLUME action.
+ * 11-01-01 01.02.04 Added define for MPI_RAID_ACTION_ADATA_DO_NOT_SYNC.
+ * 03-14-02 01.02.05 Added define for MPI_RAID_ACTION_ADATA_LOW_LEVEL_INIT.
+ * 05-07-02 01.02.06 Added define for MPI_RAID_ACTION_ACTIVATE_VOLUME,
+ * MPI_RAID_ACTION_INACTIVATE_VOLUME, and
+ * MPI_RAID_ACTION_ADATA_INACTIVATE_ALL.
+ * 07-12-02 01.02.07 Added structures for Mailbox request and reply.
+ * 11-15-02 01.02.08 Added missing MsgContext field to MSG_MAILBOX_REQUEST.
+ * 04-01-03 01.02.09 New action data option flag for
+ * MPI_RAID_ACTION_DELETE_VOLUME.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Original release for MPI v1.5.
+ * 01-15-05 01.05.02 Added defines for the two new RAID Actions for
+ * _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE.
+ * 02-28-07 01.05.03 Added new RAID Action, Device FW Update Mode, and
+ * associated defines.
+ * 08-07-07 01.05.04 Added Disable Full Rebuild bit to the ActionDataWord
+ * for the RAID Action MPI_RAID_ACTION_DISABLE_VOLUME.
+ * 01-15-08 01.05.05 Added define for MPI_RAID_ACTION_SET_VOLUME_NAME.
+ * --------------------------------------------------------------------------
+
+mpi_tool.h
+ * 08-08-01 01.02.01 Original release.
+ * 08-29-01 01.02.02 Added DIAG_DATA_UPLOAD_HEADER and related defines.
+ * 01-16-04 01.02.03 Added defines and structures for new tools
+ *                      MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL and
+ * MPI_TOOLBOX_FC_MANAGEMENT_TOOL.
+ * 04-29-04 01.02.04 Added message structures for Diagnostic Buffer Post and
+ * Diagnostic Release requests and replies.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Original release for MPI v1.5.
+ * 10-06-04 01.05.02 Added define for MPI_DIAG_BUF_TYPE_COUNT.
+ * 02-09-05 01.05.03 Added frame size option to FC management tool.
+ * Added Beacon tool to the Toolbox.
+ * --------------------------------------------------------------------------
+
+mpi_inb.h
+ * 05-11-04 01.03.01 Original release.
+ * 08-19-04 01.05.01 Original release for MPI v1.5.
+ * --------------------------------------------------------------------------
+
+mpi_sas.h
+ * 08-19-04 01.05.01 Original release.
+ * 08-30-05 01.05.02 Added DeviceInfo bit for SEP.
+ * Added PrimFlags and Primitive field to SAS IO Unit
+ * Control request, and added a new operation code.
+ * 03-27-06 01.05.03 Added Force Full Discovery, Transmit Port Select Signal,
+ * and Remove Device operations to SAS IO Unit Control.
+ * Added DevHandle field to SAS IO Unit Control request and
+ * reply.
+ * 10-11-06 01.05.04 Fixed the name of a define for Operation field of SAS IO
+ * Unit Control request.
+ * 01-15-08 01.05.05 Added support for MPI_SAS_OP_SET_IOC_PARAMETER,
+ * including adding IOCParameter and IOCParameter value
+ * fields to SAS IO Unit Control Request.
+ * Added MPI_SAS_DEVICE_INFO_PRODUCT_SPECIFIC define.
+ * --------------------------------------------------------------------------
+
+mpi_type.h
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * 02-20-01 01.01.02 Added define and ifdef for MPI_POINTER.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Original release for MPI v1.5.
+ * 08-30-05 01.05.02 Added PowerPC option to #ifdef's.
+ * --------------------------------------------------------------------------
+
+mpi_history.txt Parts list history
+
+Filename 01.05.19 01.05.18 01.05.17 01.05.16 01.05.15
+---------- -------- -------- -------- -------- --------
+mpi.h 01.05.16 01.05.15 01.05.14 01.05.13 01.05.12
+mpi_ioc.h 01.05.16 01.05.15 01.05.15 01.05.14 01.05.13
+mpi_cnfg.h 01.05.18 01.05.17 01.05.16 01.05.15 01.05.14
+mpi_init.h 01.05.09 01.05.09 01.05.09 01.05.09 01.05.09
+mpi_targ.h 01.05.06 01.05.06 01.05.06 01.05.06 01.05.06
+mpi_fc.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
+mpi_lan.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
+mpi_raid.h 01.05.05 01.05.05 01.05.04 01.05.03 01.05.03
+mpi_tool.h 01.05.03 01.05.03 01.05.03 01.05.03 01.05.03
+mpi_inb.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
+mpi_sas.h 01.05.05 01.05.05 01.05.04 01.05.04 01.05.04
+mpi_type.h 01.05.02 01.05.02 01.05.02 01.05.02 01.05.02
+
+Filename 01.05.14 01.05.13 01.05.12 01.05.11 01.05.10 01.05.09
+---------- -------- -------- -------- -------- -------- --------
+mpi.h 01.05.12 01.05.11 01.05.10 01.05.09 01.05.08 01.05.07
+mpi_ioc.h 01.05.12 01.05.11 01.05.10 01.05.09 01.05.09 01.05.08
+mpi_cnfg.h 01.05.13 01.05.12 01.05.11 01.05.10 01.05.09 01.05.08
+mpi_init.h 01.05.08 01.05.07 01.05.06 01.05.06 01.05.05 01.05.04
+mpi_targ.h 01.05.06 01.05.06 01.05.05 01.05.05 01.05.05 01.05.04
+mpi_fc.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
+mpi_lan.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
+mpi_raid.h 01.05.02 01.05.02 01.05.02 01.05.02 01.05.02 01.05.02
+mpi_tool.h 01.05.03 01.05.03 01.05.03 01.05.03 01.05.03 01.05.03
+mpi_inb.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
+mpi_sas.h 01.05.04 01.05.03 01.05.02 01.05.01 01.05.01 01.05.01
+mpi_type.h 01.05.02 01.05.02 01.05.02 01.05.01 01.05.01 01.05.01
+
+Filename 01.05.08 01.05.07 01.05.06 01.05.05 01.05.04 01.05.03
+---------- -------- -------- -------- -------- -------- --------
+mpi.h 01.05.06 01.05.05 01.05.04 01.05.03 01.05.02 01.05.01
+mpi_ioc.h 01.05.07 01.05.06 01.05.05 01.05.04 01.05.03 01.05.02
+mpi_cnfg.h 01.05.07 01.05.07 01.05.06 01.05.05 01.05.04 01.05.03
+mpi_init.h 01.05.03 01.05.03 01.05.03 01.05.02 01.05.02 01.05.01
+mpi_targ.h 01.05.03 01.05.02 01.05.02 01.05.02 01.05.02 01.05.02
+mpi_fc.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
+mpi_lan.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
+mpi_raid.h 01.05.02 01.05.02 01.05.02 01.05.01 01.05.01 01.05.01
+mpi_tool.h 01.05.03 01.05.03 01.05.02 01.05.02 01.05.02 01.05.02
+mpi_inb.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
+mpi_sas.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
+mpi_type.h 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01 01.05.01
+
+Filename 01.05.02 01.05.01 01.03.01 01.02.14 01.02.13 01.02.12
+---------- -------- -------- -------- -------- -------- --------
+mpi.h 01.05.01 01.05.01 01.03.01 01.02.12 01.02.11 01.02.10
+mpi_ioc.h 01.05.02 01.05.01 01.03.01 01.02.09 01.02.08 01.02.08
+mpi_cnfg.h 01.05.02 01.05.01 01.03.01 01.02.14 01.02.13 01.02.12
+mpi_init.h 01.05.01 01.05.01 01.03.01 01.02.07 01.02.07 01.02.07
+mpi_targ.h 01.05.02 01.05.01 01.03.01 01.02.09 01.02.09 01.02.09
+mpi_fc.h 01.05.01 01.05.01 01.03.01 01.02.04 01.02.04 01.02.03
+mpi_lan.h 01.05.01 01.05.01 01.03.01 01.02.01 01.02.01 01.02.01
+mpi_raid.h 01.05.01 01.05.01 01.03.01 01.02.09 01.02.09 01.02.09
+mpi_tool.h 01.05.02 01.05.01 01.03.01 01.02.01 01.02.01 01.02.01
+mpi_inb.h 01.05.01 01.05.01 01.03.01
+mpi_sas.h 01.05.01 01.05.01
+mpi_type.h 01.05.01 01.05.01 01.03.01 01.02.04 01.02.03 01.02.02
+
+Filename 01.02.11 01.02.10 01.02.09 01.02.08 01.02.07 01.02.06
+---------- -------- -------- -------- -------- -------- --------
+mpi.h 01.02.09 01.02.08 01.02.07 01.02.06 01.02.05 01.02.04
+mpi_ioc.h 01.02.07 01.02.06 01.02.06 01.02.06 01.02.06 01.02.05
+mpi_cnfg.h 01.02.11 01.02.10 01.02.09 01.02.08 01.02.07 01.02.06
+mpi_init.h 01.02.06 01.02.06 01.02.05 01.02.05 01.02.05 01.02.04
+mpi_targ.h 01.02.09 01.02.08 01.02.07 01.02.06 01.02.06 01.02.05
+mpi_fc.h 01.02.03 01.02.03 01.02.03 01.02.03 01.02.03 01.02.02
+mpi_lan.h 01.02.01 01.02.01 01.02.01 01.02.01 01.02.01 01.02.01
+mpi_raid.h 01.02.09 01.02.08 01.02.07 01.02.07 01.02.06 01.02.05
+mpi_tool.h 01.02.01 01.02.01 01.02.01 01.02.01 01.02.01 01.02.01
+mpi_type.h 01.02.02 01.02.02 01.02.02 01.02.02 01.02.02 01.02.02
+
+Filename 01.02.05 01.02.04 01.02.03 01.02.02 01.02.01 01.01.10
+---------- -------- -------- -------- -------- -------- --------
+mpi.h 01.02.03 01.02.02 01.02.02 01.02.01 01.02.01 01.01.07
+mpi_ioc.h 01.02.04 01.02.03 01.02.03 01.02.02 01.02.01 01.01.07
+mpi_cnfg.h 01.02.05 01.02.04 01.02.03 01.02.02 01.02.01 01.01.11
+mpi_init.h 01.02.04 01.02.04 01.02.03 01.02.02 01.02.01 01.01.05
+mpi_targ.h 01.02.04 01.02.03 01.02.02 01.02.01 01.02.01 01.01.04
+mpi_fc.h 01.02.02 01.02.02 01.02.02 01.02.01 01.02.01 01.01.07
+mpi_lan.h 01.02.01 01.02.01 01.02.01 01.02.01 01.02.01 01.01.03
+mpi_raid.h 01.02.04 01.02.03 01.02.02 01.02.01 01.02.01 01.01.02
+mpi_tool.h 01.02.02 01.02.02 01.02.02 01.02.02 01.02.01
+mpi_type.h 01.02.02 01.02.02 01.02.02 01.02.02 01.02.01 01.01.02
+
+Filename 01.01.09 01.01.08 01.01.07 01.01.06 01.01.05 01.01.04
+---------- -------- -------- -------- -------- -------- --------
+mpi.h 01.01.06 01.01.06 01.01.05 01.01.04 01.01.04 01.01.03
+mpi_ioc.h 01.01.06 01.01.05 01.01.04 01.01.03 01.01.03 01.01.03
+mpi_cnfg.h 01.01.10 01.01.09 01.01.08 01.01.07 01.01.06 01.01.05
+mpi_init.h 01.01.04 01.01.03 01.01.03 01.01.02 01.01.02 01.01.02
+mpi_targ.h 01.01.04 01.01.03 01.01.03 01.01.02 01.01.02 01.01.02
+mpi_fc.h 01.01.06 01.01.05 01.01.05 01.01.04 01.01.04 01.01.03
+mpi_lan.h 01.01.03 01.01.02 01.01.02 01.01.01 01.01.01 01.01.01
+mpi_raid.h 01.01.02 01.01.01
+mpi_type.h 01.01.02 01.01.02 01.01.02 01.01.01 01.01.01 01.01.01
+
+Filename 01.01.03 01.01.02 01.01.01 01.00.07 01.00.06 01.00.05
+---------- -------- -------- -------- -------- -------- --------
+mpi.h 01.01.02 01.01.02 01.01.01 01.00.04 01.00.04 01.00.03
+mpi_ioc.h 01.01.02 01.01.02 01.01.01 01.00.05 01.00.04 01.00.03
+mpi_cnfg.h 01.01.04 01.01.03 01.01.01 01.00.05 01.00.05 01.00.04
+mpi_init.h 01.01.02 01.01.02 01.01.01 01.00.02 01.00.02 01.00.02
+mpi_targ.h 01.01.01 01.01.01 01.01.01 01.00.02 01.00.02 01.00.02
+mpi_fc.h 01.01.02 01.01.02 01.01.01 01.00.02 01.00.02 01.00.02
+mpi_lan.h 01.01.01 01.01.01 01.01.01 01.00.05 01.00.05 01.00.05
+mpi_type.h 01.01.01 01.01.01 01.01.01 01.00.01 01.00.01 01.00.01
+
+Filename 01.00.04 01.00.03 01.00.02 01.00.01 00.10.02 00.10.01
+---------- -------- -------- -------- -------- -------- --------
+mpi.h 01.00.02 01.00.01 01.00.01 01.00.01 00.10.02 00.10.01
+mpi_ioc.h 01.00.02 01.00.02 01.00.01 01.00.01 00.10.02 00.10.01
+mpi_cnfg.h 01.00.03 01.00.02 01.00.02 01.00.01 00.10.01 00.10.01
+mpi_init.h 01.00.02 01.00.02 01.00.02 01.00.01 00.10.02 00.10.01
+mpi_targ.h 01.00.02 01.00.01 01.00.01 01.00.01 00.10.01 00.10.01
+mpi_fc.h 01.00.02 01.00.02 01.00.01 01.00.01 00.10.01 00.10.01
+mpi_lan.h 01.00.03 01.00.02 01.00.01 01.00.01 00.10.02 00.10.01
+mpi_type.h 01.00.01 01.00.01 01.00.01 01.00.01 00.10.01 00.10.01
+
+
+ * --------------------------------------------------------------------------
+
diff --git a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h
new file mode 100644
index 00000000..4295d062
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_init.h
@@ -0,0 +1,580 @@
+/*
+ * Copyright (c) 2000-2008 LSI Corporation.
+ *
+ *
+ * Name: mpi_init.h
+ * Title: MPI initiator mode messages and structures
+ * Creation Date: June 8, 2000
+ *
+ * mpi_init.h Version: 01.05.09
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 05-24-00 00.10.02 Added SenseBufferLength to _MSG_SCSI_IO_REPLY.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 06-08-00 01.00.02 Added MPI_SCSI_RSP_INFO_ definitions.
+ * 11-02-00 01.01.01 Original release for post 1.0 work.
+ * 12-04-00 01.01.02 Added MPI_SCSIIO_CONTROL_NO_DISCONNECT.
+ * 02-20-01 01.01.03 Started using MPI_POINTER.
+ * 03-27-01 01.01.04 Added structure offset comments.
+ * 04-10-01 01.01.05 Added new MsgFlag for MSG_SCSI_TASK_MGMT.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 08-29-01 01.02.02 Added MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET.
+ * Added MPI_SCSI_STATE_QUEUE_TAG_REJECTED for
+ * MSG_SCSI_IO_REPLY.
+ * 09-28-01 01.02.03 Added structures and defines for SCSI Enclosure
+ * Processor messages.
+ * 10-04-01 01.02.04 Added defines for SEP request Action field.
+ * 05-31-02 01.02.05 Added MPI_SCSIIO_MSGFLGS_CMD_DETERMINES_DATA_DIR define
+ * for SCSI IO requests.
+ * 11-15-02 01.02.06 Added special extended SCSI Status defines for FCP.
+ * 06-26-03 01.02.07 Added MPI_SCSI_STATUS_FCPEXT_UNASSIGNED define.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Added MsgFlags defines for EEDP to SCSI IO request.
+ * Added new word to MSG_SCSI_IO_REPLY to add TaskTag field
+ * and a reserved U16.
+ * Added new MSG_SCSI_IO32_REQUEST structure.
+ * Added a TaskType of Clear Task Set to SCSI
+ * Task Management request.
+ * 12-07-04 01.05.02 Added support for Task Management Query Task.
+ * 01-15-05 01.05.03 Modified SCSI Enclosure Processor Request to support
+ * WWID addressing.
+ * 03-11-05 01.05.04 Removed EEDP flags from SCSI IO Request.
+ * Removed SCSI IO 32 Request.
+ * Modified SCSI Enclosure Processor Request and Reply to
+ * support Enclosure/Slot addressing rather than WWID
+ * addressing.
+ * 06-24-05 01.05.05 Added SCSI IO 32 structures and defines.
+ * Added four new defines for SEP SlotStatus.
+ * 08-03-05 01.05.06 Fixed some MPI_SCSIIO32_MSGFLGS_ defines to make them
+ * unique in the first 32 characters.
+ * 03-27-06 01.05.07 Added Task Management type of Clear ACA.
+ * 10-11-06 01.05.08 Shortened define for Task Management type of Clear ACA.
+ * 02-28-07 01.05.09 Defined two new MsgFlags bits for SCSI Task Management
+ * Request: Do Not Send Task IU and Soft Reset Option.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_INIT_H
+#define MPI_INIT_H
+
+
+/*****************************************************************************
+*
+* S C S I I n i t i a t o r M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* SCSI IO messages and associated structures */
+/****************************************************************************/
+
+typedef struct _MSG_SCSI_IO_REQUEST
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 CDBLength; /* 04h */
+ U8 SenseBufferLength; /* 05h */
+ U8 Reserved; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 LUN[8]; /* 0Ch */
+ U32 Control; /* 14h */
+ U8 CDB[16]; /* 18h */
+ U32 DataLength; /* 28h */
+ U32 SenseBufferLowAddr; /* 2Ch */
+ SGE_IO_UNION SGL; /* 30h */
+} MSG_SCSI_IO_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO_REQUEST,
+ SCSIIORequest_t, MPI_POINTER pSCSIIORequest_t;
+
+
+/* SCSI IO MsgFlags bits */
+
+#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH (0x01)
+#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32 (0x00)
+#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 (0x01)
+
+#define MPI_SCSIIO_MSGFLGS_SENSE_LOCATION (0x02)
+#define MPI_SCSIIO_MSGFLGS_SENSE_LOC_HOST (0x00)
+#define MPI_SCSIIO_MSGFLGS_SENSE_LOC_IOC (0x02)
+
+#define MPI_SCSIIO_MSGFLGS_CMD_DETERMINES_DATA_DIR (0x04)
+
+/* SCSI IO LUN fields */
+
+#define MPI_SCSIIO_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI_SCSIIO_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI_SCSIIO_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI_SCSIIO_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI_SCSIIO_LUN_LEVEL_1_WORD (0xFF00)
+#define MPI_SCSIIO_LUN_LEVEL_1_DWORD (0x0000FF00)
+
+/* SCSI IO Control bits */
+
+#define MPI_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000)
+#define MPI_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
+#define MPI_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI_SCSIIO_CONTROL_READ (0x02000000)
+
+#define MPI_SCSIIO_CONTROL_ADDCDBLEN_MASK (0x3C000000)
+#define MPI_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
+
+#define MPI_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
+#define MPI_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
+#define MPI_SCSIIO_CONTROL_HEADOFQ (0x00000100)
+#define MPI_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
+#define MPI_SCSIIO_CONTROL_ACAQ (0x00000400)
+#define MPI_SCSIIO_CONTROL_UNTAGGED (0x00000500)
+#define MPI_SCSIIO_CONTROL_NO_DISCONNECT (0x00000700)
+
+#define MPI_SCSIIO_CONTROL_TASKMANAGE_MASK (0x00FF0000)
+#define MPI_SCSIIO_CONTROL_OBSOLETE (0x00800000)
+#define MPI_SCSIIO_CONTROL_CLEAR_ACA_RSV (0x00400000)
+#define MPI_SCSIIO_CONTROL_TARGET_RESET (0x00200000)
+#define MPI_SCSIIO_CONTROL_LUN_RESET_RSV (0x00100000)
+#define MPI_SCSIIO_CONTROL_RESERVED (0x00080000)
+#define MPI_SCSIIO_CONTROL_CLR_TASK_SET_RSV (0x00040000)
+#define MPI_SCSIIO_CONTROL_ABORT_TASK_SET (0x00020000)
+#define MPI_SCSIIO_CONTROL_RESERVED2 (0x00010000)
+
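For readers following the request layout above, the short sketch below is an editorial illustration (not part of the original header) of how a driver might fill the data-direction and task-attribute bits of the Control field together with a READ(10) CDB. It assumes the lsi/ headers added by this patch are on the include path and that MPI_FUNCTION_SCSI_IO_REQUEST is the SCSI IO function code defined in mpi.h; LUN, MsgContext, sense buffer and SGL setup are left to the caller, and the helper name is hypothetical.

#include <string.h>
#include "mpi_type.h"
#include "mpi.h"
#include "mpi_init.h"

/* Hypothetical helper: build a READ(10) using the defines above. */
static void build_read10(MSG_SCSI_IO_REQUEST *req, U8 bus, U8 target,
                         U32 lba, U16 blocks, U32 block_size)
{
    memset(req, 0, sizeof(*req));
    req->TargetID   = target;
    req->Bus        = bus;
    req->Function   = MPI_FUNCTION_SCSI_IO_REQUEST;  /* assumed, from mpi.h */
    req->CDBLength  = 10;
    req->MsgFlags   = MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 |
                      MPI_SCSIIO_MSGFLGS_SENSE_LOC_HOST;
    req->DataLength = (U32)blocks * block_size;

    /* Device-to-host transfer with a simple queue tag. */
    req->Control = MPI_SCSIIO_CONTROL_READ | MPI_SCSIIO_CONTROL_SIMPLEQ;

    /* READ(10): opcode 0x28, big-endian LBA and transfer length. */
    req->CDB[0] = 0x28;
    req->CDB[2] = (U8)(lba >> 24);
    req->CDB[3] = (U8)(lba >> 16);
    req->CDB[4] = (U8)(lba >> 8);
    req->CDB[5] = (U8)lba;
    req->CDB[7] = (U8)(blocks >> 8);
    req->CDB[8] = (U8)blocks;
    /* Caller still fills LUN[], MsgContext, SenseBufferLowAddr and the SGL. */
}
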
+
+/* SCSI IO reply structure */
+typedef struct _MSG_SCSI_IO_REPLY
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 CDBLength; /* 04h */
+ U8 SenseBufferLength; /* 05h */
+ U8 Reserved; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 SCSIStatus; /* 0Ch */
+ U8 SCSIState; /* 0Dh */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 TransferCount; /* 14h */
+ U32 SenseCount; /* 18h */
+ U32 ResponseInfo; /* 1Ch */
+ U16 TaskTag; /* 20h */
+ U16 Reserved1; /* 22h */
+} MSG_SCSI_IO_REPLY, MPI_POINTER PTR_MSG_SCSI_IO_REPLY,
+ SCSIIOReply_t, MPI_POINTER pSCSIIOReply_t;
+
+
+/* SCSI IO Reply SCSIStatus values (SAM-2 status codes) */
+
+#define MPI_SCSI_STATUS_SUCCESS (0x00)
+#define MPI_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI_SCSI_STATUS_BUSY (0x08)
+#define MPI_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI_SCSI_STATUS_COMMAND_TERMINATED (0x22)
+#define MPI_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI_SCSI_STATUS_ACA_ACTIVE (0x30)
+
+#define MPI_SCSI_STATUS_FCPEXT_DEVICE_LOGGED_OUT (0x80)
+#define MPI_SCSI_STATUS_FCPEXT_NO_LINK (0x81)
+#define MPI_SCSI_STATUS_FCPEXT_UNASSIGNED (0x82)
+
+
+/* SCSI IO Reply SCSIState values */
+
+#define MPI_SCSI_STATE_AUTOSENSE_VALID (0x01)
+#define MPI_SCSI_STATE_AUTOSENSE_FAILED (0x02)
+#define MPI_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI_SCSI_STATE_TERMINATED (0x08)
+#define MPI_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
+#define MPI_SCSI_STATE_QUEUE_TAG_REJECTED (0x20)
+
+/* SCSI IO Reply ResponseInfo values */
+/* (FCP-1 RSP_CODE values and SPI-3 Packetized Failure codes) */
+
+#define MPI_SCSI_RSP_INFO_FUNCTION_COMPLETE (0x00000000)
+#define MPI_SCSI_RSP_INFO_FCP_BURST_LEN_ERROR (0x01000000)
+#define MPI_SCSI_RSP_INFO_CMND_FIELDS_INVALID (0x02000000)
+#define MPI_SCSI_RSP_INFO_FCP_DATA_RO_ERROR (0x03000000)
+#define MPI_SCSI_RSP_INFO_TASK_MGMT_UNSUPPORTED (0x04000000)
+#define MPI_SCSI_RSP_INFO_TASK_MGMT_FAILED (0x05000000)
+#define MPI_SCSI_RSP_INFO_SPI_LQ_INVALID_TYPE (0x06000000)
+
+#define MPI_SCSI_TASKTAG_UNKNOWN (0xFFFF)
+
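As a hedged illustration of how the SCSIStatus and SCSIState values above are typically consumed (again, not part of the original header), the sketch below reports whether an IO completed cleanly and whether autosense data was captured; real drivers also examine IOCStatus and IOCLogInfo, whose values live in mpi.h.

#include "mpi_type.h"
#include "mpi.h"
#include "mpi_init.h"

/* Hypothetical helper: 1 = completed with GOOD status, 0 = needs attention. */
static int scsi_io_reply_good(const MSG_SCSI_IO_REPLY *reply)
{
    if (reply->SCSIState & (MPI_SCSI_STATE_TERMINATED |
                            MPI_SCSI_STATE_NO_SCSI_STATUS))
        return 0;                                /* IOC-level failure */

    if ((reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) &&
        reply->SCSIStatus == MPI_SCSI_STATUS_CHECK_CONDITION)
        return 0;                                /* SenseCount bytes of sense data */

    return reply->SCSIStatus == MPI_SCSI_STATUS_SUCCESS;
}
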
+
+/****************************************************************************/
+/* SCSI IO 32 messages and associated structures */
+/****************************************************************************/
+
+typedef struct
+{
+ U8 CDB[20]; /* 00h */
+ U32 PrimaryReferenceTag; /* 14h */
+ U16 PrimaryApplicationTag; /* 18h */
+ U16 PrimaryApplicationTagMask; /* 1Ah */
+ U32 TransferLength; /* 1Ch */
+} MPI_SCSI_IO32_CDB_EEDP32, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_EEDP32,
+ MpiScsiIo32CdbEedp32_t, MPI_POINTER pMpiScsiIo32CdbEedp32_t;
+
+typedef struct
+{
+ U8 CDB[16]; /* 00h */
+ U32 DataLength; /* 10h */
+ U32 PrimaryReferenceTag; /* 14h */
+ U16 PrimaryApplicationTag; /* 18h */
+ U16 PrimaryApplicationTagMask; /* 1Ah */
+ U32 TransferLength; /* 1Ch */
+} MPI_SCSI_IO32_CDB_EEDP16, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_EEDP16,
+ MpiScsiIo32CdbEedp16_t, MPI_POINTER pMpiScsiIo32CdbEedp16_t;
+
+typedef union
+{
+ U8 CDB32[32];
+ MPI_SCSI_IO32_CDB_EEDP32 EEDP32;
+ MPI_SCSI_IO32_CDB_EEDP16 EEDP16;
+ SGE_SIMPLE_UNION SGE;
+} MPI_SCSI_IO32_CDB_UNION, MPI_POINTER PTR_MPI_SCSI_IO32_CDB_UNION,
+ MpiScsiIo32Cdb_t, MPI_POINTER pMpiScsiIo32Cdb_t;
+
+typedef struct
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U16 Reserved1; /* 02h */
+ U32 Reserved2; /* 04h */
+} MPI_SCSI_IO32_BUS_TARGET_ID_FORM, MPI_POINTER PTR_MPI_SCSI_IO32_BUS_TARGET_ID_FORM,
+ MpiScsiIo32BusTargetIdForm_t, MPI_POINTER pMpiScsiIo32BusTargetIdForm_t;
+
+typedef union
+{
+ MPI_SCSI_IO32_BUS_TARGET_ID_FORM SCSIID;
+ U64 WWID;
+} MPI_SCSI_IO32_ADDRESS, MPI_POINTER PTR_MPI_SCSI_IO32_ADDRESS,
+ MpiScsiIo32Address_t, MPI_POINTER pMpiScsiIo32Address_t;
+
+typedef struct _MSG_SCSI_IO32_REQUEST
+{
+ U8 Port; /* 00h */
+ U8 Reserved1; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 CDBLength; /* 04h */
+ U8 SenseBufferLength; /* 05h */
+ U8 Flags; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 LUN[8]; /* 0Ch */
+ U32 Control; /* 14h */
+ MPI_SCSI_IO32_CDB_UNION CDB; /* 18h */
+ U32 DataLength; /* 38h */
+ U32 BidirectionalDataLength; /* 3Ch */
+ U32 SecondaryReferenceTag; /* 40h */
+ U16 SecondaryApplicationTag; /* 44h */
+ U16 Reserved2; /* 46h */
+ U16 EEDPFlags; /* 48h */
+ U16 ApplicationTagTranslationMask; /* 4Ah */
+ U32 EEDPBlockSize; /* 4Ch */
+ MPI_SCSI_IO32_ADDRESS DeviceAddress; /* 50h */
+ U8 SGLOffset0; /* 58h */
+ U8 SGLOffset1; /* 59h */
+ U8 SGLOffset2; /* 5Ah */
+ U8 SGLOffset3; /* 5Bh */
+ U32 Reserved3; /* 5Ch */
+ U32 Reserved4; /* 60h */
+ U32 SenseBufferLowAddr; /* 64h */
+ SGE_IO_UNION SGL; /* 68h */
+} MSG_SCSI_IO32_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO32_REQUEST,
+ SCSIIO32Request_t, MPI_POINTER pSCSIIO32Request_t;
+
+/* SCSI IO 32 MsgFlags bits */
+#define MPI_SCSIIO32_MSGFLGS_SENSE_WIDTH (0x01)
+#define MPI_SCSIIO32_MSGFLGS_32_SENSE_WIDTH (0x00)
+#define MPI_SCSIIO32_MSGFLGS_64_SENSE_WIDTH (0x01)
+
+#define MPI_SCSIIO32_MSGFLGS_SENSE_LOCATION (0x02)
+#define MPI_SCSIIO32_MSGFLGS_SENSE_LOC_HOST (0x00)
+#define MPI_SCSIIO32_MSGFLGS_SENSE_LOC_IOC (0x02)
+
+#define MPI_SCSIIO32_MSGFLGS_CMD_DETERMINES_DATA_DIR (0x04)
+#define MPI_SCSIIO32_MSGFLGS_SGL_OFFSETS_CHAINS (0x08)
+#define MPI_SCSIIO32_MSGFLGS_MULTICAST (0x10)
+#define MPI_SCSIIO32_MSGFLGS_BIDIRECTIONAL (0x20)
+#define MPI_SCSIIO32_MSGFLGS_LARGE_CDB (0x40)
+
+/* SCSI IO 32 Flags bits */
+#define MPI_SCSIIO32_FLAGS_FORM_MASK (0x03)
+#define MPI_SCSIIO32_FLAGS_FORM_SCSIID (0x00)
+#define MPI_SCSIIO32_FLAGS_FORM_WWID (0x01)
+
+/* SCSI IO 32 LUN fields */
+#define MPI_SCSIIO32_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI_SCSIIO32_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI_SCSIIO32_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI_SCSIIO32_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI_SCSIIO32_LUN_LEVEL_1_WORD (0xFF00)
+#define MPI_SCSIIO32_LUN_LEVEL_1_DWORD (0x0000FF00)
+
+/* SCSI IO 32 Control bits */
+#define MPI_SCSIIO32_CONTROL_DATADIRECTION_MASK (0x03000000)
+#define MPI_SCSIIO32_CONTROL_NODATATRANSFER (0x00000000)
+#define MPI_SCSIIO32_CONTROL_WRITE (0x01000000)
+#define MPI_SCSIIO32_CONTROL_READ (0x02000000)
+#define MPI_SCSIIO32_CONTROL_BIDIRECTIONAL (0x03000000)
+
+#define MPI_SCSIIO32_CONTROL_ADDCDBLEN_MASK (0xFC000000)
+#define MPI_SCSIIO32_CONTROL_ADDCDBLEN_SHIFT (26)
+
+#define MPI_SCSIIO32_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
+#define MPI_SCSIIO32_CONTROL_SIMPLEQ (0x00000000)
+#define MPI_SCSIIO32_CONTROL_HEADOFQ (0x00000100)
+#define MPI_SCSIIO32_CONTROL_ORDEREDQ (0x00000200)
+#define MPI_SCSIIO32_CONTROL_ACAQ (0x00000400)
+#define MPI_SCSIIO32_CONTROL_UNTAGGED (0x00000500)
+#define MPI_SCSIIO32_CONTROL_NO_DISCONNECT (0x00000700)
+
+#define MPI_SCSIIO32_CONTROL_TASKMANAGE_MASK (0x00FF0000)
+#define MPI_SCSIIO32_CONTROL_OBSOLETE (0x00800000)
+#define MPI_SCSIIO32_CONTROL_CLEAR_ACA_RSV (0x00400000)
+#define MPI_SCSIIO32_CONTROL_TARGET_RESET (0x00200000)
+#define MPI_SCSIIO32_CONTROL_LUN_RESET_RSV (0x00100000)
+#define MPI_SCSIIO32_CONTROL_RESERVED (0x00080000)
+#define MPI_SCSIIO32_CONTROL_CLR_TASK_SET_RSV (0x00040000)
+#define MPI_SCSIIO32_CONTROL_ABORT_TASK_SET (0x00020000)
+#define MPI_SCSIIO32_CONTROL_RESERVED2 (0x00010000)
+
+/* SCSI IO 32 EEDPFlags */
+#define MPI_SCSIIO32_EEDPFLAGS_MASK_OP (0x0007)
+#define MPI_SCSIIO32_EEDPFLAGS_NOOP_OP (0x0000)
+#define MPI_SCSIIO32_EEDPFLAGS_CHK_OP (0x0001)
+#define MPI_SCSIIO32_EEDPFLAGS_STRIP_OP (0x0002)
+#define MPI_SCSIIO32_EEDPFLAGS_CHKRM_OP (0x0003)
+#define MPI_SCSIIO32_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI_SCSIIO32_EEDPFLAGS_REPLACE_OP (0x0006)
+#define MPI_SCSIIO32_EEDPFLAGS_CHKREGEN_OP (0x0007)
+
+#define MPI_SCSIIO32_EEDPFLAGS_PASS_REF_TAG (0x0008)
+#define MPI_SCSIIO32_EEDPFLAGS_8_9THS_MODE (0x0010)
+
+#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_MASK (0x0700)
+#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_GUARD (0x0100)
+#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_REFTAG (0x0200)
+#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_LBATAG (0x0400)
+#define MPI_SCSIIO32_EEDPFLAGS_T10_CHK_SHIFT (8)
+
+#define MPI_SCSIIO32_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
+#define MPI_SCSIIO32_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
+#define MPI_SCSIIO32_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
+#define MPI_SCSIIO32_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+
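To make the addressing union and EEDPFlags above concrete, the following editorial sketch selects WWID-form addressing and enables T10 guard and reference-tag checking on a SCSI IO 32 request. The Function code, CDB, Control word and SGL are assumed to be filled elsewhere, and the helper name is hypothetical.

#include "mpi_type.h"
#include "mpi.h"
#include "mpi_init.h"

/* Hypothetical helper: address the request by WWID and check T10 PI. */
static void scsi_io32_set_wwid_eedp(MSG_SCSI_IO32_REQUEST *req, U64 wwid,
                                    U32 eedp_block_size)
{
    req->Flags = MPI_SCSIIO32_FLAGS_FORM_WWID;   /* DeviceAddress holds a WWID */
    req->DeviceAddress.WWID = wwid;

    /* Check incoming protection information: guard CRC and reference tag. */
    req->EEDPFlags = MPI_SCSIIO32_EEDPFLAGS_CHK_OP |
                     MPI_SCSIIO32_EEDPFLAGS_T10_CHK_GUARD |
                     MPI_SCSIIO32_EEDPFLAGS_T10_CHK_REFTAG;
    req->EEDPBlockSize = eedp_block_size;
}
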
+
+/* SCSIIO32 IO reply structure */
+typedef struct _MSG_SCSIIO32_IO_REPLY
+{
+ U8 Port; /* 00h */
+ U8 Reserved1; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 CDBLength; /* 04h */
+ U8 SenseBufferLength; /* 05h */
+ U8 Flags; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 SCSIStatus; /* 0Ch */
+ U8 SCSIState; /* 0Dh */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 TransferCount; /* 14h */
+ U32 SenseCount; /* 18h */
+ U32 ResponseInfo; /* 1Ch */
+ U16 TaskTag; /* 20h */
+ U16 Reserved2; /* 22h */
+ U32 BidirectionalTransferCount; /* 24h */
+} MSG_SCSIIO32_IO_REPLY, MPI_POINTER PTR_MSG_SCSIIO32_IO_REPLY,
+ SCSIIO32Reply_t, MPI_POINTER pSCSIIO32Reply_t;
+
+
+/****************************************************************************/
+/* SCSI Task Management messages */
+/****************************************************************************/
+
+typedef struct _MSG_SCSI_TASK_MGMT
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved; /* 04h */
+ U8 TaskType; /* 05h */
+ U8 Reserved1; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 LUN[8]; /* 0Ch */
+ U32 Reserved2[7]; /* 14h */
+ U32 TaskMsgContext; /* 30h */
+} MSG_SCSI_TASK_MGMT, MPI_POINTER PTR_SCSI_TASK_MGMT,
+ SCSITaskMgmt_t, MPI_POINTER pSCSITaskMgmt_t;
+
+/* TaskType values */
+
+#define MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
+#define MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS (0x04)
+#define MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
+
+/* MsgFlags bits */
+#define MPI_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+
+#define MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION (0x00)
+#define MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION (0x02)
+#define MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION (0x04)
+
+#define MPI_SCSITASKMGMT_MSGFLAGS_SOFT_RESET_OPTION (0x08)
+
+/* SCSI Task Management Reply */
+typedef struct _MSG_SCSI_TASK_MGMT_REPLY
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 ResponseCode; /* 04h */
+ U8 TaskType; /* 05h */
+ U8 Reserved1; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 Reserved2[2]; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 TerminationCount; /* 14h */
+} MSG_SCSI_TASK_MGMT_REPLY, MPI_POINTER PTR_MSG_SCSI_TASK_MGMT_REPLY,
+ SCSITaskMgmtReply_t, MPI_POINTER pSCSITaskMgmtReply_t;
+
+/* ResponseCode values */
+#define MPI_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
+#define MPI_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
+#define MPI_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
+#define MPI_SCSITASKMGMT_RSP_TM_FAILED (0x05)
+#define MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
+#define MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
+#define MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
+
+
+/****************************************************************************/
+/* SCSI Enclosure Processor messages */
+/****************************************************************************/
+
+typedef struct _MSG_SEP_REQUEST
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 Action; /* 04h */
+ U8 Flags; /* 05h */
+ U8 Reserved1; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 SlotStatus; /* 0Ch */
+ U32 Reserved2; /* 10h */
+ U32 Reserved3; /* 14h */
+ U32 Reserved4; /* 18h */
+ U16 Slot; /* 1Ch */
+ U16 EnclosureHandle; /* 1Eh */
+} MSG_SEP_REQUEST, MPI_POINTER PTR_MSG_SEP_REQUEST,
+ SEPRequest_t, MPI_POINTER pSEPRequest_t;
+
+/* Action defines */
+#define MPI_SEP_REQ_ACTION_WRITE_STATUS (0x00)
+#define MPI_SEP_REQ_ACTION_READ_STATUS (0x01)
+
+/* Flags defines */
+#define MPI_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
+#define MPI_SEP_REQ_FLAGS_BUS_TARGETID_ADDRESS (0x00)
+
+/* SlotStatus bits for MSG_SEP_REQUEST */
+#define MPI_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001)
+#define MPI_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI_SEP_REQ_SLOTSTATUS_PARITY_CHECK (0x00000020)
+#define MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI_SEP_REQ_SLOTSTATUS_REQ_CONSISTENCY_CHECK (0x00001000)
+#define MPI_SEP_REQ_SLOTSTATUS_DISABLE (0x00002000)
+#define MPI_SEP_REQ_SLOTSTATUS_REQ_RESERVED_DEVICE (0x00004000)
+#define MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
+#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_INSERT (0x00080000)
+#define MPI_SEP_REQ_SLOTSTATUS_DO_NOT_MOVE (0x00400000)
+#define MPI_SEP_REQ_SLOTSTATUS_ACTIVE (0x00800000)
+#define MPI_SEP_REQ_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000)
+#define MPI_SEP_REQ_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000)
+#define MPI_SEP_REQ_SLOTSTATUS_DEV_OFF (0x10000000)
+#define MPI_SEP_REQ_SLOTSTATUS_SWAP_RESET (0x80000000)
+
+
+typedef struct _MSG_SEP_REPLY
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 Action; /* 04h */
+ U8 Reserved1; /* 05h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 SlotStatus; /* 14h */
+ U32 Reserved4; /* 18h */
+ U16 Slot; /* 1Ch */
+ U16 EnclosureHandle; /* 1Eh */
+} MSG_SEP_REPLY, MPI_POINTER PTR_MSG_SEP_REPLY,
+ SEPReply_t, MPI_POINTER pSEPReply_t;
+
+/* SlotStatus bits for MSG_SEP_REPLY */
+#define MPI_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001)
+#define MPI_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI_SEP_REPLY_SLOTSTATUS_PARITY_CHECK (0x00000020)
+#define MPI_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI_SEP_REPLY_SLOTSTATUS_CONSISTENCY_CHECK (0x00001000)
+#define MPI_SEP_REPLY_SLOTSTATUS_DISABLE (0x00002000)
+#define MPI_SEP_REPLY_SLOTSTATUS_RESERVED_DEVICE (0x00004000)
+#define MPI_SEP_REPLY_SLOTSTATUS_REPORT (0x00010000)
+#define MPI_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
+#define MPI_SEP_REPLY_SLOTSTATUS_INSERT_READY (0x00080000)
+#define MPI_SEP_REPLY_SLOTSTATUS_DO_NOT_REMOVE (0x00400000)
+#define MPI_SEP_REPLY_SLOTSTATUS_ACTIVE (0x00800000)
+#define MPI_SEP_REPLY_SLOTSTATUS_B_BYPASS_ENABLED (0x01000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_A_BYPASS_ENABLED (0x02000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_B_ENABLE_BYPASS (0x04000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_A_ENABLE_BYPASS (0x08000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_DEV_OFF (0x10000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_FAULT_SENSED (0x40000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_SWAPPED (0x80000000)
+
+#endif
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
new file mode 100644
index 00000000..fd622288
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -0,0 +1,1207 @@
+/*
+ * Copyright (c) 2000-2008 LSI Corporation.
+ *
+ *
+ * Name: mpi_ioc.h
+ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
+ * Creation Date: August 11, 2000
+ *
+ * mpi_ioc.h Version: 01.05.16
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 05-24-00 00.10.02 Added _MSG_IOC_INIT_REPLY structure.
+ * 06-06-00 01.00.01 Added CurReplyFrameSize field to _MSG_IOC_FACTS_REPLY.
+ * 06-12-00 01.00.02 Added _MSG_PORT_ENABLE_REPLY structure.
+ * Added _MSG_EVENT_ACK_REPLY structure.
+ * Added _MSG_FW_DOWNLOAD_REPLY structure.
+ * Added _MSG_TOOLBOX_REPLY structure.
+ * 06-30-00 01.00.03 Added MaxLanBuckets to _PORT_FACT_REPLY structure.
+ * 07-27-00 01.00.04 Added _EVENT_DATA structure definitions for _SCSI,
+ * _LINK_STATUS, _LOOP_STATE and _LOGOUT.
+ * 08-11-00 01.00.05 Switched positions of MsgLength and Function fields in
+ * _MSG_EVENT_ACK_REPLY structure to match specification.
+ * 11-02-00 01.01.01 Original release for post 1.0 work.
+ * Added a value for Manufacturer to WhoInit.
+ * 12-04-00 01.01.02 Modified IOCFacts reply, added FWUpload messages, and
+ * removed toolbox message.
+ * 01-09-01 01.01.03 Added event enabled and disabled defines.
+ * Added structures for FwHeader and DataHeader.
+ * Added ImageType to FwUpload reply.
+ * 02-20-01 01.01.04 Started using MPI_POINTER.
+ * 02-27-01 01.01.05 Added event for RAID status change and its event data.
+ * Added IocNumber field to MSG_IOC_FACTS_REPLY.
+ * 03-27-01 01.01.06 Added defines for ProductId field of MPI_FW_HEADER.
+ * Added structure offset comments.
+ * 04-09-01 01.01.07 Added structure EVENT_DATA_EVENT_CHANGE.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * New format for FWVersion and ProductId in
+ * MSG_IOC_FACTS_REPLY and MPI_FW_HEADER.
+ * 08-31-01 01.02.02 Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
+ * related structure and defines.
+ * Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED.
+ * Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE.
+ * Replaced a reserved field in MSG_IOC_FACTS_REPLY with
+ * IOCExceptions and changed DataImageSize to reserved.
+ * Added MPI_FW_DOWNLOAD_ITYPE_NVSTORE_DATA and
+ * MPI_FW_UPLOAD_ITYPE_NVDATA.
+ * 09-28-01 01.02.03 Modified Event Data for Integrated RAID.
+ * 11-01-01 01.02.04 Added defines for MPI_EXT_IMAGE_HEADER ImageType field.
+ * 03-14-02 01.02.05 Added HeaderVersion field to MSG_IOC_FACTS_REPLY.
+ * 05-31-02 01.02.06 Added define for
+ * MPI_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID.
+ * Added AliasIndex to EVENT_DATA_LOGOUT structure.
+ * 04-01-03 01.02.07 Added defines for MPI_FW_HEADER_SIGNATURE_.
+ * 06-26-03 01.02.08 Added new values to the product family defines.
+ * 04-29-04 01.02.09 Added IOCCapabilities field to MSG_IOC_FACTS_REPLY and
+ * added related defines.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Added four new fields to MSG_IOC_INIT.
+ * Added three new fields to MSG_IOC_FACTS_REPLY.
+ * Defined four new bits for the IOCCapabilities field of
+ * the IOCFacts reply.
+ * Added two new PortTypes for the PortFacts reply.
+ * Added six new events along with their EventData
+ * structures.
+ * Added a new MsgFlag to the FwDownload request to
+ * indicate last segment.
+ * Defined a new image type of boot loader.
+ * Added FW family codes for SAS product families.
+ * 10-05-04 01.05.02 Added ReplyFifoHostSignalingAddr field to
+ * MSG_IOC_FACTS_REPLY.
+ * 12-07-04 01.05.03 Added more defines for SAS Discovery Error event.
+ * 12-09-04 01.05.04 Added Unsupported device to SAS Device event.
+ * 01-15-05 01.05.05 Added event data for SAS SES Event.
+ * 02-09-05 01.05.06 Added MPI_FW_UPLOAD_ITYPE_FW_BACKUP define.
+ * 02-22-05 01.05.07 Added Host Page Buffer Persistent flag to IOC Facts
+ * Reply and IOC Init Request.
+ * 03-11-05 01.05.08 Added family code for 1068E family.
+ * Removed IOCFacts Reply EEDP Capability bit.
+ * 06-24-05 01.05.09 Added 5 new IOCFacts Reply IOCCapabilities bits.
+ * Added Max SATA Targets to SAS Discovery Error event.
+ * 08-30-05 01.05.10 Added 4 new events and their event data structures.
+ * Added new ReasonCode value for SAS Device Status Change
+ * event.
+ * Added new family code for FC949E.
+ * 03-27-06 01.05.11 Added MPI_IOCFACTS_CAPABILITY_TLR.
+ * Added additional Reason Codes and more event data fields
+ * to EVENT_DATA_SAS_DEVICE_STATUS_CHANGE.
+ * Added EVENT_DATA_SAS_BROADCAST_PRIMITIVE structure and
+ * new event.
+ * Added MPI_EVENT_SAS_SMP_ERROR and event data structure.
+ * Added MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE and event
+ * data structure.
+ * Added MPI_EVENT_SAS_INIT_TABLE_OVERFLOW and event
+ * data structure.
+ * Added MPI_EXT_IMAGE_TYPE_INITIALIZATION.
+ * 10-11-06 01.05.12 Added MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED.
+ * Added MaxInitiators field to PortFacts reply.
+ * Added SAS Device Status Change ReasonCode for
+ * asynchronous notification.
+ * Added MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE and event
+ * data structure.
+ * Added new ImageType values for FWDownload and FWUpload
+ * requests.
+ * 02-28-07 01.05.13 Added MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT for SAS
+ * Broadcast Event Data (replacing _RESERVED2).
+ * For Discovery Error Event Data DiscoveryStatus field,
+ * replaced _MULTPL_PATHS with _UNSUPPORTED_DEVICE and
+ * added _MULTI_PORT_DOMAIN.
+ * 05-24-07 01.05.14 Added Common Boot Block type to FWDownload Request.
+ * Added Common Boot Block type to FWUpload Request.
+ * 08-07-07 01.05.15 Added MPI_EVENT_SAS_INIT_RC_REMOVED define.
+ * Added MPI_EVENT_IR2_RC_DUAL_PORT_ADDED and
+ * MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED for IR2 event data.
+ * Added SASAddress field to SAS Initiator Device Table
+ * Overflow event data structure.
+ * 03-28-08 01.05.16 Added two new ReasonCode values to SAS Device Status
+ * Change Event data to indicate completion of internally
+ * generated task management.
+ * Added MPI_EVENT_DSCVRY_ERR_DS_SATA_INIT_FAILURE define.
+ * Added MPI_EVENT_SAS_INIT_RC_INACCESSIBLE define.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_IOC_H
+#define MPI_IOC_H
+
+
+/*****************************************************************************
+*
+* I O C M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* IOCInit message */
+/****************************************************************************/
+
+typedef struct _MSG_IOC_INIT
+{
+ U8 WhoInit; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 Flags; /* 04h */
+ U8 MaxDevices; /* 05h */
+ U8 MaxBuses; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 ReplyFrameSize; /* 0Ch */
+ U8 Reserved1[2]; /* 0Eh */
+ U32 HostMfaHighAddr; /* 10h */
+ U32 SenseBufferHighAddr; /* 14h */
+ U32 ReplyFifoHostSignalingAddr; /* 18h */
+ SGE_SIMPLE_UNION HostPageBufferSGE; /* 1Ch */
+ U16 MsgVersion; /* 28h */
+ U16 HeaderVersion; /* 2Ah */
+} MSG_IOC_INIT, MPI_POINTER PTR_MSG_IOC_INIT,
+ IOCInit_t, MPI_POINTER pIOCInit_t;
+
+/* WhoInit values */
+#define MPI_WHOINIT_NO_ONE (0x00)
+#define MPI_WHOINIT_SYSTEM_BIOS (0x01)
+#define MPI_WHOINIT_ROM_BIOS (0x02)
+#define MPI_WHOINIT_PCI_PEER (0x03)
+#define MPI_WHOINIT_HOST_DRIVER (0x04)
+#define MPI_WHOINIT_MANUFACTURER (0x05)
+
+/* Flags values */
+#define MPI_IOCINIT_FLAGS_HOST_PAGE_BUFFER_PERSISTENT (0x04)
+#define MPI_IOCINIT_FLAGS_REPLY_FIFO_HOST_SIGNAL (0x02)
+#define MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE (0x01)
+
+/* MsgVersion */
+#define MPI_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI_IOCINIT_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI_IOCINIT_MSGVERSION_MINOR_SHIFT (0)
+
+/* HeaderVersion */
+#define MPI_IOCINIT_HEADERVERSION_UNIT_MASK (0xFF00)
+#define MPI_IOCINIT_HEADERVERSION_UNIT_SHIFT (8)
+#define MPI_IOCINIT_HEADERVERSION_DEV_MASK (0x00FF)
+#define MPI_IOCINIT_HEADERVERSION_DEV_SHIFT (0)
+
+
+typedef struct _MSG_IOC_INIT_REPLY
+{
+ U8 WhoInit; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 Flags; /* 04h */
+ U8 MaxDevices; /* 05h */
+ U8 MaxBuses; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved2; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_IOC_INIT_REPLY, MPI_POINTER PTR_MSG_IOC_INIT_REPLY,
+ IOCInitReply_t, MPI_POINTER pIOCInitReply_t;
+
+
+
+/****************************************************************************/
+/* IOC Facts message */
+/****************************************************************************/
+
+typedef struct _MSG_IOC_FACTS
+{
+ U8 Reserved[2]; /* 00h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[3]; /* 04h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+} MSG_IOC_FACTS, MPI_POINTER PTR_IOC_FACTS,
+ IOCFacts_t, MPI_POINTER pIOCFacts_t;
+
+typedef struct _MPI_FW_VERSION_STRUCT
+{
+ U8 Dev; /* 00h */
+ U8 Unit; /* 01h */
+ U8 Minor; /* 02h */
+ U8 Major; /* 03h */
+} MPI_FW_VERSION_STRUCT;
+
+typedef union _MPI_FW_VERSION
+{
+ MPI_FW_VERSION_STRUCT Struct;
+ U32 Word;
+} MPI_FW_VERSION;
+
+/* IOC Facts Reply */
+typedef struct _MSG_IOC_FACTS_REPLY
+{
+ U16 MsgVersion; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 HeaderVersion; /* 04h */
+ U8 IOCNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 IOCExceptions; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U8 MaxChainDepth; /* 14h */
+ U8 WhoInit; /* 15h */
+ U8 BlockSize; /* 16h */
+ U8 Flags; /* 17h */
+ U16 ReplyQueueDepth; /* 18h */
+ U16 RequestFrameSize; /* 1Ah */
+ U16 Reserved_0101_FWVersion; /* 1Ch */ /* obsolete 16-bit FWVersion */
+ U16 ProductID; /* 1Eh */
+ U32 CurrentHostMfaHighAddr; /* 20h */
+ U16 GlobalCredits; /* 24h */
+ U8 NumberOfPorts; /* 26h */
+ U8 EventState; /* 27h */
+ U32 CurrentSenseBufferHighAddr; /* 28h */
+ U16 CurReplyFrameSize; /* 2Ch */
+ U8 MaxDevices; /* 2Eh */
+ U8 MaxBuses; /* 2Fh */
+ U32 FWImageSize; /* 30h */
+ U32 IOCCapabilities; /* 34h */
+ MPI_FW_VERSION FWVersion; /* 38h */
+ U16 HighPriorityQueueDepth; /* 3Ch */
+ U16 Reserved2; /* 3Eh */
+ SGE_SIMPLE_UNION HostPageBufferSGE; /* 40h */
+ U32 ReplyFifoHostSignalingAddr; /* 4Ch */
+} MSG_IOC_FACTS_REPLY, MPI_POINTER PTR_MSG_IOC_FACTS_REPLY,
+ IOCFactsReply_t, MPI_POINTER pIOCFactsReply_t;
+
+#define MPI_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI_IOCFACTS_MSGVERSION_MINOR_SHIFT (0)
+
+#define MPI_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00)
+#define MPI_IOCFACTS_HDRVERSION_UNIT_SHIFT (8)
+#define MPI_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF)
+#define MPI_IOCFACTS_HDRVERSION_DEV_SHIFT (0)
+
+#define MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001)
+#define MPI_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002)
+#define MPI_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004)
+#define MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL (0x0008)
+#define MPI_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010)
+
+#define MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT (0x01)
+#define MPI_IOCFACTS_FLAGS_REPLY_FIFO_HOST_SIGNAL (0x02)
+#define MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT (0x04)
+
+#define MPI_IOCFACTS_EVENTSTATE_DISABLED (0x00)
+#define MPI_IOCFACTS_EVENTSTATE_ENABLED (0x01)
+
+#define MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q (0x00000001)
+#define MPI_IOCFACTS_CAPABILITY_REPLY_HOST_SIGNAL (0x00000002)
+#define MPI_IOCFACTS_CAPABILITY_QUEUE_FULL_HANDLING (0x00000004)
+#define MPI_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
+#define MPI_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
+#define MPI_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020)
+#define MPI_IOCFACTS_CAPABILITY_EEDP (0x00000040)
+#define MPI_IOCFACTS_CAPABILITY_BIDIRECTIONAL (0x00000080)
+#define MPI_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
+#define MPI_IOCFACTS_CAPABILITY_SCSIIO32 (0x00000200)
+#define MPI_IOCFACTS_CAPABILITY_NO_SCSIIO16 (0x00000400)
+#define MPI_IOCFACTS_CAPABILITY_TLR (0x00000800)
+
+
+/*****************************************************************************
+*
+* P o r t M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* Port Facts message and Reply */
+/****************************************************************************/
+
+typedef struct _MSG_PORT_FACTS
+{
+ U8 Reserved[2]; /* 00h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[2]; /* 04h */
+ U8 PortNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+} MSG_PORT_FACTS, MPI_POINTER PTR_MSG_PORT_FACTS,
+ PortFacts_t, MPI_POINTER pPortFacts_t;
+
+typedef struct _MSG_PORT_FACTS_REPLY
+{
+ U16 Reserved; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 PortNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved2; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U8 Reserved3; /* 14h */
+ U8 PortType; /* 15h */
+ U16 MaxDevices; /* 16h */
+ U16 PortSCSIID; /* 18h */
+ U16 ProtocolFlags; /* 1Ah */
+ U16 MaxPostedCmdBuffers; /* 1Ch */
+ U16 MaxPersistentIDs; /* 1Eh */
+ U16 MaxLanBuckets; /* 20h */
+ U8 MaxInitiators; /* 22h */
+ U8 Reserved4; /* 23h */
+ U32 Reserved5; /* 24h */
+} MSG_PORT_FACTS_REPLY, MPI_POINTER PTR_MSG_PORT_FACTS_REPLY,
+ PortFactsReply_t, MPI_POINTER pPortFactsReply_t;
+
+
+/* PortTypes values */
+
+#define MPI_PORTFACTS_PORTTYPE_INACTIVE (0x00)
+#define MPI_PORTFACTS_PORTTYPE_SCSI (0x01)
+#define MPI_PORTFACTS_PORTTYPE_FC (0x10)
+#define MPI_PORTFACTS_PORTTYPE_ISCSI (0x20)
+#define MPI_PORTFACTS_PORTTYPE_SAS (0x30)
+
+/* ProtocolFlags values */
+
+#define MPI_PORTFACTS_PROTOCOL_LOGBUSADDR (0x01)
+#define MPI_PORTFACTS_PROTOCOL_LAN (0x02)
+#define MPI_PORTFACTS_PROTOCOL_TARGET (0x04)
+#define MPI_PORTFACTS_PROTOCOL_INITIATOR (0x08)
+
+
+/****************************************************************************/
+/* Port Enable Message */
+/****************************************************************************/
+
+typedef struct _MSG_PORT_ENABLE
+{
+ U8 Reserved[2]; /* 00h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[2]; /* 04h */
+ U8 PortNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+} MSG_PORT_ENABLE, MPI_POINTER PTR_MSG_PORT_ENABLE,
+ PortEnable_t, MPI_POINTER pPortEnable_t;
+
+typedef struct _MSG_PORT_ENABLE_REPLY
+{
+ U8 Reserved[2]; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[2]; /* 04h */
+ U8 PortNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved2; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_PORT_ENABLE_REPLY, MPI_POINTER PTR_MSG_PORT_ENABLE_REPLY,
+ PortEnableReply_t, MPI_POINTER pPortEnableReply_t;
+
+
+/*****************************************************************************
+*
+* E v e n t M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* Event Notification messages */
+/****************************************************************************/
+
+typedef struct _MSG_EVENT_NOTIFY
+{
+ U8 Switch; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[3]; /* 04h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+} MSG_EVENT_NOTIFY, MPI_POINTER PTR_MSG_EVENT_NOTIFY,
+ EventNotification_t, MPI_POINTER pEventNotification_t;
+
+/* Event Notification Reply */
+
+typedef struct _MSG_EVENT_NOTIFY_REPLY
+{
+ U16 EventDataLength; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[2]; /* 04h */
+ U8 AckRequired; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 Reserved2[2]; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 Event; /* 14h */
+ U32 EventContext; /* 18h */
+ U32 Data[1]; /* 1Ch */
+} MSG_EVENT_NOTIFY_REPLY, MPI_POINTER PTR_MSG_EVENT_NOTIFY_REPLY,
+ EventNotificationReply_t, MPI_POINTER pEventNotificationReply_t;
+
+/* Event Acknowledge */
+
+typedef struct _MSG_EVENT_ACK
+{
+ U8 Reserved[2]; /* 00h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[3]; /* 04h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 Event; /* 0Ch */
+ U32 EventContext; /* 10h */
+} MSG_EVENT_ACK, MPI_POINTER PTR_MSG_EVENT_ACK,
+ EventAck_t, MPI_POINTER pEventAck_t;
+
+typedef struct _MSG_EVENT_ACK_REPLY
+{
+ U8 Reserved[2]; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[3]; /* 04h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved2; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_EVENT_ACK_REPLY, MPI_POINTER PTR_MSG_EVENT_ACK_REPLY,
+ EventAckReply_t, MPI_POINTER pEventAckReply_t;
+
+/* Switch */
+
+#define MPI_EVENT_NOTIFICATION_SWITCH_OFF (0x00)
+#define MPI_EVENT_NOTIFICATION_SWITCH_ON (0x01)
+
+/* Event */
+
+#define MPI_EVENT_NONE (0x00000000)
+#define MPI_EVENT_LOG_DATA (0x00000001)
+#define MPI_EVENT_STATE_CHANGE (0x00000002)
+#define MPI_EVENT_UNIT_ATTENTION (0x00000003)
+#define MPI_EVENT_IOC_BUS_RESET (0x00000004)
+#define MPI_EVENT_EXT_BUS_RESET (0x00000005)
+#define MPI_EVENT_RESCAN (0x00000006)
+#define MPI_EVENT_LINK_STATUS_CHANGE (0x00000007)
+#define MPI_EVENT_LOOP_STATE_CHANGE (0x00000008)
+#define MPI_EVENT_LOGOUT (0x00000009)
+#define MPI_EVENT_EVENT_CHANGE (0x0000000A)
+#define MPI_EVENT_INTEGRATED_RAID (0x0000000B)
+#define MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE (0x0000000C)
+#define MPI_EVENT_ON_BUS_TIMER_EXPIRED (0x0000000D)
+#define MPI_EVENT_QUEUE_FULL (0x0000000E)
+#define MPI_EVENT_SAS_DEVICE_STATUS_CHANGE (0x0000000F)
+#define MPI_EVENT_SAS_SES (0x00000010)
+#define MPI_EVENT_PERSISTENT_TABLE_FULL (0x00000011)
+#define MPI_EVENT_SAS_PHY_LINK_STATUS (0x00000012)
+#define MPI_EVENT_SAS_DISCOVERY_ERROR (0x00000013)
+#define MPI_EVENT_IR_RESYNC_UPDATE (0x00000014)
+#define MPI_EVENT_IR2 (0x00000015)
+#define MPI_EVENT_SAS_DISCOVERY (0x00000016)
+#define MPI_EVENT_SAS_BROADCAST_PRIMITIVE (0x00000017)
+#define MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x00000018)
+#define MPI_EVENT_SAS_INIT_TABLE_OVERFLOW (0x00000019)
+#define MPI_EVENT_SAS_SMP_ERROR (0x0000001A)
+#define MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE (0x0000001B)
+#define MPI_EVENT_LOG_ENTRY_ADDED (0x00000021)
+
+/* AckRequired field values */
+
+#define MPI_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00)
+#define MPI_EVENT_NOTIFICATION_ACK_REQUIRED (0x01)
+
+/* EventChange Event data */
+
+typedef struct _EVENT_DATA_EVENT_CHANGE
+{
+ U8 EventState; /* 00h */
+ U8 Reserved; /* 01h */
+ U16 Reserved1; /* 02h */
+} EVENT_DATA_EVENT_CHANGE, MPI_POINTER PTR_EVENT_DATA_EVENT_CHANGE,
+ EventDataEventChange_t, MPI_POINTER pEventDataEventChange_t;
+
+/* LogEntryAdded Event data */
+
+/* this structure matches MPI_LOG_0_ENTRY in mpi_cnfg.h */
+#define MPI_EVENT_DATA_LOG_ENTRY_DATA_LENGTH (0x1C)
+typedef struct _EVENT_DATA_LOG_ENTRY
+{
+ U32 TimeStamp; /* 00h */
+ U32 Reserved1; /* 04h */
+ U16 LogSequence; /* 08h */
+ U16 LogEntryQualifier; /* 0Ah */
+ U8 LogData[MPI_EVENT_DATA_LOG_ENTRY_DATA_LENGTH]; /* 0Ch */
+} EVENT_DATA_LOG_ENTRY, MPI_POINTER PTR_EVENT_DATA_LOG_ENTRY,
+ MpiEventDataLogEntry_t, MPI_POINTER pMpiEventDataLogEntry_t;
+
+typedef struct _EVENT_DATA_LOG_ENTRY_ADDED
+{
+ U16 LogSequence; /* 00h */
+ U16 Reserved1; /* 02h */
+ U32 Reserved2; /* 04h */
+ EVENT_DATA_LOG_ENTRY LogEntry; /* 08h */
+} EVENT_DATA_LOG_ENTRY_ADDED, MPI_POINTER PTR_EVENT_DATA_LOG_ENTRY_ADDED,
+ MpiEventDataLogEntryAdded_t, MPI_POINTER pMpiEventDataLogEntryAdded_t;
+
+/* SCSI Event data for Port, Bus and Device forms */
+
+typedef struct _EVENT_DATA_SCSI
+{
+ U8 TargetID; /* 00h */
+ U8 BusPort; /* 01h */
+ U16 Reserved; /* 02h */
+} EVENT_DATA_SCSI, MPI_POINTER PTR_EVENT_DATA_SCSI,
+ EventDataScsi_t, MPI_POINTER pEventDataScsi_t;
+
+/* SCSI Device Status Change Event data */
+
+typedef struct _EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 ReasonCode; /* 02h */
+ U8 LUN; /* 03h */
+ U8 ASC; /* 04h */
+ U8 ASCQ; /* 05h */
+ U16 Reserved; /* 06h */
+} EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE,
+ MPI_POINTER PTR_EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE,
+ MpiEventDataScsiDeviceStatusChange_t,
+ MPI_POINTER pMpiEventDataScsiDeviceStatusChange_t;
+
+/* MPI SCSI Device Status Change Event data ReasonCode values */
+#define MPI_EVENT_SCSI_DEV_STAT_RC_ADDED (0x03)
+#define MPI_EVENT_SCSI_DEV_STAT_RC_NOT_RESPONDING (0x04)
+#define MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA (0x05)
+
+/* SAS Device Status Change Event data */
+
+typedef struct _EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 ReasonCode; /* 02h */
+ U8 Reserved; /* 03h */
+ U8 ASC; /* 04h */
+ U8 ASCQ; /* 05h */
+ U16 DevHandle; /* 06h */
+ U32 DeviceInfo; /* 08h */
+ U16 ParentDevHandle; /* 0Ch */
+ U8 PhyNum; /* 0Eh */
+ U8 Reserved1; /* 0Fh */
+ U64 SASAddress; /* 10h */
+ U8 LUN[8]; /* 18h */
+ U16 TaskTag; /* 20h */
+ U16 Reserved2; /* 22h */
+} EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ MPI_POINTER PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ MpiEventDataSasDeviceStatusChange_t,
+ MPI_POINTER pMpiEventDataSasDeviceStatusChange_t;
+
+/* MPI SAS Device Status Change Event data ReasonCode values */
+#define MPI_EVENT_SAS_DEV_STAT_RC_ADDED (0x03)
+#define MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING (0x04)
+#define MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
+#define MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED (0x06)
+#define MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07)
+#define MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
+#define MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
+#define MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
+#define MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
+#define MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
+#define MPI_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
+#define MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET (0x0E)
+#define MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL (0x0F)
+
+
+/* SCSI Event data for Queue Full event */
+
+typedef struct _EVENT_DATA_QUEUE_FULL
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U16 CurrentDepth; /* 02h */
+} EVENT_DATA_QUEUE_FULL, MPI_POINTER PTR_EVENT_DATA_QUEUE_FULL,
+ EventDataQueueFull_t, MPI_POINTER pEventDataQueueFull_t;
+
+/* MPI Integrated RAID Event data */
+
+typedef struct _EVENT_DATA_RAID
+{
+ U8 VolumeID; /* 00h */
+ U8 VolumeBus; /* 01h */
+ U8 ReasonCode; /* 02h */
+ U8 PhysDiskNum; /* 03h */
+ U8 ASC; /* 04h */
+ U8 ASCQ; /* 05h */
+ U16 Reserved; /* 06h */
+ U32 SettingsStatus; /* 08h */
+} EVENT_DATA_RAID, MPI_POINTER PTR_EVENT_DATA_RAID,
+ MpiEventDataRaid_t, MPI_POINTER pMpiEventDataRaid_t;
+
+/* MPI Integrated RAID Event data ReasonCode values */
+#define MPI_EVENT_RAID_RC_VOLUME_CREATED (0x00)
+#define MPI_EVENT_RAID_RC_VOLUME_DELETED (0x01)
+#define MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED (0x02)
+#define MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED (0x03)
+#define MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED (0x04)
+#define MPI_EVENT_RAID_RC_PHYSDISK_CREATED (0x05)
+#define MPI_EVENT_RAID_RC_PHYSDISK_DELETED (0x06)
+#define MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED (0x07)
+#define MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED (0x08)
+#define MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED (0x09)
+#define MPI_EVENT_RAID_RC_SMART_DATA (0x0A)
+#define MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED (0x0B)
+
+
+/* MPI Integrated RAID Resync Update Event data */
+
+typedef struct _MPI_EVENT_DATA_IR_RESYNC_UPDATE
+{
+ U8 VolumeID; /* 00h */
+ U8 VolumeBus; /* 01h */
+ U8 ResyncComplete; /* 02h */
+ U8 Reserved1; /* 03h */
+ U32 Reserved2; /* 04h */
+} MPI_EVENT_DATA_IR_RESYNC_UPDATE,
+ MPI_POINTER PTR_MPI_EVENT_DATA_IR_RESYNC_UPDATE,
+ MpiEventDataIrResyncUpdate_t, MPI_POINTER pMpiEventDataIrResyncUpdate_t;
+
+/* MPI IR2 Event data */
+
+/* MPI_LD_STATE or MPI_PD_STATE */
+typedef struct _IR2_STATE_CHANGED
+{
+ U16 PreviousState; /* 00h */
+ U16 NewState; /* 02h */
+} IR2_STATE_CHANGED, MPI_POINTER PTR_IR2_STATE_CHANGED;
+
+typedef struct _IR2_PD_INFO
+{
+ U16 DeviceHandle; /* 00h */
+ U8 TruncEnclosureHandle; /* 02h */
+ U8 TruncatedSlot; /* 03h */
+} IR2_PD_INFO, MPI_POINTER PTR_IR2_PD_INFO;
+
+typedef union _MPI_IR2_RC_EVENT_DATA
+{
+ IR2_STATE_CHANGED StateChanged;
+ U32 Lba;
+ IR2_PD_INFO PdInfo;
+} MPI_IR2_RC_EVENT_DATA, MPI_POINTER PTR_MPI_IR2_RC_EVENT_DATA;
+
+typedef struct _MPI_EVENT_DATA_IR2
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 ReasonCode; /* 02h */
+ U8 PhysDiskNum; /* 03h */
+ MPI_IR2_RC_EVENT_DATA IR2EventData; /* 04h */
+} MPI_EVENT_DATA_IR2, MPI_POINTER PTR_MPI_EVENT_DATA_IR2,
+ MpiEventDataIR2_t, MPI_POINTER pMpiEventDataIR2_t;
+
+/* MPI IR2 Event data ReasonCode values */
+#define MPI_EVENT_IR2_RC_LD_STATE_CHANGED (0x01)
+#define MPI_EVENT_IR2_RC_PD_STATE_CHANGED (0x02)
+#define MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL (0x03)
+#define MPI_EVENT_IR2_RC_PD_INSERTED (0x04)
+#define MPI_EVENT_IR2_RC_PD_REMOVED (0x05)
+#define MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED (0x06)
+#define MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR (0x07)
+#define MPI_EVENT_IR2_RC_DUAL_PORT_ADDED (0x08)
+#define MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED (0x09)
+
+/* defines for logical disk states */
+#define MPI_LD_STATE_OPTIMAL (0x00)
+#define MPI_LD_STATE_DEGRADED (0x01)
+#define MPI_LD_STATE_FAILED (0x02)
+#define MPI_LD_STATE_MISSING (0x03)
+#define MPI_LD_STATE_OFFLINE (0x04)
+
+/* defines for physical disk states */
+#define MPI_PD_STATE_ONLINE (0x00)
+#define MPI_PD_STATE_MISSING (0x01)
+#define MPI_PD_STATE_NOT_COMPATIBLE (0x02)
+#define MPI_PD_STATE_FAILED (0x03)
+#define MPI_PD_STATE_INITIALIZING (0x04)
+#define MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST (0x05)
+#define MPI_PD_STATE_FAILED_AT_HOST_REQUEST (0x06)
+#define MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON (0xFF)
+
+/* MPI Link Status Change Event data */
+
+typedef struct _EVENT_DATA_LINK_STATUS
+{
+ U8 State; /* 00h */
+ U8 Reserved; /* 01h */
+ U16 Reserved1; /* 02h */
+ U8 Reserved2; /* 04h */
+ U8 Port; /* 05h */
+ U16 Reserved3; /* 06h */
+} EVENT_DATA_LINK_STATUS, MPI_POINTER PTR_EVENT_DATA_LINK_STATUS,
+ EventDataLinkStatus_t, MPI_POINTER pEventDataLinkStatus_t;
+
+#define MPI_EVENT_LINK_STATUS_FAILURE (0x00000000)
+#define MPI_EVENT_LINK_STATUS_ACTIVE (0x00000001)
+
+/* MPI Loop State Change Event data */
+
+typedef struct _EVENT_DATA_LOOP_STATE
+{
+ U8 Character4; /* 00h */
+ U8 Character3; /* 01h */
+ U8 Type; /* 02h */
+ U8 Reserved; /* 03h */
+ U8 Reserved1; /* 04h */
+ U8 Port; /* 05h */
+ U16 Reserved2; /* 06h */
+} EVENT_DATA_LOOP_STATE, MPI_POINTER PTR_EVENT_DATA_LOOP_STATE,
+ EventDataLoopState_t, MPI_POINTER pEventDataLoopState_t;
+
+#define MPI_EVENT_LOOP_STATE_CHANGE_LIP (0x0001)
+#define MPI_EVENT_LOOP_STATE_CHANGE_LPE (0x0002)
+#define MPI_EVENT_LOOP_STATE_CHANGE_LPB (0x0003)
+
+/* MPI LOGOUT Event data */
+
+typedef struct _EVENT_DATA_LOGOUT
+{
+ U32 NPortID; /* 00h */
+ U8 AliasIndex; /* 04h */
+ U8 Port; /* 05h */
+ U16 Reserved1; /* 06h */
+} EVENT_DATA_LOGOUT, MPI_POINTER PTR_EVENT_DATA_LOGOUT,
+ EventDataLogout_t, MPI_POINTER pEventDataLogout_t;
+
+#define MPI_EVENT_LOGOUT_ALL_ALIASES (0xFF)
+
+/* SAS SES Event data */
+
+typedef struct _EVENT_DATA_SAS_SES
+{
+ U8 PhyNum; /* 00h */
+ U8 Port; /* 01h */
+ U8 PortWidth; /* 02h */
+ U8 Reserved1; /* 03h */
+} EVENT_DATA_SAS_SES, MPI_POINTER PTR_EVENT_DATA_SAS_SES,
+ MpiEventDataSasSes_t, MPI_POINTER pMpiEventDataSasSes_t;
+
+/* SAS Broadcast Primitive Event data */
+
+typedef struct _EVENT_DATA_SAS_BROADCAST_PRIMITIVE
+{
+ U8 PhyNum; /* 00h */
+ U8 Port; /* 01h */
+ U8 PortWidth; /* 02h */
+ U8 Primitive; /* 03h */
+} EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ MPI_POINTER PTR_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ MpiEventDataSasBroadcastPrimitive_t,
+ MPI_POINTER pMpiEventDataSasBroadcastPrimitive_t;
+
+#define MPI_EVENT_PRIMITIVE_CHANGE (0x01)
+#define MPI_EVENT_PRIMITIVE_EXPANDER (0x03)
+#define MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
+#define MPI_EVENT_PRIMITIVE_RESERVED3 (0x05)
+#define MPI_EVENT_PRIMITIVE_RESERVED4 (0x06)
+#define MPI_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07)
+#define MPI_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08)
+
+/* SAS Phy Link Status Event data */
+
+typedef struct _EVENT_DATA_SAS_PHY_LINK_STATUS
+{
+ U8 PhyNum; /* 00h */
+ U8 LinkRates; /* 01h */
+ U16 DevHandle; /* 02h */
+ U64 SASAddress; /* 04h */
+} EVENT_DATA_SAS_PHY_LINK_STATUS, MPI_POINTER PTR_EVENT_DATA_SAS_PHY_LINK_STATUS,
+ MpiEventDataSasPhyLinkStatus_t, MPI_POINTER pMpiEventDataSasPhyLinkStatus_t;
+
+/* defines for the LinkRates field of the SAS PHY Link Status event */
+#define MPI_EVENT_SAS_PLS_LR_CURRENT_MASK (0xF0)
+#define MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT (4)
+#define MPI_EVENT_SAS_PLS_LR_PREVIOUS_MASK (0x0F)
+#define MPI_EVENT_SAS_PLS_LR_PREVIOUS_SHIFT (0)
+#define MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN (0x00)
+#define MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED (0x01)
+#define MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION (0x02)
+#define MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI_EVENT_SAS_PLS_LR_RATE_1_5 (0x08)
+#define MPI_EVENT_SAS_PLS_LR_RATE_3_0 (0x09)
+#define MPI_EVENT_SAS_PLS_LR_RATE_6_0 (0x0A)
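+
+/*
+ * Illustrative sketch (not part of the original MPI headers): the LinkRates
+ * byte of the SAS PHY Link Status event packs the current rate in the upper
+ * nibble and the previous rate in the lower nibble, so it can be decoded with
+ * the masks and shifts above. The helper names here are assumptions made for
+ * this example only.
+ */
+#ifdef MPI_EVENT_SAS_PLS_EXAMPLE
+static inline U8 mpi_example_sas_pls_current_rate(U8 LinkRates)
+{
+    /* e.g. returns MPI_EVENT_SAS_PLS_LR_RATE_3_0 for a 3.0 Gbps link */
+    return (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK)
+            >> MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT;
+}
+
+static inline U8 mpi_example_sas_pls_previous_rate(U8 LinkRates)
+{
+    return (LinkRates & MPI_EVENT_SAS_PLS_LR_PREVIOUS_MASK)
+            >> MPI_EVENT_SAS_PLS_LR_PREVIOUS_SHIFT;
+}
+#endif /* MPI_EVENT_SAS_PLS_EXAMPLE */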
+
+/* SAS Discovery Event data */
+
+typedef struct _EVENT_DATA_SAS_DISCOVERY
+{
+ U32 DiscoveryStatus; /* 00h */
+ U32 Reserved1; /* 04h */
+} EVENT_DATA_SAS_DISCOVERY, MPI_POINTER PTR_EVENT_DATA_SAS_DISCOVERY,
+ EventDataSasDiscovery_t, MPI_POINTER pEventDataSasDiscovery_t;
+
+#define MPI_EVENT_SAS_DSCVRY_COMPLETE (0x00000000)
+#define MPI_EVENT_SAS_DSCVRY_IN_PROGRESS (0x00000001)
+#define MPI_EVENT_SAS_DSCVRY_PHY_BITS_MASK (0xFFFF0000)
+#define MPI_EVENT_SAS_DSCVRY_PHY_BITS_SHIFT (16)
+
+/* SAS Discovery Error Event data */
+
+typedef struct _EVENT_DATA_DISCOVERY_ERROR
+{
+ U32 DiscoveryStatus; /* 00h */
+ U8 Port; /* 04h */
+ U8 Reserved1; /* 05h */
+ U16 Reserved2; /* 06h */
+} EVENT_DATA_DISCOVERY_ERROR, MPI_POINTER PTR_EVENT_DATA_DISCOVERY_ERROR,
+ EventDataDiscoveryError_t, MPI_POINTER pEventDataDiscoveryError_t;
+
+#define MPI_EVENT_DSCVRY_ERR_DS_LOOP_DETECTED (0x00000001)
+#define MPI_EVENT_DSCVRY_ERR_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI_EVENT_DSCVRY_ERR_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI_EVENT_DSCVRY_ERR_DS_EXPANDER_ERR (0x00000008)
+#define MPI_EVENT_DSCVRY_ERR_DS_SMP_TIMEOUT (0x00000010)
+#define MPI_EVENT_DSCVRY_ERR_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI_EVENT_DSCVRY_ERR_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI_EVENT_DSCVRY_ERR_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI_EVENT_DSCVRY_ERR_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI_EVENT_DSCVRY_ERR_DS_MULTPL_SUBTRACTIVE (0x00000200)
+#define MPI_EVENT_DSCVRY_ERR_DS_TABLE_TO_TABLE (0x00000400)
+#define MPI_EVENT_DSCVRY_ERR_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI_EVENT_DSCVRY_ERR_DS_MAX_SATA_TARGETS (0x00001000)
+#define MPI_EVENT_DSCVRY_ERR_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI_EVENT_DSCVRY_ERR_DS_SATA_INIT_FAILURE (0x00004000)
+
+/* SAS SMP Error Event data */
+
+typedef struct _EVENT_DATA_SAS_SMP_ERROR
+{
+ U8 Status; /* 00h */
+ U8 Port; /* 01h */
+ U8 SMPFunctionResult; /* 02h */
+ U8 Reserved1; /* 03h */
+ U64 SASAddress; /* 04h */
+} EVENT_DATA_SAS_SMP_ERROR, MPI_POINTER PTR_EVENT_DATA_SAS_SMP_ERROR,
+ MpiEventDataSasSmpError_t, MPI_POINTER pMpiEventDataSasSmpError_t;
+
+/* defines for the Status field of the SAS SMP Error event */
+#define MPI_EVENT_SAS_SMP_FUNCTION_RESULT_VALID (0x00)
+#define MPI_EVENT_SAS_SMP_CRC_ERROR (0x01)
+#define MPI_EVENT_SAS_SMP_TIMEOUT (0x02)
+#define MPI_EVENT_SAS_SMP_NO_DESTINATION (0x03)
+#define MPI_EVENT_SAS_SMP_BAD_DESTINATION (0x04)
+
+/* SAS Initiator Device Status Change Event data */
+
+typedef struct _EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE
+{
+ U8 ReasonCode; /* 00h */
+ U8 Port; /* 01h */
+ U16 DevHandle; /* 02h */
+ U64 SASAddress; /* 04h */
+} EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ MPI_POINTER PTR_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ MpiEventDataSasInitDevStatusChange_t,
+ MPI_POINTER pMpiEventDataSasInitDevStatusChange_t;
+
+/* defines for the ReasonCode field of the SAS Initiator Device Status Change event */
+#define MPI_EVENT_SAS_INIT_RC_ADDED (0x01)
+#define MPI_EVENT_SAS_INIT_RC_REMOVED (0x02)
+#define MPI_EVENT_SAS_INIT_RC_INACCESSIBLE (0x03)
+
+/* SAS Initiator Device Table Overflow Event data */
+
+typedef struct _EVENT_DATA_SAS_INIT_TABLE_OVERFLOW
+{
+ U8 MaxInit; /* 00h */
+ U8 CurrentInit; /* 01h */
+ U16 Reserved1; /* 02h */
+ U64 SASAddress; /* 04h */
+} EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ MPI_POINTER PTR_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ MpiEventDataSasInitTableOverflow_t,
+ MPI_POINTER pMpiEventDataSasInitTableOverflow_t;
+
+/* SAS Expander Status Change Event data */
+
+typedef struct _EVENT_DATA_SAS_EXPANDER_STATUS_CHANGE
+{
+ U8 ReasonCode; /* 00h */
+ U8 Reserved1; /* 01h */
+ U16 Reserved2; /* 02h */
+ U8 PhysicalPort; /* 04h */
+ U8 Reserved3; /* 05h */
+ U16 EnclosureHandle; /* 06h */
+ U64 SASAddress; /* 08h */
+ U32 DiscoveryStatus; /* 10h */
+ U16 DevHandle; /* 14h */
+ U16 ParentDevHandle; /* 16h */
+ U16 ExpanderChangeCount; /* 18h */
+ U16 ExpanderRouteIndexes; /* 1Ah */
+ U8 NumPhys; /* 1Ch */
+ U8 SASLevel; /* 1Dh */
+ U8 Flags; /* 1Eh */
+ U8 Reserved4; /* 1Fh */
+} EVENT_DATA_SAS_EXPANDER_STATUS_CHANGE,
+ MPI_POINTER PTR_EVENT_DATA_SAS_EXPANDER_STATUS_CHANGE,
+ MpiEventDataSasExpanderStatusChange_t,
+ MPI_POINTER pMpiEventDataSasExpanderStatusChange_t;
+
+/* values for ReasonCode field of SAS Expander Status Change Event data */
+#define MPI_EVENT_SAS_EXP_RC_ADDED (0x00)
+#define MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING (0x01)
+
+/* values for DiscoveryStatus field of SAS Expander Status Change Event data */
+#define MPI_EVENT_SAS_EXP_DS_LOOP_DETECTED (0x00000001)
+#define MPI_EVENT_SAS_EXP_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI_EVENT_SAS_EXP_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI_EVENT_SAS_EXP_DS_EXPANDER_ERR (0x00000008)
+#define MPI_EVENT_SAS_EXP_DS_SMP_TIMEOUT (0x00000010)
+#define MPI_EVENT_SAS_EXP_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI_EVENT_SAS_EXP_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI_EVENT_SAS_EXP_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI_EVENT_SAS_EXP_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI_EVENT_SAS_EXP_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI_EVENT_SAS_EXP_DS_TABLE_LINK (0x00000400)
+#define MPI_EVENT_SAS_EXP_DS_UNSUPPORTED_DEVICE (0x00000800)
+
+/* values for Flags field of SAS Expander Status Change Event data */
+#define MPI_EVENT_SAS_EXP_FLAGS_ROUTE_TABLE_CONFIG (0x02)
+#define MPI_EVENT_SAS_EXP_FLAGS_CONFIG_IN_PROGRESS (0x01)
+
+
+
+/*****************************************************************************
+*
+* F i r m w a r e L o a d M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* Firmware Download message and associated structures */
+/****************************************************************************/
+
+typedef struct _MSG_FW_DOWNLOAD
+{
+ U8 ImageType; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[3]; /* 04h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ SGE_MPI_UNION SGL; /* 0Ch */
+} MSG_FW_DOWNLOAD, MPI_POINTER PTR_MSG_FW_DOWNLOAD,
+ FWDownload_t, MPI_POINTER pFWDownload_t;
+
+#define MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01)
+
+#define MPI_FW_DOWNLOAD_ITYPE_RESERVED (0x00)
+#define MPI_FW_DOWNLOAD_ITYPE_FW (0x01)
+#define MPI_FW_DOWNLOAD_ITYPE_BIOS (0x02)
+#define MPI_FW_DOWNLOAD_ITYPE_NVDATA (0x03)
+#define MPI_FW_DOWNLOAD_ITYPE_BOOTLOADER (0x04)
+#define MPI_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
+#define MPI_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+
+
+typedef struct _FWDownloadTCSGE
+{
+ U8 Reserved; /* 00h */
+ U8 ContextSize; /* 01h */
+ U8 DetailsLength; /* 02h */
+ U8 Flags; /* 03h */
+ U32 Reserved_0100_Checksum; /* 04h */ /* obsolete Checksum */
+ U32 ImageOffset; /* 08h */
+ U32 ImageSize; /* 0Ch */
+} FW_DOWNLOAD_TCSGE, MPI_POINTER PTR_FW_DOWNLOAD_TCSGE,
+ FWDownloadTCSGE_t, MPI_POINTER pFWDownloadTCSGE_t;
+
+/* Firmware Download reply */
+typedef struct _MSG_FW_DOWNLOAD_REPLY
+{
+ U8 ImageType; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[3]; /* 04h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved2; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_FW_DOWNLOAD_REPLY, MPI_POINTER PTR_MSG_FW_DOWNLOAD_REPLY,
+ FWDownloadReply_t, MPI_POINTER pFWDownloadReply_t;
+
+
+/****************************************************************************/
+/* Firmware Upload message and associated structures */
+/****************************************************************************/
+
+typedef struct _MSG_FW_UPLOAD
+{
+ U8 ImageType; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[3]; /* 04h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ SGE_MPI_UNION SGL; /* 0Ch */
+} MSG_FW_UPLOAD, MPI_POINTER PTR_MSG_FW_UPLOAD,
+ FWUpload_t, MPI_POINTER pFWUpload_t;
+
+#define MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM (0x00)
+#define MPI_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
+#define MPI_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
+#define MPI_FW_UPLOAD_ITYPE_NVDATA (0x03)
+#define MPI_FW_UPLOAD_ITYPE_BOOTLOADER (0x04)
+#define MPI_FW_UPLOAD_ITYPE_FW_BACKUP (0x05)
+#define MPI_FW_UPLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI_FW_UPLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI_FW_UPLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI_FW_UPLOAD_ITYPE_MEGARAID (0x09)
+#define MPI_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+
+typedef struct _FWUploadTCSGE
+{
+ U8 Reserved; /* 00h */
+ U8 ContextSize; /* 01h */
+ U8 DetailsLength; /* 02h */
+ U8 Flags; /* 03h */
+ U32 Reserved1; /* 04h */
+ U32 ImageOffset; /* 08h */
+ U32 ImageSize; /* 0Ch */
+} FW_UPLOAD_TCSGE, MPI_POINTER PTR_FW_UPLOAD_TCSGE,
+ FWUploadTCSGE_t, MPI_POINTER pFWUploadTCSGE_t;
+
+/* Firmware Upload reply */
+typedef struct _MSG_FW_UPLOAD_REPLY
+{
+ U8 ImageType; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved1[3]; /* 04h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved2; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 ActualImageSize; /* 14h */
+} MSG_FW_UPLOAD_REPLY, MPI_POINTER PTR_MSG_FW_UPLOAD_REPLY,
+ FWUploadReply_t, MPI_POINTER pFWUploadReply_t;
+
+
+typedef struct _MPI_FW_HEADER
+{
+ U32 ArmBranchInstruction0; /* 00h */
+ U32 Signature0; /* 04h */
+ U32 Signature1; /* 08h */
+ U32 Signature2; /* 0Ch */
+ U32 ArmBranchInstruction1; /* 10h */
+ U32 ArmBranchInstruction2; /* 14h */
+ U32 Reserved; /* 18h */
+ U32 Checksum; /* 1Ch */
+ U16 VendorId; /* 20h */
+ U16 ProductId; /* 22h */
+ MPI_FW_VERSION FWVersion; /* 24h */
+ U32 SeqCodeVersion; /* 28h */
+ U32 ImageSize; /* 2Ch */
+ U32 NextImageHeaderOffset; /* 30h */
+ U32 LoadStartAddress; /* 34h */
+ U32 IopResetVectorValue; /* 38h */
+ U32 IopResetRegAddr; /* 3Ch */
+ U32 VersionNameWhat; /* 40h */
+ U8 VersionName[32]; /* 44h */
+ U32 VendorNameWhat; /* 64h */
+ U8 VendorName[32]; /* 68h */
+} MPI_FW_HEADER, MPI_POINTER PTR_MPI_FW_HEADER,
+ MpiFwHeader_t, MPI_POINTER pMpiFwHeader_t;
+
+#define MPI_FW_HEADER_WHAT_SIGNATURE (0x29232840)
+
+/* defines for using the ProductId field */
+#define MPI_FW_HEADER_PID_TYPE_MASK (0xF000)
+#define MPI_FW_HEADER_PID_TYPE_SCSI (0x0000)
+#define MPI_FW_HEADER_PID_TYPE_FC (0x1000)
+#define MPI_FW_HEADER_PID_TYPE_SAS (0x2000)
+
+#define MPI_FW_HEADER_SIGNATURE_0 (0x5AEAA55A)
+#define MPI_FW_HEADER_SIGNATURE_1 (0xA55AEAA5)
+#define MPI_FW_HEADER_SIGNATURE_2 (0x5AA55AEA)
+
+#define MPI_FW_HEADER_PID_PROD_MASK (0x0F00)
+#define MPI_FW_HEADER_PID_PROD_INITIATOR_SCSI (0x0100)
+#define MPI_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
+#define MPI_FW_HEADER_PID_PROD_TARGET_SCSI (0x0300)
+#define MPI_FW_HEADER_PID_PROD_IM_SCSI (0x0400)
+#define MPI_FW_HEADER_PID_PROD_IS_SCSI (0x0500)
+#define MPI_FW_HEADER_PID_PROD_CTX_SCSI (0x0600)
+#define MPI_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
+
+#define MPI_FW_HEADER_PID_FAMILY_MASK (0x00FF)
+/* SCSI */
+#define MPI_FW_HEADER_PID_FAMILY_1030A0_SCSI (0x0001)
+#define MPI_FW_HEADER_PID_FAMILY_1030B0_SCSI (0x0002)
+#define MPI_FW_HEADER_PID_FAMILY_1030B1_SCSI (0x0003)
+#define MPI_FW_HEADER_PID_FAMILY_1030C0_SCSI (0x0004)
+#define MPI_FW_HEADER_PID_FAMILY_1020A0_SCSI (0x0005)
+#define MPI_FW_HEADER_PID_FAMILY_1020B0_SCSI (0x0006)
+#define MPI_FW_HEADER_PID_FAMILY_1020B1_SCSI (0x0007)
+#define MPI_FW_HEADER_PID_FAMILY_1020C0_SCSI (0x0008)
+#define MPI_FW_HEADER_PID_FAMILY_1035A0_SCSI (0x0009)
+#define MPI_FW_HEADER_PID_FAMILY_1035B0_SCSI (0x000A)
+#define MPI_FW_HEADER_PID_FAMILY_1030TA0_SCSI (0x000B)
+#define MPI_FW_HEADER_PID_FAMILY_1020TA0_SCSI (0x000C)
+/* Fibre Channel */
+#define MPI_FW_HEADER_PID_FAMILY_909_FC (0x0000)
+#define MPI_FW_HEADER_PID_FAMILY_919_FC (0x0001) /* 919 and 929 */
+#define MPI_FW_HEADER_PID_FAMILY_919X_FC (0x0002) /* 919X and 929X */
+#define MPI_FW_HEADER_PID_FAMILY_919XL_FC (0x0003) /* 919XL and 929XL */
+#define MPI_FW_HEADER_PID_FAMILY_939X_FC (0x0004) /* 939X and 949X */
+#define MPI_FW_HEADER_PID_FAMILY_959_FC (0x0005)
+#define MPI_FW_HEADER_PID_FAMILY_949E_FC (0x0006)
+/* SAS */
+#define MPI_FW_HEADER_PID_FAMILY_1064_SAS (0x0001)
+#define MPI_FW_HEADER_PID_FAMILY_1068_SAS (0x0002)
+#define MPI_FW_HEADER_PID_FAMILY_1078_SAS (0x0003)
+#define MPI_FW_HEADER_PID_FAMILY_106xE_SAS (0x0004) /* 1068E, 1066E, and 1064E */
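+
+/*
+ * Illustrative sketch (not part of the original MPI headers): how a driver
+ * might split the ProductId field of MPI_FW_HEADER into its type, product,
+ * and family components using the masks defined above. The helper name and
+ * the example guard macro are assumptions made for this example only.
+ */
+#ifdef MPI_FW_HEADER_PID_EXAMPLE
+static inline void mpi_example_decode_pid(U16 ProductId,
+                                          U16 *Type, U16 *Prod, U16 *Family)
+{
+    *Type   = ProductId & MPI_FW_HEADER_PID_TYPE_MASK;   /* e.g. _PID_TYPE_SAS */
+    *Prod   = ProductId & MPI_FW_HEADER_PID_PROD_MASK;   /* e.g. _PID_PROD_IR_SCSI */
+    *Family = ProductId & MPI_FW_HEADER_PID_FAMILY_MASK; /* e.g. _PID_FAMILY_1068_SAS */
+}
+#endif /* MPI_FW_HEADER_PID_EXAMPLE */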
+
+typedef struct _MPI_EXT_IMAGE_HEADER
+{
+ U8 ImageType; /* 00h */
+ U8 Reserved; /* 01h */
+ U16 Reserved1; /* 02h */
+ U32 Checksum; /* 04h */
+ U32 ImageSize; /* 08h */
+ U32 NextImageHeaderOffset; /* 0Ch */
+ U32 LoadStartAddress; /* 10h */
+ U32 Reserved2; /* 14h */
+} MPI_EXT_IMAGE_HEADER, MPI_POINTER PTR_MPI_EXT_IMAGE_HEADER,
+ MpiExtImageHeader_t, MPI_POINTER pMpiExtImageHeader_t;
+
+/* defines for the ImageType field */
+#define MPI_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+
+#endif
diff --git a/drivers/message/fusion/lsi/mpi_lan.h b/drivers/message/fusion/lsi/mpi_lan.h
new file mode 100644
index 00000000..f41fcb69
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_lan.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2000-2008 LSI Corporation.
+ *
+ *
+ * Name: mpi_lan.h
+ * Title: MPI LAN messages and structures
+ * Creation Date: June 30, 2000
+ *
+ * mpi_lan.h Version: 01.05.01
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 05-24-00 00.10.02 Added LANStatus field to _MSG_LAN_SEND_REPLY.
+ * Added LANStatus field to _MSG_LAN_RECEIVE_POST_REPLY.
+ * Moved ListCount field in _MSG_LAN_RECEIVE_POST_REPLY.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 06-12-00 01.00.02 Added MPI_ to BUCKETSTATUS_ definitions.
+ * 06-22-00 01.00.03 Major changes to match new LAN definition in 1.0 spec.
+ * 06-30-00 01.00.04 Added Context Reply definitions per revised proposal.
+ * Changed transaction context usage to bucket/buffer.
+ * 07-05-00 01.00.05 Moved LAN_RECEIVE_POST_BUCKET_CONTEXT_MASK definition
+ * to the LAN private header file.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * 02-20-01 01.01.02 Started using MPI_POINTER.
+ * 03-27-01 01.01.03 Added structure offset comments.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Original release for MPI v1.5.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_LAN_H
+#define MPI_LAN_H
+
+
+/******************************************************************************
+*
+* L A N M e s s a g e s
+*
+*******************************************************************************/
+
+/* LANSend messages */
+
+typedef struct _MSG_LAN_SEND_REQUEST
+{
+ U16 Reserved; /* 00h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved2; /* 04h */
+ U8 PortNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ SGE_MPI_UNION SG_List[1]; /* 0Ch */
+} MSG_LAN_SEND_REQUEST, MPI_POINTER PTR_MSG_LAN_SEND_REQUEST,
+ LANSendRequest_t, MPI_POINTER pLANSendRequest_t;
+
+
+typedef struct _MSG_LAN_SEND_REPLY
+{
+ U16 Reserved; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved2; /* 04h */
+ U8 NumberOfContexts; /* 05h */
+ U8 PortNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 BufferContext; /* 14h */
+} MSG_LAN_SEND_REPLY, MPI_POINTER PTR_MSG_LAN_SEND_REPLY,
+ LANSendReply_t, MPI_POINTER pLANSendReply_t;
+
+
+/* LANReceivePost */
+
+typedef struct _MSG_LAN_RECEIVE_POST_REQUEST
+{
+ U16 Reserved; /* 00h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved2; /* 04h */
+ U8 PortNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 BucketCount; /* 0Ch */
+ SGE_MPI_UNION SG_List[1]; /* 10h */
+} MSG_LAN_RECEIVE_POST_REQUEST, MPI_POINTER PTR_MSG_LAN_RECEIVE_POST_REQUEST,
+ LANReceivePostRequest_t, MPI_POINTER pLANReceivePostRequest_t;
+
+
+typedef struct _MSG_LAN_RECEIVE_POST_REPLY
+{
+ U16 Reserved; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 Reserved2; /* 04h */
+ U8 NumberOfContexts; /* 05h */
+ U8 PortNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 BucketsRemaining; /* 14h */
+ U32 PacketOffset; /* 18h */
+ U32 PacketLength; /* 1Ch */
+ U32 BucketContext[1]; /* 20h */
+} MSG_LAN_RECEIVE_POST_REPLY, MPI_POINTER PTR_MSG_LAN_RECEIVE_POST_REPLY,
+ LANReceivePostReply_t, MPI_POINTER pLANReceivePostReply_t;
+
+
+/* LANReset */
+
+typedef struct _MSG_LAN_RESET_REQUEST
+{
+ U16 Reserved; /* 00h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved2; /* 04h */
+ U8 PortNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+} MSG_LAN_RESET_REQUEST, MPI_POINTER PTR_MSG_LAN_RESET_REQUEST,
+ LANResetRequest_t, MPI_POINTER pLANResetRequest_t;
+
+
+typedef struct _MSG_LAN_RESET_REPLY
+{
+ U16 Reserved; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved2; /* 04h */
+ U8 PortNumber; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_LAN_RESET_REPLY, MPI_POINTER PTR_MSG_LAN_RESET_REPLY,
+ LANResetReply_t, MPI_POINTER pLANResetReply_t;
+
+
+/****************************************************************************/
+/* LAN Context Reply defines and macros */
+/****************************************************************************/
+
+#define LAN_REPLY_PACKET_LENGTH_MASK (0x0000FFFF)
+#define LAN_REPLY_PACKET_LENGTH_SHIFT (0)
+#define LAN_REPLY_BUCKET_CONTEXT_MASK (0x07FF0000)
+#define LAN_REPLY_BUCKET_CONTEXT_SHIFT (16)
+#define LAN_REPLY_BUFFER_CONTEXT_MASK (0x07FFFFFF)
+#define LAN_REPLY_BUFFER_CONTEXT_SHIFT (0)
+#define LAN_REPLY_FORM_MASK (0x18000000)
+#define LAN_REPLY_FORM_RECEIVE_SINGLE (0x00)
+#define LAN_REPLY_FORM_RECEIVE_MULTIPLE (0x01)
+#define LAN_REPLY_FORM_SEND_SINGLE (0x02)
+#define LAN_REPLY_FORM_MESSAGE_CONTEXT (0x03)
+#define LAN_REPLY_FORM_SHIFT (27)
+
+#define GET_LAN_PACKET_LENGTH(x) (((x) & LAN_REPLY_PACKET_LENGTH_MASK) \
+ >> LAN_REPLY_PACKET_LENGTH_SHIFT)
+
+#define SET_LAN_PACKET_LENGTH(x, lth) \
+ ((x) = ((x) & ~LAN_REPLY_PACKET_LENGTH_MASK) | \
+ (((lth) << LAN_REPLY_PACKET_LENGTH_SHIFT) & \
+ LAN_REPLY_PACKET_LENGTH_MASK))
+
+#define GET_LAN_BUCKET_CONTEXT(x) (((x) & LAN_REPLY_BUCKET_CONTEXT_MASK) \
+ >> LAN_REPLY_BUCKET_CONTEXT_SHIFT)
+
+#define SET_LAN_BUCKET_CONTEXT(x, ctx) \
+ ((x) = ((x) & ~LAN_REPLY_BUCKET_CONTEXT_MASK) | \
+ (((ctx) << LAN_REPLY_BUCKET_CONTEXT_SHIFT) & \
+ LAN_REPLY_BUCKET_CONTEXT_MASK))
+
+#define GET_LAN_BUFFER_CONTEXT(x) (((x) & LAN_REPLY_BUFFER_CONTEXT_MASK) \
+ >> LAN_REPLY_BUFFER_CONTEXT_SHIFT)
+
+#define SET_LAN_BUFFER_CONTEXT(x, ctx) \
+ ((x) = ((x) & ~LAN_REPLY_BUFFER_CONTEXT_MASK) | \
+ (((ctx) << LAN_REPLY_BUFFER_CONTEXT_SHIFT) & \
+ LAN_REPLY_BUFFER_CONTEXT_MASK))
+
+#define GET_LAN_FORM(x) (((x) & LAN_REPLY_FORM_MASK) \
+ >> LAN_REPLY_FORM_SHIFT)
+
+#define SET_LAN_FORM(x, frm) \
+ ((x) = ((x) & ~LAN_REPLY_FORM_MASK) | \
+ (((frm) << LAN_REPLY_FORM_SHIFT) & \
+ LAN_REPLY_FORM_MASK))
+
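+/*
+ * Illustrative sketch (not part of the original MPI headers): a LAN receive
+ * context reply word carries the reply form in bits 27-28 and, for the
+ * single-receive form, the bucket context and packet length, which the
+ * GET_LAN_* macros above extract. The function name and the example guard
+ * macro are assumptions made for this example only.
+ */
+#ifdef MPI_LAN_CONTEXT_REPLY_EXAMPLE
+static inline int mpi_example_is_single_receive(U32 ContextReply,
+                                                U32 *Bucket, U32 *Length)
+{
+    if (GET_LAN_FORM(ContextReply) != LAN_REPLY_FORM_RECEIVE_SINGLE)
+        return 0;
+    *Bucket = GET_LAN_BUCKET_CONTEXT(ContextReply);
+    *Length = GET_LAN_PACKET_LENGTH(ContextReply);
+    return 1;
+}
+#endif /* MPI_LAN_CONTEXT_REPLY_EXAMPLE */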
+
+/****************************************************************************/
+/* LAN Current Device State defines */
+/****************************************************************************/
+
+#define MPI_LAN_DEVICE_STATE_RESET (0x00)
+#define MPI_LAN_DEVICE_STATE_OPERATIONAL (0x01)
+
+
+/****************************************************************************/
+/* LAN Loopback defines */
+/****************************************************************************/
+
+#define MPI_LAN_TX_MODES_ENABLE_LOOPBACK_SUPPRESSION (0x01)
+
+#endif
+
diff --git a/drivers/message/fusion/lsi/mpi_log_fc.h b/drivers/message/fusion/lsi/mpi_log_fc.h
new file mode 100644
index 00000000..03be8b21
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_log_fc.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2000-2008 LSI Corporation. All rights reserved.
+ *
+ * NAME: fc_log.h
+ * SUMMARY: MPI IocLogInfo definitions for the SYMFC9xx chips
+ * DESCRIPTION: Contains the enumerated list of values that may be returned
+ * in the IOCLogInfo field of a MPI Default Reply Message.
+ *
+ * CREATION DATE: 6/02/2000
+ * ID: $Id: fc_log.h,v 4.6 2001/07/26 14:41:33 sschremm Exp $
+ */
+
+
+/*
+ * MpiIocLogInfo_t enum
+ *
+ * These 32 bit values are used in the IOCLogInfo field of the MPI reply
+ * messages.
+ * The value is 0xabcccccc where
+ * a = The type of log info as per the MPI spec. Since these codes are
+ * all for Fibre Channel, this value will always be 2.
+ * b = Specifies a subclass of the firmware where
+ * 0 = FCP Initiator
+ * 1 = FCP Target
+ * 2 = LAN
+ * 3 = MPI Message Layer
+ * 4 = FC Link
+ * 5 = Context Manager
+ * 6 = Invalid Field Offset
+ * 7 = State Change Info
+ * all others are reserved for future use
+ * c = A specific value within the subclass.
+ *
+ * NOTE: Any new values should be added to the end of each subclass so that the
+ * codes remain consistent across firmware releases.
+ */
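+
+/*
+ * Illustrative sketch (not part of the original fc_log.h): pulling the
+ * subclass (the "b" digit described above) and the subclass-specific code
+ * (the "cccccc" digits) out of a Fibre Channel IOCLogInfo value. The mask
+ * values follow from the 0xabcccccc layout documented above; the helper
+ * name and the example guard macro are assumptions made for this example.
+ */
+#ifdef MPI_IOCLOGINFO_FC_EXAMPLE
+static inline void mpi_example_decode_fc_loginfo(U32 IOCLogInfo,
+                                                 U32 *SubClass, U32 *Code)
+{
+    *SubClass = (IOCLogInfo & 0x0F000000) >> 24;  /* 0 = FCP Initiator, 4 = FC Link, ... */
+    *Code     =  IOCLogInfo & 0x00FFFFFF;         /* specific value within the subclass */
+}
+#endif /* MPI_IOCLOGINFO_FC_EXAMPLE */
+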
+typedef enum _MpiIocLogInfoFc
+{
+ MPI_IOCLOGINFO_FC_INIT_BASE = 0x20000000,
+ MPI_IOCLOGINFO_FC_INIT_ERROR_OUT_OF_ORDER_FRAME = 0x20000001, /* received an out of order frame - unsupported */
+ MPI_IOCLOGINFO_FC_INIT_ERROR_BAD_START_OF_FRAME = 0x20000002, /* Bad Rx Frame, bad start of frame primitive */
+ MPI_IOCLOGINFO_FC_INIT_ERROR_BAD_END_OF_FRAME = 0x20000003, /* Bad Rx Frame, bad end of frame primitive */
+ MPI_IOCLOGINFO_FC_INIT_ERROR_OVER_RUN = 0x20000004, /* Bad Rx Frame, overrun */
+ MPI_IOCLOGINFO_FC_INIT_ERROR_RX_OTHER = 0x20000005, /* Other errors caught by IOC which require retries */
+ MPI_IOCLOGINFO_FC_INIT_ERROR_SUBPROC_DEAD = 0x20000006, /* Main processor could not initialize sub-processor */
+ MPI_IOCLOGINFO_FC_INIT_ERROR_RX_OVERRUN = 0x20000007, /* Scatter Gather overrun */
+ MPI_IOCLOGINFO_FC_INIT_ERROR_RX_BAD_STATUS = 0x20000008, /* Receiver detected context mismatch via invalid header */
+ MPI_IOCLOGINFO_FC_INIT_ERROR_RX_UNEXPECTED_FRAME= 0x20000009, /* CtxMgr detected unsupported frame type */
+ MPI_IOCLOGINFO_FC_INIT_ERROR_LINK_FAILURE = 0x2000000A, /* Link failure occurred */
+ MPI_IOCLOGINFO_FC_INIT_ERROR_TX_TIMEOUT = 0x2000000B, /* Transmitter timeout error */
+
+ MPI_IOCLOGINFO_FC_TARGET_BASE = 0x21000000,
+ MPI_IOCLOGINFO_FC_TARGET_NO_PDISC = 0x21000001, /* not sent because we are waiting for a PDISC from the initiator */
+ MPI_IOCLOGINFO_FC_TARGET_NO_LOGIN = 0x21000002, /* not sent because we are not logged in to the remote node */
+ MPI_IOCLOGINFO_FC_TARGET_DOAR_KILLED_BY_LIP = 0x21000003, /* Data Out, Auto Response, not sent due to a LIP */
+ MPI_IOCLOGINFO_FC_TARGET_DIAR_KILLED_BY_LIP = 0x21000004, /* Data In, Auto Response, not sent due to a LIP */
+ MPI_IOCLOGINFO_FC_TARGET_DIAR_MISSING_DATA = 0x21000005, /* Data In, Auto Response, missing data frames */
+ MPI_IOCLOGINFO_FC_TARGET_DONR_KILLED_BY_LIP = 0x21000006, /* Data Out, No Response, not sent due to a LIP */
+ MPI_IOCLOGINFO_FC_TARGET_WRSP_KILLED_BY_LIP = 0x21000007, /* Auto-response after a write not sent due to a LIP */
+ MPI_IOCLOGINFO_FC_TARGET_DINR_KILLED_BY_LIP = 0x21000008, /* Data In, No Response, not completed due to a LIP */
+ MPI_IOCLOGINFO_FC_TARGET_DINR_MISSING_DATA = 0x21000009, /* Data In, No Response, missing data frames */
+ MPI_IOCLOGINFO_FC_TARGET_MRSP_KILLED_BY_LIP = 0x2100000a, /* Manual Response not sent due to a LIP */
+ MPI_IOCLOGINFO_FC_TARGET_NO_CLASS_3 = 0x2100000b, /* not sent because remote node does not support Class 3 */
+ MPI_IOCLOGINFO_FC_TARGET_LOGIN_NOT_VALID = 0x2100000c, /* not sent because login to remote node not validated */
+ MPI_IOCLOGINFO_FC_TARGET_FROM_OUTBOUND = 0x2100000e, /* cleared from the outbound queue after a logout */
+ MPI_IOCLOGINFO_FC_TARGET_WAITING_FOR_DATA_IN = 0x2100000f, /* cleared waiting for data after a logout */
+
+ MPI_IOCLOGINFO_FC_LAN_BASE = 0x22000000,
+ MPI_IOCLOGINFO_FC_LAN_TRANS_SGL_MISSING = 0x22000001, /* Transaction Context Sgl Missing */
+ MPI_IOCLOGINFO_FC_LAN_TRANS_WRONG_PLACE = 0x22000002, /* Transaction Context found before an EOB */
+ MPI_IOCLOGINFO_FC_LAN_TRANS_RES_BITS_SET = 0x22000003, /* Transaction Context value has reserved bits set */
+ MPI_IOCLOGINFO_FC_LAN_WRONG_SGL_FLAG = 0x22000004, /* Invalid SGL Flags */
+
+ MPI_IOCLOGINFO_FC_MSG_BASE = 0x23000000,
+
+ MPI_IOCLOGINFO_FC_LINK_BASE = 0x24000000,
+ MPI_IOCLOGINFO_FC_LINK_LOOP_INIT_TIMEOUT = 0x24000001, /* Loop initialization timed out */
+ MPI_IOCLOGINFO_FC_LINK_ALREADY_INITIALIZED = 0x24000002, /* Another system controller already initialized the loop */
+ MPI_IOCLOGINFO_FC_LINK_LINK_NOT_ESTABLISHED = 0x24000003, /* Not synchronized to signal or still negotiating (possible cable problem) */
+ MPI_IOCLOGINFO_FC_LINK_CRC_ERROR = 0x24000004, /* CRC check detected error on received frame */
+
+ MPI_IOCLOGINFO_FC_CTX_BASE = 0x25000000,
+
+ MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET = 0x26000000, /* The lower 24 bits give the byte offset of the field in the request message that is invalid */
+ MPI_IOCLOGINFO_FC_INVALID_FIELD_MAX_OFFSET = 0x26ffffff,
+
+ MPI_IOCLOGINFO_FC_STATE_CHANGE = 0x27000000 /* The lower 24 bits give additional information concerning state change */
+
+} MpiIocLogInfoFc_t;
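+
+/*
+ * Editor's note: the helper below is an illustrative sketch, not part of the
+ * original LSI header.  It splits an FC IOCLogInfo word into the subclass
+ * ('b' nibble) and the subclass-specific code ('cccccc' field) described in
+ * the 0xabcccccc layout comment above; the function name is hypothetical.
+ */
+static inline void example_split_fc_loginfo(U32 log_info, U32 *subclass,
+					    U32 *code)
+{
+	*subclass = (log_info & 0x0F000000) >> 24;	/* firmware subclass */
+	*code     =  log_info & 0x00FFFFFF;		/* value within subclass */
+}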
diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h
new file mode 100644
index 00000000..f62960b5
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_log_sas.h
@@ -0,0 +1,322 @@
+/***************************************************************************
+ * *
+ * Copyright (c) 2000-2008 LSI Corporation. All rights reserved. *
+ * *
+ * Description *
+ * ------------ *
+ * This include file contains SAS firmware interface IOC Log Info codes *
+ * *
+ *-------------------------------------------------------------------------*
+ */
+
+#ifndef IOPI_IOCLOGINFO_H_INCLUDED
+#define IOPI_IOCLOGINFO_H_INCLUDED
+
+#define SAS_LOGINFO_NEXUS_LOSS 0x31170000
+#define SAS_LOGINFO_MASK 0xFFFF0000
+
+/****************************************************************************/
+/* IOC LOGINFO defines, 0x00000000 - 0x0FFFFFFF */
+/* Format: */
+/* Bits 31-28: MPI_IOCLOGINFO_TYPE_SAS (3) */
+/* Bits 27-24: IOC_LOGINFO_ORIGINATOR: 0=IOP, 1=PL, 2=IR */
+/* Bits 23-16: LOGINFO_CODE */
+/* Bits 15-0: LOGINFO_CODE Specific */
+/****************************************************************************/
+
+/****************************************************************************/
+/* IOC_LOGINFO_ORIGINATOR defines */
+/****************************************************************************/
+#define IOC_LOGINFO_ORIGINATOR_IOP (0x00000000)
+#define IOC_LOGINFO_ORIGINATOR_PL (0x01000000)
+#define IOC_LOGINFO_ORIGINATOR_IR (0x02000000)
+
+#define IOC_LOGINFO_ORIGINATOR_MASK (0x0F000000)
+
+/****************************************************************************/
+/* LOGINFO_CODE defines */
+/****************************************************************************/
+#define IOC_LOGINFO_CODE_MASK (0x00FF0000)
+#define IOC_LOGINFO_CODE_SHIFT (16)
+
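+/*
+ * Editor's note: the helper below is an illustrative sketch, not part of the
+ * original LSI header.  It shows how the originator and code masks above
+ * carve up a SAS IOCLogInfo word, and how the nexus-loss pattern defined at
+ * the top of this file is recognized; the function name is hypothetical.
+ */
+static inline int example_sas_loginfo_is_nexus_loss(U32 log_info)
+{
+	U32 originator = log_info & IOC_LOGINFO_ORIGINATOR_MASK;
+	U32 code = (log_info & IOC_LOGINFO_CODE_MASK) >> IOC_LOGINFO_CODE_SHIFT;
+
+	(void)originator;	/* a real driver would report these */
+	(void)code;
+
+	return (log_info & SAS_LOGINFO_MASK) == SAS_LOGINFO_NEXUS_LOSS;
+}
+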
+/****************************************************************************/
+/* IOP LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = IOP */
+/****************************************************************************/
+#define IOP_LOGINFO_CODE_INVALID_SAS_ADDRESS (0x00010000)
+#define IOP_LOGINFO_CODE_UNUSED2 (0x00020000)
+#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE (0x00030000)
+#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_RT (0x00030100) /* Route Table Entry not found */
+#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PN (0x00030200) /* Invalid Page Number */
+#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_FORM (0x00030300) /* Invalid FORM */
+#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PT (0x00030400) /* Invalid Page Type */
+#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DNM (0x00030500) /* Device Not Mapped */
+#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_PERSIST (0x00030600) /* Persistent Page not found */
+#define IOP_LOGINFO_CODE_CONFIG_INVALID_PAGE_DEFAULT (0x00030700) /* Default Page not found */
+
+#define IOP_LOGINFO_CODE_FWUPLOAD_NO_FLASH_AVAILABLE (0x0003E000) /* Tried to upload from flash, but there is none */
+#define IOP_LOGINFO_CODE_FWUPLOAD_UNKNOWN_IMAGE_TYPE (0x0003E001) /* ImageType field contents were invalid */
+#define IOP_LOGINFO_CODE_FWUPLOAD_WRONG_IMAGE_SIZE (0x0003E002) /* ImageSize field in TCSGE was bad/offset in MfgPg 4 was wrong */
+#define IOP_LOGINFO_CODE_FWUPLOAD_ENTIRE_FLASH_UPLOAD_FAILED (0x0003E003) /* Error occurred while attempting to upload the entire flash */
+#define IOP_LOGINFO_CODE_FWUPLOAD_REGION_UPLOAD_FAILED (0x0003E004) /* Error occurred while attempting to upload single flash region */
+#define IOP_LOGINFO_CODE_FWUPLOAD_DMA_FAILURE (0x0003E005) /* Problem occurred while DMAing FW to host memory */
+
+#define IOP_LOGINFO_CODE_DIAG_MSG_ERROR (0x00040000) /* Error handling diag msg - or'd with diag status */
+
+#define IOP_LOGINFO_CODE_TASK_TERMINATED (0x00050000)
+
+#define IOP_LOGINFO_CODE_ENCL_MGMT_READ_ACTION_ERR0R (0x00060001) /* Read Action not supported for SEP msg */
+#define IOP_LOGINFO_CODE_ENCL_MGMT_INVALID_BUS_ID_ERR0R (0x00060002) /* Invalid Bus/ID in SEP msg */
+
+#define IOP_LOGINFO_CODE_TARGET_ASSIST_TERMINATED (0x00070001)
+#define IOP_LOGINFO_CODE_TARGET_STATUS_SEND_TERMINATED (0x00070002)
+#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_ALL_IO (0x00070003)
+#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO (0x00070004)
+#define IOP_LOGINFO_CODE_TARGET_MODE_ABORT_EXACT_IO_REQ (0x00070005)
+
+#define IOP_LOGINFO_CODE_LOG_TIMESTAMP_EVENT (0x00080000)
+
+/****************************************************************************/
+/* PL LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = PL */
+/****************************************************************************/
+#define PL_LOGINFO_CODE_OPEN_FAILURE (0x00010000) /* see SUB_CODE_OPEN_FAIL_ below */
+
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_NO_DEST_TIME_OUT (0x00000001)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PATHWAY_BLOCKED (0x00000002)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_CONTINUE0 (0x00000003)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_CONTINUE1 (0x00000004)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_INITIALIZE0 (0x00000005)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_INITIALIZE1 (0x00000006)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_STOP0 (0x00000007)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RES_STOP1 (0x00000008)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RETRY (0x00000009)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_BREAK (0x0000000A)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_UNUSED_0B (0x0000000B)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_OPEN_TIMEOUT_EXP (0x0000000C)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_UNUSED_0D (0x0000000D)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_DVTBLE_ACCSS_FAIL (0x0000000E)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_BAD_DEST (0x00000011)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RATE_NOT_SUPP (0x00000012)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PROT_NOT_SUPP (0x00000013)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON0 (0x00000014)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON1 (0x00000015)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON2 (0x00000016)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_RESERVED_ABANDON3 (0x00000017)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_STP_RESOURCES_BSY (0x00000018)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_WRONG_DESTINATION (0x00000019)
+
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_PATH_BLOCKED (0x0000001B) /* Retry Timeout */
+#define PL_LOGINFO_SUB_CODE_OPEN_FAIL_AWT_MAXED (0x0000001C) /* Retry Timeout */
+
+
+
+#define PL_LOGINFO_CODE_INVALID_SGL (0x00020000)
+#define PL_LOGINFO_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00030000)
+#define PL_LOGINFO_CODE_FRAME_XFER_ERROR (0x00040000)
+#define PL_LOGINFO_CODE_TX_FM_CONNECTED_LOW (0x00050000)
+#define PL_LOGINFO_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00060000)
+#define PL_LOGINFO_CODE_SATA_READ_LOG_RECEIVE_DATA_ERR (0x00070000)
+#define PL_LOGINFO_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR (0x00080000)
+#define PL_LOGINFO_CODE_SATA_ERR_IN_RCV_SET_DEV_BIT_FIS (0x00090000)
+#define PL_LOGINFO_CODE_RX_FM_INVALID_MESSAGE (0x000A0000)
+#define PL_LOGINFO_CODE_RX_CTX_MESSAGE_VALID_ERROR (0x000B0000)
+#define PL_LOGINFO_CODE_RX_FM_CURRENT_FRAME_ERROR (0x000C0000)
+#define PL_LOGINFO_CODE_SATA_LINK_DOWN (0x000D0000)
+#define PL_LOGINFO_CODE_DISCOVERY_SATA_INIT_W_IOS (0x000E0000)
+#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE (0x000F0000)
+#define PL_LOGINFO_CODE_CONFIG_PL_NOT_INITIALIZED (0x000F0001) /* PL not yet initialized, can't do config page req. */
+#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_PT (0x000F0100) /* Invalid Page Type */
+#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NUM_PHYS (0x000F0200) /* Invalid Number of Phys */
+#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NOT_IMP (0x000F0300) /* Case Not Handled */
+#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NO_DEV (0x000F0400) /* No Device Found */
+#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_FORM (0x000F0500) /* Invalid FORM */
+#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_PHY (0x000F0600) /* Invalid Phy */
+#define PL_LOGINFO_CODE_CONFIG_INVALID_PAGE_NO_OWNER (0x000F0700) /* No Owner Found */
+#define PL_LOGINFO_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00100000)
+#define PL_LOGINFO_CODE_RESET (0x00110000) /* See Sub-Codes below (PL_LOGINFO_SUB_CODE) */
+#define PL_LOGINFO_CODE_ABORT (0x00120000) /* See Sub-Codes below (PL_LOGINFO_SUB_CODE)*/
+#define PL_LOGINFO_CODE_IO_NOT_YET_EXECUTED (0x00130000)
+#define PL_LOGINFO_CODE_IO_EXECUTED (0x00140000)
+#define PL_LOGINFO_CODE_PERS_RESV_OUT_NOT_AFFIL_OWNER (0x00150000)
+#define PL_LOGINFO_CODE_OPEN_TXDMA_ABORT (0x00160000)
+#define PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY (0x00170000)
+#define PL_LOGINFO_CODE_IO_CANCELLED_DUE_TO_R_ERR (0x00180000)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE (0x00000100)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_NO_DEST_TIMEOUT (0x00000101)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_SATA_NEG_RATE_2HI (0x00000102)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_RATE_NOT_SUPPORTED (0x00000103)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_BREAK (0x00000104)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ZONE_VIOLATION (0x00000114)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON0 (0x00000114) /* Open Reject (Zone Violation) - available on SAS-2 devices */
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON1 (0x00000115)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON2 (0x00000116)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ABANDON3 (0x00000117)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_ORR_TIMEOUT (0x0000011A) /* Open Reject (Retry) Timeout */
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_PATH_BLOCKED (0x0000011B)
+#define PL_LOGINFO_SUB_CODE_OPEN_FAILURE_AWT_MAXED (0x0000011C) /* Arbitration Wait Timer Maxed */
+
+#define PL_LOGINFO_SUB_CODE_TARGET_BUS_RESET (0x00000120)
+#define PL_LOGINFO_SUB_CODE_TRANSPORT_LAYER (0x00000130) /* Leave lower nibble (1-f) reserved. */
+#define PL_LOGINFO_SUB_CODE_PORT_LAYER (0x00000140) /* Leave lower nibble (1-f) reserved. */
+
+
+#define PL_LOGINFO_SUB_CODE_INVALID_SGL (0x00000200)
+#define PL_LOGINFO_SUB_CODE_WRONG_REL_OFF_OR_FRAME_LENGTH (0x00000300)
+#define PL_LOGINFO_SUB_CODE_FRAME_XFER_ERROR (0x00000400)
+/* Bits 0-3 encode Transport Status Register (offset 0x08) */
+/* Bit 0 is Status Bit 0: FrameXferErr */
+/* Bit 1 & 2 are Status Bits 16 and 17: FrameXmitErrStatus */
+/* Bit 3 is Status Bit 18 WriteDataLengthGTDataLengthErr */
+
+#define PL_LOGINFO_SUB_CODE_TX_FM_CONNECTED_LOW (0x00000500)
+#define PL_LOGINFO_SUB_CODE_SATA_NON_NCQ_RW_ERR_BIT_SET (0x00000600)
+#define PL_LOGINFO_SUB_CODE_SATA_READ_LOG_RECEIVE_DATA_ERR (0x00000700)
+#define PL_LOGINFO_SUB_CODE_SATA_NCQ_FAIL_ALL_CMDS_AFTR_ERR (0x00000800)
+#define PL_LOGINFO_SUB_CODE_SATA_ERR_IN_RCV_SET_DEV_BIT_FIS (0x00000900)
+#define PL_LOGINFO_SUB_CODE_RX_FM_INVALID_MESSAGE (0x00000A00)
+#define PL_LOGINFO_SUB_CODE_RX_CTX_MESSAGE_VALID_ERROR (0x00000B00)
+#define PL_LOGINFO_SUB_CODE_RX_FM_CURRENT_FRAME_ERROR (0x00000C00)
+#define PL_LOGINFO_SUB_CODE_SATA_LINK_DOWN (0x00000D00)
+#define PL_LOGINFO_SUB_CODE_DISCOVERY_SATA_INIT_W_IOS (0x00000E00)
+#define PL_LOGINFO_SUB_CODE_DISCOVERY_REMOTE_SEP_RESET (0x00000E01)
+#define PL_LOGINFO_SUB_CODE_SECOND_OPEN (0x00000F00)
+#define PL_LOGINFO_SUB_CODE_DSCVRY_SATA_INIT_TIMEOUT (0x00001000)
+#define PL_LOGINFO_SUB_CODE_BREAK_ON_SATA_CONNECTION (0x00002000)
+/* not currently used in mainline */
+#define PL_LOGINFO_SUB_CODE_BREAK_ON_STUCK_LINK (0x00003000)
+#define PL_LOGINFO_SUB_CODE_BREAK_ON_STUCK_LINK_AIP (0x00004000)
+#define PL_LOGINFO_SUB_CODE_BREAK_ON_INCOMPLETE_BREAK_RCVD (0x00005000)
+
+#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_FAILURE (0x00200000) /* Can't get SMP Frame */
+#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_READ_ERROR (0x00200010) /* Error occurred on SMP Read */
+#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_WRITE_ERROR (0x00200020) /* Error occurred on SMP Write */
+#define PL_LOGINFO_CODE_ENCL_MGMT_NOT_SUPPORTED_ON_ENCL (0x00200040) /* Encl Mgmt services not available for this WWID */
+#define PL_LOGINFO_CODE_ENCL_MGMT_ADDR_MODE_NOT_SUPPORTED  (0x00200050) /* Address Mode not supported */
+#define PL_LOGINFO_CODE_ENCL_MGMT_BAD_SLOT_NUM (0x00200060) /* Invalid Slot Number in SEP Msg */
+#define PL_LOGINFO_CODE_ENCL_MGMT_SGPIO_NOT_PRESENT (0x00200070) /* SGPIO not present/enabled */
+#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_NOT_CONFIGURED (0x00200080) /* GPIO not configured */
+#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_FRAME_ERROR (0x00200090) /* GPIO can't allocate a frame */
+#define PL_LOGINFO_CODE_ENCL_MGMT_GPIO_CONFIG_PAGE_ERROR (0x002000A0) /* GPIO failed config page request */
+#define PL_LOGINFO_CODE_ENCL_MGMT_SES_FRAME_ALLOC_ERROR (0x002000B0) /* Can't get frame for SES command */
+#define PL_LOGINFO_CODE_ENCL_MGMT_SES_IO_ERROR (0x002000C0) /* I/O execution error */
+#define PL_LOGINFO_CODE_ENCL_MGMT_SES_RETRIES_EXHAUSTED (0x002000D0) /* SEP I/O retries exhausted */
+#define PL_LOGINFO_CODE_ENCL_MGMT_SMP_FRAME_ALLOC_ERROR (0x002000E0) /* Can't get frame for SMP command */
+
+#define PL_LOGINFO_DA_SEP_NOT_PRESENT (0x00200100) /* SEP not present when msg received */
+#define PL_LOGINFO_DA_SEP_SINGLE_THREAD_ERROR (0x00200101) /* Can only accept 1 msg at a time */
+#define PL_LOGINFO_DA_SEP_ISTWI_INTR_IN_IDLE_STATE (0x00200102) /* ISTWI interrupt recvd. while IDLE */
+#define PL_LOGINFO_DA_SEP_RECEIVED_NACK_FROM_SLAVE (0x00200103) /* SEP NACK'd, it is busy */
+#define PL_LOGINFO_DA_SEP_DID_NOT_RECEIVE_ACK (0x00200104) /* SEP didn't rcv. ACK (Last Rcvd Bit = 1) */
+#define PL_LOGINFO_DA_SEP_BAD_STATUS_HDR_CHKSUM (0x00200105) /* SEP stopped or sent bad chksum in Hdr */
+#define PL_LOGINFO_DA_SEP_STOP_ON_DATA (0x00200106) /* SEP stopped while transferring data */
+#define PL_LOGINFO_DA_SEP_STOP_ON_SENSE_DATA (0x00200107) /* SEP stopped while transferring sense data */
+#define PL_LOGINFO_DA_SEP_UNSUPPORTED_SCSI_STATUS_1 (0x00200108) /* SEP returned unknown scsi status */
+#define PL_LOGINFO_DA_SEP_UNSUPPORTED_SCSI_STATUS_2 (0x00200109) /* SEP returned unknown scsi status */
+#define PL_LOGINFO_DA_SEP_CHKSUM_ERROR_AFTER_STOP (0x0020010A) /* SEP returned bad chksum after STOP */
+#define PL_LOGINFO_DA_SEP_CHKSUM_ERROR_AFTER_STOP_GETDATA  (0x0020010B) /* SEP returned bad chksum after STOP while getting data */
+#define PL_LOGINFO_DA_SEP_UNSUPPORTED_COMMAND (0x0020010C) /* SEP doesn't support CDB opcode f/w location 1 */
+#define PL_LOGINFO_DA_SEP_UNSUPPORTED_COMMAND_2 (0x0020010D) /* SEP doesn't support CDB opcode f/w location 2 */
+#define PL_LOGINFO_DA_SEP_UNSUPPORTED_COMMAND_3 (0x0020010E) /* SEP doesn't support CDB opcode f/w location 3 */
+
+
+/****************************************************************************/
+/* IR LOGINFO_CODE defines, valid if IOC_LOGINFO_ORIGINATOR = IR */
+/****************************************************************************/
+#define IR_LOGINFO_RAID_ACTION_ERROR (0x00010000)
+#define IR_LOGINFO_CODE_UNUSED2 (0x00020000)
+
+/* Amount of information passed down for Create Volume is too large */
+#define IR_LOGINFO_VOLUME_CREATE_INVALID_LENGTH (0x00010001)
+/* Creation of duplicate volume attempted (Bus/Target ID checked) */
+#define IR_LOGINFO_VOLUME_CREATE_DUPLICATE (0x00010002)
+/* Creation failed due to maximum number of supported volumes exceeded */
+#define IR_LOGINFO_VOLUME_CREATE_NO_SLOTS (0x00010003)
+/* Creation failed due to DMA error in trying to read from host */
+#define IR_LOGINFO_VOLUME_CREATE_DMA_ERROR (0x00010004)
+/* Creation failed due to invalid volume type passed down */
+#define IR_LOGINFO_VOLUME_CREATE_INVALID_VOLUME_TYPE (0x00010005)
+/* Creation failed due to error reading MFG Page 4 */
+#define IR_LOGINFO_VOLUME_MFG_PAGE4_ERROR (0x00010006)
+/* Creation failed when trying to create internal structures */
+#define IR_LOGINFO_VOLUME_INTERNAL_CONFIG_STRUCTURE_ERROR (0x00010007)
+
+/* Activation failed due to trying to activate an already active volume */
+#define IR_LOGINFO_VOLUME_ACTIVATING_AN_ACTIVE_VOLUME (0x00010010)
+/* Activation failed due to trying to activate an unsupported volume type */
+#define IR_LOGINFO_VOLUME_ACTIVATING_INVALID_VOLUME_TYPE (0x00010011)
+/* Activation failed due to trying to activate too many volumes */
+#define IR_LOGINFO_VOLUME_ACTIVATING_TOO_MANY_VOLUMES (0x00010012)
+/* Activation failed due to Volume ID in use already */
+#define IR_LOGINFO_VOLUME_ACTIVATING_VOLUME_ID_IN_USE (0x00010013)
+/* Activation failed; call to activateVolume returned failure */
+#define IR_LOGINFO_VOLUME_ACTIVATE_VOLUME_FAILED (0x00010014)
+/* Activation failed trying to import the volume */
+#define IR_LOGINFO_VOLUME_ACTIVATING_IMPORT_VOLUME_FAILED (0x00010015)
+/* Activation failed due to too many physical disks */
+#define IR_LOGINFO_VOLUME_ACTIVATING_TOO_MANY_PHYS_DISKS (0x00010016)
+
+/* Phys Disk failed, too many phys disks */
+#define IR_LOGINFO_PHYSDISK_CREATE_TOO_MANY_DISKS (0x00010020)
+/* Amount of information passed down for Create Physdisk is too large */
+#define IR_LOGINFO_PHYSDISK_CREATE_INVALID_LENGTH (0x00010021)
+/* Creation failed due to DMA error in trying to read from host */
+#define IR_LOGINFO_PHYSDISK_CREATE_DMA_ERROR (0x00010022)
+/* Creation failed due to invalid Bus TargetID passed down */
+#define IR_LOGINFO_PHYSDISK_CREATE_BUS_TID_INVALID (0x00010023)
+/* Creation failed due to error in creating RAID Phys Disk Config Page */
+#define IR_LOGINFO_PHYSDISK_CREATE_CONFIG_PAGE_ERROR (0x00010024)
+
+
+/* Compatibility Error : IR Disabled */
+#define IR_LOGINFO_COMPAT_ERROR_RAID_DISABLED (0x00010030)
+/* Compatibility Error : Inquiry Command failed */
+#define IR_LOGINFO_COMPAT_ERROR_INQUIRY_FAILED (0x00010031)
+/* Compatibility Error : Device not direct access device */
+#define IR_LOGINFO_COMPAT_ERROR_NOT_DIRECT_ACCESS (0x00010032)
+/* Compatibility Error : Removable device found */
+#define IR_LOGINFO_COMPAT_ERROR_REMOVABLE_FOUND (0x00010033)
+/* Compatibility Error : Device SCSI Version not 2 or higher */
+#define IR_LOGINFO_COMPAT_ERROR_NEED_SCSI_2_OR_HIGHER (0x00010034)
+/* Compatibility Error : SATA device, 48 BIT LBA not supported */
+#define IR_LOGINFO_COMPAT_ERROR_SATA_48BIT_LBA_NOT_SUPPORTED (0x00010035)
+/* Compatibility Error : Device does not have 512 byte block sizes */
+#define IR_LOGINFO_COMPAT_ERROR_DEVICE_NOT_512_BYTE_BLOCK (0x00010036)
+/* Compatibility Error : Volume Type check failed */
+#define IR_LOGINFO_COMPAT_ERROR_VOLUME_TYPE_CHECK_FAILED (0x00010037)
+/* Compatibility Error : Volume Type is unsupported by FW */
+#define IR_LOGINFO_COMPAT_ERROR_UNSUPPORTED_VOLUME_TYPE (0x00010038)
+/* Compatibility Error : Disk drive too small for use in volume */
+#define IR_LOGINFO_COMPAT_ERROR_DISK_TOO_SMALL (0x00010039)
+/* Compatibility Error : Phys disk for Create Volume not found */
+#define IR_LOGINFO_COMPAT_ERROR_PHYS_DISK_NOT_FOUND (0x0001003A)
+/* Compatibility Error : membership count error, too many or too few disks for volume type */
+#define IR_LOGINFO_COMPAT_ERROR_MEMBERSHIP_COUNT (0x0001003B)
+/* Compatibility Error : Disk stripe sizes must be 64KB */
+#define IR_LOGINFO_COMPAT_ERROR_NON_64K_STRIPE_SIZE (0x0001003C)
+/* Compatibility Error : IME size limited to < 2TB */
+#define IR_LOGINFO_COMPAT_ERROR_IME_VOL_NOT_CURRENTLY_SUPPORTED (0x0001003D)
+
+/* Device Firmware Update: DFU can only be started once */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_DFU_IN_PROGRESS (0x00010050)
+/* Device Firmware Update: Volume must be Optimal/Active/non-Quiesced */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_DEVICE_IN_INVALID_STATE (0x00010051)
+/* Device Firmware Update: DFU Timeout cannot be zero */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_INVALID_TIMEOUT (0x00010052)
+/* Device Firmware Update: CREATE TIMER FAILED */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_NO_TIMERS (0x00010053)
+/* Device Firmware Update: Failed to read SAS_IO_UNIT_PG_1 */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_READING_CFG_PAGE (0x00010054)
+/* Device Firmware Update: Invalid SAS_IO_UNIT_PG_1 value(s) */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_PORT_IO_TIMEOUTS_REQUIRED (0x00010055)
+/* Device Firmware Update: Unable to allocate memory for page */
+#define IR_LOGINFO_DEV_FW_UPDATE_ERR_ALLOC_CFG_PAGE (0x00010056)
+
+
+/****************************************************************************/
+/* Defines for convenience */
+/****************************************************************************/
+#define IOC_LOGINFO_PREFIX_IOP ((MPI_IOCLOGINFO_TYPE_SAS << MPI_IOCLOGINFO_TYPE_SHIFT) | IOC_LOGINFO_ORIGINATOR_IOP)
+#define IOC_LOGINFO_PREFIX_PL ((MPI_IOCLOGINFO_TYPE_SAS << MPI_IOCLOGINFO_TYPE_SHIFT) | IOC_LOGINFO_ORIGINATOR_PL)
+#define IOC_LOGINFO_PREFIX_IR ((MPI_IOCLOGINFO_TYPE_SAS << MPI_IOCLOGINFO_TYPE_SHIFT) | IOC_LOGINFO_ORIGINATOR_IR)
+
+#endif /* end of file */
+
diff --git a/drivers/message/fusion/lsi/mpi_raid.h b/drivers/message/fusion/lsi/mpi_raid.h
new file mode 100644
index 00000000..add60cc8
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_raid.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2001-2008 LSI Corporation.
+ *
+ *
+ * Name: mpi_raid.h
+ * Title: MPI RAID message and structures
+ * Creation Date: February 27, 2001
+ *
+ * mpi_raid.h Version: 01.05.05
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 02-27-01 01.01.01 Original release for this file.
+ * 03-27-01 01.01.02 Added structure offset comments.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 09-28-01 01.02.02 Major rework for MPI v1.2 Integrated RAID changes.
+ * 10-04-01 01.02.03 Added ActionData defines for
+ * MPI_RAID_ACTION_DELETE_VOLUME action.
+ * 11-01-01 01.02.04 Added define for MPI_RAID_ACTION_ADATA_DO_NOT_SYNC.
+ * 03-14-02 01.02.05 Added define for MPI_RAID_ACTION_ADATA_LOW_LEVEL_INIT.
+ * 05-07-02 01.02.06 Added define for MPI_RAID_ACTION_ACTIVATE_VOLUME,
+ * MPI_RAID_ACTION_INACTIVATE_VOLUME, and
+ * MPI_RAID_ACTION_ADATA_INACTIVATE_ALL.
+ * 07-12-02 01.02.07 Added structures for Mailbox request and reply.
+ * 11-15-02 01.02.08 Added missing MsgContext field to MSG_MAILBOX_REQUEST.
+ * 04-01-03 01.02.09 New action data option flag for
+ * MPI_RAID_ACTION_DELETE_VOLUME.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Original release for MPI v1.5.
+ * 01-15-05 01.05.02 Added defines for the two new RAID Actions for
+ * _SET_RESYNC_RATE and _SET_DATA_SCRUB_RATE.
+ * 02-28-07 01.05.03 Added new RAID Action, Device FW Update Mode, and
+ * associated defines.
+ * 08-07-07 01.05.04 Added Disable Full Rebuild bit to the ActionDataWord
+ * for the RAID Action MPI_RAID_ACTION_DISABLE_VOLUME.
+ * 01-15-08 01.05.05 Added define for MPI_RAID_ACTION_SET_VOLUME_NAME.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_RAID_H
+#define MPI_RAID_H
+
+
+/******************************************************************************
+*
+* R A I D M e s s a g e s
+*
+*******************************************************************************/
+
+
+/****************************************************************************/
+/* RAID Action Request */
+/****************************************************************************/
+
+typedef struct _MSG_RAID_ACTION
+{
+ U8 Action; /* 00h */
+ U8 Reserved1; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 VolumeID; /* 04h */
+ U8 VolumeBus; /* 05h */
+ U8 PhysDiskNum; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 Reserved2; /* 0Ch */
+ U32 ActionDataWord; /* 10h */
+ SGE_SIMPLE_UNION ActionDataSGE; /* 14h */
+} MSG_RAID_ACTION_REQUEST, MPI_POINTER PTR_MSG_RAID_ACTION_REQUEST,
+ MpiRaidActionRequest_t , MPI_POINTER pMpiRaidActionRequest_t;
+
+
+/* RAID Action request Action values */
+
+#define MPI_RAID_ACTION_STATUS (0x00)
+#define MPI_RAID_ACTION_INDICATOR_STRUCT (0x01)
+#define MPI_RAID_ACTION_CREATE_VOLUME (0x02)
+#define MPI_RAID_ACTION_DELETE_VOLUME (0x03)
+#define MPI_RAID_ACTION_DISABLE_VOLUME (0x04)
+#define MPI_RAID_ACTION_ENABLE_VOLUME (0x05)
+#define MPI_RAID_ACTION_QUIESCE_PHYS_IO (0x06)
+#define MPI_RAID_ACTION_ENABLE_PHYS_IO (0x07)
+#define MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS (0x08)
+#define MPI_RAID_ACTION_PHYSDISK_OFFLINE (0x0A)
+#define MPI_RAID_ACTION_PHYSDISK_ONLINE (0x0B)
+#define MPI_RAID_ACTION_CHANGE_PHYSDISK_SETTINGS (0x0C)
+#define MPI_RAID_ACTION_CREATE_PHYSDISK (0x0D)
+#define MPI_RAID_ACTION_DELETE_PHYSDISK (0x0E)
+#define MPI_RAID_ACTION_FAIL_PHYSDISK (0x0F)
+#define MPI_RAID_ACTION_REPLACE_PHYSDISK (0x10)
+#define MPI_RAID_ACTION_ACTIVATE_VOLUME (0x11)
+#define MPI_RAID_ACTION_INACTIVATE_VOLUME (0x12)
+#define MPI_RAID_ACTION_SET_RESYNC_RATE (0x13)
+#define MPI_RAID_ACTION_SET_DATA_SCRUB_RATE (0x14)
+#define MPI_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15)
+#define MPI_RAID_ACTION_SET_VOLUME_NAME (0x16)
+
+/* ActionDataWord defines for use with MPI_RAID_ACTION_CREATE_VOLUME action */
+#define MPI_RAID_ACTION_ADATA_DO_NOT_SYNC (0x00000001)
+#define MPI_RAID_ACTION_ADATA_LOW_LEVEL_INIT (0x00000002)
+
+/* ActionDataWord defines for use with MPI_RAID_ACTION_DELETE_VOLUME action */
+#define MPI_RAID_ACTION_ADATA_KEEP_PHYS_DISKS (0x00000000)
+#define MPI_RAID_ACTION_ADATA_DEL_PHYS_DISKS (0x00000001)
+
+#define MPI_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
+#define MPI_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000002)
+
+/* ActionDataWord defines for use with MPI_RAID_ACTION_DISABLE_VOLUME action */
+#define MPI_RAID_ACTION_ADATA_DISABLE_FULL_REBUILD (0x00000001)
+
+/* ActionDataWord defines for use with MPI_RAID_ACTION_ACTIVATE_VOLUME action */
+#define MPI_RAID_ACTION_ADATA_INACTIVATE_ALL (0x00000001)
+
+/* ActionDataWord defines for use with MPI_RAID_ACTION_SET_RESYNC_RATE action */
+#define MPI_RAID_ACTION_ADATA_RESYNC_RATE_MASK (0x000000FF)
+
+/* ActionDataWord defines for use with MPI_RAID_ACTION_SET_DATA_SCRUB_RATE action */
+#define MPI_RAID_ACTION_ADATA_DATA_SCRUB_RATE_MASK (0x000000FF)
+
+/* ActionDataWord defines for use with MPI_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */
+#define MPI_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x00000001)
+#define MPI_RAID_ACTION_ADATA_MASK_FW_UPDATE_TIMEOUT (0x0000FF00)
+#define MPI_RAID_ACTION_ADATA_SHIFT_FW_UPDATE_TIMEOUT (8)
+
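+/*
+ * Editor's note: the helper below is an illustrative sketch, not part of the
+ * original LSI header.  It shows how the Action and ActionDataWord fields of
+ * the request structure above combine for MPI_RAID_ACTION_SET_RESYNC_RATE.
+ * MPI_FUNCTION_RAID_ACTION is defined in mpi.h; memset() and the function
+ * name are assumptions.
+ */
+static inline void example_build_set_resync_rate(MSG_RAID_ACTION_REQUEST *req,
+						 U8 vol_id, U8 vol_bus, U8 rate)
+{
+	memset(req, 0, sizeof(*req));
+	req->Action         = MPI_RAID_ACTION_SET_RESYNC_RATE;
+	req->Function       = MPI_FUNCTION_RAID_ACTION;	/* from mpi.h */
+	req->VolumeID       = vol_id;
+	req->VolumeBus      = vol_bus;
+	/* only the low byte of ActionDataWord is meaningful for this action */
+	req->ActionDataWord = rate & MPI_RAID_ACTION_ADATA_RESYNC_RATE_MASK;
+}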
+
+/* RAID Action reply message */
+
+typedef struct _MSG_RAID_ACTION_REPLY
+{
+ U8 Action; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 VolumeID; /* 04h */
+ U8 VolumeBus; /* 05h */
+ U8 PhysDiskNum; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 ActionStatus; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 VolumeStatus; /* 14h */
+ U32 ActionData; /* 18h */
+} MSG_RAID_ACTION_REPLY, MPI_POINTER PTR_MSG_RAID_ACTION_REPLY,
+ MpiRaidActionReply_t, MPI_POINTER pMpiRaidActionReply_t;
+
+
+/* RAID Volume reply ActionStatus values */
+
+#define MPI_RAID_ACTION_ASTATUS_SUCCESS (0x0000)
+#define MPI_RAID_ACTION_ASTATUS_INVALID_ACTION (0x0001)
+#define MPI_RAID_ACTION_ASTATUS_FAILURE (0x0002)
+#define MPI_RAID_ACTION_ASTATUS_IN_PROGRESS (0x0003)
+
+
+/* RAID Volume reply RAID Volume Indicator structure */
+
+typedef struct _MPI_RAID_VOL_INDICATOR
+{
+ U64 TotalBlocks; /* 00h */
+ U64 BlocksRemaining; /* 08h */
+} MPI_RAID_VOL_INDICATOR, MPI_POINTER PTR_MPI_RAID_VOL_INDICATOR,
+ MpiRaidVolIndicator_t, MPI_POINTER pMpiRaidVolIndicator_t;
+
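+/*
+ * Editor's note: the helper below is an illustrative sketch, not part of the
+ * original LSI header.  When MPI_RAID_ACTION_INDICATOR_STRUCT is requested,
+ * the reply data is the structure above; this converts it into a rough
+ * resync-progress percentage.  The function name is hypothetical, and a
+ * 32-bit kernel build would need do_div() for the 64-bit division.
+ */
+static inline U32 example_resync_percent(const MPI_RAID_VOL_INDICATOR *ind)
+{
+	if (ind->TotalBlocks == 0)
+		return 0;
+	return (U32)(((ind->TotalBlocks - ind->BlocksRemaining) * 100) /
+		     ind->TotalBlocks);
+}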
+
+/****************************************************************************/
+/* SCSI IO RAID Passthrough Request */
+/****************************************************************************/
+
+typedef struct _MSG_SCSI_IO_RAID_PT_REQUEST
+{
+ U8 PhysDiskNum; /* 00h */
+ U8 Reserved1; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 CDBLength; /* 04h */
+ U8 SenseBufferLength; /* 05h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 LUN[8]; /* 0Ch */
+ U32 Control; /* 14h */
+ U8 CDB[16]; /* 18h */
+ U32 DataLength; /* 28h */
+ U32 SenseBufferLowAddr; /* 2Ch */
+ SGE_IO_UNION SGL; /* 30h */
+} MSG_SCSI_IO_RAID_PT_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO_RAID_PT_REQUEST,
+ SCSIIORaidPassthroughRequest_t, MPI_POINTER pSCSIIORaidPassthroughRequest_t;
+
+
+/* SCSI IO RAID Passthrough reply structure */
+
+typedef struct _MSG_SCSI_IO_RAID_PT_REPLY
+{
+ U8 PhysDiskNum; /* 00h */
+ U8 Reserved1; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 CDBLength; /* 04h */
+ U8 SenseBufferLength; /* 05h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 SCSIStatus; /* 0Ch */
+ U8 SCSIState; /* 0Dh */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 TransferCount; /* 14h */
+ U32 SenseCount; /* 18h */
+ U32 ResponseInfo; /* 1Ch */
+} MSG_SCSI_IO_RAID_PT_REPLY, MPI_POINTER PTR_MSG_SCSI_IO_RAID_PT_REPLY,
+ SCSIIORaidPassthroughReply_t, MPI_POINTER pSCSIIORaidPassthroughReply_t;
+
+
+/****************************************************************************/
+/* Mailbox request structure */
+/****************************************************************************/
+
+typedef struct _MSG_MAILBOX_REQUEST
+{
+ U16 Reserved1;
+ U8 ChainOffset;
+ U8 Function;
+ U16 Reserved2;
+ U8 Reserved3;
+ U8 MsgFlags;
+ U32 MsgContext;
+ U8 Command[10];
+ U16 Reserved4;
+ SGE_IO_UNION SGL;
+} MSG_MAILBOX_REQUEST, MPI_POINTER PTR_MSG_MAILBOX_REQUEST,
+ MailboxRequest_t, MPI_POINTER pMailboxRequest_t;
+
+
+/* Mailbox reply structure */
+typedef struct _MSG_MAILBOX_REPLY
+{
+ U16 Reserved1; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved2; /* 04h */
+ U8 Reserved3; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 MailboxStatus; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 Reserved4; /* 14h */
+} MSG_MAILBOX_REPLY, MPI_POINTER PTR_MSG_MAILBOX_REPLY,
+ MailboxReply_t, MPI_POINTER pMailboxReply_t;
+
+#endif
+
+
+
diff --git a/drivers/message/fusion/lsi/mpi_sas.h b/drivers/message/fusion/lsi/mpi_sas.h
new file mode 100644
index 00000000..ab410036
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_sas.h
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2004-2008 LSI Corporation.
+ *
+ *
+ * Name: mpi_sas.h
+ * Title: MPI Serial Attached SCSI structures and definitions
+ * Creation Date: August 19, 2004
+ *
+ * mpi_sas.h Version: 01.05.05
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 08-19-04 01.05.01 Original release.
+ * 08-30-05 01.05.02 Added DeviceInfo bit for SEP.
+ * Added PrimFlags and Primitive field to SAS IO Unit
+ * Control request, and added a new operation code.
+ * 03-27-06 01.05.03 Added Force Full Discovery, Transmit Port Select Signal,
+ * and Remove Device operations to SAS IO Unit Control.
+ * Added DevHandle field to SAS IO Unit Control request and
+ * reply.
+ * 10-11-06 01.05.04 Fixed the name of a define for Operation field of SAS IO
+ * Unit Control request.
+ * 01-15-08 01.05.05 Added support for MPI_SAS_OP_SET_IOC_PARAMETER,
+ * including adding IOCParameter and IOCParameter value
+ * fields to SAS IO Unit Control Request.
+ * Added MPI_SAS_DEVICE_INFO_PRODUCT_SPECIFIC define.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_SAS_H
+#define MPI_SAS_H
+
+
+/*
+ * Values for SASStatus.
+ */
+#define MPI_SASSTATUS_SUCCESS (0x00)
+#define MPI_SASSTATUS_UNKNOWN_ERROR (0x01)
+#define MPI_SASSTATUS_INVALID_FRAME (0x02)
+#define MPI_SASSTATUS_UTC_BAD_DEST (0x03)
+#define MPI_SASSTATUS_UTC_BREAK_RECEIVED (0x04)
+#define MPI_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05)
+#define MPI_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06)
+#define MPI_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07)
+#define MPI_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08)
+#define MPI_SASSTATUS_UTC_WRONG_DESTINATION (0x09)
+#define MPI_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A)
+#define MPI_SASSTATUS_LONG_INFORMATION_UNIT (0x0B)
+#define MPI_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C)
+#define MPI_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D)
+#define MPI_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E)
+#define MPI_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F)
+#define MPI_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10)
+#define MPI_SASSTATUS_DATA_OFFSET_ERROR (0x11)
+#define MPI_SASSTATUS_SDSF_NAK_RECEIVED (0x12)
+#define MPI_SASSTATUS_SDSF_CONNECTION_FAILED (0x13)
+#define MPI_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14)
+
+
+/*
+ * Values for the SAS DeviceInfo field used in SAS Device Status Change Event
+ * data and SAS IO Unit Configuration pages.
+ */
+#define MPI_SAS_DEVICE_INFO_PRODUCT_SPECIFIC (0xF0000000)
+
+#define MPI_SAS_DEVICE_INFO_SEP (0x00004000)
+#define MPI_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
+#define MPI_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000)
+#define MPI_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800)
+#define MPI_SAS_DEVICE_INFO_SSP_TARGET (0x00000400)
+#define MPI_SAS_DEVICE_INFO_STP_TARGET (0x00000200)
+#define MPI_SAS_DEVICE_INFO_SMP_TARGET (0x00000100)
+#define MPI_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080)
+#define MPI_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040)
+#define MPI_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020)
+#define MPI_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010)
+#define MPI_SAS_DEVICE_INFO_SATA_HOST (0x00000008)
+
+#define MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
+#define MPI_SAS_DEVICE_INFO_NO_DEVICE (0x00000000)
+#define MPI_SAS_DEVICE_INFO_END_DEVICE (0x00000001)
+#define MPI_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
+#define MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003)
+
+
+
+/*****************************************************************************
+*
+* S e r i a l A t t a c h e d S C S I M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* Serial Management Protocol Passthrough Request */
+/****************************************************************************/
+
+typedef struct _MSG_SMP_PASSTHROUGH_REQUEST
+{
+ U8 PassthroughFlags; /* 00h */
+ U8 PhysicalPort; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 RequestDataLength; /* 04h */
+ U8 ConnectionRate; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 Reserved1; /* 0Ch */
+ U64 SASAddress; /* 10h */
+ U32 Reserved2; /* 18h */
+ U32 Reserved3; /* 1Ch */
+ SGE_SIMPLE_UNION SGL; /* 20h */
+} MSG_SMP_PASSTHROUGH_REQUEST, MPI_POINTER PTR_MSG_SMP_PASSTHROUGH_REQUEST,
+ SmpPassthroughRequest_t, MPI_POINTER pSmpPassthroughRequest_t;
+
+/* values for PassthroughFlags field */
+#define MPI_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80)
+
+/* values for ConnectionRate field */
+#define MPI_SMP_PT_REQ_CONNECT_RATE_NEGOTIATED (0x00)
+#define MPI_SMP_PT_REQ_CONNECT_RATE_1_5 (0x08)
+#define MPI_SMP_PT_REQ_CONNECT_RATE_3_0 (0x09)
+
+
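+/*
+ * Editor's note: the helper below is an illustrative sketch, not part of the
+ * original LSI header.  It shows which fields of the request above a driver
+ * typically fills in before attaching the SMP frame through the SGL.
+ * MPI_FUNCTION_SMP_PASSTHROUGH is defined in mpi.h; memset(), the 0xFF
+ * "any port" value, and the function name are assumptions.
+ */
+static inline void example_prep_smp_passthrough(MSG_SMP_PASSTHROUGH_REQUEST *req,
+						U64 sas_address, U16 frame_len)
+{
+	memset(req, 0, sizeof(*req));
+	req->Function          = MPI_FUNCTION_SMP_PASSTHROUGH;	/* from mpi.h */
+	req->PhysicalPort      = 0xFF;	/* let the IOC choose the port */
+	req->ConnectionRate    = MPI_SMP_PT_REQ_CONNECT_RATE_NEGOTIATED;
+	req->SASAddress        = sas_address;
+	req->RequestDataLength = frame_len;
+	/* the caller still builds req->SGL to point at the SMP request frame */
+}
+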
+/* Serial Management Protocol Passthrough Reply */
+typedef struct _MSG_SMP_PASSTHROUGH_REPLY
+{
+ U8 PassthroughFlags; /* 00h */
+ U8 PhysicalPort; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 ResponseDataLength; /* 04h */
+ U8 Reserved1; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 Reserved2; /* 0Ch */
+ U8 SASStatus; /* 0Dh */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 Reserved3; /* 14h */
+ U8 ResponseData[4]; /* 18h */
+} MSG_SMP_PASSTHROUGH_REPLY, MPI_POINTER PTR_MSG_SMP_PASSTHROUGH_REPLY,
+ SmpPassthroughReply_t, MPI_POINTER pSmpPassthroughReply_t;
+
+#define MPI_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80)
+
+
+/****************************************************************************/
+/* SATA Passthrough Request */
+/****************************************************************************/
+
+typedef struct _MSG_SATA_PASSTHROUGH_REQUEST
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 PassthroughFlags; /* 04h */
+ U8 ConnectionRate; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 Reserved1; /* 0Ch */
+ U32 Reserved2; /* 10h */
+ U32 Reserved3; /* 14h */
+ U32 DataLength; /* 18h */
+ U8 CommandFIS[20]; /* 1Ch */
+ SGE_SIMPLE_UNION SGL; /* 30h */
+} MSG_SATA_PASSTHROUGH_REQUEST, MPI_POINTER PTR_MSG_SATA_PASSTHROUGH_REQUEST,
+ SataPassthroughRequest_t, MPI_POINTER pSataPassthroughRequest_t;
+
+/* values for PassthroughFlags field */
+#define MPI_SATA_PT_REQ_PT_FLAGS_RESET_DEVICE (0x0200)
+#define MPI_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100)
+#define MPI_SATA_PT_REQ_PT_FLAGS_DMA_QUEUED (0x0080)
+#define MPI_SATA_PT_REQ_PT_FLAGS_PACKET_COMMAND (0x0040)
+#define MPI_SATA_PT_REQ_PT_FLAGS_DMA (0x0020)
+#define MPI_SATA_PT_REQ_PT_FLAGS_PIO (0x0010)
+#define MPI_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004)
+#define MPI_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002)
+#define MPI_SATA_PT_REQ_PT_FLAGS_READ (0x0001)
+
+/* values for ConnectionRate field */
+#define MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED (0x00)
+#define MPI_SATA_PT_REQ_CONNECT_RATE_1_5 (0x08)
+#define MPI_SATA_PT_REQ_CONNECT_RATE_3_0 (0x09)
+
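+/*
+ * Editor's note: the value below is an illustrative sketch, not part of the
+ * original LSI header.  The PassthroughFlags bits above are OR-ed together;
+ * this shows a plausible combination for a PIO data-in command such as
+ * IDENTIFY DEVICE.  The helper name is hypothetical.
+ */
+static inline U16 example_sata_pio_read_flags(void)
+{
+	return MPI_SATA_PT_REQ_PT_FLAGS_PIO |
+	       MPI_SATA_PT_REQ_PT_FLAGS_READ;
+}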
+
+/* SATA Passthrough Reply */
+typedef struct _MSG_SATA_PASSTHROUGH_REPLY
+{
+ U8 TargetID; /* 00h */
+ U8 Bus; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 PassthroughFlags; /* 04h */
+ U8 Reserved1; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 Reserved2; /* 0Ch */
+ U8 SASStatus; /* 0Dh */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U8 StatusFIS[20]; /* 14h */
+ U32 StatusControlRegisters; /* 28h */
+ U32 TransferCount; /* 2Ch */
+} MSG_SATA_PASSTHROUGH_REPLY, MPI_POINTER PTR_MSG_SATA_PASSTHROUGH_REPLY,
+ SataPassthroughReply_t, MPI_POINTER pSataPassthroughReply_t;
+
+
+
+
+/****************************************************************************/
+/* SAS IO Unit Control Request */
+/****************************************************************************/
+
+typedef struct _MSG_SAS_IOUNIT_CONTROL_REQUEST
+{
+ U8 Operation; /* 00h */
+ U8 Reserved1; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 DevHandle; /* 04h */
+ U8 IOCParameter; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 TargetID; /* 0Ch */
+ U8 Bus; /* 0Dh */
+ U8 PhyNum; /* 0Eh */
+ U8 PrimFlags; /* 0Fh */
+ U32 Primitive; /* 10h */
+ U64 SASAddress; /* 14h */
+ U32 IOCParameterValue; /* 1Ch */
+} MSG_SAS_IOUNIT_CONTROL_REQUEST, MPI_POINTER PTR_MSG_SAS_IOUNIT_CONTROL_REQUEST,
+ SasIoUnitControlRequest_t, MPI_POINTER pSasIoUnitControlRequest_t;
+
+/* values for the Operation field */
+#define MPI_SAS_OP_CLEAR_NOT_PRESENT (0x01)
+#define MPI_SAS_OP_CLEAR_ALL_PERSISTENT (0x02)
+#define MPI_SAS_OP_PHY_LINK_RESET (0x06)
+#define MPI_SAS_OP_PHY_HARD_RESET (0x07)
+#define MPI_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
+#define MPI_SAS_OP_MAP_CURRENT (0x09)
+#define MPI_SAS_OP_SEND_PRIMITIVE (0x0A)
+#define MPI_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
+#define MPI_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
+#define MPI_SAS_OP_TRANSMIT_REMOVE_DEVICE (0x0D) /* obsolete name */
+#define MPI_SAS_OP_REMOVE_DEVICE (0x0D)
+#define MPI_SAS_OP_SET_IOC_PARAMETER (0x0E)
+#define MPI_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
+
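+/*
+ * Editor's note: the helper below is an illustrative sketch, not part of the
+ * original LSI header.  It fills in the minimum fields of the request above
+ * needed to hard-reset a single PHY.  MPI_FUNCTION_SAS_IO_UNIT_CONTROL is
+ * defined in mpi.h; memset() and the function name are assumptions.
+ */
+static inline void example_prep_phy_hard_reset(MSG_SAS_IOUNIT_CONTROL_REQUEST *req,
+					       U8 phy_num)
+{
+	memset(req, 0, sizeof(*req));
+	req->Function  = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;	/* from mpi.h */
+	req->Operation = MPI_SAS_OP_PHY_HARD_RESET;
+	req->PhyNum    = phy_num;
+}
+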
+/* values for the PrimFlags field */
+#define MPI_SAS_PRIMFLAGS_SINGLE (0x08)
+#define MPI_SAS_PRIMFLAGS_TRIPLE (0x02)
+#define MPI_SAS_PRIMFLAGS_REDUNDANT (0x01)
+
+
+/* SAS IO Unit Control Reply */
+typedef struct _MSG_SAS_IOUNIT_CONTROL_REPLY
+{
+ U8 Operation; /* 00h */
+ U8 Reserved1; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 DevHandle; /* 04h */
+ U8 IOCParameter; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved4; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_SAS_IOUNIT_CONTROL_REPLY, MPI_POINTER PTR_MSG_SAS_IOUNIT_CONTROL_REPLY,
+ SasIoUnitControlReply_t, MPI_POINTER pSasIoUnitControlReply_t;
+
+#endif
+
+
diff --git a/drivers/message/fusion/lsi/mpi_targ.h b/drivers/message/fusion/lsi/mpi_targ.h
new file mode 100644
index 00000000..c3dea7f6
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_targ.h
@@ -0,0 +1,650 @@
+/*
+ * Copyright (c) 2000-2008 LSI Corporation.
+ *
+ *
+ * Name: mpi_targ.h
+ * Title: MPI Target mode messages and structures
+ * Creation Date: June 22, 2000
+ *
+ * mpi_targ.h Version: 01.05.06
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 06-22-00 01.00.02 Added _MSG_TARGET_CMD_BUFFER_POST_REPLY structure.
+ * Corrected DECSRIPTOR typo to DESCRIPTOR.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * Modified target mode to use IoIndex instead of
+ * HostIndex and IocIndex. Added Alias.
+ * 01-09-01 01.01.02 Added defines for TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER
+ * and TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER.
+ * 02-20-01 01.01.03 Started using MPI_POINTER.
+ * Added structures for MPI_TARGET_SCSI_SPI_CMD_BUFFER and
+ * MPI_TARGET_FCP_CMD_BUFFER.
+ * 03-27-01 01.01.04 Added structure offset comments.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 09-28-01 01.02.02 Added structure for MPI_TARGET_SCSI_SPI_STATUS_IU.
+ * Added PriorityReason field to some replies and
+ * defined more PriorityReason codes.
+ * Added some defines for to support previous version
+ * of MPI.
+ * 10-04-01 01.02.03 Added PriorityReason to MSG_TARGET_ERROR_REPLY.
+ * 11-01-01 01.02.04 Added define for TARGET_STATUS_SEND_FLAGS_HIGH_PRIORITY.
+ * 03-14-02 01.02.05 Modified MPI_TARGET_FCP_RSP_BUFFER to get the proper
+ * byte ordering.
+ * 05-31-02 01.02.06 Modified TARGET_MODE_REPLY_ALIAS_MASK to only include
+ * one bit.
+ * Added AliasIndex field to MPI_TARGET_FCP_CMD_BUFFER.
+ * 09-16-02 01.02.07 Added flags for confirmed completion.
+ * Added PRIORITY_REASON_TARGET_BUSY.
+ * 11-15-02 01.02.08 Added AliasID field to MPI_TARGET_SCSI_SPI_CMD_BUFFER.
+ * 04-01-03 01.02.09 Added OptionalOxid field to MPI_TARGET_FCP_CMD_BUFFER.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Added new request message structures for
+ * MSG_TARGET_CMD_BUF_POST_BASE_REQUEST,
+ * MSG_TARGET_CMD_BUF_POST_LIST_REQUEST, and
+ * MSG_TARGET_ASSIST_EXT_REQUEST.
+ * Added new structures for SAS SSP Command buffer, SSP
+ * Task buffer, and SSP Status IU.
+ * 10-05-04 01.05.02 MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY added.
+ * 02-22-05 01.05.03 Changed a comment.
+ * 03-11-05 01.05.04 Removed TargetAssistExtended Request.
+ * 06-24-05 01.05.05 Added TargetAssistExtended structures and defines.
+ * 03-27-06 01.05.06 Added a comment.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_TARG_H
+#define MPI_TARG_H
+
+
+/******************************************************************************
+*
+* S C S I T a r g e t M e s s a g e s
+*
+*******************************************************************************/
+
+typedef struct _CMD_BUFFER_DESCRIPTOR
+{
+ U16 IoIndex; /* 00h */
+ U16 Reserved; /* 02h */
+ union /* 04h */
+ {
+ U32 PhysicalAddress32;
+ U64 PhysicalAddress64;
+ } u;
+} CMD_BUFFER_DESCRIPTOR, MPI_POINTER PTR_CMD_BUFFER_DESCRIPTOR,
+ CmdBufferDescriptor_t, MPI_POINTER pCmdBufferDescriptor_t;
+
+
+/****************************************************************************/
+/* Target Command Buffer Post Request */
+/****************************************************************************/
+
+typedef struct _MSG_TARGET_CMD_BUFFER_POST_REQUEST
+{
+ U8 BufferPostFlags; /* 00h */
+ U8 BufferCount; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U8 BufferLength; /* 04h */
+ U8 Reserved; /* 05h */
+ U8 Reserved1; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ CMD_BUFFER_DESCRIPTOR Buffer[1]; /* 0Ch */
+} MSG_TARGET_CMD_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST,
+ TargetCmdBufferPostRequest_t, MPI_POINTER pTargetCmdBufferPostRequest_t;
+
+#define CMD_BUFFER_POST_FLAGS_PORT_MASK (0x01)
+#define CMD_BUFFER_POST_FLAGS_ADDR_MODE_MASK (0x80)
+#define CMD_BUFFER_POST_FLAGS_ADDR_MODE_32 (0)
+#define CMD_BUFFER_POST_FLAGS_ADDR_MODE_64 (1)
+#define CMD_BUFFER_POST_FLAGS_64_BIT_ADDR (0x80)
+
+#define CMD_BUFFER_POST_IO_INDEX_MASK (0x00003FFF)
+#define CMD_BUFFER_POST_IO_INDEX_MASK_0100 (0x000003FF) /* obsolete */
+
+
+typedef struct _MSG_TARGET_CMD_BUFFER_POST_REPLY
+{
+ U8 BufferPostFlags; /* 00h */
+ U8 BufferCount; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U8 BufferLength; /* 04h */
+ U8 Reserved; /* 05h */
+ U8 Reserved1; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved2; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_TARGET_CMD_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_REPLY,
+ TargetCmdBufferPostReply_t, MPI_POINTER pTargetCmdBufferPostReply_t;
+
+/* the following structure is obsolete as of MPI v1.2 */
+typedef struct _MSG_PRIORITY_CMD_RECEIVED_REPLY
+{
+ U16 Reserved; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 PriorityReason; /* 0Ch */
+ U8 Reserved3; /* 0Dh */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 ReplyWord; /* 14h */
+} MSG_PRIORITY_CMD_RECEIVED_REPLY, MPI_POINTER PTR_MSG_PRIORITY_CMD_RECEIVED_REPLY,
+ PriorityCommandReceivedReply_t, MPI_POINTER pPriorityCommandReceivedReply_t;
+
+
+typedef struct _MSG_TARGET_CMD_BUFFER_POST_ERROR_REPLY
+{
+ U16 Reserved; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 PriorityReason; /* 0Ch */
+ U8 Reserved3; /* 0Dh */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 ReplyWord; /* 14h */
+} MSG_TARGET_CMD_BUFFER_POST_ERROR_REPLY,
+ MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_ERROR_REPLY,
+ TargetCmdBufferPostErrorReply_t, MPI_POINTER pTargetCmdBufferPostErrorReply_t;
+
+#define PRIORITY_REASON_NO_DISCONNECT (0x00)
+#define PRIORITY_REASON_SCSI_TASK_MANAGEMENT (0x01)
+#define PRIORITY_REASON_CMD_PARITY_ERR (0x02)
+#define PRIORITY_REASON_MSG_OUT_PARITY_ERR (0x03)
+#define PRIORITY_REASON_LQ_CRC_ERR (0x04)
+#define PRIORITY_REASON_CMD_CRC_ERR (0x05)
+#define PRIORITY_REASON_PROTOCOL_ERR (0x06)
+#define PRIORITY_REASON_DATA_OUT_PARITY_ERR (0x07)
+#define PRIORITY_REASON_DATA_OUT_CRC_ERR (0x08)
+#define PRIORITY_REASON_TARGET_BUSY (0x09)
+#define PRIORITY_REASON_UNKNOWN (0xFF)
+
+
+/****************************************************************************/
+/* Target Command Buffer Post Base Request */
+/****************************************************************************/
+
+typedef struct _MSG_TARGET_CMD_BUF_POST_BASE_REQUEST
+{
+ U8 BufferPostFlags; /* 00h */
+ U8 PortNumber; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 TotalCmdBuffers; /* 04h */
+ U8 Reserved; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 Reserved1; /* 0Ch */
+ U16 CmdBufferLength; /* 10h */
+ U16 NextCmdBufferOffset; /* 12h */
+ U32 BaseAddressLow; /* 14h */
+ U32 BaseAddressHigh; /* 18h */
+} MSG_TARGET_CMD_BUF_POST_BASE_REQUEST,
+ MPI_POINTER PTR__MSG_TARGET_CMD_BUF_POST_BASE_REQUEST,
+ TargetCmdBufferPostBaseRequest_t,
+ MPI_POINTER pTargetCmdBufferPostBaseRequest_t;
+
+#define CMD_BUFFER_POST_BASE_FLAGS_AUTO_POST_ALL (0x01)
+
+
+typedef struct _MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY
+{
+ U16 Reserved; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY,
+ MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_BASE_LIST_REPLY,
+ TargetCmdBufferPostBaseListReply_t,
+ MPI_POINTER pTargetCmdBufferPostBaseListReply_t;
+
+
+/****************************************************************************/
+/* Target Command Buffer Post List Request */
+/****************************************************************************/
+
+typedef struct _MSG_TARGET_CMD_BUF_POST_LIST_REQUEST
+{
+ U8 Reserved; /* 00h */
+ U8 PortNumber; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 CmdBufferCount; /* 04h */
+ U8 Reserved1; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 Reserved2; /* 0Ch */
+ U16 IoIndex[2]; /* 10h */
+} MSG_TARGET_CMD_BUF_POST_LIST_REQUEST,
+ MPI_POINTER PTR_MSG_TARGET_CMD_BUF_POST_LIST_REQUEST,
+ TargetCmdBufferPostListRequest_t,
+ MPI_POINTER pTargetCmdBufferPostListRequest_t;
+
+
+/****************************************************************************/
+/* Command Buffer Formats (with 16 byte CDB) */
+/****************************************************************************/
+
+typedef struct _MPI_TARGET_FCP_CMD_BUFFER
+{
+ U8 FcpLun[8]; /* 00h */
+ U8 FcpCntl[4]; /* 08h */
+ U8 FcpCdb[16]; /* 0Ch */
+ U32 FcpDl; /* 1Ch */
+ U8 AliasIndex; /* 20h */
+ U8 Reserved1; /* 21h */
+ U16 OptionalOxid; /* 22h */
+} MPI_TARGET_FCP_CMD_BUFFER, MPI_POINTER PTR_MPI_TARGET_FCP_CMD_BUFFER,
+ MpiTargetFcpCmdBuffer, MPI_POINTER pMpiTargetFcpCmdBuffer;
+
+
+typedef struct _MPI_TARGET_SCSI_SPI_CMD_BUFFER
+{
+ /* SPI L_Q information unit */
+ U8 L_QType; /* 00h */
+ U8 Reserved; /* 01h */
+ U16 Tag; /* 02h */
+ U8 LogicalUnitNumber[8]; /* 04h */
+ U32 DataLength; /* 0Ch */
+ /* SPI command information unit */
+ U8 ReservedFirstByteOfCommandIU; /* 10h */
+ U8 TaskAttribute; /* 11h */
+ U8 TaskManagementFlags; /* 12h */
+ U8 AdditionalCDBLength; /* 13h */
+ U8 CDB[16]; /* 14h */
+ /* Alias ID */
+ U8 AliasID; /* 24h */
+ U8 Reserved1; /* 25h */
+ U16 Reserved2; /* 26h */
+} MPI_TARGET_SCSI_SPI_CMD_BUFFER,
+ MPI_POINTER PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER,
+ MpiTargetScsiSpiCmdBuffer, MPI_POINTER pMpiTargetScsiSpiCmdBuffer;
+
+
+typedef struct _MPI_TARGET_SSP_CMD_BUFFER
+{
+ U8 FrameType; /* 00h */
+ U8 Reserved1; /* 01h */
+ U16 Reserved2; /* 02h */
+ U16 InitiatorTag; /* 04h */
+ U16 DevHandle; /* 06h */
+ /* COMMAND information unit starts here */
+ U8 LogicalUnitNumber[8]; /* 08h */
+ U8 Reserved3; /* 10h */
+ U8 TaskAttribute; /* lower 3 bits */ /* 11h */
+ U8 Reserved4; /* 12h */
+ U8 AdditionalCDBLength; /* upper 5 bits */ /* 13h */
+ U8 CDB[16]; /* 14h */
+ /* Additional CDB bytes extend past the CDB field */
+} MPI_TARGET_SSP_CMD_BUFFER, MPI_POINTER PTR_MPI_TARGET_SSP_CMD_BUFFER,
+ MpiTargetSspCmdBuffer, MPI_POINTER pMpiTargetSspCmdBuffer;
+
+typedef struct _MPI_TARGET_SSP_TASK_BUFFER
+{
+ U8 FrameType; /* 00h */
+ U8 Reserved1; /* 01h */
+ U16 Reserved2; /* 02h */
+ U16 InitiatorTag; /* 04h */
+ U16 DevHandle; /* 06h */
+ /* TASK information unit starts here */
+ U8 LogicalUnitNumber[8]; /* 08h */
+ U8 Reserved3; /* 10h */
+ U8 Reserved4; /* 11h */
+ U8 TaskManagementFunction; /* 12h */
+ U8 Reserved5; /* 13h */
+ U16 ManagedTaskTag; /* 14h */
+ U16 Reserved6; /* 16h */
+ U32 Reserved7; /* 18h */
+ U32 Reserved8; /* 1Ch */
+ U32 Reserved9; /* 20h */
+} MPI_TARGET_SSP_TASK_BUFFER, MPI_POINTER PTR_MPI_TARGET_SSP_TASK_BUFFER,
+ MpiTargetSspTaskBuffer, MPI_POINTER pMpiTargetSspTaskBuffer;
+
+
+/****************************************************************************/
+/* Target Assist Request */
+/****************************************************************************/
+
+typedef struct _MSG_TARGET_ASSIST_REQUEST
+{
+ U8 StatusCode; /* 00h */
+ U8 TargetAssistFlags; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 QueueTag; /* 04h */
+ U8 Reserved; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 ReplyWord; /* 0Ch */
+ U8 LUN[8]; /* 10h */
+ U32 RelativeOffset; /* 18h */
+ U32 DataLength; /* 1Ch */
+ SGE_IO_UNION SGL[1]; /* 20h */
+} MSG_TARGET_ASSIST_REQUEST, MPI_POINTER PTR_MSG_TARGET_ASSIST_REQUEST,
+ TargetAssistRequest_t, MPI_POINTER pTargetAssistRequest_t;
+
+#define TARGET_ASSIST_FLAGS_DATA_DIRECTION (0x01)
+#define TARGET_ASSIST_FLAGS_AUTO_STATUS (0x02)
+#define TARGET_ASSIST_FLAGS_HIGH_PRIORITY (0x04)
+#define TARGET_ASSIST_FLAGS_CONFIRMED (0x08)
+#define TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER (0x80)
+
+/* Standard Target Mode Reply message */
+typedef struct _MSG_TARGET_ERROR_REPLY
+{
+ U16 Reserved; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 PriorityReason; /* 0Ch */
+ U8 Reserved3; /* 0Dh */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 ReplyWord; /* 14h */
+ U32 TransferCount; /* 18h */
+} MSG_TARGET_ERROR_REPLY, MPI_POINTER PTR_MSG_TARGET_ERROR_REPLY,
+ TargetErrorReply_t, MPI_POINTER pTargetErrorReply_t;
+
+
+/****************************************************************************/
+/* Target Assist Extended Request */
+/****************************************************************************/
+
+typedef struct _MSG_TARGET_ASSIST_EXT_REQUEST
+{
+ U8 StatusCode; /* 00h */
+ U8 TargetAssistFlags; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 QueueTag; /* 04h */
+ U8 Reserved1; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 ReplyWord; /* 0Ch */
+ U8 LUN[8]; /* 10h */
+ U32 RelativeOffset; /* 18h */
+ U32 Reserved2; /* 1Ch */
+ U32 Reserved3; /* 20h */
+ U32 PrimaryReferenceTag; /* 24h */
+ U16 PrimaryApplicationTag; /* 28h */
+ U16 PrimaryApplicationTagMask; /* 2Ah */
+ U32 Reserved4; /* 2Ch */
+ U32 DataLength; /* 30h */
+ U32 BidirectionalDataLength; /* 34h */
+ U32 SecondaryReferenceTag; /* 38h */
+ U16 SecondaryApplicationTag; /* 3Ch */
+ U16 Reserved5; /* 3Eh */
+ U16 EEDPFlags; /* 40h */
+ U16 ApplicationTagTranslationMask; /* 42h */
+ U32 EEDPBlockSize; /* 44h */
+ U8 SGLOffset0; /* 48h */
+ U8 SGLOffset1; /* 49h */
+ U8 SGLOffset2; /* 4Ah */
+ U8 SGLOffset3; /* 4Bh */
+ U32 Reserved6; /* 4Ch */
+ SGE_IO_UNION SGL[1]; /* 50h */
+} MSG_TARGET_ASSIST_EXT_REQUEST, MPI_POINTER PTR_MSG_TARGET_ASSIST_EXT_REQUEST,
+ TargetAssistExtRequest_t, MPI_POINTER pTargetAssistExtRequest_t;
+
+/* see the defines after MSG_TARGET_ASSIST_REQUEST for TargetAssistFlags */
+
+/* defines for the MsgFlags field */
+#define TARGET_ASSIST_EXT_MSGFLAGS_BIDIRECTIONAL (0x20)
+#define TARGET_ASSIST_EXT_MSGFLAGS_MULTICAST (0x10)
+#define TARGET_ASSIST_EXT_MSGFLAGS_SGL_OFFSET_CHAINS (0x08)
+
+/* defines for the EEDPFlags field */
+#define TARGET_ASSIST_EXT_EEDP_MASK_OP (0x0007)
+#define TARGET_ASSIST_EXT_EEDP_NOOP_OP (0x0000)
+#define TARGET_ASSIST_EXT_EEDP_CHK_OP (0x0001)
+#define TARGET_ASSIST_EXT_EEDP_STRIP_OP (0x0002)
+#define TARGET_ASSIST_EXT_EEDP_CHKRM_OP (0x0003)
+#define TARGET_ASSIST_EXT_EEDP_INSERT_OP (0x0004)
+#define TARGET_ASSIST_EXT_EEDP_REPLACE_OP (0x0006)
+#define TARGET_ASSIST_EXT_EEDP_CHKREGEN_OP (0x0007)
+
+#define TARGET_ASSIST_EXT_EEDP_PASS_REF_TAG (0x0008)
+
+#define TARGET_ASSIST_EXT_EEDP_T10_CHK_MASK (0x0700)
+#define TARGET_ASSIST_EXT_EEDP_T10_CHK_GUARD (0x0100)
+#define TARGET_ASSIST_EXT_EEDP_T10_CHK_APPTAG (0x0200)
+#define TARGET_ASSIST_EXT_EEDP_T10_CHK_REFTAG (0x0400)
+#define TARGET_ASSIST_EXT_EEDP_T10_CHK_SHIFT (8)
+
+#define TARGET_ASSIST_EXT_EEDP_INC_SEC_APPTAG (0x1000)
+#define TARGET_ASSIST_EXT_EEDP_INC_PRI_APPTAG (0x2000)
+#define TARGET_ASSIST_EXT_EEDP_INC_SEC_REFTAG (0x4000)
+#define TARGET_ASSIST_EXT_EEDP_INC_PRI_REFTAG (0x8000)
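+
+/*
+ * Illustrative sketch (not part of the original header): a plausible
+ * EEDPFlags encoding for a check operation that verifies the T10 guard
+ * and reference tag fields:
+ *
+ *   U16 eedp_flags = TARGET_ASSIST_EXT_EEDP_CHK_OP |
+ *                    TARGET_ASSIST_EXT_EEDP_T10_CHK_GUARD |
+ *                    TARGET_ASSIST_EXT_EEDP_T10_CHK_REFTAG;   i.e. 0x0501
+ */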
+
+
+/****************************************************************************/
+/* Target Status Send Request */
+/****************************************************************************/
+
+typedef struct _MSG_TARGET_STATUS_SEND_REQUEST
+{
+ U8 StatusCode; /* 00h */
+ U8 StatusFlags; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 QueueTag; /* 04h */
+ U8 Reserved; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 ReplyWord; /* 0Ch */
+ U8 LUN[8]; /* 10h */
+ SGE_SIMPLE_UNION StatusDataSGE; /* 18h */
+} MSG_TARGET_STATUS_SEND_REQUEST, MPI_POINTER PTR_MSG_TARGET_STATUS_SEND_REQUEST,
+ TargetStatusSendRequest_t, MPI_POINTER pTargetStatusSendRequest_t;
+
+#define TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS (0x01)
+#define TARGET_STATUS_SEND_FLAGS_HIGH_PRIORITY (0x04)
+#define TARGET_STATUS_SEND_FLAGS_CONFIRMED (0x08)
+#define TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER (0x80)
+
+/*
+ * NOTE: FCP_RSP data is big-endian. When used on a little-endian system, this
+ * structure properly orders the bytes.
+ */
+typedef struct _MPI_TARGET_FCP_RSP_BUFFER
+{
+ U8 Reserved0[8]; /* 00h */
+ U8 Reserved1[2]; /* 08h */
+ U8 FcpFlags; /* 0Ah */
+ U8 FcpStatus; /* 0Bh */
+ U32 FcpResid; /* 0Ch */
+ U32 FcpSenseLength; /* 10h */
+ U32 FcpResponseLength; /* 14h */
+ U8 FcpResponseData[8]; /* 18h */
+ U8 FcpSenseData[32]; /* Pad to 64 bytes */ /* 20h */
+} MPI_TARGET_FCP_RSP_BUFFER, MPI_POINTER PTR_MPI_TARGET_FCP_RSP_BUFFER,
+ MpiTargetFcpRspBuffer, MPI_POINTER pMpiTargetFcpRspBuffer;
+
+/*
+ * NOTE: The SPI status IU is big-endian. When used on a little-endian system,
+ * this structure properly orders the bytes.
+ */
+typedef struct _MPI_TARGET_SCSI_SPI_STATUS_IU
+{
+ U8 Reserved0; /* 00h */
+ U8 Reserved1; /* 01h */
+ U8 Valid; /* 02h */
+ U8 Status; /* 03h */
+ U32 SenseDataListLength; /* 04h */
+ U32 PktFailuresListLength; /* 08h */
+ U8 SenseData[52]; /* Pad the IU to 64 bytes */ /* 0Ch */
+} MPI_TARGET_SCSI_SPI_STATUS_IU, MPI_POINTER PTR_MPI_TARGET_SCSI_SPI_STATUS_IU,
+ TargetScsiSpiStatusIU_t, MPI_POINTER pTargetScsiSpiStatusIU_t;
+
+/*
+ * NOTE: The SSP status IU is big-endian. When used on a little-endian system,
+ * this structure properly orders the bytes.
+ */
+typedef struct _MPI_TARGET_SSP_RSP_IU
+{
+ U32 Reserved0[6]; /* reserved for SSP header */ /* 00h */
+ /* start of RESPONSE information unit */
+ U32 Reserved1; /* 18h */
+ U32 Reserved2; /* 1Ch */
+ U16 Reserved3; /* 20h */
+ U8 DataPres; /* lower 2 bits */ /* 22h */
+ U8 Status; /* 23h */
+ U32 Reserved4; /* 24h */
+ U32 SenseDataLength; /* 28h */
+ U32 ResponseDataLength; /* 2Ch */
+ U8 ResponseSenseData[4]; /* 30h */
+} MPI_TARGET_SSP_RSP_IU, MPI_POINTER PTR_MPI_TARGET_SSP_RSP_IU,
+ MpiTargetSspRspIu_t, MPI_POINTER pMpiTargetSspRspIu_t;
+
+
+/****************************************************************************/
+/* Target Mode Abort Request */
+/****************************************************************************/
+
+typedef struct _MSG_TARGET_MODE_ABORT_REQUEST
+{
+ U8 AbortType; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 ReplyWord; /* 0Ch */
+ U32 MsgContextToAbort; /* 10h */
+} MSG_TARGET_MODE_ABORT, MPI_POINTER PTR_MSG_TARGET_MODE_ABORT,
+ TargetModeAbort_t, MPI_POINTER pTargetModeAbort_t;
+
+#define TARGET_MODE_ABORT_TYPE_ALL_CMD_BUFFERS (0x00)
+#define TARGET_MODE_ABORT_TYPE_ALL_IO (0x01)
+#define TARGET_MODE_ABORT_TYPE_EXACT_IO (0x02)
+#define TARGET_MODE_ABORT_TYPE_EXACT_IO_REQUEST (0x03)
+
+/* Target Mode Abort Reply */
+
+typedef struct _MSG_TARGET_MODE_ABORT_REPLY
+{
+ U16 Reserved; /* 00h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 AbortCount; /* 14h */
+} MSG_TARGET_MODE_ABORT_REPLY, MPI_POINTER PTR_MSG_TARGET_MODE_ABORT_REPLY,
+ TargetModeAbortReply_t, MPI_POINTER pTargetModeAbortReply_t;
+
+
+/****************************************************************************/
+/* Target Mode Context Reply */
+/****************************************************************************/
+
+#define TARGET_MODE_REPLY_IO_INDEX_MASK (0x00003FFF)
+#define TARGET_MODE_REPLY_IO_INDEX_SHIFT (0)
+#define TARGET_MODE_REPLY_INITIATOR_INDEX_MASK (0x03FFC000)
+#define TARGET_MODE_REPLY_INITIATOR_INDEX_SHIFT (14)
+#define TARGET_MODE_REPLY_ALIAS_MASK (0x04000000)
+#define TARGET_MODE_REPLY_ALIAS_SHIFT (26)
+#define TARGET_MODE_REPLY_PORT_MASK (0x10000000)
+#define TARGET_MODE_REPLY_PORT_SHIFT (28)
+
+
+#define GET_IO_INDEX(x) (((x) & TARGET_MODE_REPLY_IO_INDEX_MASK) \
+ >> TARGET_MODE_REPLY_IO_INDEX_SHIFT)
+
+#define SET_IO_INDEX(t, i) \
+ ((t) = ((t) & ~TARGET_MODE_REPLY_IO_INDEX_MASK) | \
+ (((i) << TARGET_MODE_REPLY_IO_INDEX_SHIFT) & \
+ TARGET_MODE_REPLY_IO_INDEX_MASK))
+
+#define GET_INITIATOR_INDEX(x) (((x) & TARGET_MODE_REPLY_INITIATOR_INDEX_MASK) \
+ >> TARGET_MODE_REPLY_INITIATOR_INDEX_SHIFT)
+
+#define SET_INITIATOR_INDEX(t, ii) \
+ ((t) = ((t) & ~TARGET_MODE_REPLY_INITIATOR_INDEX_MASK) | \
+ (((ii) << TARGET_MODE_REPLY_INITIATOR_INDEX_SHIFT) & \
+ TARGET_MODE_REPLY_INITIATOR_INDEX_MASK))
+
+#define GET_ALIAS(x) (((x) & TARGET_MODE_REPLY_ALIAS_MASK) \
+ >> TARGET_MODE_REPLY_ALIAS_SHIFT)
+
+#define SET_ALIAS(t, a) ((t) = ((t) & ~TARGET_MODE_REPLY_ALIAS_MASK) | \
+ (((a) << TARGET_MODE_REPLY_ALIAS_SHIFT) & \
+ TARGET_MODE_REPLY_ALIAS_MASK))
+
+#define GET_PORT(x) (((x) & TARGET_MODE_REPLY_PORT_MASK) \
+ >> TARGET_MODE_REPLY_PORT_SHIFT)
+
+#define SET_PORT(t, p) ((t) = ((t) & ~TARGET_MODE_REPLY_PORT_MASK) | \
+ (((p) << TARGET_MODE_REPLY_PORT_SHIFT) & \
+ TARGET_MODE_REPLY_PORT_MASK))
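+
+/*
+ * Illustrative sketch (not part of the original header): decoding a
+ * made-up ReplyWord value with the accessor macros above.
+ *
+ *   U32 reply_word = 0x10004003;
+ *   GET_IO_INDEX(reply_word)        == 0x0003  (bits 0-13)
+ *   GET_INITIATOR_INDEX(reply_word) == 0x0001  (bits 14-25)
+ *   GET_ALIAS(reply_word)           == 0       (bit 26)
+ *   GET_PORT(reply_word)            == 1       (bit 28)
+ */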
+
+/* the following obsolete values are for MPI v1.0 support */
+#define TARGET_MODE_REPLY_0100_MASK_HOST_INDEX (0x000003FF)
+#define TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX (0)
+#define TARGET_MODE_REPLY_0100_MASK_IOC_INDEX (0x001FF800)
+#define TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX (11)
+#define TARGET_MODE_REPLY_0100_PORT_MASK (0x00400000)
+#define TARGET_MODE_REPLY_0100_PORT_SHIFT (22)
+#define TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX (0x1F800000)
+#define TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX (23)
+
+#define GET_HOST_INDEX_0100(x) (((x) & TARGET_MODE_REPLY_0100_MASK_HOST_INDEX) \
+ >> TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX)
+
+#define SET_HOST_INDEX_0100(t, hi) \
+ ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_HOST_INDEX) | \
+ (((hi) << TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX) & \
+ TARGET_MODE_REPLY_0100_MASK_HOST_INDEX))
+
+#define GET_IOC_INDEX_0100(x) (((x) & TARGET_MODE_REPLY_0100_MASK_IOC_INDEX) \
+ >> TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX)
+
+#define SET_IOC_INDEX_0100(t, ii) \
+ ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_IOC_INDEX) | \
+ (((ii) << TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX) & \
+ TARGET_MODE_REPLY_0100_MASK_IOC_INDEX))
+
+#define GET_INITIATOR_INDEX_0100(x) \
+ (((x) & TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX) \
+ >> TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX)
+
+#define SET_INITIATOR_INDEX_0100(t, ii) \
+ ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX) | \
+ (((ii) << TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX) & \
+ TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX))
+
+
+#endif
+
diff --git a/drivers/message/fusion/lsi/mpi_tool.h b/drivers/message/fusion/lsi/mpi_tool.h
new file mode 100644
index 00000000..53cd715a
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_tool.h
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2001-2008 LSI Corporation.
+ *
+ *
+ * Name: mpi_tool.h
+ * Title: MPI Toolbox structures and definitions
+ * Creation Date: July 30, 2001
+ *
+ * mpi_tool.h Version: 01.05.03
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 08-08-01 01.02.01 Original release.
+ * 08-29-01 01.02.02 Added DIAG_DATA_UPLOAD_HEADER and related defines.
+ * 01-16-04 01.02.03 Added defines and structures for new tools
+ *                      MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL and
+ * MPI_TOOLBOX_FC_MANAGEMENT_TOOL.
+ * 04-29-04 01.02.04 Added message structures for Diagnostic Buffer Post and
+ * Diagnostic Release requests and replies.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Original release for MPI v1.5.
+ * 10-06-04 01.05.02 Added define for MPI_DIAG_BUF_TYPE_COUNT.
+ * 02-09-05 01.05.03 Added frame size option to FC management tool.
+ * Added Beacon tool to the Toolbox.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_TOOL_H
+#define MPI_TOOL_H
+
+#define MPI_TOOLBOX_CLEAN_TOOL (0x00)
+#define MPI_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
+#define MPI_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02)
+#define MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
+#define MPI_TOOLBOX_FC_MANAGEMENT_TOOL (0x04)
+#define MPI_TOOLBOX_BEACON_TOOL (0x05)
+
+
+/****************************************************************************/
+/* Toolbox reply */
+/****************************************************************************/
+
+typedef struct _MSG_TOOLBOX_REPLY
+{
+ U8 Tool; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved3; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_TOOLBOX_REPLY, MPI_POINTER PTR_MSG_TOOLBOX_REPLY,
+ ToolboxReply_t, MPI_POINTER pToolboxReply_t;
+
+
+/****************************************************************************/
+/* Toolbox Clean Tool request */
+/****************************************************************************/
+
+typedef struct _MSG_TOOLBOX_CLEAN_REQUEST
+{
+ U8 Tool; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 Flags; /* 0Ch */
+} MSG_TOOLBOX_CLEAN_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_CLEAN_REQUEST,
+ ToolboxCleanRequest_t, MPI_POINTER pToolboxCleanRequest_t;
+
+#define MPI_TOOLBOX_CLEAN_NVSRAM (0x00000001)
+#define MPI_TOOLBOX_CLEAN_SEEPROM (0x00000002)
+#define MPI_TOOLBOX_CLEAN_FLASH (0x00000004)
+#define MPI_TOOLBOX_CLEAN_BOOTLOADER (0x04000000)
+#define MPI_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
+#define MPI_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
+#define MPI_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
+#define MPI_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000)
+#define MPI_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000)
+
+
+/****************************************************************************/
+/* Toolbox Memory Move request */
+/****************************************************************************/
+
+typedef struct _MSG_TOOLBOX_MEM_MOVE_REQUEST
+{
+ U8 Tool; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ SGE_SIMPLE_UNION SGL; /* 0Ch */
+} MSG_TOOLBOX_MEM_MOVE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_MEM_MOVE_REQUEST,
+ ToolboxMemMoveRequest_t, MPI_POINTER pToolboxMemMoveRequest_t;
+
+
+/****************************************************************************/
+/* Toolbox Diagnostic Data Upload request */
+/****************************************************************************/
+
+typedef struct _MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST
+{
+ U8 Tool; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 Flags; /* 0Ch */
+ U32 Reserved3; /* 10h */
+ SGE_SIMPLE_UNION SGL; /* 14h */
+} MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ ToolboxDiagDataUploadRequest_t, MPI_POINTER pToolboxDiagDataUploadRequest_t;
+
+typedef struct _DIAG_DATA_UPLOAD_HEADER
+{
+ U32 DiagDataLength; /* 00h */
+ U8 FormatCode; /* 04h */
+ U8 Reserved; /* 05h */
+ U16 Reserved1; /* 06h */
+} DIAG_DATA_UPLOAD_HEADER, MPI_POINTER PTR_DIAG_DATA_UPLOAD_HEADER,
+ DiagDataUploadHeader_t, MPI_POINTER pDiagDataUploadHeader_t;
+
+#define MPI_TB_DIAG_FORMAT_SCSI_PRINTF_1 (0x01)
+#define MPI_TB_DIAG_FORMAT_SCSI_2 (0x02)
+#define MPI_TB_DIAG_FORMAT_SCSI_3 (0x03)
+#define MPI_TB_DIAG_FORMAT_FC_TRACE_1 (0x04)
+
+
+/****************************************************************************/
+/* Toolbox ISTWI Read Write request */
+/****************************************************************************/
+
+typedef struct _MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST
+{
+ U8 Tool; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 Flags; /* 0Ch */
+ U8 BusNum; /* 0Dh */
+ U16 Reserved3; /* 0Eh */
+ U8 NumAddressBytes; /* 10h */
+ U8 Reserved4; /* 11h */
+ U16 DataLength; /* 12h */
+ U8 DeviceAddr; /* 14h */
+ U8 Addr1; /* 15h */
+ U8 Addr2; /* 16h */
+ U8 Addr3; /* 17h */
+ U32 Reserved5; /* 18h */
+ SGE_SIMPLE_UNION SGL; /* 1Ch */
+} MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ ToolboxIstwiReadWriteRequest_t, MPI_POINTER pToolboxIstwiReadWriteRequest_t;
+
+#define MPI_TB_ISTWI_FLAGS_WRITE (0x00)
+#define MPI_TB_ISTWI_FLAGS_READ (0x01)
+
+
+/****************************************************************************/
+/* Toolbox FC Management request */
+/****************************************************************************/
+
+/* ActionInfo for Bus and TargetId */
+typedef struct _MPI_TB_FC_MANAGE_BUS_TID_AI
+{
+ U16 Reserved; /* 00h */
+ U8 Bus; /* 02h */
+ U8 TargetId; /* 03h */
+} MPI_TB_FC_MANAGE_BUS_TID_AI, MPI_POINTER PTR_MPI_TB_FC_MANAGE_BUS_TID_AI,
+ MpiTbFcManageBusTidAi_t, MPI_POINTER pMpiTbFcManageBusTidAi_t;
+
+/* ActionInfo for port identifier */
+typedef struct _MPI_TB_FC_MANAGE_PID_AI
+{
+ U32 PortIdentifier; /* 00h */
+} MPI_TB_FC_MANAGE_PID_AI, MPI_POINTER PTR_MPI_TB_FC_MANAGE_PID_AI,
+ MpiTbFcManagePidAi_t, MPI_POINTER pMpiTbFcManagePidAi_t;
+
+/* ActionInfo for set max frame size */
+typedef struct _MPI_TB_FC_MANAGE_FRAME_SIZE_AI
+{
+ U16 FrameSize; /* 00h */
+ U8 PortNum; /* 02h */
+ U8 Reserved1; /* 03h */
+} MPI_TB_FC_MANAGE_FRAME_SIZE_AI, MPI_POINTER PTR_MPI_TB_FC_MANAGE_FRAME_SIZE_AI,
+ MpiTbFcManageFrameSizeAi_t, MPI_POINTER pMpiTbFcManageFrameSizeAi_t;
+
+/* union of ActionInfo */
+typedef union _MPI_TB_FC_MANAGE_AI_UNION
+{
+ MPI_TB_FC_MANAGE_BUS_TID_AI BusTid;
+ MPI_TB_FC_MANAGE_PID_AI Port;
+ MPI_TB_FC_MANAGE_FRAME_SIZE_AI FrameSize;
+} MPI_TB_FC_MANAGE_AI_UNION, MPI_POINTER PTR_MPI_TB_FC_MANAGE_AI_UNION,
+ MpiTbFcManageAiUnion_t, MPI_POINTER pMpiTbFcManageAiUnion_t;
+
+typedef struct _MSG_TOOLBOX_FC_MANAGE_REQUEST
+{
+ U8 Tool; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 Action; /* 0Ch */
+ U8 Reserved3; /* 0Dh */
+ U16 Reserved4; /* 0Eh */
+ MPI_TB_FC_MANAGE_AI_UNION ActionInfo; /* 10h */
+} MSG_TOOLBOX_FC_MANAGE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_FC_MANAGE_REQUEST,
+ ToolboxFcManageRequest_t, MPI_POINTER pToolboxFcManageRequest_t;
+
+/* defines for the Action field */
+#define MPI_TB_FC_MANAGE_ACTION_DISC_ALL (0x00)
+#define MPI_TB_FC_MANAGE_ACTION_DISC_PID (0x01)
+#define MPI_TB_FC_MANAGE_ACTION_DISC_BUS_TID (0x02)
+#define MPI_TB_FC_MANAGE_ACTION_SET_MAX_FRAME_SIZE (0x03)
+
+
+/****************************************************************************/
+/* Toolbox Beacon Tool request */
+/****************************************************************************/
+
+typedef struct _MSG_TOOLBOX_BEACON_REQUEST
+{
+ U8 Tool; /* 00h */
+ U8 Reserved; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U8 ConnectNum; /* 0Ch */
+ U8 PortNum; /* 0Dh */
+ U8 Reserved3; /* 0Eh */
+ U8 Flags; /* 0Fh */
+} MSG_TOOLBOX_BEACON_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_BEACON_REQUEST,
+ ToolboxBeaconRequest_t, MPI_POINTER pToolboxBeaconRequest_t;
+
+#define MPI_TOOLBOX_FLAGS_BEACON_MODE_OFF (0x00)
+#define MPI_TOOLBOX_FLAGS_BEACON_MODE_ON (0x01)
+
+
+/****************************************************************************/
+/* Diagnostic Buffer Post request */
+/****************************************************************************/
+
+typedef struct _MSG_DIAG_BUFFER_POST_REQUEST
+{
+ U8 TraceLevel; /* 00h */
+ U8 BufferType; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved1; /* 04h */
+ U8 Reserved2; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U32 ExtendedType; /* 0Ch */
+ U32 BufferLength; /* 10h */
+ U32 ProductSpecific[4]; /* 14h */
+ U32 Reserved3; /* 24h */
+ U64 BufferAddress; /* 28h */
+} MSG_DIAG_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_DIAG_BUFFER_POST_REQUEST,
+ DiagBufferPostRequest_t, MPI_POINTER pDiagBufferPostRequest_t;
+
+#define MPI_DIAG_BUF_TYPE_TRACE (0x00)
+#define MPI_DIAG_BUF_TYPE_SNAPSHOT (0x01)
+#define MPI_DIAG_BUF_TYPE_EXTENDED (0x02)
+/* count of the number of buffer types */
+#define MPI_DIAG_BUF_TYPE_COUNT (0x03)
+
+#define MPI_DIAG_EXTENDED_QTAG (0x00000001)
+
+
+/* Diagnostic Buffer Post reply */
+typedef struct _MSG_DIAG_BUFFER_POST_REPLY
+{
+ U8 Reserved1; /* 00h */
+ U8 BufferType; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved2; /* 04h */
+ U8 Reserved3; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved4; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+ U32 TransferLength; /* 14h */
+} MSG_DIAG_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_DIAG_BUFFER_POST_REPLY,
+ DiagBufferPostReply_t, MPI_POINTER pDiagBufferPostReply_t;
+
+
+/****************************************************************************/
+/* Diagnostic Release request */
+/****************************************************************************/
+
+typedef struct _MSG_DIAG_RELEASE_REQUEST
+{
+ U8 Reserved1; /* 00h */
+ U8 BufferType; /* 01h */
+ U8 ChainOffset; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved2; /* 04h */
+ U8 Reserved3; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+} MSG_DIAG_RELEASE_REQUEST, MPI_POINTER PTR_MSG_DIAG_RELEASE_REQUEST,
+ DiagReleaseRequest_t, MPI_POINTER pDiagReleaseRequest_t;
+
+
+/* Diagnostic Release reply */
+typedef struct _MSG_DIAG_RELEASE_REPLY
+{
+ U8 Reserved1; /* 00h */
+ U8 BufferType; /* 01h */
+ U8 MsgLength; /* 02h */
+ U8 Function; /* 03h */
+ U16 Reserved2; /* 04h */
+ U8 Reserved3; /* 06h */
+ U8 MsgFlags; /* 07h */
+ U32 MsgContext; /* 08h */
+ U16 Reserved4; /* 0Ch */
+ U16 IOCStatus; /* 0Eh */
+ U32 IOCLogInfo; /* 10h */
+} MSG_DIAG_RELEASE_REPLY, MPI_POINTER PTR_MSG_DIAG_RELEASE_REPLY,
+ DiagReleaseReply_t, MPI_POINTER pDiagReleaseReply_t;
+
+
+#endif
+
+
diff --git a/drivers/message/fusion/lsi/mpi_type.h b/drivers/message/fusion/lsi/mpi_type.h
new file mode 100644
index 00000000..888b26db
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_type.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2000-2008 LSI Corporation.
+ *
+ *
+ * Name: mpi_type.h
+ * Title: MPI Basic type definitions
+ * Creation Date: June 6, 2000
+ *
+ * mpi_type.h Version: 01.05.02
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 05-08-00 00.10.01 Original release for 0.10 spec dated 4/26/2000.
+ * 06-06-00 01.00.01 Update version number for 1.0 release.
+ * 11-02-00 01.01.01 Original release for post 1.0 work
+ * 02-20-01 01.01.02 Added define and ifdef for MPI_POINTER.
+ * 08-08-01 01.02.01 Original release for v1.2 work.
+ * 05-11-04 01.03.01 Original release for MPI v1.3.
+ * 08-19-04 01.05.01 Original release for MPI v1.5.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_TYPE_H
+#define MPI_TYPE_H
+
+
+/*******************************************************************************
+ * Define MPI_POINTER if it hasn't already been defined. By default MPI_POINTER
+ * is defined to be a near pointer. MPI_POINTER can be defined as a far pointer
+ * by defining MPI_POINTER as "far *" before this header file is included.
+ */
+#ifndef MPI_POINTER
+#define MPI_POINTER *
+#endif
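+
+/*
+ * Illustrative sketch (not part of the original header): a consumer that
+ * needs far pointers would override the default before including this file:
+ *
+ *   #define MPI_POINTER far *
+ *   #include "mpi_type.h"
+ */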
+
+
+/*****************************************************************************
+*
+* B a s i c T y p e s
+*
+*****************************************************************************/
+
+typedef signed char S8;
+typedef unsigned char U8;
+typedef signed short S16;
+typedef unsigned short U16;
+
+
+typedef int32_t S32;
+typedef u_int32_t U32;
+
+typedef struct _S64
+{
+ U32 Low;
+ S32 High;
+} S64;
+
+typedef struct _U64
+{
+ U32 Low;
+ U32 High;
+} U64;
+
+
+/****************************************************************************/
+/* Pointers */
+/****************************************************************************/
+
+typedef S8 *PS8;
+typedef U8 *PU8;
+typedef S16 *PS16;
+typedef U16 *PU16;
+typedef S32 *PS32;
+typedef U32 *PU32;
+typedef S64 *PS64;
+typedef U64 *PU64;
+
+
+#endif
+
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
new file mode 100644
index 00000000..7956a10f
--- /dev/null
+++ b/drivers/message/fusion/mptbase.c
@@ -0,0 +1,8483 @@
+/*
+ * linux/drivers/message/fusion/mptbase.c
+ * This is the Fusion MPT base driver which supports multiple
+ * (SCSI + LAN) specialized protocol drivers.
+ * For use with LSI PCI chip/adapter(s)
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h> /* needed for in_interrupt() proto */
+#include <linux/dma-mapping.h>
+#include <asm/io.h>
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+
+#include "mptbase.h"
+#include "lsi/mpi_log_fc.h"
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#define my_NAME "Fusion MPT base driver"
+#define my_VERSION MPT_LINUX_VERSION_COMMON
+#define MYNAM "mptbase"
+
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(my_VERSION);
+
+/*
+ * cmd line parameters
+ */
+
+static int mpt_msi_enable_spi;
+module_param(mpt_msi_enable_spi, int, 0);
+MODULE_PARM_DESC(mpt_msi_enable_spi,
+ " Enable MSI Support for SPI controllers (default=0)");
+
+static int mpt_msi_enable_fc;
+module_param(mpt_msi_enable_fc, int, 0);
+MODULE_PARM_DESC(mpt_msi_enable_fc,
+ " Enable MSI Support for FC controllers (default=0)");
+
+static int mpt_msi_enable_sas;
+module_param(mpt_msi_enable_sas, int, 0);
+MODULE_PARM_DESC(mpt_msi_enable_sas,
+ " Enable MSI Support for SAS controllers (default=0)");
+
+static int mpt_channel_mapping;
+module_param(mpt_channel_mapping, int, 0);
+MODULE_PARM_DESC(mpt_channel_mapping, " Mapping id's to channels (default=0)");
+
+static int mpt_debug_level;
+static int mpt_set_debug_level(const char *val, struct kernel_param *kp);
+module_param_call(mpt_debug_level, mpt_set_debug_level, param_get_int,
+ &mpt_debug_level, 0600);
+MODULE_PARM_DESC(mpt_debug_level,
+ " debug level - refer to mptdebug.h - (default=0)");
+
+int mpt_fwfault_debug;
+EXPORT_SYMBOL(mpt_fwfault_debug);
+module_param(mpt_fwfault_debug, int, 0600);
+MODULE_PARM_DESC(mpt_fwfault_debug,
+ "Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
+
+static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS][50];
+
+#ifdef MFCNT
+static int mfcounter = 0;
+#define PRINT_MF_COUNT 20000
+#endif
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Public data...
+ */
+
+#define WHOINIT_UNKNOWN 0xAA
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Private data...
+ */
+ /* Adapter linked list */
+LIST_HEAD(ioc_list);
+ /* Callback lookup table */
+static MPT_CALLBACK MptCallbacks[MPT_MAX_PROTOCOL_DRIVERS];
+ /* Protocol driver class lookup table */
+static int MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS];
+ /* Event handler lookup table */
+static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
+ /* Reset handler lookup table */
+static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
+static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
+
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *mpt_proc_root_dir;
+#endif
+
+/*
+ * Driver Callback Indexes
+ */
+static u8 mpt_base_index = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 last_drv_idx;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Forward protos...
+ */
+static irqreturn_t mpt_interrupt(int irq, void *bus_id);
+static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+ MPT_FRAME_HDR *reply);
+static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
+ u32 *req, int replyBytes, u16 *u16reply, int maxwait,
+ int sleepFlag);
+static int mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag);
+static void mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev);
+static void mpt_adapter_disable(MPT_ADAPTER *ioc);
+static void mpt_adapter_dispose(MPT_ADAPTER *ioc);
+
+static void MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
+static int MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
+static int GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
+static int GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
+static int SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
+static int SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
+static int mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
+static int mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag);
+static int mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
+static int KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
+static int SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
+static int PrimeIocFifos(MPT_ADAPTER *ioc);
+static int WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
+static int WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
+static int WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
+static int GetLanConfigPages(MPT_ADAPTER *ioc);
+static int GetIoUnitPage2(MPT_ADAPTER *ioc);
+int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
+static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
+static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
+static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
+static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
+static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
+static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
+ int sleepFlag);
+static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
+static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
+static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
+
+#ifdef CONFIG_PROC_FS
+static const struct file_operations mpt_summary_proc_fops;
+static const struct file_operations mpt_version_proc_fops;
+static const struct file_operations mpt_iocinfo_proc_fops;
+#endif
+static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
+
+static int ProcessEventNotification(MPT_ADAPTER *ioc,
+ EventNotificationReply_t *evReply, int *evHandlers);
+static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
+static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
+static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
+static void mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info , u8 cb_idx);
+static int mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
+static void mpt_inactive_raid_list_free(MPT_ADAPTER *ioc);
+
+/* module entry point */
+static int __init fusion_init (void);
+static void __exit fusion_exit (void);
+
+#define CHIPREG_READ32(addr) readl_relaxed(addr)
+#define CHIPREG_READ32_dmasync(addr) readl(addr)
+#define CHIPREG_WRITE32(addr,val) writel(val, addr)
+#define CHIPREG_PIO_WRITE32(addr,val) outl(val, (unsigned long)addr)
+#define CHIPREG_PIO_READ32(addr) inl((unsigned long)addr)
+
+static void
+pci_disable_io_access(struct pci_dev *pdev)
+{
+ u16 command_reg;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
+ command_reg &= ~1;
+ pci_write_config_word(pdev, PCI_COMMAND, command_reg);
+}
+
+static void
+pci_enable_io_access(struct pci_dev *pdev)
+{
+ u16 command_reg;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &command_reg);
+ command_reg |= 1;
+ pci_write_config_word(pdev, PCI_COMMAND, command_reg);
+}
+
+static int mpt_set_debug_level(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ MPT_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ list_for_each_entry(ioc, &ioc_list, list)
+ ioc->debug_level = mpt_debug_level;
+ return 0;
+}
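+
+/*
+ * Illustrative note (not part of the original source): because
+ * mpt_debug_level is registered through module_param_call() with mode
+ * 0600, root can change it at runtime, e.g.
+ *
+ *   echo <mask from mptdebug.h> > /sys/module/mptbase/parameters/mpt_debug_level
+ *
+ * and mpt_set_debug_level() above propagates the new value to every
+ * adapter on ioc_list.
+ */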
+
+/**
+ * mpt_get_cb_idx - obtain cb_idx for registered driver
+ * @dclass: driver class enum
+ *
+ * Returns cb_idx, or zero if it wasn't found.
+ **/
+static u8
+mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
+{
+ u8 cb_idx;
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--)
+ if (MptDriverClass[cb_idx] == dclass)
+ return cb_idx;
+ return 0;
+}
+
+/**
+ * mpt_is_discovery_complete - determine if discovery has completed
+ * @ioc: per adapter instance
+ *
+ * Returns 1 when discovery completed, else zero.
+ */
+static int
+mpt_is_discovery_complete(MPT_ADAPTER *ioc)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasIOUnitPage0_t *buffer;
+ dma_addr_t dma_handle;
+ int rc = 0;
+
+ memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
+ memset(&cfg, 0, sizeof(CONFIGPARMS));
+ hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+ if ((mpt_config(ioc, &cfg)))
+ goto out;
+ if (!hdr.ExtPageLength)
+ goto out;
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer)
+ goto out;
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if ((mpt_config(ioc, &cfg)))
+ goto out_free_consistent;
+
+ if (!(buffer->PhyData[0].PortFlags &
+ MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
+ rc = 1;
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return rc;
+}
+
+/**
+ * mpt_fault_reset_work - work performed on workq after ioc fault
+ * @work: input argument, used to derive ioc
+ *
+ **/
+static void
+mpt_fault_reset_work(struct work_struct *work)
+{
+ MPT_ADAPTER *ioc =
+ container_of(work, MPT_ADAPTER, fault_reset_work.work);
+ u32 ioc_raw_state;
+ int rc;
+ unsigned long flags;
+
+ if (ioc->ioc_reset_in_progress || !ioc->active)
+ goto out;
+
+ ioc_raw_state = mpt_GetIocState(ioc, 0);
+ if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
+ printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
+ ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
+ printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
+ ioc->name, __func__);
+ rc = mpt_HardResetHandler(ioc, CAN_SLEEP);
+ printk(MYIOC_s_WARN_FMT "%s: HardReset: %s\n", ioc->name,
+ __func__, (rc == 0) ? "success" : "failed");
+ ioc_raw_state = mpt_GetIocState(ioc, 0);
+ if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
+ printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
+ "reset (%04xh)\n", ioc->name, ioc_raw_state &
+ MPI_DOORBELL_DATA_MASK);
+ } else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
+ if ((mpt_is_discovery_complete(ioc))) {
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
+ "discovery_quiesce_io flag\n", ioc->name));
+ ioc->sas_discovery_quiesce_io = 0;
+ }
+ }
+
+ out:
+ /*
+ * Take turns polling alternate controller
+ */
+ if (ioc->alt_ioc)
+ ioc = ioc->alt_ioc;
+
+ /* rearm the timer */
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->reset_work_q)
+ queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
+ msecs_to_jiffies(MPT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+}
+
+
+/*
+ * Process turbo (context) reply...
+ */
+static void
+mpt_turbo_reply(MPT_ADAPTER *ioc, u32 pa)
+{
+ MPT_FRAME_HDR *mf = NULL;
+ MPT_FRAME_HDR *mr = NULL;
+ u16 req_idx = 0;
+ u8 cb_idx;
+
+ dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got TURBO reply req_idx=%08x\n",
+ ioc->name, pa));
+
+ switch (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT) {
+ case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
+ req_idx = pa & 0x0000FFFF;
+ cb_idx = (pa & 0x00FF0000) >> 16;
+ mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+ break;
+ case MPI_CONTEXT_REPLY_TYPE_LAN:
+ cb_idx = mpt_get_cb_idx(MPTLAN_DRIVER);
+ /*
+ * Blindly setting mf to NULL here was fatal once lan_reply
+ * returned "freeme".  The fix is combined with an optimization:
+ * explicitly check for the case where lan_reply would simply
+ * return 1 and do nothing else.  For that case skip the callback,
+ * but set up the proper mf value first.
+ */
+ if ((pa & 0x58000000) == 0x58000000) {
+ req_idx = pa & 0x0000FFFF;
+ mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+ mpt_free_msg_frame(ioc, mf);
+ mb();
+ return;
+ }
+ mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
+ break;
+ case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
+ cb_idx = mpt_get_cb_idx(MPTSTM_DRIVER);
+ mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
+ break;
+ default:
+ cb_idx = 0;
+ BUG();
+ }
+
+ /* Check for (valid) IO callback! */
+ if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
+ MptCallbacks[cb_idx] == NULL) {
+ printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
+ __func__, ioc->name, cb_idx);
+ goto out;
+ }
+
+ if (MptCallbacks[cb_idx](ioc, mf, mr))
+ mpt_free_msg_frame(ioc, mf);
+ out:
+ mb();
+}
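+
+/*
+ * Illustrative sketch (not part of the original source): layout of a
+ * MPI_CONTEXT_REPLY_TYPE_SCSI_INIT turbo reply word as decoded above,
+ * using a made-up value.
+ *
+ *   pa = 0x00050012
+ *     req_idx = pa & 0x0000FFFF          == 0x0012
+ *     cb_idx  = (pa & 0x00FF0000) >> 16  == 0x05
+ *
+ * so the reply is dispatched to MptCallbacks[5] with request frame
+ * MPT_INDEX_2_MFPTR(ioc, 0x12).
+ */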
+
+static void
+mpt_reply(MPT_ADAPTER *ioc, u32 pa)
+{
+ MPT_FRAME_HDR *mf;
+ MPT_FRAME_HDR *mr;
+ u16 req_idx;
+ u8 cb_idx;
+ int freeme;
+
+ u32 reply_dma_low;
+ u16 ioc_stat;
+
+ /* Non-TURBO (address) reply: recover the reply frame address by
+ * shifting the FIFO value left by 1 (dropping MPI_ADDRESS_REPLY_A_BIT).
+ */
+
+ /* Map the DMA address of the reply header to a CPU address.
+ * pa is 32 bits, but the DMA address may be 32 or 64 bits;
+ * compute the offset using only the low addresses.
+ */
+
+ reply_dma_low = (pa <<= 1);
+ mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
+ (reply_dma_low - ioc->reply_frames_low_dma));
+
+ req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
+ cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
+ mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+
+ dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n",
+ ioc->name, mr, req_idx, cb_idx, mr->u.hdr.Function));
+ DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mr);
+
+ /* Check/log IOC log info
+ */
+ ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
+ if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
+ u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
+ if (ioc->bus_type == FC)
+ mpt_fc_log_info(ioc, log_info);
+ else if (ioc->bus_type == SPI)
+ mpt_spi_log_info(ioc, log_info);
+ else if (ioc->bus_type == SAS)
+ mpt_sas_log_info(ioc, log_info, cb_idx);
+ }
+
+ if (ioc_stat & MPI_IOCSTATUS_MASK)
+ mpt_iocstatus_info(ioc, (u32)ioc_stat, mf);
+
+ /* Check for (valid) IO callback! */
+ if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS ||
+ MptCallbacks[cb_idx] == NULL) {
+ printk(MYIOC_s_WARN_FMT "%s: Invalid cb_idx (%d)!\n",
+ __func__, ioc->name, cb_idx);
+ freeme = 0;
+ goto out;
+ }
+
+ freeme = MptCallbacks[cb_idx](ioc, mf, mr);
+
+ out:
+ /* Flush (non-TURBO) reply with a WRITE! */
+ CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
+
+ if (freeme)
+ mpt_free_msg_frame(ioc, mf);
+ mb();
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
+ * @irq: irq number (not used)
+ * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
+ *
+ * This routine is registered via the request_irq() kernel API call,
+ * and handles all interrupts generated from a specific MPT adapter
+ * (also referred to as an IO Controller or IOC).
+ * This routine must clear the interrupt from the adapter and does
+ * so by reading the reply FIFO. Multiple replies may be processed
+ * per single call to this routine.
+ *
+ * This routine handles register-level access of the adapter but
+ * dispatches (calls) a protocol-specific callback routine to handle
+ * the protocol-specific details of the MPT request completion.
+ */
+static irqreturn_t
+mpt_interrupt(int irq, void *bus_id)
+{
+ MPT_ADAPTER *ioc = bus_id;
+ u32 pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
+
+ if (pa == 0xFFFFFFFF)
+ return IRQ_NONE;
+
+ /*
+ * Drain the reply FIFO!
+ */
+ do {
+ if (pa & MPI_ADDRESS_REPLY_A_BIT)
+ mpt_reply(ioc, pa);
+ else
+ mpt_turbo_reply(ioc, pa);
+ pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo);
+ } while (pa != 0xFFFFFFFF);
+
+ return IRQ_HANDLED;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptbase_reply - MPT base driver's callback routine
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @req: Pointer to original MPT request frame
+ * @reply: Pointer to MPT reply frame (NULL if TurboReply)
+ *
+ * MPT base driver's callback routine; all base driver
+ * "internal" request/reply processing is routed here.
+ * Currently used for EventNotification and EventAck handling.
+ *
+ * Returns 1 indicating original alloc'd request frame ptr
+ * should be freed, or 0 if it shouldn't.
+ */
+static int
+mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
+{
+ EventNotificationReply_t *pEventReply;
+ u8 event;
+ int evHandlers;
+ int freereq = 1;
+
+ switch (reply->u.hdr.Function) {
+ case MPI_FUNCTION_EVENT_NOTIFICATION:
+ pEventReply = (EventNotificationReply_t *)reply;
+ evHandlers = 0;
+ ProcessEventNotification(ioc, pEventReply, &evHandlers);
+ event = le32_to_cpu(pEventReply->Event) & 0xFF;
+ if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
+ freereq = 0;
+ if (event != MPI_EVENT_EVENT_CHANGE)
+ break;
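+ /* an MPI_EVENT_EVENT_CHANGE reply deliberately falls through
+ * to the command completion handling below */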
+ case MPI_FUNCTION_CONFIG:
+ case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
+ ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+ if (reply) {
+ ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->mptbase_cmds.reply, reply,
+ min(MPT_DEFAULT_FRAME_SIZE,
+ 4 * reply->u.reply.MsgLength));
+ }
+ if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->mptbase_cmds.done);
+ } else
+ freereq = 0;
+ if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
+ freereq = 1;
+ break;
+ case MPI_FUNCTION_EVENT_ACK:
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "EventAck reply received\n", ioc->name));
+ break;
+ default:
+ printk(MYIOC_s_ERR_FMT
+ "Unexpected msg function (=%02Xh) reply received!\n",
+ ioc->name, reply->u.hdr.Function);
+ break;
+ }
+
+ /*
+ * Conditionally tell caller to free the original
+ * EventNotification/EventAck/unexpected request frame!
+ */
+ return freereq;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_register - Register protocol-specific main callback handler.
+ * @cbfunc: callback function pointer
+ * @dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
+ * @func_name: call function's name
+ *
+ * This routine is called by a protocol-specific driver (SCSI host,
+ * LAN, SCSI target) to register its reply callback routine. Each
+ * protocol-specific driver must do this before it will be able to
+ * use any IOC resources, such as obtaining request frames.
+ *
+ * NOTES: The SCSI protocol driver currently calls this routine thrice
+ * in order to register separate callbacks; one for "normal" SCSI IO;
+ * one for MptScsiTaskMgmt requests; one for Scan/DV requests.
+ *
+ * Returns u8 valued "handle" in the range (and S.O.D. order)
+ * {N,...,7,6,5,...,1} if successful.
+ * A return value of MPT_MAX_PROTOCOL_DRIVERS (including zero!) should be
+ * considered an error by the caller.
+ */
+u8
+mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
+{
+ u8 cb_idx;
+ last_drv_idx = MPT_MAX_PROTOCOL_DRIVERS;
+
+ /*
+ * Search for empty callback slot in this order: {N,...,7,6,5,...,1}
+ * (slot/handle 0 is reserved!)
+ */
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptCallbacks[cb_idx] == NULL) {
+ MptCallbacks[cb_idx] = cbfunc;
+ MptDriverClass[cb_idx] = dclass;
+ MptEvHandlers[cb_idx] = NULL;
+ last_drv_idx = cb_idx;
+ strlcpy(MptCallbacksName[cb_idx], func_name,
+ sizeof(MptCallbacksName[cb_idx]));
+ break;
+ }
+ }
+
+ return last_drv_idx;
+}
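+
+/*
+ * Illustrative sketch (not part of the original source): how a protocol
+ * driver typically hooks itself up at module init time.  The names
+ * my_reply_cb, my_event_cb, my_reset_cb and my_drv_idx are hypothetical.
+ *
+ *   my_drv_idx = mpt_register(my_reply_cb, MPTLAN_DRIVER, "my_reply_cb");
+ *   if (!my_drv_idx || my_drv_idx == MPT_MAX_PROTOCOL_DRIVERS)
+ *           return -EBUSY;          (no free callback slot)
+ *   mpt_event_register(my_drv_idx, my_event_cb);
+ *   mpt_reset_register(my_drv_idx, my_reset_cb);
+ */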
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_deregister - Deregister a protocol drivers resources.
+ * @cb_idx: previously registered callback handle
+ *
+ * Each protocol-specific driver should call this routine when its
+ * module is unloaded.
+ */
+void
+mpt_deregister(u8 cb_idx)
+{
+ if (cb_idx && (cb_idx < MPT_MAX_PROTOCOL_DRIVERS)) {
+ MptCallbacks[cb_idx] = NULL;
+ MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
+ MptEvHandlers[cb_idx] = NULL;
+
+ last_drv_idx++;
+ }
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_event_register - Register protocol-specific event callback handler.
+ * @cb_idx: previously registered (via mpt_register) callback handle
+ * @ev_cbfunc: callback function
+ *
+ * This routine can be called by one or more protocol-specific drivers
+ * if/when they choose to be notified of MPT events.
+ *
+ * Returns 0 for success.
+ */
+int
+mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc)
+{
+ if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+ return -1;
+
+ MptEvHandlers[cb_idx] = ev_cbfunc;
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_event_deregister - Deregister protocol-specific event callback handler
+ * @cb_idx: previously registered callback handle
+ *
+ * Each protocol-specific driver should call this routine
+ * when it does not (or can no longer) handle events,
+ * or when its module is unloaded.
+ */
+void
+mpt_event_deregister(u8 cb_idx)
+{
+ if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+ return;
+
+ MptEvHandlers[cb_idx] = NULL;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_reset_register - Register protocol-specific IOC reset handler.
+ * @cb_idx: previously registered (via mpt_register) callback handle
+ * @reset_func: reset function
+ *
+ * This routine can be called by one or more protocol-specific drivers
+ * if/when they choose to be notified of IOC resets.
+ *
+ * Returns 0 for success.
+ */
+int
+mpt_reset_register(u8 cb_idx, MPT_RESETHANDLER reset_func)
+{
+ if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+ return -1;
+
+ MptResetHandlers[cb_idx] = reset_func;
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_reset_deregister - Deregister protocol-specific IOC reset handler.
+ * @cb_idx: previously registered callback handle
+ *
+ * Each protocol-specific driver should call this routine
+ * when it does not (or can no longer) handle IOC reset handling,
+ * or when its module is unloaded.
+ */
+void
+mpt_reset_deregister(u8 cb_idx)
+{
+ if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+ return;
+
+ MptResetHandlers[cb_idx] = NULL;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_device_driver_register - Register device driver hooks
+ * @dd_cbfunc: driver callbacks struct
+ * @cb_idx: MPT protocol driver index
+ */
+int
+mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, u8 cb_idx)
+{
+ MPT_ADAPTER *ioc;
+ const struct pci_device_id *id;
+
+ if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+ return -EINVAL;
+
+ MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;
+
+ /* call per pci device probe entry point */
+ list_for_each_entry(ioc, &ioc_list, list) {
+ id = ioc->pcidev->driver ?
+ ioc->pcidev->driver->id_table : NULL;
+ if (dd_cbfunc->probe)
+ dd_cbfunc->probe(ioc->pcidev, id);
+ }
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_device_driver_deregister - DeRegister device driver hooks
+ * @cb_idx: MPT protocol driver index
+ */
+void
+mpt_device_driver_deregister(u8 cb_idx)
+{
+ struct mpt_pci_driver *dd_cbfunc;
+ MPT_ADAPTER *ioc;
+
+ if (!cb_idx || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+ return;
+
+ dd_cbfunc = MptDeviceDriverHandlers[cb_idx];
+
+ list_for_each_entry(ioc, &ioc_list, list) {
+ if (dd_cbfunc->remove)
+ dd_cbfunc->remove(ioc->pcidev);
+ }
+
+ MptDeviceDriverHandlers[cb_idx] = NULL;
+}
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_get_msg_frame - Obtain an MPT request frame from the pool
+ * @cb_idx: Handle of registered MPT protocol driver
+ * @ioc: Pointer to MPT adapter structure
+ *
+ * Obtain an MPT request frame from the pool (of 1024) that is
+ * allocated per MPT adapter.
+ *
+ * Returns pointer to a MPT request frame or %NULL if none are available
+ * or IOC is not active.
+ */
+MPT_FRAME_HDR*
+mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc)
+{
+ MPT_FRAME_HDR *mf;
+ unsigned long flags;
+ u16 req_idx; /* Request index */
+
+ /* validate handle and ioc identifier */
+
+#ifdef MFCNT
+ if (!ioc->active)
+ printk(MYIOC_s_WARN_FMT "IOC Not Active! mpt_get_msg_frame "
+ "returning NULL!\n", ioc->name);
+#endif
+
+ /* If interrupts are not attached, do not return a request frame */
+ if (!ioc->active)
+ return NULL;
+
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+ if (!list_empty(&ioc->FreeQ)) {
+ int req_offset;
+
+ mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
+ u.frame.linkage.list);
+ list_del(&mf->u.frame.linkage.list);
+ mf->u.frame.linkage.arg1 = 0;
+ mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
+ req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
+ /* u16! */
+ req_idx = req_offset / ioc->req_sz;
+ mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
+ mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
+ /* Default, will be changed if necessary in SG generation */
+ ioc->RequestNB[req_idx] = ioc->NB_for_64_byte_frame;
+#ifdef MFCNT
+ ioc->mfcnt++;
+#endif
+ }
+ else
+ mf = NULL;
+ spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+#ifdef MFCNT
+ if (mf == NULL)
+ printk(MYIOC_s_WARN_FMT "IOC Active. No free Msg Frames! "
+ "Count 0x%x Max 0x%x\n", ioc->name, ioc->mfcnt,
+ ioc->req_depth);
+ mfcounter++;
+ if (mfcounter == PRINT_MF_COUNT)
+ printk(MYIOC_s_INFO_FMT "MF Count 0x%x Max 0x%x \n", ioc->name,
+ ioc->mfcnt, ioc->req_depth);
+#endif
+
+ dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_get_msg_frame(%d,%d), got mf=%p\n",
+ ioc->name, cb_idx, ioc->id, mf));
+ return mf;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC
+ * @cb_idx: Handle of registered MPT protocol driver
+ * @ioc: Pointer to MPT adapter structure
+ * @mf: Pointer to MPT request frame
+ *
+ * This routine posts an MPT request frame to the request post FIFO of a
+ * specific MPT adapter.
+ */
+void
+mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
+{
+ u32 mf_dma_addr;
+ int req_offset;
+ u16 req_idx; /* Request index */
+
+ /* ensure values are reset properly! */
+ mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx; /* byte */
+ req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
+ /* u16! */
+ req_idx = req_offset / ioc->req_sz;
+ mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
+ mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
+
+ DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);
+
+ mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];
+ dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d "
+ "RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx,
+ ioc->RequestNB[req_idx]));
+ CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
+}
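+
+/*
+ * Illustrative sketch (not part of the original source): the usual request
+ * life cycle as seen by a protocol driver (my_drv_idx is hypothetical).
+ *
+ *   MPT_FRAME_HDR *mf = mpt_get_msg_frame(my_drv_idx, ioc);
+ *   if (!mf)
+ *           return -EAGAIN;         (FreeQ exhausted or IOC inactive)
+ *   ... build the MPI request in *mf and add its SGEs ...
+ *   mpt_put_msg_frame(my_drv_idx, ioc, mf);
+ *
+ * Completion arrives via the callback registered with mpt_register();
+ * returning nonzero from that callback releases mf back to the FreeQ.
+ */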
+
+/**
+ * mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame
+ * @cb_idx: Handle of registered MPT protocol driver
+ * @ioc: Pointer to MPT adapter structure
+ * @mf: Pointer to MPT request frame
+ *
+ * Send a protocol-specific MPT request frame to an IOC using
+ * hi-priority request queue.
+ *
+ * This routine posts an MPT request frame to the request post FIFO of a
+ * specific MPT adapter.
+ **/
+void
+mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
+{
+ u32 mf_dma_addr;
+ int req_offset;
+ u16 req_idx; /* Request index */
+
+ /* ensure values are reset properly! */
+ mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
+ req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
+ req_idx = req_offset / ioc->req_sz;
+ mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(req_idx);
+ mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
+
+ DBG_DUMP_PUT_MSG_FRAME(ioc, (u32 *)mf);
+
+ mf_dma_addr = (ioc->req_frames_low_dma + req_offset);
+ dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mf_dma_addr=%x req_idx=%d\n",
+ ioc->name, mf_dma_addr, req_idx));
+ CHIPREG_WRITE32(&ioc->chip->RequestHiPriFifo, mf_dma_addr);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_free_msg_frame - Place MPT request frame back on FreeQ.
+ * @ioc: Pointer to MPT adapter structure
+ * @mf: Pointer to MPT request frame
+ *
+ * This routine places a MPT request frame back on the MPT adapter's
+ * FreeQ.
+ */
+void
+mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
+{
+ unsigned long flags;
+
+ /* Put Request back on FreeQ! */
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+ if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
+ goto out;
+ /* signature to know if this mf is freed */
+ mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
+ list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
+#ifdef MFCNT
+ ioc->mfcnt--;
+#endif
+ out:
+ spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
+ * @pAddr: virtual address for SGE
+ * @flagslength: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * This routine builds a simple 32 bit scatter-gather element at pAddr.
+ */
+static void
+mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
+{
+ SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
+ pSge->FlagsLength = cpu_to_le32(flagslength);
+ pSge->Address = cpu_to_le32(dma_addr);
+}
+
+/**
+ * mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
+ * @pAddr: virtual address for SGE
+ * @flagslength: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * This routine builds a simple 64 bit scatter-gather element at pAddr.
+ **/
+static void
+mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
+{
+ SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+ pSge->Address.Low = cpu_to_le32
+ (lower_32_bits(dma_addr));
+ pSge->Address.High = cpu_to_le32
+ (upper_32_bits(dma_addr));
+ pSge->FlagsLength = cpu_to_le32
+ ((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
+}
+
+/**
+ * mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr (1078 workaround).
+ * @pAddr: virtual address for SGE
+ * @flagslength: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * This routine places a simple 64 bit SGE at the given location,
+ * applying the LSI 1078 36GB addressing errata workaround when needed.
+ **/
+static void
+mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
+{
+ SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+ u32 tmp;
+
+ pSge->Address.Low = cpu_to_le32
+ (lower_32_bits(dma_addr));
+ tmp = (u32)(upper_32_bits(dma_addr));
+
+ /*
+ * 1078 errata workaround for the 36GB limitation: if the end of
+ * the buffer falls in the 36GB-40GB window (upper 32 address
+ * bits == 9), switch to local (P0M2) addressing and set bit 31
+ * of the high address dword.
+ */
+ if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) {
+ flagslength |=
+ MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
+ tmp |= (1<<31);
+ if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
+ printk(KERN_DEBUG "1078 P0M2 addressing for "
+ "addr = 0x%llx len = %d\n",
+ (unsigned long long)dma_addr,
+ MPI_SGE_LENGTH(flagslength));
+ }
+
+ pSge->Address.High = cpu_to_le32(tmp);
+ pSge->FlagsLength = cpu_to_le32(
+ (flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
+}
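+
+/*
+ * The simple-SGE helpers above are reached through ioc->add_sge:
+ * mpt_attach() and mpt_resume() install the variant matching the
+ * negotiated DMA mask (and the 1078 errata), and callers then build
+ * SGEs through that pointer, e.g. ioc->add_sge(psge, flags_length,
+ * ioc->HostPageBuffer_dma) as in mpt_host_page_alloc() below.
+ */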
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
+ * @pAddr: virtual address for SGE
+ * @next: nextChainOffset value (u32's)
+ * @length: length of next SGL segment
+ * @dma_addr: Physical address
+ *
+ */
+static void
+mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
+{
+ SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
+ pChain->NextChainOffset = next;
+ pChain->Address = cpu_to_le32(dma_addr);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
+ * @pAddr: virtual address for SGE
+ * @next: nextChainOffset value (u32's)
+ * @length: length of next SGL segment
+ * @dma_addr: Physical address
+ *
+ */
+static void
+mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
+{
+ SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
+ u32 tmp = dma_addr & 0xFFFFFFFF;
+
+ pChain->Length = cpu_to_le16(length);
+ pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI_SGE_FLAGS_64_BIT_ADDRESSING);
+
+ pChain->NextChainOffset = next;
+
+ pChain->Address.Low = cpu_to_le32(tmp);
+ tmp = (u32)(upper_32_bits(dma_addr));
+ pChain->Address.High = cpu_to_le32(tmp);
+}
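+
+/*
+ * A chain element carries no data itself: Address points at the next
+ * SGL segment of 'length' bytes, and NextChainOffset gives the offset
+ * (in 32 bit words) of a further chain element within that segment.
+ * As with the simple SGEs, mpt_attach() selects the 32 or 64 bit
+ * variant through ioc->add_chain.
+ */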
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_send_handshake_request - Send MPT request via doorbell handshake method.
+ * @cb_idx: Handle of registered MPT protocol driver
+ * @ioc: Pointer to MPT adapter structure
+ * @reqBytes: Size of the request in bytes
+ * @req: Pointer to MPT request frame
+ * @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
+ *
+ * This routine is used exclusively to send MptScsiTaskMgmt
+ * requests since they are required to be sent via doorbell handshake.
+ *
+ * NOTE: It is the caller's responsibility to byte-swap fields in the
+ * request which are greater than 1 byte in size.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag)
+{
+ int r = 0;
+ u8 *req_as_bytes;
+ int ii;
+
+ /* State is known to be good upon entering
+ * this function so issue the bus reset
+ * request.
+ */
+
+ /*
+ * Emulate what mpt_put_msg_frame() does /wrt to sanity
+ * setting cb_idx/req_idx. But ONLY if this request
+ * is in proper (pre-alloc'd) request buffer range...
+ */
+ ii = MFPTR_2_MPT_INDEX(ioc,(MPT_FRAME_HDR*)req);
+ if (reqBytes >= 12 && ii >= 0 && ii < ioc->req_depth) {
+ MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req;
+ mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii);
+ mf->u.frame.hwhdr.msgctxu.fld.cb_idx = cb_idx;
+ }
+
+ /* Make sure there are no doorbells */
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+ CHIPREG_WRITE32(&ioc->chip->Doorbell,
+ ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
+ ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
+
+ /* Wait for IOC doorbell int */
+ if ((ii = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) {
+ return ii;
+ }
+
+ /* Read doorbell and check for active bit */
+ if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
+ return -5;
+
+ dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_send_handshake_request start, WaitCnt=%d\n",
+ ioc->name, ii));
+
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+ if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
+ return -2;
+ }
+
+ /* Send request via doorbell handshake */
+ req_as_bytes = (u8 *) req;
+ for (ii = 0; ii < reqBytes/4; ii++) {
+ u32 word;
+
+ word = ((req_as_bytes[(ii*4) + 0] << 0) |
+ (req_as_bytes[(ii*4) + 1] << 8) |
+ (req_as_bytes[(ii*4) + 2] << 16) |
+ (req_as_bytes[(ii*4) + 3] << 24));
+ CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
+ if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
+ r = -3;
+ break;
+ }
+ }
+
+ if (r >= 0 && WaitForDoorbellInt(ioc, 10, sleepFlag) >= 0)
+ r = 0;
+ else
+ r = -4;
+
+ /* Make sure there are no doorbells */
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+ return r;
+}
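+
+/*
+ * Handshake sequence, as implemented above: post the HANDSHAKE function
+ * code and the request size (in dwords) to the Doorbell register, wait
+ * for the doorbell interrupt and the ACTIVE bit, then feed the request
+ * one 32 bit word at a time, waiting for a doorbell ACK after each
+ * word.  An illustrative (task management) call might look like:
+ *
+ *	mpt_send_handshake_request(cb_idx, ioc, sizeof(SCSITaskMgmt_t),
+ *				   (u32 *)mf, CAN_SLEEP);
+ */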
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_host_page_access_control - control the IOC's Host Page Buffer access
+ * @ioc: Pointer to MPT adapter structure
+ * @access_control_value: define bits below
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * Provides mechanism for the host driver to control the IOC's
+ * Host Page Buffer access.
+ *
+ * Access Control Value - bits[15:12]
+ * 0h Reserved
+ * 1h Enable Access { MPI_DB_HPBAC_ENABLE_ACCESS }
+ * 2h Disable Access { MPI_DB_HPBAC_DISABLE_ACCESS }
+ * 3h Free Buffer { MPI_DB_HPBAC_FREE_BUFFER }
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+
+static int
+mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
+{
+ int r = 0;
+
+ /* return if in use */
+ if (CHIPREG_READ32(&ioc->chip->Doorbell)
+ & MPI_DOORBELL_ACTIVE)
+ return -1;
+
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+ CHIPREG_WRITE32(&ioc->chip->Doorbell,
+ ((MPI_FUNCTION_HOST_PAGEBUF_ACCESS_CONTROL
+ <<MPI_DOORBELL_FUNCTION_SHIFT) |
+ (access_control_value<<12)));
+
+ /* Wait for IOC to clear Doorbell Status bit */
+ if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
+ return -2;
+ } else
+ return 0;
+}
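+
+/*
+ * mpt_adapter_disable() below, for example, uses this with
+ * MPI_DB_HPBAC_FREE_BUFFER to ask the IOC to give up the Host Page
+ * Buffer before the DMA memory backing it is freed.
+ */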
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_host_page_alloc - allocate system memory for the fw
+ * @ioc: Pointer to MPT adapter structure
+ * @ioc_init: Pointer to the IOCInit request being prepared
+ *
+ * If memory was already allocated on an earlier pass, the same buffer
+ * (and pointer) is handed back to the firmware.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
+{
+ char *psge;
+ int flags_length;
+ u32 host_page_buffer_sz=0;
+
+ if(!ioc->HostPageBuffer) {
+
+ host_page_buffer_sz =
+ le32_to_cpu(ioc->facts.HostPageBufferSGE.FlagsLength) & 0xFFFFFF;
+
+ if(!host_page_buffer_sz)
+ return 0; /* fw doesn't need any host buffers */
+
+ /* spin till we get enough memory */
+ while(host_page_buffer_sz > 0) {
+
+ if((ioc->HostPageBuffer = pci_alloc_consistent(
+ ioc->pcidev,
+ host_page_buffer_sz,
+ &ioc->HostPageBuffer_dma)) != NULL) {
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
+ ioc->name, ioc->HostPageBuffer,
+ (u32)ioc->HostPageBuffer_dma,
+ host_page_buffer_sz));
+ ioc->alloc_total += host_page_buffer_sz;
+ ioc->HostPageBuffer_sz = host_page_buffer_sz;
+ break;
+ }
+
+ host_page_buffer_sz -= (4*1024);
+ }
+ }
+
+ if(!ioc->HostPageBuffer) {
+ printk(MYIOC_s_ERR_FMT
+ "Failed to alloc memory for host_page_buffer!\n",
+ ioc->name);
+ return -999;
+ }
+
+ psge = (char *)&ioc_init->HostPageBufferSGE;
+ flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+ MPI_SGE_FLAGS_HOST_TO_IOC |
+ MPI_SGE_FLAGS_END_OF_BUFFER;
+ flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
+ flags_length |= ioc->HostPageBuffer_sz;
+ ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
+ ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_verify_adapter - Given IOC identifier, set pointer to its adapter structure.
+ * @iocid: IOC unique identifier (integer)
+ * @iocpp: Pointer to pointer to IOC adapter
+ *
+ * Given a unique IOC identifier, set pointer to the associated MPT
+ * adapter structure.
+ *
+ * Returns iocid and sets iocpp if iocid is found.
+ * Returns -1 if iocid is not found.
+ */
+int
+mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
+{
+ MPT_ADAPTER *ioc;
+
+ list_for_each_entry(ioc,&ioc_list,list) {
+ if (ioc->id == iocid) {
+ *iocpp =ioc;
+ return iocid;
+ }
+ }
+
+ *iocpp = NULL;
+ return -1;
+}
+
+/**
+ * mpt_get_product_name - returns product string
+ * @vendor: pci vendor id
+ * @device: pci device id
+ * @revision: pci revision id
+ * @prod_name: string returned
+ *
+ * Returns product string displayed when driver loads,
+ * in /proc/mpt/summary and /sys/class/scsi_host/host<X>/version_product
+ *
+ **/
+static void
+mpt_get_product_name(u16 vendor, u16 device, u8 revision, char *prod_name)
+{
+ char *product_str = NULL;
+
+ if (vendor == PCI_VENDOR_ID_BROCADE) {
+ switch (device)
+ {
+ case MPI_MANUFACTPAGE_DEVICEID_FC949E:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "BRE040 A0";
+ break;
+ case 0x01:
+ product_str = "BRE040 A1";
+ break;
+ default:
+ product_str = "BRE040";
+ break;
+ }
+ break;
+ }
+ goto out;
+ }
+
+ switch (device)
+ {
+ case MPI_MANUFACTPAGE_DEVICEID_FC909:
+ product_str = "LSIFC909 B1";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC919:
+ product_str = "LSIFC919 B0";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC929:
+ product_str = "LSIFC929 B0";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC919X:
+ if (revision < 0x80)
+ product_str = "LSIFC919X A0";
+ else
+ product_str = "LSIFC919XL A1";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC929X:
+ if (revision < 0x80)
+ product_str = "LSIFC929X A0";
+ else
+ product_str = "LSIFC929XL A1";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC939X:
+ product_str = "LSIFC939X A1";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC949X:
+ product_str = "LSIFC949X A1";
+ break;
+ case MPI_MANUFACTPAGE_DEVICEID_FC949E:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSIFC949E A0";
+ break;
+ case 0x01:
+ product_str = "LSIFC949E A1";
+ break;
+ default:
+ product_str = "LSIFC949E";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_53C1030:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSI53C1030 A0";
+ break;
+ case 0x01:
+ product_str = "LSI53C1030 B0";
+ break;
+ case 0x03:
+ product_str = "LSI53C1030 B1";
+ break;
+ case 0x07:
+ product_str = "LSI53C1030 B2";
+ break;
+ case 0x08:
+ product_str = "LSI53C1030 C0";
+ break;
+ case 0x80:
+ product_str = "LSI53C1030T A0";
+ break;
+ case 0x83:
+ product_str = "LSI53C1030T A2";
+ break;
+ case 0x87:
+ product_str = "LSI53C1030T A3";
+ break;
+ case 0xc1:
+ product_str = "LSI53C1020A A1";
+ break;
+ default:
+ product_str = "LSI53C1030";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
+ switch (revision)
+ {
+ case 0x03:
+ product_str = "LSI53C1035 A2";
+ break;
+ case 0x04:
+ product_str = "LSI53C1035 B0";
+ break;
+ default:
+ product_str = "LSI53C1035";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_SAS1064:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSISAS1064 A1";
+ break;
+ case 0x01:
+ product_str = "LSISAS1064 A2";
+ break;
+ case 0x02:
+ product_str = "LSISAS1064 A3";
+ break;
+ case 0x03:
+ product_str = "LSISAS1064 A4";
+ break;
+ default:
+ product_str = "LSISAS1064";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_SAS1064E:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSISAS1064E A0";
+ break;
+ case 0x01:
+ product_str = "LSISAS1064E B0";
+ break;
+ case 0x02:
+ product_str = "LSISAS1064E B1";
+ break;
+ case 0x04:
+ product_str = "LSISAS1064E B2";
+ break;
+ case 0x08:
+ product_str = "LSISAS1064E B3";
+ break;
+ default:
+ product_str = "LSISAS1064E";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_SAS1068:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSISAS1068 A0";
+ break;
+ case 0x01:
+ product_str = "LSISAS1068 B0";
+ break;
+ case 0x02:
+ product_str = "LSISAS1068 B1";
+ break;
+ default:
+ product_str = "LSISAS1068";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_SAS1068E:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSISAS1068E A0";
+ break;
+ case 0x01:
+ product_str = "LSISAS1068E B0";
+ break;
+ case 0x02:
+ product_str = "LSISAS1068E B1";
+ break;
+ case 0x04:
+ product_str = "LSISAS1068E B2";
+ break;
+ case 0x08:
+ product_str = "LSISAS1068E B3";
+ break;
+ default:
+ product_str = "LSISAS1068E";
+ break;
+ }
+ break;
+ case MPI_MANUFACTPAGE_DEVID_SAS1078:
+ switch (revision)
+ {
+ case 0x00:
+ product_str = "LSISAS1078 A0";
+ break;
+ case 0x01:
+ product_str = "LSISAS1078 B0";
+ break;
+ case 0x02:
+ product_str = "LSISAS1078 C0";
+ break;
+ case 0x03:
+ product_str = "LSISAS1078 C1";
+ break;
+ case 0x04:
+ product_str = "LSISAS1078 C2";
+ break;
+ default:
+ product_str = "LSISAS1078";
+ break;
+ }
+ break;
+ }
+
+ out:
+ if (product_str)
+ sprintf(prod_name, "%s", product_str);
+}
+
+/**
+ * mpt_mapresources - map in memory mapped io
+ * @ioc: Pointer to MPT adapter structure
+ *
+ * Maps the adapter's register space and negotiates the DMA mask.
+ * Returns 0 for success, non-zero for failure.
+ **/
+static int
+mpt_mapresources(MPT_ADAPTER *ioc)
+{
+ u8 __iomem *mem;
+ int ii;
+ resource_size_t mem_phys;
+ unsigned long port;
+ u32 msize;
+ u32 psize;
+ u8 revision;
+ int r = -ENODEV;
+ struct pci_dev *pdev;
+
+ pdev = ioc->pcidev;
+ ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_enable_device_mem(pdev)) {
+ printk(MYIOC_s_ERR_FMT "pci_enable_device_mem() "
+ "failed\n", ioc->name);
+ return r;
+ }
+ if (pci_request_selected_regions(pdev, ioc->bars, "mpt")) {
+ printk(MYIOC_s_ERR_FMT "pci_request_selected_regions() with "
+ "MEM failed\n", ioc->name);
+ return r;
+ }
+
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+
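+ /* Prefer a 64 bit DMA mask, but only when the platform actually
+ * needs addressing above 4GB (dma_get_required_mask());
+ * otherwise settle for a 32 bit mask. */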
+ if (sizeof(dma_addr_t) > 4) {
+ const uint64_t required_mask = dma_get_required_mask
+ (&pdev->dev);
+ if (required_mask > DMA_BIT_MASK(32)
+ && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ && !pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(64))) {
+ ioc->dma_mask = DMA_BIT_MASK(64);
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+ ioc->name));
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32))) {
+ ioc->dma_mask = DMA_BIT_MASK(32);
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+ ioc->name));
+ } else {
+ printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
+ ioc->name, pci_name(pdev));
+ pci_release_selected_regions(pdev, ioc->bars);
+ return r;
+ }
+ } else {
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32))) {
+ ioc->dma_mask = DMA_BIT_MASK(32);
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+ ioc->name));
+ } else {
+ printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
+ ioc->name, pci_name(pdev));
+ pci_release_selected_regions(pdev, ioc->bars);
+ return r;
+ }
+ }
+
+ mem_phys = msize = 0;
+ port = psize = 0;
+ for (ii = 0; ii < DEVICE_COUNT_RESOURCE; ii++) {
+ if (pci_resource_flags(pdev, ii) & PCI_BASE_ADDRESS_SPACE_IO) {
+ if (psize)
+ continue;
+ /* Get I/O space! */
+ port = pci_resource_start(pdev, ii);
+ psize = pci_resource_len(pdev, ii);
+ } else {
+ if (msize)
+ continue;
+ /* Get memmap */
+ mem_phys = pci_resource_start(pdev, ii);
+ msize = pci_resource_len(pdev, ii);
+ }
+ }
+ ioc->mem_size = msize;
+
+ mem = NULL;
+ /* Get logical ptr for PciMem0 space */
+ mem = ioremap(mem_phys, msize);
+ if (mem == NULL) {
+ printk(MYIOC_s_ERR_FMT ": ERROR - Unable to map adapter"
+ " memory!\n", ioc->name);
+ pci_release_selected_regions(pdev, ioc->bars);
+ return -EINVAL;
+ }
+ ioc->memmap = mem;
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
+ ioc->name, mem, (unsigned long long)mem_phys));
+
+ ioc->mem_phys = mem_phys;
+ ioc->chip = (SYSIF_REGS __iomem *)mem;
+
+ /* Save Port IO values in case we need to do downloadboot */
+ ioc->pio_mem_phys = port;
+ ioc->pio_chip = (SYSIF_REGS __iomem *)port;
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_attach - Install a PCI intelligent MPT adapter.
+ * @pdev: Pointer to pci_dev structure
+ * @id: PCI device ID information
+ *
+ * This routine performs all the steps necessary to bring the IOC of
+ * an MPT adapter to an OPERATIONAL state. This includes registering
+ * memory regions, registering the interrupt, and allocating request
+ * and reply memory pools.
+ *
+ * This routine also pre-fetches the LAN MAC address of a Fibre Channel
+ * MPT adapter.
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ * TODO: Add support for polled controllers
+ */
+int
+mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ MPT_ADAPTER *ioc;
+ u8 cb_idx;
+ int r = -ENODEV;
+ u8 revision;
+ u8 pcixcmd;
+ static int mpt_ids = 0;
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *dent;
+#endif
+
+ ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
+ if (ioc == NULL) {
+ printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
+ return -ENOMEM;
+ }
+
+ ioc->id = mpt_ids++;
+ sprintf(ioc->name, "ioc%d", ioc->id);
+ dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));
+
+ /*
+ * set initial debug level
+ * (refer to mptdebug.h)
+ *
+ */
+ ioc->debug_level = mpt_debug_level;
+ if (mpt_debug_level)
+ printk(KERN_INFO "mpt_debug_level=%xh\n", mpt_debug_level);
+
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": mpt_adapter_install\n", ioc->name));
+
+ ioc->pcidev = pdev;
+ if (mpt_mapresources(ioc)) {
+ kfree(ioc);
+ return r;
+ }
+
+ /*
+ * Setting up proper handlers for scatter gather handling
+ */
+ if (ioc->dma_mask == DMA_BIT_MASK(64)) {
+ if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
+ ioc->add_sge = &mpt_add_sge_64bit_1078;
+ else
+ ioc->add_sge = &mpt_add_sge_64bit;
+ ioc->add_chain = &mpt_add_chain_64bit;
+ ioc->sg_addr_size = 8;
+ } else {
+ ioc->add_sge = &mpt_add_sge;
+ ioc->add_chain = &mpt_add_chain;
+ ioc->sg_addr_size = 4;
+ }
+ ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
+
+ ioc->alloc_total = sizeof(MPT_ADAPTER);
+ ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */
+ ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
+
+
+ spin_lock_init(&ioc->taskmgmt_lock);
+ mutex_init(&ioc->internal_cmds.mutex);
+ init_completion(&ioc->internal_cmds.done);
+ mutex_init(&ioc->mptbase_cmds.mutex);
+ init_completion(&ioc->mptbase_cmds.done);
+ mutex_init(&ioc->taskmgmt_cmds.mutex);
+ init_completion(&ioc->taskmgmt_cmds.done);
+
+ /* Initialize the event logging.
+ */
+ ioc->eventTypes = 0; /* None */
+ ioc->eventContext = 0;
+ ioc->eventLogSize = 0;
+ ioc->events = NULL;
+
+#ifdef MFCNT
+ ioc->mfcnt = 0;
+#endif
+
+ ioc->sh = NULL;
+ ioc->cached_fw = NULL;
+
+ /* Initialize SCSI Config Data structure
+ */
+ memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
+
+ /* Initialize the fc rport list head.
+ */
+ INIT_LIST_HEAD(&ioc->fc_rports);
+
+ /* Find lookup slot. */
+ INIT_LIST_HEAD(&ioc->list);
+
+
+ /* Initialize workqueue */
+ INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
+
+ snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
+ "mpt_poll_%d", ioc->id);
+ ioc->reset_work_q =
+ create_singlethread_workqueue(ioc->reset_work_q_name);
+ if (!ioc->reset_work_q) {
+ printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
+ ioc->name);
+ pci_release_selected_regions(pdev, ioc->bars);
+ kfree(ioc);
+ return -ENOMEM;
+ }
+
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
+ ioc->name, &ioc->facts, &ioc->pfacts[0]));
+
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+ mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);
+
+ switch (pdev->device)
+ {
+ case MPI_MANUFACTPAGE_DEVICEID_FC939X:
+ case MPI_MANUFACTPAGE_DEVICEID_FC949X:
+ ioc->errata_flag_1064 = 1;
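+ /* Fall through */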
+ case MPI_MANUFACTPAGE_DEVICEID_FC909:
+ case MPI_MANUFACTPAGE_DEVICEID_FC929:
+ case MPI_MANUFACTPAGE_DEVICEID_FC919:
+ case MPI_MANUFACTPAGE_DEVICEID_FC949E:
+ ioc->bus_type = FC;
+ break;
+
+ case MPI_MANUFACTPAGE_DEVICEID_FC929X:
+ if (revision < XL_929) {
+ /* 929X Chip Fix. Set Split transactions level
+ * for PCIX. Set MOST bits to zero.
+ */
+ pci_read_config_byte(pdev, 0x6a, &pcixcmd);
+ pcixcmd &= 0x8F;
+ pci_write_config_byte(pdev, 0x6a, pcixcmd);
+ } else {
+ /* 929XL Chip Fix. Set MMRBC to 0x08.
+ */
+ pci_read_config_byte(pdev, 0x6a, &pcixcmd);
+ pcixcmd |= 0x08;
+ pci_write_config_byte(pdev, 0x6a, pcixcmd);
+ }
+ ioc->bus_type = FC;
+ break;
+
+ case MPI_MANUFACTPAGE_DEVICEID_FC919X:
+ /* 919X Chip Fix. Set Split transactions level
+ * for PCIX. Set MOST bits to zero.
+ */
+ pci_read_config_byte(pdev, 0x6a, &pcixcmd);
+ pcixcmd &= 0x8F;
+ pci_write_config_byte(pdev, 0x6a, pcixcmd);
+ ioc->bus_type = FC;
+ break;
+
+ case MPI_MANUFACTPAGE_DEVID_53C1030:
+ /* 1030 Chip Fix. Disable Split transactions
+ * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
+ */
+ if (revision < C0_1030) {
+ pci_read_config_byte(pdev, 0x6a, &pcixcmd);
+ pcixcmd &= 0x8F;
+ pci_write_config_byte(pdev, 0x6a, pcixcmd);
+ }
+
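+ /* Fall through */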
+ case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
+ ioc->bus_type = SPI;
+ break;
+
+ case MPI_MANUFACTPAGE_DEVID_SAS1064:
+ case MPI_MANUFACTPAGE_DEVID_SAS1068:
+ ioc->errata_flag_1064 = 1;
+ ioc->bus_type = SAS;
+ break;
+
+ case MPI_MANUFACTPAGE_DEVID_SAS1064E:
+ case MPI_MANUFACTPAGE_DEVID_SAS1068E:
+ case MPI_MANUFACTPAGE_DEVID_SAS1078:
+ ioc->bus_type = SAS;
+ break;
+ }
+
+
+ switch (ioc->bus_type) {
+
+ case SAS:
+ ioc->msi_enable = mpt_msi_enable_sas;
+ break;
+
+ case SPI:
+ ioc->msi_enable = mpt_msi_enable_spi;
+ break;
+
+ case FC:
+ ioc->msi_enable = mpt_msi_enable_fc;
+ break;
+
+ default:
+ ioc->msi_enable = 0;
+ break;
+ }
+
+ ioc->fw_events_off = 1;
+
+ if (ioc->errata_flag_1064)
+ pci_disable_io_access(pdev);
+
+ spin_lock_init(&ioc->FreeQlock);
+
+ /* Disable all! */
+ CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+ ioc->active = 0;
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+ /* Set IOC ptr in the pcidev's driver data. */
+ pci_set_drvdata(ioc->pcidev, ioc);
+
+ /* Set lookup ptr. */
+ list_add_tail(&ioc->list, &ioc_list);
+
+ /* Check for "bound ports" (929, 929X, 1030, 1035) to reduce redundant resets.
+ */
+ mpt_detect_bound_ports(ioc, pdev);
+
+ INIT_LIST_HEAD(&ioc->fw_event_list);
+ spin_lock_init(&ioc->fw_event_lock);
+ snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
+ ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
+
+ if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
+ CAN_SLEEP)) != 0){
+ printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
+ ioc->name, r);
+
+ list_del(&ioc->list);
+ if (ioc->alt_ioc)
+ ioc->alt_ioc->alt_ioc = NULL;
+ iounmap(ioc->memmap);
+ if (r != -5)
+ pci_release_selected_regions(pdev, ioc->bars);
+
+ destroy_workqueue(ioc->reset_work_q);
+ ioc->reset_work_q = NULL;
+
+ kfree(ioc);
+ pci_set_drvdata(pdev, NULL);
+ return r;
+ }
+
+ /* call per device driver probe entry point */
+ for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
+ if(MptDeviceDriverHandlers[cb_idx] &&
+ MptDeviceDriverHandlers[cb_idx]->probe) {
+ MptDeviceDriverHandlers[cb_idx]->probe(pdev,id);
+ }
+ }
+
+#ifdef CONFIG_PROC_FS
+ /*
+ * Create "/proc/mpt/iocN" subdirectory entry for each MPT adapter.
+ */
+ dent = proc_mkdir(ioc->name, mpt_proc_root_dir);
+ if (dent) {
+ proc_create_data("info", S_IRUGO, dent, &mpt_iocinfo_proc_fops, ioc);
+ proc_create_data("summary", S_IRUGO, dent, &mpt_summary_proc_fops, ioc);
+ }
+#endif
+
+ if (!ioc->alt_ioc)
+ queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
+ msecs_to_jiffies(MPT_POLLING_INTERVAL));
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_detach - Remove a PCI intelligent MPT adapter.
+ * @pdev: Pointer to pci_dev structure
+ */
+
+void
+mpt_detach(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ char pname[32];
+ u8 cb_idx;
+ unsigned long flags;
+ struct workqueue_struct *wq;
+
+ /*
+ * Stop polling ioc for fault condition
+ */
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ wq = ioc->reset_work_q;
+ ioc->reset_work_q = NULL;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ cancel_delayed_work(&ioc->fault_reset_work);
+ destroy_workqueue(wq);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->fw_event_q;
+ ioc->fw_event_q = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ destroy_workqueue(wq);
+
+ sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
+ remove_proc_entry(pname, NULL);
+ sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name);
+ remove_proc_entry(pname, NULL);
+ sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
+ remove_proc_entry(pname, NULL);
+
+ /* call per device driver remove entry point */
+ for(cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
+ if(MptDeviceDriverHandlers[cb_idx] &&
+ MptDeviceDriverHandlers[cb_idx]->remove) {
+ MptDeviceDriverHandlers[cb_idx]->remove(pdev);
+ }
+ }
+
+ /* Disable interrupts! */
+ CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+
+ ioc->active = 0;
+ synchronize_irq(pdev->irq);
+
+ /* Clear any lingering interrupt */
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+ CHIPREG_READ32(&ioc->chip->IntStatus);
+
+ mpt_adapter_dispose(ioc);
+
+}
+
+/**************************************************************************
+ * Power Management
+ */
+#ifdef CONFIG_PM
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_suspend - Fusion MPT base driver suspend routine.
+ * @pdev: Pointer to pci_dev structure
+ * @state: new state to enter
+ */
+int
+mpt_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ u32 device_state;
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+
+ device_state = pci_choose_state(pdev, state);
+ printk(MYIOC_s_INFO_FMT "pci-suspend: pdev=0x%p, slot=%s, Entering "
+ "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
+ device_state);
+
+ /* put ioc into READY_STATE */
+ if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
+ printk(MYIOC_s_ERR_FMT
+ "pci-suspend: IOC msg unit reset failed!\n", ioc->name);
+ }
+
+ /* disable interrupts */
+ CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+ ioc->active = 0;
+
+ /* Clear any lingering interrupt */
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+ free_irq(ioc->pci_irq, ioc);
+ if (ioc->msi_enable)
+ pci_disable_msi(ioc->pcidev);
+ ioc->pci_irq = -1;
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_release_selected_regions(pdev, ioc->bars);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_resume - Fusion MPT base driver resume routine.
+ * @pdev: Pointer to pci_dev structure
+ */
+int
+mpt_resume(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ u32 device_state = pdev->current_state;
+ int recovery_state;
+ int err;
+
+ printk(MYIOC_s_INFO_FMT "pci-resume: pdev=0x%p, slot=%s, Previous "
+ "operating state [D%d]\n", ioc->name, pdev, pci_name(pdev),
+ device_state);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ ioc->pcidev = pdev;
+ err = mpt_mapresources(ioc);
+ if (err)
+ return err;
+
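+ /* Re-install the scatter gather handlers exactly as mpt_attach()
+ * did, now that mpt_mapresources() has re-established the DMA
+ * mask for the resumed device. */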
+ if (ioc->dma_mask == DMA_BIT_MASK(64)) {
+ if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
+ ioc->add_sge = &mpt_add_sge_64bit_1078;
+ else
+ ioc->add_sge = &mpt_add_sge_64bit;
+ ioc->add_chain = &mpt_add_chain_64bit;
+ ioc->sg_addr_size = 8;
+ } else {
+
+ ioc->add_sge = &mpt_add_sge;
+ ioc->add_chain = &mpt_add_chain;
+ ioc->sg_addr_size = 4;
+ }
+ ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
+
+ printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
+ ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
+ CHIPREG_READ32(&ioc->chip->Doorbell));
+
+ /*
+ * Errata workaround for SAS pci express:
+ * Upon returning to the D0 state, the contents of the doorbell will be
+ * stale data, and this will incorrectly signal to the host driver that
+ * the firmware is ready to process mpt commands. The workaround is
+ * to issue a diagnostic reset.
+ */
+ if (ioc->bus_type == SAS && (pdev->device ==
+ MPI_MANUFACTPAGE_DEVID_SAS1068E || pdev->device ==
+ MPI_MANUFACTPAGE_DEVID_SAS1064E)) {
+ if (KickStart(ioc, 1, CAN_SLEEP) < 0) {
+ printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover\n",
+ ioc->name);
+ goto out;
+ }
+ }
+
+ /* bring ioc to operational state */
+ printk(MYIOC_s_INFO_FMT "Sending mpt_do_ioc_recovery\n", ioc->name);
+ recovery_state = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
+ CAN_SLEEP);
+ if (recovery_state != 0)
+ printk(MYIOC_s_WARN_FMT "pci-resume: Cannot recover, "
+ "error:[%x]\n", ioc->name, recovery_state);
+ else
+ printk(MYIOC_s_INFO_FMT
+ "pci-resume: success\n", ioc->name);
+ out:
+ return 0;
+
+}
+#endif
+
+static int
+mpt_signal_reset(u8 index, MPT_ADAPTER *ioc, int reset_phase)
+{
+ if ((MptDriverClass[index] == MPTSPI_DRIVER &&
+ ioc->bus_type != SPI) ||
+ (MptDriverClass[index] == MPTFC_DRIVER &&
+ ioc->bus_type != FC) ||
+ (MptDriverClass[index] == MPTSAS_DRIVER &&
+ ioc->bus_type != SAS))
+ /* make sure we only call the relevant reset handler
+ * for the bus */
+ return 0;
+ return (MptResetHandlers[index])(ioc, reset_phase);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_do_ioc_recovery - Initialize or recover MPT adapter.
+ * @ioc: Pointer to MPT adapter structure
+ * @reason: Event word / reason
+ * @sleepFlag: Use schedule if CAN_SLEEP else use udelay.
+ *
+ * This routine performs all the steps necessary to bring the IOC
+ * to an OPERATIONAL state.
+ *
+ * This routine also pre-fetches the LAN MAC address of a Fibre Channel
+ * MPT adapter.
+ *
+ * Returns:
+ * 0 for success
+ * -1 if failed to get board READY
+ * -2 if READY but IOCFacts Failed
+ * -3 if READY but PrimeIOCFifos Failed
+ * -4 if READY but IOCInit Failed
+ * -5 if failed to enable_device and/or request_selected_regions
+ * -6 if failed to upload firmware
+ */
+static int
+mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
+{
+ int hard_reset_done = 0;
+ int alt_ioc_ready = 0;
+ int hard;
+ int rc=0;
+ int ii;
+ int ret = 0;
+ int reset_alt_ioc_active = 0;
+ int irq_allocated = 0;
+ u8 *a;
+
+ printk(MYIOC_s_INFO_FMT "Initiating %s\n", ioc->name,
+ reason == MPT_HOSTEVENT_IOC_BRINGUP ? "bringup" : "recovery");
+
+ /* Disable reply interrupts (also blocks FreeQ) */
+ CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+ ioc->active = 0;
+
+ if (ioc->alt_ioc) {
+ if (ioc->alt_ioc->active ||
+ reason == MPT_HOSTEVENT_IOC_RECOVER) {
+ reset_alt_ioc_active = 1;
+ /* Disable alt-IOC's reply interrupts
+ * (and FreeQ) for a bit
+ **/
+ CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
+ 0xFFFFFFFF);
+ ioc->alt_ioc->active = 0;
+ }
+ }
+
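+ /* Only force a hard KickStart when recovering; initial bringup
+ * lets MakeIocReady() try a soft reset first. */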
+ hard = 1;
+ if (reason == MPT_HOSTEVENT_IOC_BRINGUP)
+ hard = 0;
+
+ if ((hard_reset_done = MakeIocReady(ioc, hard, sleepFlag)) < 0) {
+ if (hard_reset_done == -4) {
+ printk(MYIOC_s_WARN_FMT "Owned by PEER..skipping!\n",
+ ioc->name);
+
+ if (reset_alt_ioc_active && ioc->alt_ioc) {
+ /* (re)Enable alt-IOC! (reply interrupt, FreeQ) */
+ dprintk(ioc, printk(MYIOC_s_INFO_FMT
+ "alt_ioc reply irq re-enabled\n", ioc->alt_ioc->name));
+ CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM);
+ ioc->alt_ioc->active = 1;
+ }
+
+ } else {
+ printk(MYIOC_s_WARN_FMT
+ "NOT READY WARNING!\n", ioc->name);
+ }
+ ret = -1;
+ goto out;
+ }
+
+ /* hard_reset_done = 0 if a soft reset was performed
+ * and 1 if a hard reset was performed.
+ */
+ if (hard_reset_done && reset_alt_ioc_active && ioc->alt_ioc) {
+ if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
+ alt_ioc_ready = 1;
+ else
+ printk(MYIOC_s_WARN_FMT
+ ": alt-ioc Not ready WARNING!\n",
+ ioc->alt_ioc->name);
+ }
+
+ for (ii=0; ii<5; ii++) {
+ /* Get IOC facts! Allow 5 retries */
+ if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0)
+ break;
+ }
+
+
+ if (ii == 5) {
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Retry IocFacts failed rc=%x\n", ioc->name, rc));
+ ret = -2;
+ } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
+ MptDisplayIocCapabilities(ioc);
+ }
+
+ if (alt_ioc_ready) {
+ if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Initial Alt IocFacts failed rc=%x\n",
+ ioc->name, rc));
+ /* Retry - alt IOC was initialized once
+ */
+ rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
+ }
+ if (rc) {
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Retry Alt IocFacts failed rc=%x\n", ioc->name, rc));
+ alt_ioc_ready = 0;
+ reset_alt_ioc_active = 0;
+ } else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
+ MptDisplayIocCapabilities(ioc->alt_ioc);
+ }
+ }
+
+ if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP) &&
+ (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)) {
+ pci_release_selected_regions(ioc->pcidev, ioc->bars);
+ ioc->bars = pci_select_bars(ioc->pcidev, IORESOURCE_MEM |
+ IORESOURCE_IO);
+ if (pci_enable_device(ioc->pcidev))
+ return -5;
+ if (pci_request_selected_regions(ioc->pcidev, ioc->bars,
+ "mpt"))
+ return -5;
+ }
+
+ /*
+ * Device is reset now. It must have de-asserted the interrupt line
+ * (if it was asserted) and it should be safe to register for the
+ * interrupt now.
+ */
+ if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
+ ioc->pci_irq = -1;
+ if (ioc->pcidev->irq) {
+ if (ioc->msi_enable && !pci_enable_msi(ioc->pcidev))
+ printk(MYIOC_s_INFO_FMT "PCI-MSI enabled\n",
+ ioc->name);
+ else
+ ioc->msi_enable = 0;
+ rc = request_irq(ioc->pcidev->irq, mpt_interrupt,
+ IRQF_SHARED, ioc->name, ioc);
+ if (rc < 0) {
+ printk(MYIOC_s_ERR_FMT "Unable to allocate "
+ "interrupt %d!\n",
+ ioc->name, ioc->pcidev->irq);
+ if (ioc->msi_enable)
+ pci_disable_msi(ioc->pcidev);
+ ret = -EBUSY;
+ goto out;
+ }
+ irq_allocated = 1;
+ ioc->pci_irq = ioc->pcidev->irq;
+ pci_set_master(ioc->pcidev); /* ?? */
+ pci_set_drvdata(ioc->pcidev, ioc);
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ "installed at interrupt %d\n", ioc->name,
+ ioc->pcidev->irq));
+ }
+ }
+
+ /* Prime reply & request queues!
+ * (mucho alloc's) Must be done prior to
+ * init as upper addresses are needed for init.
+ * If fails, continue with alt-ioc processing
+ */
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
+ ioc->name));
+ if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
+ ret = -3;
+
+ /* May need to check/upload firmware & data here!
+ * If fails, continue with alt-ioc processing
+ */
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
+ ioc->name));
+ if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
+ ret = -4;
+// NEW!
+ if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
+ printk(MYIOC_s_WARN_FMT
+ ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
+ ioc->alt_ioc->name, rc);
+ alt_ioc_ready = 0;
+ reset_alt_ioc_active = 0;
+ }
+
+ if (alt_ioc_ready) {
+ if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
+ alt_ioc_ready = 0;
+ reset_alt_ioc_active = 0;
+ printk(MYIOC_s_WARN_FMT
+ ": alt-ioc: (%d) init failure WARNING!\n",
+ ioc->alt_ioc->name, rc);
+ }
+ }
+
+ if (reason == MPT_HOSTEVENT_IOC_BRINGUP){
+ if (ioc->upload_fw) {
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "firmware upload required!\n", ioc->name));
+
+ /* Controller is not operational, cannot do upload
+ */
+ if (ret == 0) {
+ rc = mpt_do_upload(ioc, sleepFlag);
+ if (rc == 0) {
+ if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
+ /*
+ * Maintain only one pointer to FW memory
+ * so there will not be two attempt to
+ * downloadboot onboard dual function
+ * chips (mpt_adapter_disable,
+ * mpt_diag_reset)
+ */
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "mpt_upload: alt_%s has cached_fw=%p \n",
+ ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
+ ioc->cached_fw = NULL;
+ }
+ } else {
+ printk(MYIOC_s_WARN_FMT
+ "firmware upload failure!\n", ioc->name);
+ ret = -6;
+ }
+ }
+ }
+ }
+
+ /* Enable MPT base driver management of EventNotification
+ * and EventAck handling.
+ */
+ if ((ret == 0) && (!ioc->facts.EventState)) {
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ "SendEventNotification\n",
+ ioc->name));
+ ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */
+ }
+
+ if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
+ rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
+
+ if (ret == 0) {
+ /* Enable! (reply interrupt) */
+ CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
+ ioc->active = 1;
+ }
+ if (rc == 0) { /* alt ioc */
+ if (reset_alt_ioc_active && ioc->alt_ioc) {
+ /* (re)Enable alt-IOC! (reply interrupt) */
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc "
+ "reply irq re-enabled\n",
+ ioc->alt_ioc->name));
+ CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
+ MPI_HIM_DIM);
+ ioc->alt_ioc->active = 1;
+ }
+ }
+
+
+ /* Add additional "reason" check before call to GetLanConfigPages
+ * (combined with GetIoUnitPage2 call). This prevents a somewhat
+ * recursive scenario; GetLanConfigPages times out, timer expired
+ * routine calls HardResetHandler, which calls into here again,
+ * and we try GetLanConfigPages again...
+ */
+ if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
+
+ /*
+ * Initialize link list for inactive raid volumes.
+ */
+ mutex_init(&ioc->raid_data.inactive_list_mutex);
+ INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
+
+ switch (ioc->bus_type) {
+
+ case SAS:
+ /* clear persistency table */
+ if(ioc->facts.IOCExceptions &
+ MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
+ ret = mptbase_sas_persist_operation(ioc,
+ MPI_SAS_OP_CLEAR_NOT_PRESENT);
+ if(ret != 0)
+ goto out;
+ }
+
+ /* Find IM volumes
+ */
+ mpt_findImVolumes(ioc);
+
+ /* Check, and possibly reset, the coalescing value
+ */
+ mpt_read_ioc_pg_1(ioc);
+
+ break;
+
+ case FC:
+ if ((ioc->pfacts[0].ProtocolFlags &
+ MPI_PORTFACTS_PROTOCOL_LAN) &&
+ (ioc->lan_cnfg_page0.Header.PageLength == 0)) {
+ /*
+ * Pre-fetch the ports LAN MAC address!
+ * (LANPage1_t stuff)
+ */
+ (void) GetLanConfigPages(ioc);
+ a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "LanAddr = %02X:%02X:%02X"
+ ":%02X:%02X:%02X\n",
+ ioc->name, a[5], a[4],
+ a[3], a[2], a[1], a[0]));
+ }
+ break;
+
+ case SPI:
+ /* Get NVRAM and adapter maximums from SPP 0 and 2
+ */
+ mpt_GetScsiPortSettings(ioc, 0);
+
+ /* Get version and length of SDP 1
+ */
+ mpt_readScsiDevicePageHeaders(ioc, 0);
+
+ /* Find IM volumes
+ */
+ if (ioc->facts.MsgVersion >= MPI_VERSION_01_02)
+ mpt_findImVolumes(ioc);
+
+ /* Check, and possibly reset, the coalescing value
+ */
+ mpt_read_ioc_pg_1(ioc);
+
+ mpt_read_ioc_pg_4(ioc);
+
+ break;
+ }
+
+ GetIoUnitPage2(ioc);
+ mpt_get_manufacturing_pg_0(ioc);
+ }
+
+ out:
+ if ((ret != 0) && irq_allocated) {
+ free_irq(ioc->pci_irq, ioc);
+ if (ioc->msi_enable)
+ pci_disable_msi(ioc->pcidev);
+ }
+ return ret;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_detect_bound_ports - Search for matching PCI bus/dev_function
+ * @ioc: Pointer to MPT adapter structure
+ * @pdev: Pointer to (struct pci_dev) structure
+ *
+ * Search for a PCI device on the same bus whose dev_function is one
+ * above or one below that of a newly discovered 929, 929X, 1030 or
+ * 1035.
+ *
+ * If match on PCI dev_function +/-1 is found, bind the two MPT adapters
+ * using alt_ioc pointer fields in their %MPT_ADAPTER structures.
+ */
+static void
+mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
+{
+ struct pci_dev *peer=NULL;
+ unsigned int slot = PCI_SLOT(pdev->devfn);
+ unsigned int func = PCI_FUNC(pdev->devfn);
+ MPT_ADAPTER *ioc_srch;
+
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PCI device %s devfn=%x/%x,"
+ " searching for devfn match on %x or %x\n",
+ ioc->name, pci_name(pdev), pdev->bus->number,
+ pdev->devfn, func-1, func+1));
+
+ peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1));
+ if (!peer) {
+ peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func+1));
+ if (!peer)
+ return;
+ }
+
+ list_for_each_entry(ioc_srch, &ioc_list, list) {
+ struct pci_dev *_pcidev = ioc_srch->pcidev;
+ if (_pcidev == peer) {
+ /* Paranoia checks */
+ if (ioc->alt_ioc != NULL) {
+ printk(MYIOC_s_WARN_FMT
+ "Oops, already bound (%s <==> %s)!\n",
+ ioc->name, ioc->name, ioc->alt_ioc->name);
+ break;
+ } else if (ioc_srch->alt_ioc != NULL) {
+ printk(MYIOC_s_WARN_FMT
+ "Oops, already bound (%s <==> %s)!\n",
+ ioc_srch->name, ioc_srch->name,
+ ioc_srch->alt_ioc->name);
+ break;
+ }
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "FOUND! binding %s <==> %s\n",
+ ioc->name, ioc->name, ioc_srch->name));
+ ioc_srch->alt_ioc = ioc;
+ ioc->alt_ioc = ioc_srch;
+ }
+ }
+ pci_dev_put(peer);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_adapter_disable - Disable misbehaving MPT adapter.
+ * @ioc: Pointer to MPT adapter structure
+ */
+static void
+mpt_adapter_disable(MPT_ADAPTER *ioc)
+{
+ int sz;
+ int ret;
+
+ if (ioc->cached_fw != NULL) {
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: Pushing FW onto adapter\n", __func__, ioc->name));
+ if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
+ ioc->cached_fw, CAN_SLEEP)) < 0) {
+ printk(MYIOC_s_WARN_FMT
+ ": firmware downloadboot failure (%d)!\n",
+ ioc->name, ret);
+ }
+ }
+
+ /*
+ * Put the controller into ready state (if it's not already)
+ */
+ if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
+ if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
+ CAN_SLEEP)) {
+ if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
+ printk(MYIOC_s_ERR_FMT "%s: IOC msg unit "
+ "reset failed to put ioc in ready state!\n",
+ ioc->name, __func__);
+ } else
+ printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset "
+ "failed!\n", ioc->name, __func__);
+ }
+
+
+ /* Disable adapter interrupts! */
+ synchronize_irq(ioc->pcidev->irq);
+ CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+ ioc->active = 0;
+
+ /* Clear any lingering interrupt */
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+ CHIPREG_READ32(&ioc->chip->IntStatus);
+
+ if (ioc->alloc != NULL) {
+ sz = ioc->alloc_sz;
+ dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n",
+ ioc->name, ioc->alloc, ioc->alloc_sz));
+ pci_free_consistent(ioc->pcidev, sz,
+ ioc->alloc, ioc->alloc_dma);
+ ioc->reply_frames = NULL;
+ ioc->req_frames = NULL;
+ ioc->alloc = NULL;
+ ioc->alloc_total -= sz;
+ }
+
+ if (ioc->sense_buf_pool != NULL) {
+ sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
+ pci_free_consistent(ioc->pcidev, sz,
+ ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+ ioc->sense_buf_pool = NULL;
+ ioc->alloc_total -= sz;
+ }
+
+ if (ioc->events != NULL){
+ sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
+ kfree(ioc->events);
+ ioc->events = NULL;
+ ioc->alloc_total -= sz;
+ }
+
+ mpt_free_fw_memory(ioc);
+
+ kfree(ioc->spi_data.nvram);
+ mpt_inactive_raid_list_free(ioc);
+ kfree(ioc->raid_data.pIocPg2);
+ kfree(ioc->raid_data.pIocPg3);
+ ioc->spi_data.nvram = NULL;
+ ioc->raid_data.pIocPg3 = NULL;
+
+ if (ioc->spi_data.pIocPg4 != NULL) {
+ sz = ioc->spi_data.IocPg4Sz;
+ pci_free_consistent(ioc->pcidev, sz,
+ ioc->spi_data.pIocPg4,
+ ioc->spi_data.IocPg4_dma);
+ ioc->spi_data.pIocPg4 = NULL;
+ ioc->alloc_total -= sz;
+ }
+
+ if (ioc->ReqToChain != NULL) {
+ kfree(ioc->ReqToChain);
+ kfree(ioc->RequestNB);
+ ioc->ReqToChain = NULL;
+ }
+
+ kfree(ioc->ChainToChain);
+ ioc->ChainToChain = NULL;
+
+ if (ioc->HostPageBuffer != NULL) {
+ if((ret = mpt_host_page_access_control(ioc,
+ MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
+ printk(MYIOC_s_ERR_FMT
+ ": %s: host page buffers free failed (%d)!\n",
+ ioc->name, __func__, ret);
+ }
+ dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "HostPageBuffer free @ %p, sz=%d bytes\n",
+ ioc->name, ioc->HostPageBuffer,
+ ioc->HostPageBuffer_sz));
+ pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
+ ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
+ ioc->HostPageBuffer = NULL;
+ ioc->HostPageBuffer_sz = 0;
+ ioc->alloc_total -= ioc->HostPageBuffer_sz;
+ }
+
+ pci_set_drvdata(ioc->pcidev, NULL);
+}
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_adapter_dispose - Free all resources associated with an MPT adapter
+ * @ioc: Pointer to MPT adapter structure
+ *
+ * This routine unregisters h/w resources and frees all alloc'd memory
+ * associated with a MPT adapter structure.
+ */
+static void
+mpt_adapter_dispose(MPT_ADAPTER *ioc)
+{
+ int sz_first, sz_last;
+
+ if (ioc == NULL)
+ return;
+
+ sz_first = ioc->alloc_total;
+
+ mpt_adapter_disable(ioc);
+
+ if (ioc->pci_irq != -1) {
+ free_irq(ioc->pci_irq, ioc);
+ if (ioc->msi_enable)
+ pci_disable_msi(ioc->pcidev);
+ ioc->pci_irq = -1;
+ }
+
+ if (ioc->memmap != NULL) {
+ iounmap(ioc->memmap);
+ ioc->memmap = NULL;
+ }
+
+ pci_disable_device(ioc->pcidev);
+ pci_release_selected_regions(ioc->pcidev, ioc->bars);
+
+#if defined(CONFIG_MTRR) && 0
+ if (ioc->mtrr_reg > 0) {
+ mtrr_del(ioc->mtrr_reg, 0, 0);
+ dprintk(ioc, printk(MYIOC_s_INFO_FMT "MTRR region de-registered\n", ioc->name));
+ }
+#endif
+
+ /* Zap the adapter lookup ptr! */
+ list_del(&ioc->list);
+
+ sz_last = ioc->alloc_total;
+ dprintk(ioc, printk(MYIOC_s_INFO_FMT "free'd %d of %d bytes\n",
+ ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
+
+ if (ioc->alt_ioc)
+ ioc->alt_ioc->alt_ioc = NULL;
+
+ kfree(ioc);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * MptDisplayIocCapabilities - Display IOC's capabilities.
+ * @ioc: Pointer to MPT adapter structure
+ */
+static void
+MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
+{
+ int i = 0;
+
+ printk(KERN_INFO "%s: ", ioc->name);
+ if (ioc->prod_name)
+ printk("%s: ", ioc->prod_name);
+ printk("Capabilities={");
+
+ if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
+ printk("Initiator");
+ i++;
+ }
+
+ if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
+ printk("%sTarget", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
+ printk("%sLAN", i ? "," : "");
+ i++;
+ }
+
+#if 0
+ /*
+ * This would probably evoke more questions than it's worth
+ */
+ if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
+ printk("%sLogBusAddr", i ? "," : "");
+ i++;
+ }
+#endif
+
+ printk("}\n");
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * MakeIocReady - Get IOC to a READY state, using KickStart if needed.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @force: Force hard KickStart of IOC
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * Returns:
+ * 1 - DIAG reset and READY
+ * 0 - READY initially OR soft reset and READY
+ * -1 - Any failure on KickStart
+ * -2 - Msg Unit Reset Failed
+ * -3 - IO Unit Reset Failed
+ * -4 - IOC owned by a PEER
+ */
+static int
+MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
+{
+ u32 ioc_state;
+ int statefault = 0;
+ int cntdn;
+ int hard_reset_done = 0;
+ int r;
+ int ii;
+ int whoinit;
+
+ /* Get current [raw] IOC state */
+ ioc_state = mpt_GetIocState(ioc, 0);
+ dhsprintk(ioc, printk(MYIOC_s_INFO_FMT "MakeIocReady [raw] state=%08x\n", ioc->name, ioc_state));
+
+ /*
+ * Check to see if IOC got left/stuck in doorbell handshake
+ * grip of death. If so, hard reset the IOC.
+ */
+ if (ioc_state & MPI_DOORBELL_ACTIVE) {
+ statefault = 1;
+ printk(MYIOC_s_WARN_FMT "Unexpected doorbell active!\n",
+ ioc->name);
+ }
+
+ /* Is it already READY? */
+ if (!statefault &&
+ ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ "IOC is in READY state\n", ioc->name));
+ return 0;
+ }
+
+ /*
+ * Check to see if IOC is in FAULT state.
+ */
+ if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
+ statefault = 2;
+ printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n",
+ ioc->name);
+ printk(MYIOC_s_WARN_FMT " FAULT code = %04xh\n",
+ ioc->name, ioc_state & MPI_DOORBELL_DATA_MASK);
+ }
+
+ /*
+ * Hmmm... Did it get left operational?
+ */
+ if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) {
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC operational unexpected\n",
+ ioc->name));
+
+ /* Check WhoInit.
+ * If PCI Peer, exit.
+ * Else, if no fault conditions are present, issue a MessageUnitReset
+ * Else, fall through to KickStart case
+ */
+ whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT;
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+ "whoinit 0x%x statefault %d force %d\n",
+ ioc->name, whoinit, statefault, force));
+ if (whoinit == MPI_WHOINIT_PCI_PEER)
+ return -4;
+ else {
+ if ((statefault == 0 ) && (force == 0)) {
+ if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) == 0)
+ return 0;
+ }
+ statefault = 3;
+ }
+ }
+
+ hard_reset_done = KickStart(ioc, statefault||force, sleepFlag);
+ if (hard_reset_done < 0)
+ return -1;
+
+ /*
+ * Loop here waiting for IOC to come READY.
+ */
+ ii = 0;
+ cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 5; /* 5 seconds */
+
+ while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
+ if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
+ /*
+ * BIOS or previous driver load left IOC in OP state.
+ * Reset messaging FIFOs.
+ */
+ if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) != 0) {
+ printk(MYIOC_s_ERR_FMT "IOC msg unit reset failed!\n", ioc->name);
+ return -2;
+ }
+ } else if (ioc_state == MPI_IOC_STATE_RESET) {
+ /*
+ * Something is wrong. Try to get IOC back
+ * to a known state.
+ */
+ if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET, sleepFlag)) != 0) {
+ printk(MYIOC_s_ERR_FMT "IO unit reset failed!\n", ioc->name);
+ return -3;
+ }
+ }
+
+ ii++; cntdn--;
+ if (!cntdn) {
+ printk(MYIOC_s_ERR_FMT
+ "Wait IOC_READY state (0x%x) timeout(%d)!\n",
+ ioc->name, ioc_state, (int)((ii+5)/HZ));
+ return -ETIME;
+ }
+
+ if (sleepFlag == CAN_SLEEP) {
+ msleep(1);
+ } else {
+ mdelay (1); /* 1 msec delay */
+ }
+
+ }
+
+ if (statefault < 3) {
+ printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
+ statefault == 1 ? "stuck handshake" : "IOC FAULT");
+ }
+
+ return hard_reset_done;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_GetIocState - Get the current state of a MPT adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @cooked: Request raw or cooked IOC state
+ *
+ * Returns all IOC Doorbell register bits if cooked==0, else just the
+ * Doorbell bits in MPI_IOC_STATE_MASK.
+ */
+u32
+mpt_GetIocState(MPT_ADAPTER *ioc, int cooked)
+{
+ u32 s, sc;
+
+ /* Get! */
+ s = CHIPREG_READ32(&ioc->chip->Doorbell);
+ sc = s & MPI_IOC_STATE_MASK;
+
+ /* Save! */
+ ioc->last_state = sc;
+
+ return cooked ? sc : s;
+}
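+
+/*
+ * Callers interested only in the state itself pass cooked=1 and compare
+ * the result against the MPI_IOC_STATE_* constants, e.g.
+ * mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY in mpt_adapter_disable().
+ */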
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * GetIocFacts - Send IOCFacts request to MPT adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Specifies whether the process can sleep
+ * @reason: If recovery, only update facts.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
+{
+ IOCFacts_t get_facts;
+ IOCFactsReply_t *facts;
+ int r;
+ int req_sz;
+ int reply_sz;
+ int sz;
+ u32 status, vv;
+ u8 shiftFactor=1;
+
+ /* IOC *must* NOT be in RESET state! */
+ if (ioc->last_state == MPI_IOC_STATE_RESET) {
+ printk(KERN_ERR MYNAM
+ ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
+ ioc->name, ioc->last_state);
+ return -44;
+ }
+
+ facts = &ioc->facts;
+
+ /* Destination (reply area)... */
+ reply_sz = sizeof(*facts);
+ memset(facts, 0, reply_sz);
+
+ /* Request area (get_facts on the stack right now!) */
+ req_sz = sizeof(get_facts);
+ memset(&get_facts, 0, req_sz);
+
+ get_facts.Function = MPI_FUNCTION_IOC_FACTS;
+ /* Assert: All other get_facts fields are zero! */
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Sending get IocFacts request req_sz=%d reply_sz=%d\n",
+ ioc->name, req_sz, reply_sz));
+
+ /* No non-zero fields in the get_facts request are greater than
+ * 1 byte in size, so we can just fire it off as is.
+ */
+ r = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_facts,
+ reply_sz, (u16*)facts, 5 /*seconds*/, sleepFlag);
+ if (r != 0)
+ return r;
+
+ /*
+ * Now byte swap (GRRR) the necessary fields before any further
+ * inspection of reply contents.
+ *
+ * But need to do some sanity checks on MsgLength (byte) field
+ * to make sure we don't zero IOC's req_sz!
+ */
+ /* Did we get a valid reply? */
+ if (facts->MsgLength > offsetof(IOCFactsReply_t, RequestFrameSize)/sizeof(u32)) {
+ if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
+ /*
+ * If not been here, done that, save off first WhoInit value
+ */
+ if (ioc->FirstWhoInit == WHOINIT_UNKNOWN)
+ ioc->FirstWhoInit = facts->WhoInit;
+ }
+
+ facts->MsgVersion = le16_to_cpu(facts->MsgVersion);
+ facts->MsgContext = le32_to_cpu(facts->MsgContext);
+ facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
+ facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
+ facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
+ status = le16_to_cpu(facts->IOCStatus) & MPI_IOCSTATUS_MASK;
+ /* CHECKME! IOCStatus, IOCLogInfo */
+
+ facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
+ facts->RequestFrameSize = le16_to_cpu(facts->RequestFrameSize);
+
+ /*
+ * FC f/w version changed between 1.1 and 1.2
+ * Old: u16{Major(4),Minor(4),SubMinor(8)}
+ * New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
+ */
+ if (facts->MsgVersion < MPI_VERSION_01_02) {
+ /*
+ * Handle old FC f/w style, convert to new...
+ */
+ u16 oldv = le16_to_cpu(facts->Reserved_0101_FWVersion);
+ facts->FWVersion.Word =
+ ((oldv<<12) & 0xFF000000) |
+ ((oldv<<8) & 0x000FFF00);
+ } else
+ facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
+
+ facts->ProductID = le16_to_cpu(facts->ProductID);
+
+ if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
+ > MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
+ ioc->ir_firmware = 1;
+
+ facts->CurrentHostMfaHighAddr =
+ le32_to_cpu(facts->CurrentHostMfaHighAddr);
+ facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
+ facts->CurrentSenseBufferHighAddr =
+ le32_to_cpu(facts->CurrentSenseBufferHighAddr);
+ facts->CurReplyFrameSize =
+ le16_to_cpu(facts->CurReplyFrameSize);
+ facts->IOCCapabilities = le32_to_cpu(facts->IOCCapabilities);
+
+ /*
+ * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
+ * Older MPI-1.00.xx struct had 13 dwords, and enlarged
+ * to 14 in MPI-1.01.0x.
+ */
+ if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
+ facts->MsgVersion > MPI_VERSION_01_00) {
+ facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
+ }
+
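+ /* Round the reported firmware image size up to a 4 byte multiple */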
+ sz = facts->FWImageSize;
+ if ( sz & 0x01 )
+ sz += 1;
+ if ( sz & 0x02 )
+ sz += 2;
+ facts->FWImageSize = sz;
+
+ if (!facts->RequestFrameSize) {
+ /* Something is wrong! */
+ printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n",
+ ioc->name);
+ return -55;
+ }
+
+ r = sz = facts->BlockSize;
+ vv = ((63 / (sz * 4)) + 1) & 0x03;
+ ioc->NB_for_64_byte_frame = vv;
+ while ( sz )
+ {
+ shiftFactor++;
+ sz = sz >> 1;
+ }
+ ioc->NBShiftFactor = shiftFactor;
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n",
+ ioc->name, vv, shiftFactor, r));
+
+ if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
+ /*
+ * Set values for this IOC's request & reply frame sizes,
+ * and request & reply queue depths...
+ */
+ ioc->req_sz = min(MPT_DEFAULT_FRAME_SIZE, facts->RequestFrameSize * 4);
+ ioc->req_depth = min_t(int, MPT_MAX_REQ_DEPTH, facts->GlobalCredits);
+ ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
+ ioc->reply_depth = min_t(int, MPT_DEFAULT_REPLY_DEPTH, facts->ReplyQueueDepth);
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "reply_sz=%3d, reply_depth=%4d\n",
+ ioc->name, ioc->reply_sz, ioc->reply_depth));
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "req_sz =%3d, req_depth =%4d\n",
+ ioc->name, ioc->req_sz, ioc->req_depth));
+
+ /* Get port facts! */
+ if ( (r = GetPortFacts(ioc, 0, sleepFlag)) != 0 )
+ return r;
+ }
+ } else {
+ printk(MYIOC_s_ERR_FMT
+ "Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n",
+ ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t,
+ RequestFrameSize)/sizeof(u32)));
+ return -66;
+ }
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * GetPortFacts - Send PortFacts request to MPT adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @portnum: Port number
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
+{
+ PortFacts_t get_pfacts;
+ PortFactsReply_t *pfacts;
+ int ii;
+ int req_sz;
+ int reply_sz;
+ int max_id;
+
+ /* IOC *must* NOT be in RESET state! */
+ if (ioc->last_state == MPI_IOC_STATE_RESET) {
+ printk(MYIOC_s_ERR_FMT "Can't get PortFacts NOT READY! (%08x)\n",
+ ioc->name, ioc->last_state );
+ return -4;
+ }
+
+ pfacts = &ioc->pfacts[portnum];
+
+ /* Destination (reply area)... */
+ reply_sz = sizeof(*pfacts);
+ memset(pfacts, 0, reply_sz);
+
+ /* Request area (get_pfacts on the stack right now!) */
+ req_sz = sizeof(get_pfacts);
+ memset(&get_pfacts, 0, req_sz);
+
+ get_pfacts.Function = MPI_FUNCTION_PORT_FACTS;
+ get_pfacts.PortNumber = portnum;
+ /* Assert: All other get_pfacts fields are zero! */
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending get PortFacts(%d) request\n",
+ ioc->name, portnum));
+
+ /* No non-zero fields in the get_pfacts request are greater than
+ * 1 byte in size, so we can just fire it off as is.
+ */
+ ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_pfacts,
+ reply_sz, (u16*)pfacts, 5 /*seconds*/, sleepFlag);
+ if (ii != 0)
+ return ii;
+
+ /* Did we get a valid reply? */
+
+ /* Now byte swap the necessary fields in the response. */
+ pfacts->MsgContext = le32_to_cpu(pfacts->MsgContext);
+ pfacts->IOCStatus = le16_to_cpu(pfacts->IOCStatus);
+ pfacts->IOCLogInfo = le32_to_cpu(pfacts->IOCLogInfo);
+ pfacts->MaxDevices = le16_to_cpu(pfacts->MaxDevices);
+ pfacts->PortSCSIID = le16_to_cpu(pfacts->PortSCSIID);
+ pfacts->ProtocolFlags = le16_to_cpu(pfacts->ProtocolFlags);
+ pfacts->MaxPostedCmdBuffers = le16_to_cpu(pfacts->MaxPostedCmdBuffers);
+ pfacts->MaxPersistentIDs = le16_to_cpu(pfacts->MaxPersistentIDs);
+ pfacts->MaxLanBuckets = le16_to_cpu(pfacts->MaxLanBuckets);
+
+ max_id = (ioc->bus_type == SAS) ? pfacts->PortSCSIID :
+ pfacts->MaxDevices;
+ ioc->devices_per_bus = (max_id > 255) ? 256 : max_id;
+ ioc->number_of_buses = (ioc->devices_per_bus < 256) ? 1 : max_id/256;
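+	/*
+	 * Illustrative mapping (values assumed): an IOC reporting
+	 * max_id = 128 is presented as one bus of 128 devices, while
+	 * max_id = 512 becomes 512/256 = 2 buses of 256 devices each.
+	 */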
+
+ /*
+ * Place all the devices on channels
+ *
+	 * (for debugging)
+ */
+ if (mpt_channel_mapping) {
+ ioc->devices_per_bus = 1;
+ ioc->number_of_buses = (max_id > 255) ? 255 : max_id;
+ }
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * SendIocInit - Send IOCInit request to MPT adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * Send IOCInit followed by PortEnable to bring IOC to OPERATIONAL state.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
+{
+ IOCInit_t ioc_init;
+ MPIDefaultReply_t init_reply;
+ u32 state;
+ int r;
+ int count;
+ int cntdn;
+
+ memset(&ioc_init, 0, sizeof(ioc_init));
+ memset(&init_reply, 0, sizeof(init_reply));
+
+ ioc_init.WhoInit = MPI_WHOINIT_HOST_DRIVER;
+ ioc_init.Function = MPI_FUNCTION_IOC_INIT;
+
+ /* If we are in a recovery mode and we uploaded the FW image,
+ * then this pointer is not NULL. Skip the upload a second time.
+ * Set this flag if cached_fw set for either IOC.
+ */
+ if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
+ ioc->upload_fw = 1;
+ else
+ ioc->upload_fw = 0;
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "upload_fw %d facts.Flags=%x\n",
+ ioc->name, ioc->upload_fw, ioc->facts.Flags));
+
+ ioc_init.MaxDevices = (U8)ioc->devices_per_bus;
+ ioc_init.MaxBuses = (U8)ioc->number_of_buses;
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n",
+ ioc->name, ioc->facts.MsgVersion));
+ if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
+		/* set MsgVersion and HeaderVersion host driver was built with */
+ ioc_init.MsgVersion = cpu_to_le16(MPI_VERSION);
+ ioc_init.HeaderVersion = cpu_to_le16(MPI_HEADER_VERSION);
+
+ if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_HOST_PAGE_BUFFER_PERSISTENT) {
+ ioc_init.HostPageBufferSGE = ioc->facts.HostPageBufferSGE;
+ } else if(mpt_host_page_alloc(ioc, &ioc_init))
+ return -99;
+ }
+ ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */
+
+ if (ioc->sg_addr_size == sizeof(u64)) {
+ /* Save the upper 32-bits of the request
+ * (reply) and sense buffers.
+ */
+ ioc_init.HostMfaHighAddr = cpu_to_le32((u32)((u64)ioc->alloc_dma >> 32));
+ ioc_init.SenseBufferHighAddr = cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
+ } else {
+ /* Force 32-bit addressing */
+ ioc_init.HostMfaHighAddr = cpu_to_le32(0);
+ ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
+ }
+
+ ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
+ ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
+ ioc->facts.MaxDevices = ioc_init.MaxDevices;
+ ioc->facts.MaxBuses = ioc_init.MaxBuses;
+
+ dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOCInit (req @ %p)\n",
+ ioc->name, &ioc_init));
+
+ r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
+ sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
+ if (r != 0) {
+ printk(MYIOC_s_ERR_FMT "Sending IOCInit failed(%d)!\n",ioc->name, r);
+ return r;
+ }
+
+ /* No need to byte swap the multibyte fields in the reply
+ * since we don't even look at its contents.
+ */
+
+ dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending PortEnable (req @ %p)\n",
+ ioc->name, &ioc_init));
+
+ if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0) {
+ printk(MYIOC_s_ERR_FMT "Sending PortEnable failed(%d)!\n",ioc->name, r);
+ return r;
+ }
+
+ /* YIKES! SUPER IMPORTANT!!!
+ * Poll IocState until _OPERATIONAL while IOC is doing
+ * LoopInit and TargetDiscovery!
+ */
+ count = 0;
+ cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 60; /* 60 seconds */
+ state = mpt_GetIocState(ioc, 1);
+ while (state != MPI_IOC_STATE_OPERATIONAL && --cntdn) {
+ if (sleepFlag == CAN_SLEEP) {
+ msleep(1);
+ } else {
+ mdelay(1);
+ }
+
+ if (!cntdn) {
+ printk(MYIOC_s_ERR_FMT "Wait IOC_OP state timeout(%d)!\n",
+ ioc->name, (int)((count+5)/HZ));
+ return -9;
+ }
+
+ state = mpt_GetIocState(ioc, 1);
+ count++;
+ }
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wait IOC_OPERATIONAL state (cnt=%d)\n",
+ ioc->name, count));
+
+ ioc->aen_event_read_flag=0;
+ return r;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * SendPortEnable - Send PortEnable request to MPT adapter port.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @portnum: Port number to enable
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * Send PortEnable to bring IOC to OPERATIONAL state.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
+{
+ PortEnable_t port_enable;
+ MPIDefaultReply_t reply_buf;
+ int rc;
+ int req_sz;
+ int reply_sz;
+
+ /* Destination... */
+ reply_sz = sizeof(MPIDefaultReply_t);
+ memset(&reply_buf, 0, reply_sz);
+
+ req_sz = sizeof(PortEnable_t);
+ memset(&port_enable, 0, req_sz);
+
+ port_enable.Function = MPI_FUNCTION_PORT_ENABLE;
+ port_enable.PortNumber = portnum;
+/* port_enable.ChainOffset = 0; */
+/* port_enable.MsgFlags = 0; */
+/* port_enable.MsgContext = 0; */
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Port(%d)Enable (req @ %p)\n",
+ ioc->name, portnum, &port_enable));
+
+ /* RAID FW may take a long time to enable
+ */
+ if (ioc->ir_firmware || ioc->bus_type == SAS) {
+ rc = mpt_handshake_req_reply_wait(ioc, req_sz,
+ (u32*)&port_enable, reply_sz, (u16*)&reply_buf,
+ 300 /*seconds*/, sleepFlag);
+ } else {
+ rc = mpt_handshake_req_reply_wait(ioc, req_sz,
+ (u32*)&port_enable, reply_sz, (u16*)&reply_buf,
+ 30 /*seconds*/, sleepFlag);
+ }
+ return rc;
+}
+
+/**
+ * mpt_alloc_fw_memory - allocate firmware memory
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @size: total FW bytes
+ *
+ * If memory has already been allocated, the same (cached) value
+ * is returned.
+ *
+ * Return 0 if successful, or non-zero for failure
+ **/
+int
+mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
+{
+ int rc;
+
+ if (ioc->cached_fw) {
+ rc = 0; /* use already allocated memory */
+ goto out;
+ }
+ else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
+ ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */
+ ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
+ rc = 0;
+ goto out;
+ }
+ ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma);
+ if (!ioc->cached_fw) {
+ printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n",
+ ioc->name);
+ rc = -1;
+ } else {
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Image @ %p[%p], sz=%d[%x] bytes\n",
+ ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, size, size));
+ ioc->alloc_total += size;
+ rc = 0;
+ }
+ out:
+ return rc;
+}
+
+/**
+ * mpt_free_fw_memory - free firmware memory
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ *	Frees the firmware image cached in the ioc structure (if any)
+ *	and adjusts the allocation accounting accordingly.
+ **/
+void
+mpt_free_fw_memory(MPT_ADAPTER *ioc)
+{
+ int sz;
+
+ if (!ioc->cached_fw)
+ return;
+
+ sz = ioc->facts.FWImageSize;
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
+ ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
+ pci_free_consistent(ioc->pcidev, sz, ioc->cached_fw, ioc->cached_fw_dma);
+ ioc->alloc_total -= sz;
+ ioc->cached_fw = NULL;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_do_upload - Construct and Send FWUpload request to MPT adapter port.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * Returns 0 for success, >0 for handshake failure
+ * <0 for fw upload failure.
+ *
+ *	Remark: If there is a bound IOC and a successful FWUpload was
+ *	already performed on the bound IOC, the second image is discarded
+ *	and its memory is freed. Both channels must upload to prevent the
+ *	IOC from running in degraded mode.
+ */
+static int
+mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
+{
+ u8 reply[sizeof(FWUploadReply_t)];
+ FWUpload_t *prequest;
+ FWUploadReply_t *preply;
+ FWUploadTCSGE_t *ptcsge;
+ u32 flagsLength;
+ int ii, sz, reply_sz;
+ int cmdStatus;
+ int request_size;
+ /* If the image size is 0, we are done.
+ */
+ if ((sz = ioc->facts.FWImageSize) == 0)
+ return 0;
+
+ if (mpt_alloc_fw_memory(ioc, ioc->facts.FWImageSize) != 0)
+ return -ENOMEM;
+
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Image @ %p[%p], sz=%d[%x] bytes\n",
+ ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
+
+ prequest = (sleepFlag == NO_SLEEP) ? kzalloc(ioc->req_sz, GFP_ATOMIC) :
+ kzalloc(ioc->req_sz, GFP_KERNEL);
+ if (!prequest) {
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed "
+ "while allocating memory \n", ioc->name));
+ mpt_free_fw_memory(ioc);
+ return -ENOMEM;
+ }
+
+ preply = (FWUploadReply_t *)&reply;
+
+ reply_sz = sizeof(reply);
+ memset(preply, 0, reply_sz);
+
+ prequest->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
+ prequest->Function = MPI_FUNCTION_FW_UPLOAD;
+
+ ptcsge = (FWUploadTCSGE_t *) &prequest->SGL;
+ ptcsge->DetailsLength = 12;
+ ptcsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
+ ptcsge->ImageSize = cpu_to_le32(sz);
+ ptcsge++;
+
+ flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
+ ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
+ request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) +
+ ioc->SGE_size;
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload "
+ " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest,
+ ioc->facts.FWImageSize, request_size));
+ DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest);
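+	/*
+	 * Resulting request frame layout (sizes are assumptions for
+	 * illustration and depend on the MPI headers and addressing mode):
+	 *
+	 *   [ FWUpload_t header up to SGL ]  offsetof(FWUpload_t, SGL)
+	 *   [ FWUploadTCSGE_t             ]  transaction context, ImageSize
+	 *   [ simple SGE                  ]  ioc->SGE_size -> cached_fw_dma
+	 *
+	 * e.g. with a 64-bit simple SGE this comes to roughly
+	 * 12 + 16 + 12 = 40 bytes.
+	 */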
+
+ ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest,
+ reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag);
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed "
+ "rc=%x \n", ioc->name, ii));
+
+ cmdStatus = -EFAULT;
+ if (ii == 0) {
+ /* Handshake transfer was complete and successful.
+ * Check the Reply Frame.
+ */
+ int status;
+ status = le16_to_cpu(preply->IOCStatus) &
+ MPI_IOCSTATUS_MASK;
+ if (status == MPI_IOCSTATUS_SUCCESS &&
+ ioc->facts.FWImageSize ==
+ le32_to_cpu(preply->ActualImageSize))
+ cmdStatus = 0;
+ }
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n",
+ ioc->name, cmdStatus));
+
+
+ if (cmdStatus) {
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, "
+ "freeing image \n", ioc->name));
+ mpt_free_fw_memory(ioc);
+ }
+ kfree(prequest);
+
+ return cmdStatus;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_downloadboot - DownloadBoot code
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @pFwHeader: Pointer to firmware header info
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * FwDownloadBoot requires Programmed IO access.
+ *
+ * Returns 0 for success
+ * -1 FW Image size is 0
+ * -2 No valid cached_fw Pointer
+ * <0 for fw upload failure.
+ */
+static int
+mpt_downloadboot(MPT_ADAPTER *ioc, MpiFwHeader_t *pFwHeader, int sleepFlag)
+{
+ MpiExtImageHeader_t *pExtImage;
+ u32 fwSize;
+ u32 diag0val;
+ int count;
+ u32 *ptrFw;
+ u32 diagRwData;
+ u32 nextImage;
+ u32 load_addr;
+ u32 ioc_state=0;
+
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot: fw size 0x%x (%d), FW Ptr %p\n",
+ ioc->name, pFwHeader->ImageSize, pFwHeader->ImageSize, pFwHeader));
+
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
+
+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM));
+
+ /* wait 1 msec */
+ if (sleepFlag == CAN_SLEEP) {
+ msleep(1);
+ } else {
+ mdelay (1);
+ }
+
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
+
+ for (count = 0; count < 30; count ++) {
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RESET_ADAPTER cleared, count=%d\n",
+ ioc->name, count));
+ break;
+ }
+ /* wait .1 sec */
+ if (sleepFlag == CAN_SLEEP) {
+ msleep (100);
+ } else {
+ mdelay (100);
+ }
+ }
+
+ if ( count == 30 ) {
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot failed! "
+ "Unable to get MPI_DIAG_DRWE mode, diag0val=%x\n",
+ ioc->name, diag0val));
+ return -3;
+ }
+
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
+
+ /* Set the DiagRwEn and Disable ARM bits */
+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));
+
+ fwSize = (pFwHeader->ImageSize + 3)/4;
+ ptrFw = (u32 *) pFwHeader;
+
+ /* Write the LoadStartAddress to the DiagRw Address Register
+ * using Programmed IO
+ */
+ if (ioc->errata_flag_1064)
+ pci_enable_io_access(ioc->pcidev);
+
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->LoadStartAddress);
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "LoadStart addr written 0x%x \n",
+ ioc->name, pFwHeader->LoadStartAddress));
+
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write FW Image: 0x%x bytes @ %p\n",
+ ioc->name, fwSize*4, ptrFw));
+ while (fwSize--) {
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
+ }
+
+ nextImage = pFwHeader->NextImageHeaderOffset;
+ while (nextImage) {
+ pExtImage = (MpiExtImageHeader_t *) ((char *)pFwHeader + nextImage);
+
+ load_addr = pExtImage->LoadStartAddress;
+
+ fwSize = (pExtImage->ImageSize + 3) >> 2;
+ ptrFw = (u32 *)pExtImage;
+
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write Ext Image: 0x%x (%d) bytes @ %p load_addr=%x\n",
+ ioc->name, fwSize*4, fwSize*4, ptrFw, load_addr));
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr);
+
+ while (fwSize--) {
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
+ }
+ nextImage = pExtImage->NextImageHeaderOffset;
+ }
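+	/*
+	 * Image layout walked above (illustrative): the firmware blob is a
+	 * main image followed by zero or more extended images, each header
+	 * giving the offset of the next:
+	 *
+	 *   MpiFwHeader_t ........ NextImageHeaderOffset ---+
+	 *   MpiExtImageHeader_t <---------------------------+
+	 *     ... NextImageHeaderOffset -> next ext image, or 0 to stop
+	 *
+	 * Each image is written to its LoadStartAddress through the
+	 * auto-incrementing DiagRwData PIO register.
+	 */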
+
+ /* Write the IopResetVectorRegAddr */
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Addr=%x! \n", ioc->name, pFwHeader->IopResetRegAddr));
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->IopResetRegAddr);
+
+ /* Write the IopResetVectorValue */
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Write IopResetVector Value=%x! \n", ioc->name, pFwHeader->IopResetVectorValue));
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, pFwHeader->IopResetVectorValue);
+
+ /* Clear the internal flash bad bit - autoincrementing register,
+ * so must do two writes.
+ */
+ if (ioc->bus_type == SPI) {
+ /*
+ * 1030 and 1035 H/W errata, workaround to access
+ * the ClearFlashBadSignatureBit
+ */
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
+ diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
+ diagRwData |= 0x40000000;
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
+ CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
+
+ } else /* if((ioc->bus_type == SAS) || (ioc->bus_type == FC)) */ {
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val |
+ MPI_DIAG_CLEAR_FLASH_BAD_SIG);
+
+ /* wait 1 msec */
+ if (sleepFlag == CAN_SLEEP) {
+ msleep (1);
+ } else {
+ mdelay (1);
+ }
+ }
+
+ if (ioc->errata_flag_1064)
+ pci_disable_io_access(ioc->pcidev);
+
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot diag0val=%x, "
+ "turning off PREVENT_IOC_BOOT, DISABLE_ARM, RW_ENABLE\n",
+ ioc->name, diag0val));
+ diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM | MPI_DIAG_RW_ENABLE);
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "downloadboot now diag0val=%x\n",
+ ioc->name, diag0val));
+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
+
+ /* Write 0xFF to reset the sequencer */
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+
+ if (ioc->bus_type == SAS) {
+ ioc_state = mpt_GetIocState(ioc, 0);
+ if ( (GetIocFacts(ioc, sleepFlag,
+ MPT_HOSTEVENT_IOC_BRINGUP)) != 0 ) {
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "GetIocFacts failed: IocState=%x\n",
+ ioc->name, ioc_state));
+ return -EFAULT;
+ }
+ }
+
+ for (count=0; count<HZ*20; count++) {
+ if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) {
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "downloadboot successful! (count=%d) IocState=%x\n",
+ ioc->name, count, ioc_state));
+ if (ioc->bus_type == SAS) {
+ return 0;
+ }
+ if ((SendIocInit(ioc, sleepFlag)) != 0) {
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "downloadboot: SendIocInit failed\n",
+ ioc->name));
+ return -EFAULT;
+ }
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "downloadboot: SendIocInit successful\n",
+ ioc->name));
+ return 0;
+ }
+ if (sleepFlag == CAN_SLEEP) {
+ msleep (10);
+ } else {
+ mdelay (10);
+ }
+ }
+ ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "downloadboot failed! IocState=%x\n",ioc->name, ioc_state));
+ return -EFAULT;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * KickStart - Perform hard reset of MPT adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @force: Force hard reset
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * This routine places MPT adapter in diagnostic mode via the
+ * WriteSequence register, and then performs a hard reset of adapter
+ * via the Diagnostic register.
+ *
+ *	Inputs:   sleepflag - CAN_SLEEP (non-interrupt thread)
+ *			or NO_SLEEP (interrupt thread, use mdelay)
+ *		  force - 1 if doorbell active, board in fault state or
+ *			   operational, or IOC_RECOVERY/IOC_BRINGUP and
+ *			   there is an alt_ioc;
+ *			   0 otherwise
+ *
+ * Returns:
+ * 1 - hard reset, READY
+ * 0 - no reset due to History bit, READY
+ * -1 - no reset due to History bit but not READY
+ * OR reset but failed to come READY
+ * -2 - no reset, could not enter DIAG mode
+ * -3 - reset but bad FW bit
+ */
+static int
+KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
+{
+ int hard_reset_done = 0;
+ u32 ioc_state=0;
+ int cnt,cntdn;
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "KickStarting!\n", ioc->name));
+ if (ioc->bus_type == SPI) {
+ /* Always issue a Msg Unit Reset first. This will clear some
+ * SCSI bus hang conditions.
+ */
+ SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
+
+ if (sleepFlag == CAN_SLEEP) {
+ msleep (1000);
+ } else {
+ mdelay (1000);
+ }
+ }
+
+ hard_reset_done = mpt_diag_reset(ioc, force, sleepFlag);
+ if (hard_reset_done < 0)
+ return hard_reset_done;
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset successful!\n",
+ ioc->name));
+
+ cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 2; /* 2 seconds */
+ for (cnt=0; cnt<cntdn; cnt++) {
+ ioc_state = mpt_GetIocState(ioc, 1);
+ if ((ioc_state == MPI_IOC_STATE_READY) || (ioc_state == MPI_IOC_STATE_OPERATIONAL)) {
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "KickStart successful! (cnt=%d)\n",
+ ioc->name, cnt));
+ return hard_reset_done;
+ }
+ if (sleepFlag == CAN_SLEEP) {
+ msleep (10);
+ } else {
+ mdelay (10);
+ }
+ }
+
+ dinitprintk(ioc, printk(MYIOC_s_ERR_FMT "Failed to come READY after reset! IocState=%x\n",
+ ioc->name, mpt_GetIocState(ioc, 0)));
+ return -1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_diag_reset - Perform hard reset of the adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *	@ignore: Set to ignore (override) the reset history bit and force
+ *		the reset; clear to honor it
+ * @sleepFlag: CAN_SLEEP if called in a non-interrupt thread,
+ * else set to NO_SLEEP (use mdelay instead)
+ *
+ * This routine places the adapter in diagnostic mode via the
+ * WriteSequence register and then performs a hard reset of adapter
+ * via the Diagnostic register. Adapter should be in ready state
+ * upon successful completion.
+ *
+ * Returns: 1 hard reset successful
+ * 0 no reset performed because reset history bit set
+ * -2 enabling diagnostic mode failed
+ * -3 diagnostic reset failed
+ */
+static int
+mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
+{
+ u32 diag0val;
+ u32 doorbell;
+ int hard_reset_done = 0;
+ int count = 0;
+ u32 diag1val = 0;
+ MpiFwHeader_t *cached_fw; /* Pointer to FW */
+ u8 cb_idx;
+
+ /* Clear any existing interrupts */
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+ if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
+
+ if (!ignore)
+ return 0;
+
+ drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
+ "address=%p\n", ioc->name, __func__,
+ &ioc->chip->Doorbell, &ioc->chip->Reset_1078));
+ CHIPREG_WRITE32(&ioc->chip->Reset_1078, 0x07);
+ if (sleepFlag == CAN_SLEEP)
+ msleep(1);
+ else
+ mdelay(1);
+
+ /*
+ * Call each currently registered protocol IOC reset handler
+ * with pre-reset indication.
+ * NOTE: If we're doing _IOC_BRINGUP, there can be no
+ * MptResetHandlers[] registered yet.
+ */
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ (*(MptResetHandlers[cb_idx]))(ioc,
+ MPT_IOC_PRE_RESET);
+ }
+
+ for (count = 0; count < 60; count ++) {
+ doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
+ doorbell &= MPI_IOC_STATE_MASK;
+
+ drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "looking for READY STATE: doorbell=%x"
+ " count=%d\n",
+ ioc->name, doorbell, count));
+
+ if (doorbell == MPI_IOC_STATE_READY) {
+ return 1;
+ }
+
+ /* wait 1 sec */
+ if (sleepFlag == CAN_SLEEP)
+ msleep(1000);
+ else
+ mdelay(1000);
+ }
+ return -1;
+ }
+
+ /* Use "Diagnostic reset" method! (only thing available!) */
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+
+ if (ioc->debug_level & MPT_DEBUG) {
+ if (ioc->alt_ioc)
+ diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG1: diag0=%08x, diag1=%08x\n",
+ ioc->name, diag0val, diag1val));
+ }
+
+ /* Do the reset if we are told to ignore the reset history
+ * or if the reset history is 0
+ */
+ if (ignore || !(diag0val & MPI_DIAG_RESET_HISTORY)) {
+ while ((diag0val & MPI_DIAG_DRWE) == 0) {
+ /* Write magic sequence to WriteSequence register
+ * Loop until in diagnostic mode
+ */
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
+
+ /* wait 100 msec */
+ if (sleepFlag == CAN_SLEEP) {
+ msleep (100);
+ } else {
+ mdelay (100);
+ }
+
+ count++;
+ if (count > 20) {
+ printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
+ ioc->name, diag0val);
+ return -2;
+
+ }
+
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Wrote magic DiagWriteEn sequence (%x)\n",
+ ioc->name, diag0val));
+ }
+
+ if (ioc->debug_level & MPT_DEBUG) {
+ if (ioc->alt_ioc)
+ diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG2: diag0=%08x, diag1=%08x\n",
+ ioc->name, diag0val, diag1val));
+ }
+		/*
+		 * Disable the ARM (bug fix)
+		 */
+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM);
+ mdelay(1);
+
+ /*
+ * Now hit the reset bit in the Diagnostic register
+ * (THE BIG HAMMER!) (Clears DRWE bit).
+ */
+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
+ hard_reset_done = 1;
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Diagnostic reset performed\n",
+ ioc->name));
+
+ /*
+ * Call each currently registered protocol IOC reset handler
+ * with pre-reset indication.
+ * NOTE: If we're doing _IOC_BRINGUP, there can be no
+ * MptResetHandlers[] registered yet.
+ */
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx]) {
+ mpt_signal_reset(cb_idx,
+ ioc, MPT_IOC_PRE_RESET);
+ if (ioc->alt_ioc) {
+ mpt_signal_reset(cb_idx,
+ ioc->alt_ioc, MPT_IOC_PRE_RESET);
+ }
+ }
+ }
+
+ if (ioc->cached_fw)
+ cached_fw = (MpiFwHeader_t *)ioc->cached_fw;
+ else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
+ cached_fw = (MpiFwHeader_t *)ioc->alt_ioc->cached_fw;
+ else
+ cached_fw = NULL;
+ if (cached_fw) {
+ /* If the DownloadBoot operation fails, the
+ * IOC will be left unusable. This is a fatal error
+ * case. _diag_reset will return < 0
+ */
+ for (count = 0; count < 30; count ++) {
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
+ break;
+ }
+
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "cached_fw: diag0val=%x count=%d\n",
+ ioc->name, diag0val, count));
+ /* wait 1 sec */
+ if (sleepFlag == CAN_SLEEP) {
+ msleep (1000);
+ } else {
+ mdelay (1000);
+ }
+ }
+ if ((count = mpt_downloadboot(ioc, cached_fw, sleepFlag)) < 0) {
+ printk(MYIOC_s_WARN_FMT
+ "firmware downloadboot failure (%d)!\n", ioc->name, count);
+ }
+
+ } else {
+ /* Wait for FW to reload and for board
+ * to go to the READY state.
+ * Maximum wait is 60 seconds.
+		 * If it fails, no error is returned here; the calling
+		 * program will check the IOC state again.
+ */
+ for (count = 0; count < 60; count ++) {
+ doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
+ doorbell &= MPI_IOC_STATE_MASK;
+
+ drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "looking for READY STATE: doorbell=%x"
+ " count=%d\n", ioc->name, doorbell, count));
+
+ if (doorbell == MPI_IOC_STATE_READY) {
+ break;
+ }
+
+ /* wait 1 sec */
+ if (sleepFlag == CAN_SLEEP) {
+ msleep (1000);
+ } else {
+ mdelay (1000);
+ }
+ }
+
+ if (doorbell != MPI_IOC_STATE_READY)
+ printk(MYIOC_s_ERR_FMT "Failed to come READY "
+ "after reset! IocState=%x", ioc->name,
+ doorbell);
+ }
+ }
+
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ if (ioc->debug_level & MPT_DEBUG) {
+ if (ioc->alt_ioc)
+ diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG3: diag0=%08x, diag1=%08x\n",
+ ioc->name, diag0val, diag1val));
+ }
+
+ /* Clear RESET_HISTORY bit! Place board in the
+ * diagnostic mode to update the diag register.
+ */
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ count = 0;
+ while ((diag0val & MPI_DIAG_DRWE) == 0) {
+ /* Write magic sequence to WriteSequence register
+ * Loop until in diagnostic mode
+ */
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
+
+ /* wait 100 msec */
+ if (sleepFlag == CAN_SLEEP) {
+ msleep (100);
+ } else {
+ mdelay (100);
+ }
+
+ count++;
+ if (count > 20) {
+ printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
+ ioc->name, diag0val);
+ break;
+ }
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ }
+ diag0val &= ~MPI_DIAG_RESET_HISTORY;
+ CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ if (diag0val & MPI_DIAG_RESET_HISTORY) {
+ printk(MYIOC_s_WARN_FMT "ResetHistory bit failed to clear!\n",
+ ioc->name);
+ }
+
+ /* Disable Diagnostic Mode
+ */
+ CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFFFFFFFF);
+
+ /* Check FW reload status flags.
+ */
+ diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+ if (diag0val & (MPI_DIAG_FLASH_BAD_SIG | MPI_DIAG_RESET_ADAPTER | MPI_DIAG_DISABLE_ARM)) {
+ printk(MYIOC_s_ERR_FMT "Diagnostic reset FAILED! (%02xh)\n",
+ ioc->name, diag0val);
+ return -3;
+ }
+
+ if (ioc->debug_level & MPT_DEBUG) {
+ if (ioc->alt_ioc)
+ diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DbG4: diag0=%08x, diag1=%08x\n",
+ ioc->name, diag0val, diag1val));
+ }
+
+ /*
+ * Reset flag that says we've enabled event notification
+ */
+ ioc->facts.EventState = 0;
+
+ if (ioc->alt_ioc)
+ ioc->alt_ioc->facts.EventState = 0;
+
+ return hard_reset_done;
+}
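+
+/*
+ * The five-key WriteSequence unlock performed several times above could,
+ * in principle, be factored into a small helper; a minimal sketch (not
+ * part of this driver, shown only to summarize the sequence) might be:
+ *
+ *	static void mpt_write_diag_unlock_keys(MPT_ADAPTER *ioc)
+ *	{
+ *		CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+ *		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
+ *		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
+ *		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
+ *		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
+ *		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
+ *	}
+ *
+ * Writing the full key sequence sets the Diag Write Enable (DRWE) bit in
+ * the Diagnostic register; writing 0xFFFFFFFF (as done above) disables
+ * diagnostic mode again.
+ */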
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * SendIocReset - Send IOCReset request to MPT adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @reset_type: reset type, expected values are
+ * %MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET or %MPI_FUNCTION_IO_UNIT_RESET
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * Send IOCReset request to the MPT adapter.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
+{
+ int r;
+ u32 state;
+ int cntdn, count;
+
+ drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending IOC reset(0x%02x)!\n",
+ ioc->name, reset_type));
+ CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT);
+ if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
+ return r;
+
+ /* FW ACK'd request, wait for READY state
+ */
+ count = 0;
+ cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15; /* 15 seconds */
+
+ while ((state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
+ cntdn--;
+ count++;
+ if (!cntdn) {
+ if (sleepFlag != CAN_SLEEP)
+ count *= 10;
+
+ printk(MYIOC_s_ERR_FMT
+ "Wait IOC_READY state (0x%x) timeout(%d)!\n",
+ ioc->name, state, (int)((count+5)/HZ));
+ return -ETIME;
+ }
+
+ if (sleepFlag == CAN_SLEEP) {
+ msleep(1);
+ } else {
+ mdelay (1); /* 1 msec delay */
+ }
+ }
+
+ /* TODO!
+ * Cleanup all event stuff for this IOC; re-issue EventNotification
+ * request if needed.
+ */
+ if (ioc->facts.Function)
+ ioc->facts.EventState = 0;
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * initChainBuffers - Allocate memory for and initialize chain buffers
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * Allocates memory for and initializes chain buffers,
+ * chain buffer control arrays and spinlock.
+ */
+static int
+initChainBuffers(MPT_ADAPTER *ioc)
+{
+ u8 *mem;
+ int sz, ii, num_chain;
+ int scale, num_sge, numSGE;
+
+ /* ReqToChain size must equal the req_depth
+ * index = req_idx
+ */
+ if (ioc->ReqToChain == NULL) {
+ sz = ioc->req_depth * sizeof(int);
+ mem = kmalloc(sz, GFP_ATOMIC);
+ if (mem == NULL)
+ return -1;
+
+ ioc->ReqToChain = (int *) mem;
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReqToChain alloc @ %p, sz=%d bytes\n",
+ ioc->name, mem, sz));
+ mem = kmalloc(sz, GFP_ATOMIC);
+ if (mem == NULL)
+ return -1;
+
+ ioc->RequestNB = (int *) mem;
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestNB alloc @ %p, sz=%d bytes\n",
+ ioc->name, mem, sz));
+ }
+ for (ii = 0; ii < ioc->req_depth; ii++) {
+ ioc->ReqToChain[ii] = MPT_HOST_NO_CHAIN;
+ }
+
+ /* ChainToChain size must equal the total number
+ * of chain buffers to be allocated.
+ * index = chain_idx
+ *
+ * Calculate the number of chain buffers needed(plus 1) per I/O
+ * then multiply the maximum number of simultaneous cmds
+ *
+ * num_sge = num sge in request frame + last chain buffer
+ * scale = num sge per chain buffer if no chain element
+ */
+ scale = ioc->req_sz / ioc->SGE_size;
+ if (ioc->sg_addr_size == sizeof(u64))
+ num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size;
+ else
+ num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size;
+
+ if (ioc->sg_addr_size == sizeof(u64)) {
+ numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
+ (ioc->req_sz - 60) / ioc->SGE_size;
+ } else {
+ numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) +
+ scale + (ioc->req_sz - 64) / ioc->SGE_size;
+ }
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n",
+ ioc->name, num_sge, numSGE));
+
+ if (ioc->bus_type == FC) {
+ if (numSGE > MPT_SCSI_FC_SG_DEPTH)
+ numSGE = MPT_SCSI_FC_SG_DEPTH;
+ } else {
+ if (numSGE > MPT_SCSI_SG_DEPTH)
+ numSGE = MPT_SCSI_SG_DEPTH;
+ }
+
+ num_chain = 1;
+ while (numSGE - num_sge > 0) {
+ num_chain++;
+ num_sge += (scale - 1);
+ }
+ num_chain++;
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Now numSGE=%d num_sge=%d num_chain=%d\n",
+ ioc->name, numSGE, num_sge, num_chain));
+
+ if (ioc->bus_type == SPI)
+ num_chain *= MPT_SCSI_CAN_QUEUE;
+ else if (ioc->bus_type == SAS)
+ num_chain *= MPT_SAS_CAN_QUEUE;
+ else
+ num_chain *= MPT_FC_CAN_QUEUE;
+
+ ioc->num_chain = num_chain;
+
+ sz = num_chain * sizeof(int);
+ if (ioc->ChainToChain == NULL) {
+ mem = kmalloc(sz, GFP_ATOMIC);
+ if (mem == NULL)
+ return -1;
+
+ ioc->ChainToChain = (int *) mem;
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainToChain alloc @ %p, sz=%d bytes\n",
+ ioc->name, mem, sz));
+ } else {
+ mem = (u8 *) ioc->ChainToChain;
+ }
+ memset(mem, 0xFF, sz);
+ return num_chain;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * PrimeIocFifos - Initialize IOC request and reply FIFOs.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * This routine allocates memory for the MPT reply and request frame
+ * pools (if necessary), and primes the IOC reply FIFO with
+ * reply frames.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+PrimeIocFifos(MPT_ADAPTER *ioc)
+{
+ MPT_FRAME_HDR *mf;
+ unsigned long flags;
+ dma_addr_t alloc_dma;
+ u8 *mem;
+ int i, reply_sz, sz, total_size, num_chain;
+ u64 dma_mask;
+
+ dma_mask = 0;
+
+ /* Prime reply FIFO... */
+
+ if (ioc->reply_frames == NULL) {
+ if ( (num_chain = initChainBuffers(ioc)) < 0)
+ return -1;
+ /*
+ * 1078 errata workaround for the 36GB limitation
+ */
+ if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
+ ioc->dma_mask > DMA_BIT_MASK(35)) {
+ if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(ioc->pcidev,
+ DMA_BIT_MASK(32))) {
+ dma_mask = DMA_BIT_MASK(35);
+ d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "setting 35 bit addressing for "
+ "Request/Reply/Chain and Sense Buffers\n",
+ ioc->name));
+ } else {
+				/* Resetting DMA mask to 64 bit */
+ pci_set_dma_mask(ioc->pcidev,
+ DMA_BIT_MASK(64));
+ pci_set_consistent_dma_mask(ioc->pcidev,
+ DMA_BIT_MASK(64));
+
+ printk(MYIOC_s_ERR_FMT
+ "failed setting 35 bit addressing for "
+ "Request/Reply/Chain and Sense Buffers\n",
+ ioc->name);
+ return -1;
+ }
+ }
+
+ total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
+ ioc->name, ioc->reply_sz, ioc->reply_depth));
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d[%x] bytes\n",
+ ioc->name, reply_sz, reply_sz));
+
+ sz = (ioc->req_sz * ioc->req_depth);
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d bytes, RequestDepth=%d\n",
+ ioc->name, ioc->req_sz, ioc->req_depth));
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffer sz=%d[%x] bytes\n",
+ ioc->name, sz, sz));
+ total_size += sz;
+
+ sz = num_chain * ioc->req_sz; /* chain buffer pool size */
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d bytes, ChainDepth=%d\n",
+ ioc->name, ioc->req_sz, num_chain));
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffer sz=%d[%x] bytes num_chain=%d\n",
+ ioc->name, sz, sz, num_chain));
+
+ total_size += sz;
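+		/*
+		 * The single coherent allocation that follows is laid out as
+		 * (sketch):
+		 *
+		 *   [ reply frames  : reply_sz * reply_depth ]
+		 *   [ request frames: req_sz   * req_depth   ]
+		 *   [ chain buffers : req_sz   * num_chain    ]
+		 *
+		 * mem/alloc_dma are then advanced through these regions in
+		 * that order.
+		 */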
+ mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
+ if (mem == NULL) {
+ printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
+ ioc->name);
+ goto out_fail;
+ }
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Total alloc @ %p[%p], sz=%d[%x] bytes\n",
+ ioc->name, mem, (void *)(ulong)alloc_dma, total_size, total_size));
+
+ memset(mem, 0, total_size);
+ ioc->alloc_total += total_size;
+ ioc->alloc = mem;
+ ioc->alloc_dma = alloc_dma;
+ ioc->alloc_sz = total_size;
+ ioc->reply_frames = (MPT_FRAME_HDR *) mem;
+ ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n",
+ ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
+
+ alloc_dma += reply_sz;
+ mem += reply_sz;
+
+ /* Request FIFO - WE manage this! */
+
+ ioc->req_frames = (MPT_FRAME_HDR *) mem;
+ ioc->req_frames_dma = alloc_dma;
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RequestBuffers @ %p[%p]\n",
+ ioc->name, mem, (void *)(ulong)alloc_dma));
+
+ ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
+
+#if defined(CONFIG_MTRR) && 0
+ /*
+ * Enable Write Combining MTRR for IOC's memory region.
+ * (at least as much as we can; "size and base must be
+	 *  multiples of 4 kiB")
+ */
+ ioc->mtrr_reg = mtrr_add(ioc->req_frames_dma,
+ sz,
+ MTRR_TYPE_WRCOMB, 1);
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MTRR region registered (base:size=%08x:%x)\n",
+ ioc->name, ioc->req_frames_dma, sz));
+#endif
+
+ for (i = 0; i < ioc->req_depth; i++) {
+ alloc_dma += ioc->req_sz;
+ mem += ioc->req_sz;
+ }
+
+ ioc->ChainBuffer = mem;
+ ioc->ChainBufferDMA = alloc_dma;
+
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ChainBuffers @ %p(%p)\n",
+ ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA));
+
+ /* Initialize the free chain Q.
+ */
+
+ INIT_LIST_HEAD(&ioc->FreeChainQ);
+
+ /* Post the chain buffers to the FreeChainQ.
+ */
+ mem = (u8 *)ioc->ChainBuffer;
+ for (i=0; i < num_chain; i++) {
+ mf = (MPT_FRAME_HDR *) mem;
+ list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeChainQ);
+ mem += ioc->req_sz;
+ }
+
+ /* Initialize Request frames linked list
+ */
+ alloc_dma = ioc->req_frames_dma;
+ mem = (u8 *) ioc->req_frames;
+
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+ INIT_LIST_HEAD(&ioc->FreeQ);
+ for (i = 0; i < ioc->req_depth; i++) {
+ mf = (MPT_FRAME_HDR *) mem;
+
+ /* Queue REQUESTs *internally*! */
+ list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
+
+ mem += ioc->req_sz;
+ }
+ spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+ sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
+ ioc->sense_buf_pool =
+ pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
+ if (ioc->sense_buf_pool == NULL) {
+ printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
+ ioc->name);
+ goto out_fail;
+ }
+
+ ioc->sense_buf_low_dma = (u32) (ioc->sense_buf_pool_dma & 0xFFFFFFFF);
+ ioc->alloc_total += sz;
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SenseBuffers @ %p[%p]\n",
+ ioc->name, ioc->sense_buf_pool, (void *)(ulong)ioc->sense_buf_pool_dma));
+
+ }
+
+ /* Post Reply frames to FIFO
+ */
+ alloc_dma = ioc->alloc_dma;
+ dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffers @ %p[%p]\n",
+ ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
+
+ for (i = 0; i < ioc->reply_depth; i++) {
+ /* Write each address to the IOC! */
+ CHIPREG_WRITE32(&ioc->chip->ReplyFifo, alloc_dma);
+ alloc_dma += ioc->reply_sz;
+ }
+
+ if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
+ ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
+ ioc->dma_mask))
+ d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "restoring 64 bit addressing\n", ioc->name));
+
+ return 0;
+
+out_fail:
+
+ if (ioc->alloc != NULL) {
+ sz = ioc->alloc_sz;
+ pci_free_consistent(ioc->pcidev,
+ sz,
+ ioc->alloc, ioc->alloc_dma);
+ ioc->reply_frames = NULL;
+ ioc->req_frames = NULL;
+ ioc->alloc_total -= sz;
+ }
+ if (ioc->sense_buf_pool != NULL) {
+ sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
+ pci_free_consistent(ioc->pcidev,
+ sz,
+ ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+ ioc->sense_buf_pool = NULL;
+ }
+
+ if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
+ DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
+ DMA_BIT_MASK(64)))
+ d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "restoring 64 bit addressing\n", ioc->name));
+
+ return -1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_handshake_req_reply_wait - Send MPT request to and receive reply
+ * from IOC via doorbell handshake method.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @reqBytes: Size of the request in bytes
+ * @req: Pointer to MPT request frame
+ * @replyBytes: Expected size of the reply in bytes
+ * @u16reply: Pointer to area where reply should be written
+ * @maxwait: Max wait time for a reply (in seconds)
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ *	NOTES: It is the caller's responsibility to byte-swap fields in the
+ *	request which are greater than 1 byte in size, and likewise to
+ *	byte-swap response fields which are greater than 1 byte in size.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
+ int replyBytes, u16 *u16reply, int maxwait, int sleepFlag)
+{
+ MPIDefaultReply_t *mptReply;
+ int failcnt = 0;
+ int t;
+
+ /*
+ * Get ready to cache a handshake reply
+ */
+ ioc->hs_reply_idx = 0;
+ mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
+ mptReply->MsgLength = 0;
+
+ /*
+ * Make sure there are no doorbells (WRITE 0 to IntStatus reg),
+ * then tell IOC that we want to handshake a request of N words.
+ * (WRITE u32val to Doorbell reg).
+ */
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+ CHIPREG_WRITE32(&ioc->chip->Doorbell,
+ ((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
+ ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
+
+ /*
+ * Wait for IOC's doorbell handshake int
+ */
+ if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
+ failcnt++;
+
+ dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake request start reqBytes=%d, WaitCnt=%d%s\n",
+ ioc->name, reqBytes, t, failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
+
+ /* Read doorbell and check for active bit */
+ if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
+ return -1;
+
+ /*
+ * Clear doorbell int (WRITE 0 to IntStatus reg),
+ * then wait for IOC to ACKnowledge that it's ready for
+ * our handshake request.
+ */
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+ if (!failcnt && (t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
+ failcnt++;
+
+ if (!failcnt) {
+ int ii;
+ u8 *req_as_bytes = (u8 *) req;
+
+ /*
+ * Stuff request words via doorbell handshake,
+ * with ACK from IOC for each.
+ */
+ for (ii = 0; !failcnt && ii < reqBytes/4; ii++) {
+ u32 word = ((req_as_bytes[(ii*4) + 0] << 0) |
+ (req_as_bytes[(ii*4) + 1] << 8) |
+ (req_as_bytes[(ii*4) + 2] << 16) |
+ (req_as_bytes[(ii*4) + 3] << 24));
+
+ CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
+ if ((t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
+ failcnt++;
+ }
+
+ dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handshake request frame (@%p) header\n", ioc->name, req));
+ DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)req);
+
+ dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake request post done, WaitCnt=%d%s\n",
+ ioc->name, t, failcnt ? " - MISSING DOORBELL ACK!" : ""));
+
+ /*
+ * Wait for completion of doorbell handshake reply from the IOC
+ */
+ if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0)
+ failcnt++;
+
+ dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake reply count=%d%s\n",
+ ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : ""));
+
+ /*
+ * Copy out the cached reply...
+ */
+ for (ii=0; ii < min(replyBytes/2,mptReply->MsgLength*2); ii++)
+ u16reply[ii] = ioc->hs_reply[ii];
+ } else {
+ return -99;
+ }
+
+ return -failcnt;
+}
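+
+/*
+ * Summary of the doorbell handshake implemented above (descriptive only):
+ *
+ *   1. Clear IntStatus, then write the HANDSHAKE function code and the
+ *      request length in dwords to the Doorbell register.
+ *   2. Wait for the doorbell interrupt, clear it, and wait for the IOC
+ *      to ACK (IOP_DOORBELL_STATUS clear).
+ *   3. Write the request one dword at a time, waiting for an ACK after
+ *      each word.
+ *   4. Wait for the reply and read it back 16 bits at a time via
+ *      WaitForDoorbellReply(), then copy the cached reply to the caller.
+ *
+ * Callers such as GetPortFacts() above pass stack-allocated request and
+ * reply buffers and remain responsible for any byte swapping of
+ * multi-byte fields.
+ */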
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * WaitForDoorbellAck - Wait for IOC doorbell handshake acknowledge
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @howlong: How long to wait (in seconds)
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * This routine waits (up to ~2 seconds max) for IOC doorbell
+ * handshake ACKnowledge, indicated by the IOP_DOORBELL_STATUS
+ * bit in its IntStatus register being clear.
+ *
+ * Returns a negative value on failure, else wait loop count.
+ */
+static int
+WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
+{
+ int cntdn;
+ int count = 0;
+ u32 intstat=0;
+
+ cntdn = 1000 * howlong;
+
+ if (sleepFlag == CAN_SLEEP) {
+ while (--cntdn) {
+ msleep (1);
+ intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
+ if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
+ break;
+ count++;
+ }
+ } else {
+ while (--cntdn) {
+ udelay (1000);
+ intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
+ if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
+ break;
+ count++;
+ }
+ }
+
+ if (cntdn) {
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell ACK (count=%d)\n",
+ ioc->name, count));
+ return count;
+ }
+
+ printk(MYIOC_s_ERR_FMT "Doorbell ACK timeout (count=%d), IntStatus=%x!\n",
+ ioc->name, count, intstat);
+ return -1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * WaitForDoorbellInt - Wait for IOC to set its doorbell interrupt bit
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @howlong: How long to wait (in seconds)
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * This routine waits (up to ~2 seconds max) for IOC doorbell interrupt
+ * (MPI_HIS_DOORBELL_INTERRUPT) to be set in the IntStatus register.
+ *
+ * Returns a negative value on failure, else wait loop count.
+ */
+static int
+WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
+{
+ int cntdn;
+ int count = 0;
+ u32 intstat=0;
+
+ cntdn = 1000 * howlong;
+ if (sleepFlag == CAN_SLEEP) {
+ while (--cntdn) {
+ intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
+ if (intstat & MPI_HIS_DOORBELL_INTERRUPT)
+ break;
+ msleep(1);
+ count++;
+ }
+ } else {
+ while (--cntdn) {
+ intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
+ if (intstat & MPI_HIS_DOORBELL_INTERRUPT)
+ break;
+ udelay (1000);
+ count++;
+ }
+ }
+
+ if (cntdn) {
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell INT (cnt=%d) howlong=%d\n",
+ ioc->name, count, howlong));
+ return count;
+ }
+
+ printk(MYIOC_s_ERR_FMT "Doorbell INT timeout (count=%d), IntStatus=%x!\n",
+ ioc->name, count, intstat);
+ return -1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * WaitForDoorbellReply - Wait for and capture an IOC handshake reply.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @howlong: How long to wait (in seconds)
+ * @sleepFlag: Specifies whether the process can sleep
+ *
+ * This routine polls the IOC for a handshake reply, 16 bits at a time.
+ * Reply is cached to IOC private area large enough to hold a maximum
+ * of 128 bytes of reply data.
+ *
+ * Returns a negative value on failure, else size of reply in WORDS.
+ */
+static int
+WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
+{
+ int u16cnt = 0;
+ int failcnt = 0;
+ int t;
+ u16 *hs_reply = ioc->hs_reply;
+ volatile MPIDefaultReply_t *mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
+ u16 hword;
+
+ hs_reply[0] = hs_reply[1] = hs_reply[7] = 0;
+
+ /*
+ * Get first two u16's so we can look at IOC's intended reply MsgLength
+ */
+ u16cnt=0;
+ if ((t = WaitForDoorbellInt(ioc, howlong, sleepFlag)) < 0) {
+ failcnt++;
+ } else {
+ hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+ if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
+ failcnt++;
+ else {
+ hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+ }
+ }
+
+ dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitCnt=%d First handshake reply word=%08x%s\n",
+ ioc->name, t, le32_to_cpu(*(u32 *)hs_reply),
+ failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
+
+ /*
+ * If no error (and IOC said MsgLength is > 0), piece together
+ * reply 16 bits at a time.
+ */
+ for (u16cnt=2; !failcnt && u16cnt < (2 * mptReply->MsgLength); u16cnt++) {
+ if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
+ failcnt++;
+ hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
+ /* don't overflow our IOC hs_reply[] buffer! */
+ if (u16cnt < ARRAY_SIZE(ioc->hs_reply))
+ hs_reply[u16cnt] = hword;
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+ }
+
+ if (!failcnt && (t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
+ failcnt++;
+ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+ if (failcnt) {
+ printk(MYIOC_s_ERR_FMT "Handshake reply failure!\n",
+ ioc->name);
+ return -failcnt;
+ }
+#if 0
+ else if (u16cnt != (2 * mptReply->MsgLength)) {
+ return -101;
+ }
+ else if ((mptReply->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
+ return -102;
+ }
+#endif
+
+ dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got Handshake reply:\n", ioc->name));
+ DBG_DUMP_REPLY_FRAME(ioc, (u32 *)mptReply);
+
+ dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitForDoorbell REPLY WaitCnt=%d (sz=%d)\n",
+ ioc->name, t, u16cnt/2));
+ return u16cnt/2;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * GetLanConfigPages - Fetch LANConfig pages.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * Return: 0 for success
+ * -ENOMEM if no memory available
+ * -EPERM if not allowed due to ISR context
+ * -EAGAIN if no msg frames currently available
+ * -EFAULT for non-successful reply or no reply (timeout)
+ */
+static int
+GetLanConfigPages(MPT_ADAPTER *ioc)
+{
+ ConfigPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ LANPage0_t *ppage0_alloc;
+ dma_addr_t page0_dma;
+ LANPage1_t *ppage1_alloc;
+ dma_addr_t page1_dma;
+ int rc = 0;
+ int data_sz;
+ int copy_sz;
+
+ /* Get LAN Page 0 header */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.pageAddr = 0;
+ cfg.timeout = 0;
+
+ if ((rc = mpt_config(ioc, &cfg)) != 0)
+ return rc;
+
+ if (hdr.PageLength > 0) {
+ data_sz = hdr.PageLength * 4;
+ ppage0_alloc = (LANPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+ rc = -ENOMEM;
+ if (ppage0_alloc) {
+ memset((u8 *)ppage0_alloc, 0, data_sz);
+ cfg.physAddr = page0_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if ((rc = mpt_config(ioc, &cfg)) == 0) {
+ /* save the data */
+ copy_sz = min_t(int, sizeof(LANPage0_t), data_sz);
+ memcpy(&ioc->lan_cnfg_page0, ppage0_alloc, copy_sz);
+
+ }
+
+ pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
+
+ /* FIXME!
+ * Normalize endianness of structure data,
+ * by byte-swapping all > 1 byte fields!
+ */
+
+ }
+
+ if (rc)
+ return rc;
+ }
+
+ /* Get LAN Page 1 header */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 1;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.pageAddr = 0;
+
+ if ((rc = mpt_config(ioc, &cfg)) != 0)
+ return rc;
+
+ if (hdr.PageLength == 0)
+ return 0;
+
+ data_sz = hdr.PageLength * 4;
+ rc = -ENOMEM;
+ ppage1_alloc = (LANPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
+ if (ppage1_alloc) {
+ memset((u8 *)ppage1_alloc, 0, data_sz);
+ cfg.physAddr = page1_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if ((rc = mpt_config(ioc, &cfg)) == 0) {
+ /* save the data */
+ copy_sz = min_t(int, sizeof(LANPage1_t), data_sz);
+ memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz);
+ }
+
+ pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma);
+
+ /* FIXME!
+ * Normalize endianness of structure data,
+ * by byte-swapping all > 1 byte fields!
+ */
+
+ }
+
+ return rc;
+}
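+
+/*
+ * The function above follows the usual two-step MPI config page access
+ * pattern; a condensed sketch (locals buf/dma are illustrative only):
+ *
+ *	hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
+ *	cfg.action   = MPI_CONFIG_ACTION_PAGE_HEADER;
+ *	mpt_config(ioc, &cfg);			(learn hdr.PageLength)
+ *
+ *	buf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &dma);
+ *	cfg.physAddr = dma;
+ *	cfg.action   = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ *	mpt_config(ioc, &cfg);			(fetch the page data)
+ */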
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptbase_sas_persist_operation - Perform operation on SAS Persistent Table
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @persist_opcode: see below
+ *
+ * MPI_SAS_OP_CLEAR_NOT_PRESENT - Free all persist TargetID mappings for
+ * devices not currently present.
+ *	MPI_SAS_OP_CLEAR_ALL_PERSISTENT - Clear all persist TargetID mappings
+ *
+ *	NOTE: Do not use this function during interrupt time.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+int
+mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
+{
+ SasIoUnitControlRequest_t *sasIoUnitCntrReq;
+ SasIoUnitControlReply_t *sasIoUnitCntrReply;
+ MPT_FRAME_HDR *mf = NULL;
+ MPIHeader_t *mpi_hdr;
+ int ret = 0;
+ unsigned long timeleft;
+
+ mutex_lock(&ioc->mptbase_cmds.mutex);
+
+ /* init the internal cmd struct */
+ memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
+ INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
+
+	/* ensure garbage is not sent to fw */
+ switch(persist_opcode) {
+
+ case MPI_SAS_OP_CLEAR_NOT_PRESENT:
+ case MPI_SAS_OP_CLEAR_ALL_PERSISTENT:
+ break;
+
+ default:
+ ret = -1;
+ goto out;
+ }
+
+ printk(KERN_DEBUG "%s: persist_opcode=%x\n",
+ __func__, persist_opcode);
+
+ /* Get a MF for this command.
+ */
+ if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
+ printk(KERN_DEBUG "%s: no msg frames!\n", __func__);
+ ret = -1;
+ goto out;
+ }
+
+ mpi_hdr = (MPIHeader_t *) mf;
+ sasIoUnitCntrReq = (SasIoUnitControlRequest_t *)mf;
+ memset(sasIoUnitCntrReq,0,sizeof(SasIoUnitControlRequest_t));
+ sasIoUnitCntrReq->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
+ sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
+ sasIoUnitCntrReq->Operation = persist_opcode;
+
+ mpt_put_msg_frame(mpt_base_index, ioc, mf);
+ timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ);
+ if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ printk(KERN_DEBUG "%s: failed\n", __func__);
+ if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out;
+ if (!timeleft) {
+ printk(MYIOC_s_WARN_FMT
+ "Issuing Reset from %s!!, doorbell=0x%08x\n",
+ ioc->name, __func__, mpt_GetIocState(ioc, 0));
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+ }
+ goto out;
+ }
+
+ if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ ret = -1;
+ goto out;
+ }
+
+ sasIoUnitCntrReply =
+ (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply;
+ if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
+ printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
+ __func__, sasIoUnitCntrReply->IOCStatus,
+ sasIoUnitCntrReply->IOCLogInfo);
+ printk(KERN_DEBUG "%s: failed\n", __func__);
+ ret = -1;
+ } else
+ printk(KERN_DEBUG "%s: success\n", __func__);
+ out:
+
+ CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
+ mutex_unlock(&ioc->mptbase_cmds.mutex);
+ return ret;
+}
+
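+/*
+ * Editor's note -- illustrative usage sketch, not part of the original
+ * driver.  mptbase_sas_persist_operation() sleeps on a mutex and a
+ * completion, so it may only be called from process context.  A caller
+ * (an ioctl handler, for instance) could clear mappings for devices that
+ * are no longer present roughly like this; "ioc" is assumed to be a valid
+ * MPT_ADAPTER pointer obtained elsewhere:
+ *
+ *	int rc;
+ *
+ *	rc = mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
+ *	if (rc != 0)
+ *		printk(MYIOC_s_WARN_FMT "clearing persistent mappings failed\n",
+ *		    ioc->name);
+ */
+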
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+static void
+mptbase_raid_process_event_data(MPT_ADAPTER *ioc,
+ MpiEventDataRaid_t * pRaidEventData)
+{
+ int volume;
+ int reason;
+ int disk;
+ int status;
+ int flags;
+ int state;
+
+ volume = pRaidEventData->VolumeID;
+ reason = pRaidEventData->ReasonCode;
+ disk = pRaidEventData->PhysDiskNum;
+ status = le32_to_cpu(pRaidEventData->SettingsStatus);
+ flags = (status >> 0) & 0xff;
+ state = (status >> 8) & 0xff;
+
+ if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
+ return;
+ }
+
+ if ((reason >= MPI_EVENT_RAID_RC_PHYSDISK_CREATED &&
+ reason <= MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED) ||
+ (reason == MPI_EVENT_RAID_RC_SMART_DATA)) {
+ printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for PhysDisk %d id=%d\n",
+ ioc->name, disk, volume);
+ } else {
+ printk(MYIOC_s_INFO_FMT "RAID STATUS CHANGE for VolumeID %d\n",
+ ioc->name, volume);
+ }
+
+ switch(reason) {
+ case MPI_EVENT_RAID_RC_VOLUME_CREATED:
+ printk(MYIOC_s_INFO_FMT " volume has been created\n",
+ ioc->name);
+ break;
+
+ case MPI_EVENT_RAID_RC_VOLUME_DELETED:
+
+ printk(MYIOC_s_INFO_FMT " volume has been deleted\n",
+ ioc->name);
+ break;
+
+ case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
+ printk(MYIOC_s_INFO_FMT " volume settings have been changed\n",
+ ioc->name);
+ break;
+
+ case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
+ printk(MYIOC_s_INFO_FMT " volume is now %s%s%s%s\n",
+ ioc->name,
+ state == MPI_RAIDVOL0_STATUS_STATE_OPTIMAL
+ ? "optimal"
+ : state == MPI_RAIDVOL0_STATUS_STATE_DEGRADED
+ ? "degraded"
+ : state == MPI_RAIDVOL0_STATUS_STATE_FAILED
+ ? "failed"
+ : "state unknown",
+ flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED
+ ? ", enabled" : "",
+ flags & MPI_RAIDVOL0_STATUS_FLAG_QUIESCED
+ ? ", quiesced" : "",
+ flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
+ ? ", resync in progress" : "" );
+ break;
+
+ case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
+ printk(MYIOC_s_INFO_FMT " volume membership of PhysDisk %d has changed\n",
+ ioc->name, disk);
+ break;
+
+ case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
+ printk(MYIOC_s_INFO_FMT " PhysDisk has been created\n",
+ ioc->name);
+ break;
+
+ case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
+ printk(MYIOC_s_INFO_FMT " PhysDisk has been deleted\n",
+ ioc->name);
+ break;
+
+ case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
+ printk(MYIOC_s_INFO_FMT " PhysDisk settings have been changed\n",
+ ioc->name);
+ break;
+
+ case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
+ printk(MYIOC_s_INFO_FMT " PhysDisk is now %s%s%s\n",
+ ioc->name,
+ state == MPI_PHYSDISK0_STATUS_ONLINE
+ ? "online"
+ : state == MPI_PHYSDISK0_STATUS_MISSING
+ ? "missing"
+ : state == MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE
+ ? "not compatible"
+ : state == MPI_PHYSDISK0_STATUS_FAILED
+ ? "failed"
+ : state == MPI_PHYSDISK0_STATUS_INITIALIZING
+ ? "initializing"
+ : state == MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED
+ ? "offline requested"
+ : state == MPI_PHYSDISK0_STATUS_FAILED_REQUESTED
+ ? "failed requested"
+ : state == MPI_PHYSDISK0_STATUS_OTHER_OFFLINE
+ ? "offline"
+ : "state unknown",
+ flags & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
+ ? ", out of sync" : "",
+ flags & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED
+ ? ", quiesced" : "" );
+ break;
+
+ case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
+ printk(MYIOC_s_INFO_FMT " Domain Validation needed for PhysDisk %d\n",
+ ioc->name, disk);
+ break;
+
+ case MPI_EVENT_RAID_RC_SMART_DATA:
+ printk(MYIOC_s_INFO_FMT " SMART data received, ASC/ASCQ = %02xh/%02xh\n",
+ ioc->name, pRaidEventData->ASC, pRaidEventData->ASCQ);
+ break;
+
+ case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
+ printk(MYIOC_s_INFO_FMT " replacement of PhysDisk %d has started\n",
+ ioc->name, disk);
+ break;
+ }
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * GetIoUnitPage2 - Retrieve BIOS version and boot order information.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * Returns: 0 for success
+ * -ENOMEM if no memory available
+ * -EPERM if not allowed due to ISR context
+ * -EAGAIN if no msg frames currently available
+ * -EFAULT for non-successful reply or no reply (timeout)
+ */
+static int
+GetIoUnitPage2(MPT_ADAPTER *ioc)
+{
+ ConfigPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ IOUnitPage2_t *ppage_alloc;
+ dma_addr_t page_dma;
+ int data_sz;
+ int rc;
+
+ /* Get the page header */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 2;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.pageAddr = 0;
+ cfg.timeout = 0;
+
+ if ((rc = mpt_config(ioc, &cfg)) != 0)
+ return rc;
+
+ if (hdr.PageLength == 0)
+ return 0;
+
+ /* Read the config page */
+ data_sz = hdr.PageLength * 4;
+ rc = -ENOMEM;
+ ppage_alloc = (IOUnitPage2_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+ if (ppage_alloc) {
+ memset((u8 *)ppage_alloc, 0, data_sz);
+ cfg.physAddr = page_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ /* If Good, save data */
+ if ((rc = mpt_config(ioc, &cfg)) == 0)
+ ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion);
+
+ pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage_alloc, page_dma);
+ }
+
+ return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2
+ * @ioc: Pointer to an Adapter Structure
+ * @portnum: IOC port number
+ *
+ * Return: -EFAULT if read of config page header fails
+ * or if no nvram
+ * If read of SCSI Port Page 0 fails,
+ * NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
+ * Adapter settings: async, narrow
+ * Return 1
+ * If read of SCSI Port Page 2 fails,
+ * Adapter settings valid
+ * NVRAM = MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
+ * Return 1
+ * Else
+ * Both valid
+ * Return 0
+ *	CHECK - what type of locking mechanism should be used?
+ */
+static int
+mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
+{
+ u8 *pbuf;
+ dma_addr_t buf_dma;
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t header;
+ int ii;
+ int data, rc = 0;
+
+ /* Allocate memory
+ */
+ if (!ioc->spi_data.nvram) {
+ int sz;
+ u8 *mem;
+ sz = MPT_MAX_SCSI_DEVICES * sizeof(int);
+ mem = kmalloc(sz, GFP_ATOMIC);
+ if (mem == NULL)
+ return -EFAULT;
+
+ ioc->spi_data.nvram = (int *) mem;
+
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SCSI device NVRAM settings @ %p, sz=%d\n",
+ ioc->name, ioc->spi_data.nvram, sz));
+ }
+
+ /* Invalidate NVRAM information
+ */
+ for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
+ ioc->spi_data.nvram[ii] = MPT_HOST_NVRAM_INVALID;
+ }
+
+ /* Read SPP0 header, allocate memory, then read page.
+ */
+ header.PageVersion = 0;
+ header.PageLength = 0;
+ header.PageNumber = 0;
+ header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
+ cfg.cfghdr.hdr = &header;
+ cfg.physAddr = -1;
+ cfg.pageAddr = portnum;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.timeout = 0; /* use default */
+ if (mpt_config(ioc, &cfg) != 0)
+ return -EFAULT;
+
+ if (header.PageLength > 0) {
+ pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
+ if (pbuf) {
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ cfg.physAddr = buf_dma;
+ if (mpt_config(ioc, &cfg) != 0) {
+ ioc->spi_data.maxBusWidth = MPT_NARROW;
+ ioc->spi_data.maxSyncOffset = 0;
+ ioc->spi_data.minSyncFactor = MPT_ASYNC;
+ ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN;
+ rc = 1;
+ ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Unable to read PortPage0 minSyncFactor=%x\n",
+ ioc->name, ioc->spi_data.minSyncFactor));
+ } else {
+ /* Save the Port Page 0 data
+ */
+ SCSIPortPage0_t *pPP0 = (SCSIPortPage0_t *) pbuf;
+ pPP0->Capabilities = le32_to_cpu(pPP0->Capabilities);
+ pPP0->PhysicalInterface = le32_to_cpu(pPP0->PhysicalInterface);
+
+ if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) {
+ ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS;
+ ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "noQas due to Capabilities=%x\n",
+ ioc->name, pPP0->Capabilities));
+ }
+ ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0;
+ data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK;
+ if (data) {
+ ioc->spi_data.maxSyncOffset = (u8) (data >> 16);
+ data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK;
+ ioc->spi_data.minSyncFactor = (u8) (data >> 8);
+ ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "PortPage0 minSyncFactor=%x\n",
+ ioc->name, ioc->spi_data.minSyncFactor));
+ } else {
+ ioc->spi_data.maxSyncOffset = 0;
+ ioc->spi_data.minSyncFactor = MPT_ASYNC;
+ }
+
+ ioc->spi_data.busType = pPP0->PhysicalInterface & MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK;
+
+ /* Update the minSyncFactor based on bus type.
+ */
+ if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) ||
+ (ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE)) {
+
+ if (ioc->spi_data.minSyncFactor < MPT_ULTRA) {
+ ioc->spi_data.minSyncFactor = MPT_ULTRA;
+ ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "HVD or SE detected, minSyncFactor=%x\n",
+ ioc->name, ioc->spi_data.minSyncFactor));
+ }
+ }
+ }
+ if (pbuf) {
+ pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
+ }
+ }
+ }
+
+ /* SCSI Port Page 2 - Read the header then the page.
+ */
+ header.PageVersion = 0;
+ header.PageLength = 0;
+ header.PageNumber = 2;
+ header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
+ cfg.cfghdr.hdr = &header;
+ cfg.physAddr = -1;
+ cfg.pageAddr = portnum;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ if (mpt_config(ioc, &cfg) != 0)
+ return -EFAULT;
+
+ if (header.PageLength > 0) {
+ /* Allocate memory and read SCSI Port Page 2
+ */
+ pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
+ if (pbuf) {
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM;
+ cfg.physAddr = buf_dma;
+ if (mpt_config(ioc, &cfg) != 0) {
+ /* Nvram data is left with INVALID mark
+ */
+ rc = 1;
+ } else if (ioc->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
+
+ /* This is an ATTO adapter, read Page2 accordingly
+ */
+ ATTO_SCSIPortPage2_t *pPP2 = (ATTO_SCSIPortPage2_t *) pbuf;
+ ATTODeviceInfo_t *pdevice = NULL;
+ u16 ATTOFlags;
+
+ /* Save the Port Page 2 data
+ * (reformat into a 32bit quantity)
+ */
+ for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
+ pdevice = &pPP2->DeviceSettings[ii];
+ ATTOFlags = le16_to_cpu(pdevice->ATTOFlags);
+ data = 0;
+
+ /* Translate ATTO device flags to LSI format
+ */
+ if (ATTOFlags & ATTOFLAG_DISC)
+ data |= (MPI_SCSIPORTPAGE2_DEVICE_DISCONNECT_ENABLE);
+ if (ATTOFlags & ATTOFLAG_ID_ENB)
+ data |= (MPI_SCSIPORTPAGE2_DEVICE_ID_SCAN_ENABLE);
+ if (ATTOFlags & ATTOFLAG_LUN_ENB)
+ data |= (MPI_SCSIPORTPAGE2_DEVICE_LUN_SCAN_ENABLE);
+ if (ATTOFlags & ATTOFLAG_TAGGED)
+ data |= (MPI_SCSIPORTPAGE2_DEVICE_TAG_QUEUE_ENABLE);
+ if (!(ATTOFlags & ATTOFLAG_WIDE_ENB))
+ data |= (MPI_SCSIPORTPAGE2_DEVICE_WIDE_DISABLE);
+
+ data = (data << 16) | (pdevice->Period << 8) | 10;
+ ioc->spi_data.nvram[ii] = data;
+ }
+ } else {
+ SCSIPortPage2_t *pPP2 = (SCSIPortPage2_t *) pbuf;
+ MpiDeviceInfo_t *pdevice = NULL;
+
+ /*
+ * Save "Set to Avoid SCSI Bus Resets" flag
+ */
+ ioc->spi_data.bus_reset =
+ (le32_to_cpu(pPP2->PortFlags) &
+ MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET) ?
+ 0 : 1 ;
+
+ /* Save the Port Page 2 data
+ * (reformat into a 32bit quantity)
+ */
+ data = le32_to_cpu(pPP2->PortFlags) & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
+ ioc->spi_data.PortFlags = data;
+ for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
+ pdevice = &pPP2->DeviceSettings[ii];
+ data = (le16_to_cpu(pdevice->DeviceFlags) << 16) |
+ (pdevice->SyncFactor << 8) | pdevice->Timeout;
+ ioc->spi_data.nvram[ii] = data;
+ }
+ }
+
+ pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
+ }
+ }
+
+ /* Update Adapter limits with those from NVRAM
+ * Comment: Don't need to do this. Target performance
+ * parameters will never exceed the adapters limits.
+ */
+
+ return rc;
+}
+
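+/*
+ * Editor's note -- illustrative sketch, not part of the original driver.
+ * Each ioc->spi_data.nvram[] entry built above packs three fields into a
+ * single 32-bit value: device flags in bits 31:16, the sync factor (or,
+ * for ATTO adapters, the period) in bits 15:8, and the timeout in bits
+ * 7:0.  A hypothetical decode helper would therefore look like:
+ *
+ *	static void decode_nvram_entry(int entry, u16 *flags, u8 *sync,
+ *	    u8 *timeout)
+ *	{
+ *		*flags   = (entry >> 16) & 0xFFFF;
+ *		*sync    = (entry >> 8) & 0xFF;
+ *		*timeout = entry & 0xFF;
+ *	}
+ */
+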
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_readScsiDevicePageHeaders - save version and length of SDP1 and SDP0
+ * @ioc: Pointer to an Adapter Structure
+ * @portnum: IOC port number
+ *
+ * Return: -EFAULT if read of config page header fails
+ * or 0 if success.
+ */
+static int
+mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t header;
+
+ /* Read the SCSI Device Page 1 header
+ */
+ header.PageVersion = 0;
+ header.PageLength = 0;
+ header.PageNumber = 1;
+ header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+ cfg.cfghdr.hdr = &header;
+ cfg.physAddr = -1;
+ cfg.pageAddr = portnum;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.timeout = 0;
+ if (mpt_config(ioc, &cfg) != 0)
+ return -EFAULT;
+
+ ioc->spi_data.sdp1version = cfg.cfghdr.hdr->PageVersion;
+ ioc->spi_data.sdp1length = cfg.cfghdr.hdr->PageLength;
+
+ header.PageVersion = 0;
+ header.PageLength = 0;
+ header.PageNumber = 0;
+ header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+ if (mpt_config(ioc, &cfg) != 0)
+ return -EFAULT;
+
+ ioc->spi_data.sdp0version = cfg.cfghdr.hdr->PageVersion;
+ ioc->spi_data.sdp0length = cfg.cfghdr.hdr->PageLength;
+
+ dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Headers: 0: version %d length %d\n",
+ ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length));
+
+ dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Headers: 1: version %d length %d\n",
+ ioc->name, ioc->spi_data.sdp1version, ioc->spi_data.sdp1length));
+ return 0;
+}
+
+/**
+ * mpt_inactive_raid_list_free - Clears the list of inactive RAID components.
+ * @ioc : pointer to per adapter structure
+ **/
+static void
+mpt_inactive_raid_list_free(MPT_ADAPTER *ioc)
+{
+ struct inactive_raid_component_info *component_info, *pNext;
+
+ if (list_empty(&ioc->raid_data.inactive_list))
+ return;
+
+ mutex_lock(&ioc->raid_data.inactive_list_mutex);
+ list_for_each_entry_safe(component_info, pNext,
+ &ioc->raid_data.inactive_list, list) {
+ list_del(&component_info->list);
+ kfree(component_info);
+ }
+ mutex_unlock(&ioc->raid_data.inactive_list_mutex);
+}
+
+/**
+ * mpt_inactive_raid_volumes - sets up a linked list of phys disk numbers for devices belonging to an inactive volume
+ *
+ * @ioc : pointer to per adapter structure
+ * @channel : volume channel
+ * @id : volume target id
+ **/
+static void
+mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ dma_addr_t dma_handle;
+ pRaidVolumePage0_t buffer = NULL;
+ int i;
+ RaidPhysDiskPage0_t phys_disk;
+ struct inactive_raid_component_info *component_info;
+ int handle_inactive_volumes;
+
+ memset(&cfg, 0 , sizeof(CONFIGPARMS));
+ memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+ hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
+ cfg.pageAddr = (channel << 8) + id;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ if (!hdr.PageLength)
+ goto out;
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+ &dma_handle);
+
+ if (!buffer)
+ goto out;
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ if (!buffer->NumPhysDisks)
+ goto out;
+
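+	/* Treat the volume as inactive when it is flagged inactive or not
+	 * enabled, or when its state is FAILED or MISSING.
+	 */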
+ handle_inactive_volumes =
+ (buffer->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE ||
+ (buffer->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED) == 0 ||
+ buffer->VolumeStatus.State == MPI_RAIDVOL0_STATUS_STATE_FAILED ||
+ buffer->VolumeStatus.State == MPI_RAIDVOL0_STATUS_STATE_MISSING) ? 1 : 0;
+
+ if (!handle_inactive_volumes)
+ goto out;
+
+ mutex_lock(&ioc->raid_data.inactive_list_mutex);
+ for (i = 0; i < buffer->NumPhysDisks; i++) {
+ if(mpt_raid_phys_disk_pg0(ioc,
+ buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
+ continue;
+
+ if ((component_info = kmalloc(sizeof (*component_info),
+ GFP_KERNEL)) == NULL)
+ continue;
+
+ component_info->volumeID = id;
+ component_info->volumeBus = channel;
+ component_info->d.PhysDiskNum = phys_disk.PhysDiskNum;
+ component_info->d.PhysDiskBus = phys_disk.PhysDiskBus;
+ component_info->d.PhysDiskID = phys_disk.PhysDiskID;
+ component_info->d.PhysDiskIOC = phys_disk.PhysDiskIOC;
+
+ list_add_tail(&component_info->list,
+ &ioc->raid_data.inactive_list);
+ }
+ mutex_unlock(&ioc->raid_data.inactive_list_mutex);
+
+ out:
+ if (buffer)
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+ dma_handle);
+}
+
+/**
+ * mpt_raid_phys_disk_pg0 - returns phys disk page zero
+ * @ioc: Pointer to an Adapter Structure
+ * @phys_disk_num: io unit unique phys disk num generated by the ioc
+ * @phys_disk: requested payload data returned
+ *
+ * Return:
+ * 0 on success
+ * -EFAULT if read of config page header fails or data pointer not NULL
+ * -ENOMEM if pci_alloc failed
+ **/
+int
+mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
+ RaidPhysDiskPage0_t *phys_disk)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ dma_addr_t dma_handle;
+ pRaidPhysDiskPage0_t buffer = NULL;
+ int rc;
+
+ memset(&cfg, 0 , sizeof(CONFIGPARMS));
+ memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+ memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t));
+
+ hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+ if (mpt_config(ioc, &cfg) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (!hdr.PageLength) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+ &dma_handle);
+
+ if (!buffer) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ cfg.pageAddr = phys_disk_num;
+
+ if (mpt_config(ioc, &cfg) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ rc = 0;
+ memcpy(phys_disk, buffer, sizeof(*buffer));
+ phys_disk->MaxLBA = le32_to_cpu(buffer->MaxLBA);
+
+ out:
+
+ if (buffer)
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+ dma_handle);
+
+ return rc;
+}
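+
+/*
+ * Editor's note -- illustrative usage sketch, not part of the original
+ * driver.  Callers pass a RaidPhysDiskPage0_t on the stack, as
+ * mpt_inactive_raid_volumes() does above; "ioc" and "disk_num" are assumed
+ * to come from the caller:
+ *
+ *	RaidPhysDiskPage0_t pg0;
+ *
+ *	if (mpt_raid_phys_disk_pg0(ioc, disk_num, &pg0) == 0)
+ *		printk(MYIOC_s_INFO_FMT "PhysDisk %d: bus %d, id %d\n",
+ *		    ioc->name, disk_num, pg0.PhysDiskBus, pg0.PhysDiskID);
+ */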
+
+/**
+ * mpt_raid_phys_disk_get_num_paths - returns the number of paths associated with this phys disk num
+ * @ioc: Pointer to an Adapter Structure
+ * @phys_disk_num: io unit unique phys disk num generated by the ioc
+ *
+ * Return:
+ *	the number of paths, or 0 on failure
+ **/
+int
+mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ dma_addr_t dma_handle;
+ pRaidPhysDiskPage1_t buffer = NULL;
+ int rc;
+
+ memset(&cfg, 0 , sizeof(CONFIGPARMS));
+ memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+
+ hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ hdr.PageNumber = 1;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+ if (mpt_config(ioc, &cfg) != 0) {
+ rc = 0;
+ goto out;
+ }
+
+ if (!hdr.PageLength) {
+ rc = 0;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+ &dma_handle);
+
+ if (!buffer) {
+ rc = 0;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ cfg.pageAddr = phys_disk_num;
+
+ if (mpt_config(ioc, &cfg) != 0) {
+ rc = 0;
+ goto out;
+ }
+
+ rc = buffer->NumPhysDiskPaths;
+ out:
+
+ if (buffer)
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+ dma_handle);
+
+ return rc;
+}
+EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths);
+
+/**
+ * mpt_raid_phys_disk_pg1 - returns phys disk page 1
+ * @ioc: Pointer to an Adapter Structure
+ * @phys_disk_num: io unit unique phys disk num generated by the ioc
+ * @phys_disk: requested payload data returned
+ *
+ * Return:
+ * 0 on success
+ * -EFAULT if read of config page header fails or data pointer not NULL
+ * -ENOMEM if pci_alloc failed
+ **/
+int
+mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
+ RaidPhysDiskPage1_t *phys_disk)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ dma_addr_t dma_handle;
+ pRaidPhysDiskPage1_t buffer = NULL;
+ int rc;
+ int i;
+ __le64 sas_address;
+
+ memset(&cfg, 0 , sizeof(CONFIGPARMS));
+ memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+ rc = 0;
+
+ hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ hdr.PageNumber = 1;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+ if (mpt_config(ioc, &cfg) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (!hdr.PageLength) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+ &dma_handle);
+
+ if (!buffer) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ cfg.pageAddr = phys_disk_num;
+
+ if (mpt_config(ioc, &cfg) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths;
+ phys_disk->PhysDiskNum = phys_disk_num;
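+	/* Copy each path, converting the 64-bit WWIDs from their
+	 * little-endian wire format to CPU byte order.
+	 */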
+ for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) {
+ phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID;
+ phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus;
+ phys_disk->Path[i].OwnerIdentifier =
+ buffer->Path[i].OwnerIdentifier;
+ phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags);
+ memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64));
+ sas_address = le64_to_cpu(sas_address);
+ memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64));
+ memcpy(&sas_address,
+ &buffer->Path[i].OwnerWWID, sizeof(__le64));
+ sas_address = le64_to_cpu(sas_address);
+ memcpy(&phys_disk->Path[i].OwnerWWID,
+ &sas_address, sizeof(__le64));
+ }
+
+ out:
+
+ if (buffer)
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+ dma_handle);
+
+ return rc;
+}
+EXPORT_SYMBOL(mpt_raid_phys_disk_pg1);
+
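+/*
+ * Editor's note -- illustrative usage sketch, not part of the original
+ * driver.  mpt_raid_phys_disk_get_num_paths() tells the caller how large a
+ * RaidPhysDiskPage1_t buffer to allocate before calling
+ * mpt_raid_phys_disk_pg1().  "ioc" and "disk_num" come from the caller, and
+ * the RAID_PHYS_DISK1_PATH element type is assumed from the MPI config
+ * headers:
+ *
+ *	RaidPhysDiskPage1_t *pg1;
+ *	int i, num_paths, sz;
+ *
+ *	num_paths = mpt_raid_phys_disk_get_num_paths(ioc, disk_num);
+ *	if (!num_paths)
+ *		return;
+ *	sz = offsetof(RaidPhysDiskPage1_t, Path) +
+ *	    num_paths * sizeof(RAID_PHYS_DISK1_PATH);
+ *	pg1 = kzalloc(sz, GFP_KERNEL);
+ *	if (pg1 && mpt_raid_phys_disk_pg1(ioc, disk_num, pg1) == 0)
+ *		for (i = 0; i < pg1->NumPhysDiskPaths; i++)
+ *			printk(MYIOC_s_INFO_FMT "path %d: id=%d bus=%d\n",
+ *			    ioc->name, i, pg1->Path[i].PhysDiskID,
+ *			    pg1->Path[i].PhysDiskBus);
+ *	kfree(pg1);
+ */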
+
+/**
+ * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
+ * @ioc: Pointer to an Adapter Structure
+ *
+ * Return:
+ * 0 on success
+ * -EFAULT if read of config page header fails or data pointer not NULL
+ * -ENOMEM if pci_alloc failed
+ **/
+int
+mpt_findImVolumes(MPT_ADAPTER *ioc)
+{
+ IOCPage2_t *pIoc2;
+ u8 *mem;
+ dma_addr_t ioc2_dma;
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t header;
+ int rc = 0;
+ int iocpage2sz;
+ int i;
+
+ if (!ioc->ir_firmware)
+ return 0;
+
+ /* Free the old page
+ */
+ kfree(ioc->raid_data.pIocPg2);
+ ioc->raid_data.pIocPg2 = NULL;
+ mpt_inactive_raid_list_free(ioc);
+
+ /* Read IOCP2 header then the page.
+ */
+ header.PageVersion = 0;
+ header.PageLength = 0;
+ header.PageNumber = 2;
+ header.PageType = MPI_CONFIG_PAGETYPE_IOC;
+ cfg.cfghdr.hdr = &header;
+ cfg.physAddr = -1;
+ cfg.pageAddr = 0;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.timeout = 0;
+ if (mpt_config(ioc, &cfg) != 0)
+ return -EFAULT;
+
+ if (header.PageLength == 0)
+ return -EFAULT;
+
+ iocpage2sz = header.PageLength * 4;
+ pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma);
+ if (!pIoc2)
+ return -ENOMEM;
+
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ cfg.physAddr = ioc2_dma;
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ mem = kmalloc(iocpage2sz, GFP_KERNEL);
+ if (!mem) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(mem, (u8 *)pIoc2, iocpage2sz);
+ ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;
+
+ mpt_read_ioc_pg_3(ioc);
+
+ for (i = 0; i < pIoc2->NumActiveVolumes ; i++)
+ mpt_inactive_raid_volumes(ioc,
+ pIoc2->RaidVolume[i].VolumeBus,
+ pIoc2->RaidVolume[i].VolumeID);
+
+ out:
+ pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma);
+
+ return rc;
+}
+
+static int
+mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
+{
+ IOCPage3_t *pIoc3;
+ u8 *mem;
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t header;
+ dma_addr_t ioc3_dma;
+ int iocpage3sz = 0;
+
+ /* Free the old page
+ */
+ kfree(ioc->raid_data.pIocPg3);
+ ioc->raid_data.pIocPg3 = NULL;
+
+ /* There is at least one physical disk.
+ * Read and save IOC Page 3
+ */
+ header.PageVersion = 0;
+ header.PageLength = 0;
+ header.PageNumber = 3;
+ header.PageType = MPI_CONFIG_PAGETYPE_IOC;
+ cfg.cfghdr.hdr = &header;
+ cfg.physAddr = -1;
+ cfg.pageAddr = 0;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.timeout = 0;
+ if (mpt_config(ioc, &cfg) != 0)
+ return 0;
+
+ if (header.PageLength == 0)
+ return 0;
+
+ /* Read Header good, alloc memory
+ */
+ iocpage3sz = header.PageLength * 4;
+ pIoc3 = pci_alloc_consistent(ioc->pcidev, iocpage3sz, &ioc3_dma);
+ if (!pIoc3)
+ return 0;
+
+ /* Read the Page and save the data
+ * into malloc'd memory.
+ */
+ cfg.physAddr = ioc3_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ if (mpt_config(ioc, &cfg) == 0) {
+ mem = kmalloc(iocpage3sz, GFP_KERNEL);
+ if (mem) {
+ memcpy(mem, (u8 *)pIoc3, iocpage3sz);
+ ioc->raid_data.pIocPg3 = (IOCPage3_t *) mem;
+ }
+ }
+
+ pci_free_consistent(ioc->pcidev, iocpage3sz, pIoc3, ioc3_dma);
+
+ return 0;
+}
+
+static void
+mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
+{
+ IOCPage4_t *pIoc4;
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t header;
+ dma_addr_t ioc4_dma;
+ int iocpage4sz;
+
+ /* Read and save IOC Page 4
+ */
+ header.PageVersion = 0;
+ header.PageLength = 0;
+ header.PageNumber = 4;
+ header.PageType = MPI_CONFIG_PAGETYPE_IOC;
+ cfg.cfghdr.hdr = &header;
+ cfg.physAddr = -1;
+ cfg.pageAddr = 0;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.timeout = 0;
+ if (mpt_config(ioc, &cfg) != 0)
+ return;
+
+ if (header.PageLength == 0)
+ return;
+
+ if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) {
+ iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEP's */
+ pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
+ if (!pIoc4)
+ return;
+ ioc->alloc_total += iocpage4sz;
+ } else {
+ ioc4_dma = ioc->spi_data.IocPg4_dma;
+ iocpage4sz = ioc->spi_data.IocPg4Sz;
+ }
+
+ /* Read the Page into dma memory.
+ */
+ cfg.physAddr = ioc4_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ if (mpt_config(ioc, &cfg) == 0) {
+ ioc->spi_data.pIocPg4 = (IOCPage4_t *) pIoc4;
+ ioc->spi_data.IocPg4_dma = ioc4_dma;
+ ioc->spi_data.IocPg4Sz = iocpage4sz;
+ } else {
+ pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
+ ioc->spi_data.pIocPg4 = NULL;
+ ioc->alloc_total -= iocpage4sz;
+ }
+}
+
+static void
+mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
+{
+ IOCPage1_t *pIoc1;
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t header;
+ dma_addr_t ioc1_dma;
+ int iocpage1sz = 0;
+ u32 tmp;
+
+ /* Check the Coalescing Timeout in IOC Page 1
+ */
+ header.PageVersion = 0;
+ header.PageLength = 0;
+ header.PageNumber = 1;
+ header.PageType = MPI_CONFIG_PAGETYPE_IOC;
+ cfg.cfghdr.hdr = &header;
+ cfg.physAddr = -1;
+ cfg.pageAddr = 0;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.timeout = 0;
+ if (mpt_config(ioc, &cfg) != 0)
+ return;
+
+ if (header.PageLength == 0)
+ return;
+
+ /* Read Header good, alloc memory
+ */
+ iocpage1sz = header.PageLength * 4;
+ pIoc1 = pci_alloc_consistent(ioc->pcidev, iocpage1sz, &ioc1_dma);
+ if (!pIoc1)
+ return;
+
+ /* Read the Page and check coalescing timeout
+ */
+ cfg.physAddr = ioc1_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ if (mpt_config(ioc, &cfg) == 0) {
+
+ tmp = le32_to_cpu(pIoc1->Flags) & MPI_IOCPAGE1_REPLY_COALESCING;
+ if (tmp == MPI_IOCPAGE1_REPLY_COALESCING) {
+ tmp = le32_to_cpu(pIoc1->CoalescingTimeout);
+
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Coalescing Enabled Timeout = %d\n",
+ ioc->name, tmp));
+
+ if (tmp > MPT_COALESCING_TIMEOUT) {
+ pIoc1->CoalescingTimeout = cpu_to_le32(MPT_COALESCING_TIMEOUT);
+
+ /* Write NVRAM and current
+ */
+ cfg.dir = 1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ if (mpt_config(ioc, &cfg) == 0) {
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Reset Current Coalescing Timeout to = %d\n",
+ ioc->name, MPT_COALESCING_TIMEOUT));
+
+ cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ if (mpt_config(ioc, &cfg) == 0) {
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Reset NVRAM Coalescing Timeout to = %d\n",
+ ioc->name, MPT_COALESCING_TIMEOUT));
+ } else {
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Reset NVRAM Coalescing Timeout Failed\n",
+ ioc->name));
+ }
+
+ } else {
+ dprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "Reset of Current Coalescing Timeout Failed!\n",
+ ioc->name));
+ }
+ }
+
+ } else {
+ dprintk(ioc, printk(MYIOC_s_WARN_FMT "Coalescing Disabled\n", ioc->name));
+ }
+ }
+
+ pci_free_consistent(ioc->pcidev, iocpage1sz, pIoc1, ioc1_dma);
+
+ return;
+}
+
+static void
+mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ dma_addr_t buf_dma;
+ ManufacturingPage0_t *pbuf = NULL;
+
+ memset(&cfg, 0 , sizeof(CONFIGPARMS));
+ memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+
+ hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.timeout = 10;
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ if (!cfg.cfghdr.hdr->PageLength)
+ goto out;
+
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
+ if (!pbuf)
+ goto out;
+
+ cfg.physAddr = buf_dma;
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ memcpy(ioc->board_name, pbuf->BoardName, sizeof(ioc->board_name));
+ memcpy(ioc->board_assembly, pbuf->BoardAssembly, sizeof(ioc->board_assembly));
+ memcpy(ioc->board_tracer, pbuf->BoardTracerNumber, sizeof(ioc->board_tracer));
+
+ out:
+
+ if (pbuf)
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * SendEventNotification - Send EventNotification (on or off) request to adapter
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @EvSwitch: Event switch flags
+ * @sleepFlag: Specifies whether the process can sleep
+ */
+static int
+SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag)
+{
+ EventNotification_t evn;
+ MPIDefaultReply_t reply_buf;
+
+ memset(&evn, 0, sizeof(EventNotification_t));
+ memset(&reply_buf, 0, sizeof(MPIDefaultReply_t));
+
+ evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
+ evn.Switch = EvSwitch;
+ evn.MsgContext = cpu_to_le32(mpt_base_index << 16);
+
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Sending EventNotification (%d) request %p\n",
+ ioc->name, EvSwitch, &evn));
+
+ return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t),
+ (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30,
+ sleepFlag);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * SendEventAck - Send EventAck request to MPT adapter.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @evnp: Pointer to original EventNotification request
+ */
+static int
+SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
+{
+ EventAck_t *pAck;
+
+ if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
+ ioc->name, __func__));
+ return -1;
+ }
+
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventAck\n", ioc->name));
+
+ pAck->Function = MPI_FUNCTION_EVENT_ACK;
+ pAck->ChainOffset = 0;
+ pAck->Reserved[0] = pAck->Reserved[1] = 0;
+ pAck->MsgFlags = 0;
+ pAck->Reserved1[0] = pAck->Reserved1[1] = pAck->Reserved1[2] = 0;
+ pAck->Event = evnp->Event;
+ pAck->EventContext = evnp->EventContext;
+
+ mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)pAck);
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_config - Generic function to issue config message
+ * @ioc: Pointer to an adapter structure
+ * @pCfg: Pointer to a configuration structure. Struct contains
+ * action, page address, direction, physical address
+ * and pointer to a configuration page header
+ * Page header is updated.
+ *
+ * Returns 0 for success
+ * -EPERM if not allowed due to ISR context
+ * -EAGAIN if no msg frames currently available
+ * -EFAULT for non-successful reply or no reply (timeout)
+ */
+int
+mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
+{
+ Config_t *pReq;
+ ConfigReply_t *pReply;
+ ConfigExtendedPageHeader_t *pExtHdr = NULL;
+ MPT_FRAME_HDR *mf;
+ int ii;
+ int flagsLength;
+ long timeout;
+ int ret;
+ u8 page_type = 0, extend_page;
+ unsigned long timeleft;
+ unsigned long flags;
+ int in_isr;
+ u8 issue_hard_reset = 0;
+ u8 retry_count = 0;
+
+ /* Prevent calling wait_event() (below), if caller happens
+ * to be in ISR context, because that is fatal!
+ */
+ in_isr = in_interrupt();
+ if (in_isr) {
+ dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
+ ioc->name));
+ return -EPERM;
+ }
+
+ /* don't send a config page during diag reset */
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: busy with host reset\n", ioc->name, __func__));
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ /* don't send if no chance of success */
+ if (!ioc->active ||
+ mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) {
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: ioc not operational, %d, %xh\n",
+ ioc->name, __func__, ioc->active,
+ mpt_GetIocState(ioc, 0)));
+ return -EFAULT;
+ }
+
+ retry_config:
+ mutex_lock(&ioc->mptbase_cmds.mutex);
+ /* init the internal cmd struct */
+ memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
+ INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
+
+ /* Get and Populate a free Frame
+ */
+ if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
+ dcprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "mpt_config: no msg frames!\n", ioc->name));
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ pReq = (Config_t *)mf;
+ pReq->Action = pCfg->action;
+ pReq->Reserved = 0;
+ pReq->ChainOffset = 0;
+ pReq->Function = MPI_FUNCTION_CONFIG;
+
+ /* Assume page type is not extended and clear "reserved" fields. */
+ pReq->ExtPageLength = 0;
+ pReq->ExtPageType = 0;
+ pReq->MsgFlags = 0;
+
+ for (ii=0; ii < 8; ii++)
+ pReq->Reserved2[ii] = 0;
+
+ pReq->Header.PageVersion = pCfg->cfghdr.hdr->PageVersion;
+ pReq->Header.PageLength = pCfg->cfghdr.hdr->PageLength;
+ pReq->Header.PageNumber = pCfg->cfghdr.hdr->PageNumber;
+ pReq->Header.PageType = (pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
+
+ if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
+ pExtHdr = (ConfigExtendedPageHeader_t *)pCfg->cfghdr.ehdr;
+ pReq->ExtPageLength = cpu_to_le16(pExtHdr->ExtPageLength);
+ pReq->ExtPageType = pExtHdr->ExtPageType;
+ pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+
+ /* Page Length must be treated as a reserved field for the
+ * extended header.
+ */
+ pReq->Header.PageLength = 0;
+ }
+
+ pReq->PageAddress = cpu_to_le32(pCfg->pageAddr);
+
+ /* Add a SGE to the config request.
+ */
+ if (pCfg->dir)
+ flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
+ else
+ flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
+
+ if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
+ MPI_CONFIG_PAGETYPE_EXTENDED) {
+ flagsLength |= pExtHdr->ExtPageLength * 4;
+ page_type = pReq->ExtPageType;
+ extend_page = 1;
+ } else {
+ flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
+ page_type = pReq->Header.PageType;
+ extend_page = 0;
+ }
+
+ dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Sending Config request type 0x%x, page 0x%x and action %d\n",
+ ioc->name, page_type, pReq->Header.PageNumber, pReq->Action));
+
+ ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
+ timeout = (pCfg->timeout < 15) ? HZ*15 : HZ*pCfg->timeout;
+ mpt_put_msg_frame(mpt_base_index, ioc, mf);
+ timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
+ timeout);
+ if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Failed Sending Config request type 0x%x, page 0x%x,"
+ " action %d, status %xh, time left %ld\n\n",
+ ioc->name, page_type, pReq->Header.PageNumber,
+ pReq->Action, ioc->mptbase_cmds.status, timeleft));
+ if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out;
+ if (!timeleft)
+ issue_hard_reset = 1;
+ goto out;
+ }
+
+ if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ ret = -1;
+ goto out;
+ }
+ pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply;
+ ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+ if (ret == MPI_IOCSTATUS_SUCCESS) {
+ if (extend_page) {
+ pCfg->cfghdr.ehdr->ExtPageLength =
+ le16_to_cpu(pReply->ExtPageLength);
+ pCfg->cfghdr.ehdr->ExtPageType =
+ pReply->ExtPageType;
+ }
+ pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
+ pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
+ pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
+ pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
+
+ }
+
+ if (retry_count)
+ printk(MYIOC_s_INFO_FMT "Retry completed "
+ "ret=0x%x timeleft=%ld\n",
+ ioc->name, ret, timeleft);
+
+ dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n",
+ ret, le32_to_cpu(pReply->IOCLogInfo)));
+
+out:
+
+ CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
+ mutex_unlock(&ioc->mptbase_cmds.mutex);
+ if (issue_hard_reset) {
+ issue_hard_reset = 0;
+ printk(MYIOC_s_WARN_FMT
+ "Issuing Reset from %s!!, doorbell=0x%08x\n",
+ ioc->name, __func__, mpt_GetIocState(ioc, 0));
+ if (retry_count == 0) {
+ if (mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP) != 0)
+ retry_count++;
+ } else
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+
+ mpt_free_msg_frame(ioc, mf);
+ /* attempt one retry for a timed out command */
+ if (retry_count < 2) {
+ printk(MYIOC_s_INFO_FMT
+ "Attempting Retry Config request"
+ " type 0x%x, page 0x%x,"
+ " action %d\n", ioc->name, page_type,
+ pCfg->cfghdr.hdr->PageNumber, pCfg->action);
+ retry_count++;
+ goto retry_config;
+ }
+ }
+ return ret;
+
+}
+
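+/*
+ * Editor's note -- illustrative sketch, not part of the original driver.
+ * Nearly every config page access in this file follows the same two-step
+ * pattern around mpt_config(): fetch the page header to learn the page
+ * length, allocate a DMA-able buffer of PageLength * 4 bytes, then reissue
+ * the request with a READ_CURRENT action.  Schematically, using IOC Page 1
+ * as an example and "caller_copy" as a hypothetical destination buffer:
+ *
+ *	ConfigPageHeader_t hdr = { .PageNumber = 1,
+ *				   .PageType = MPI_CONFIG_PAGETYPE_IOC };
+ *	CONFIGPARMS cfg = { .cfghdr.hdr = &hdr, .physAddr = -1,
+ *			    .action = MPI_CONFIG_ACTION_PAGE_HEADER };
+ *	dma_addr_t dma;
+ *	void *buf;
+ *
+ *	if (mpt_config(ioc, &cfg) != 0 || !hdr.PageLength)
+ *		return;
+ *	buf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &dma);
+ *	if (!buf)
+ *		return;
+ *	cfg.physAddr = dma;
+ *	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ *	if (mpt_config(ioc, &cfg) == 0)
+ *		memcpy(caller_copy, buf, hdr.PageLength * 4);
+ *	pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buf, dma);
+ */
+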
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_ioc_reset - Base cleanup for hard reset
+ * @ioc: Pointer to the adapter structure
+ * @reset_phase: Indicates pre- or post-reset functionality
+ *
+ * Remark: Frees resources with internally generated commands.
+ */
+static int
+mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+ switch (reset_phase) {
+ case MPT_IOC_SETUP_RESET:
+ ioc->taskmgmt_quiesce_io = 1;
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+ break;
+ case MPT_IOC_PRE_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT_IOC_POST_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+/* wake up mptbase_cmds */
+ if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->mptbase_cmds.status |=
+ MPT_MGMT_STATUS_DID_IOCRESET;
+ complete(&ioc->mptbase_cmds.done);
+ }
+/* wake up taskmgmt_cmds */
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->taskmgmt_cmds.status |=
+ MPT_MGMT_STATUS_DID_IOCRESET;
+ complete(&ioc->taskmgmt_cmds.done);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 1; /* currently means nothing really */
+}
+
+
+#ifdef CONFIG_PROC_FS /* { */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff...
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+procmpt_create(void)
+{
+ mpt_proc_root_dir = proc_mkdir(MPT_PROCFS_MPTBASEDIR, NULL);
+ if (mpt_proc_root_dir == NULL)
+ return -ENOTDIR;
+
+ proc_create("summary", S_IRUGO, mpt_proc_root_dir, &mpt_summary_proc_fops);
+ proc_create("version", S_IRUGO, mpt_proc_root_dir, &mpt_version_proc_fops);
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * procmpt_destroy - Tear down %MPT_PROCFS_MPTBASEDIR entries.
+ */
+static void
+procmpt_destroy(void)
+{
+ remove_proc_entry("version", mpt_proc_root_dir);
+ remove_proc_entry("summary", mpt_proc_root_dir);
+ remove_proc_entry(MPT_PROCFS_MPTBASEDIR, NULL);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Handles read request from /proc/mpt/summary or /proc/mpt/iocN/summary.
+ */
+static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan);
+
+static int mpt_summary_proc_show(struct seq_file *m, void *v)
+{
+ MPT_ADAPTER *ioc = m->private;
+
+ if (ioc) {
+ seq_mpt_print_ioc_summary(ioc, m, 1);
+ } else {
+ list_for_each_entry(ioc, &ioc_list, list) {
+ seq_mpt_print_ioc_summary(ioc, m, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int mpt_summary_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mpt_summary_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations mpt_summary_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mpt_summary_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mpt_version_proc_show(struct seq_file *m, void *v)
+{
+ u8 cb_idx;
+ int scsi, fc, sas, lan, ctl, targ, dmp;
+ char *drvname;
+
+ seq_printf(m, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON);
+ seq_printf(m, " Fusion MPT base driver\n");
+
+ scsi = fc = sas = lan = ctl = targ = dmp = 0;
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ drvname = NULL;
+ if (MptCallbacks[cb_idx]) {
+ switch (MptDriverClass[cb_idx]) {
+ case MPTSPI_DRIVER:
+ if (!scsi++) drvname = "SPI host";
+ break;
+ case MPTFC_DRIVER:
+ if (!fc++) drvname = "FC host";
+ break;
+ case MPTSAS_DRIVER:
+ if (!sas++) drvname = "SAS host";
+ break;
+ case MPTLAN_DRIVER:
+ if (!lan++) drvname = "LAN";
+ break;
+ case MPTSTM_DRIVER:
+ if (!targ++) drvname = "SCSI target";
+ break;
+ case MPTCTL_DRIVER:
+ if (!ctl++) drvname = "ioctl";
+ break;
+ }
+
+ if (drvname)
+ seq_printf(m, " Fusion MPT %s driver\n", drvname);
+ }
+ }
+
+ return 0;
+}
+
+static int mpt_version_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mpt_version_proc_show, NULL);
+}
+
+static const struct file_operations mpt_version_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mpt_version_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
+{
+ MPT_ADAPTER *ioc = m->private;
+ char expVer[32];
+ int sz;
+ int p;
+
+ mpt_get_fw_exp_ver(expVer, ioc);
+
+ seq_printf(m, "%s:", ioc->name);
+ if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
+ seq_printf(m, " (f/w download boot flag set)");
+// if (ioc->facts.IOCExceptions & MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL)
+// seq_printf(m, " CONFIG_CHECKSUM_FAIL!");
+
+ seq_printf(m, "\n ProductID = 0x%04x (%s)\n",
+ ioc->facts.ProductID,
+ ioc->prod_name);
+ seq_printf(m, " FWVersion = 0x%08x%s", ioc->facts.FWVersion.Word, expVer);
+ if (ioc->facts.FWImageSize)
+ seq_printf(m, " (fw_size=%d)", ioc->facts.FWImageSize);
+ seq_printf(m, "\n MsgVersion = 0x%04x\n", ioc->facts.MsgVersion);
+ seq_printf(m, " FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit);
+ seq_printf(m, " EventState = 0x%02x\n", ioc->facts.EventState);
+
+ seq_printf(m, " CurrentHostMfaHighAddr = 0x%08x\n",
+ ioc->facts.CurrentHostMfaHighAddr);
+ seq_printf(m, " CurrentSenseBufferHighAddr = 0x%08x\n",
+ ioc->facts.CurrentSenseBufferHighAddr);
+
+ seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
+ seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
+
+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
+ (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
+ /*
+ * Rounding UP to nearest 4-kB boundary here...
+ */
+ sz = (ioc->req_sz * ioc->req_depth) + 128;
+ sz = ((sz + 0x1000UL - 1UL) / 0x1000) * 0x1000;
+ seq_printf(m, " {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n",
+ ioc->req_sz, ioc->req_depth, ioc->req_sz*ioc->req_depth, sz);
+ seq_printf(m, " {MaxReqSz=%d} {MaxReqDepth=%d}\n",
+ 4*ioc->facts.RequestFrameSize,
+ ioc->facts.GlobalCredits);
+
+ seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
+ (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
+ sz = (ioc->reply_sz * ioc->reply_depth) + 128;
+ seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
+ ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
+ seq_printf(m, " {MaxRepSz=%d} {MaxRepDepth=%d}\n",
+ ioc->facts.CurReplyFrameSize,
+ ioc->facts.ReplyQueueDepth);
+
+ seq_printf(m, " MaxDevices = %d\n",
+ (ioc->facts.MaxDevices==0) ? 255 : ioc->facts.MaxDevices);
+ seq_printf(m, " MaxBuses = %d\n", ioc->facts.MaxBuses);
+
+ /* per-port info */
+ for (p=0; p < ioc->facts.NumberOfPorts; p++) {
+ seq_printf(m, " PortNumber = %d (of %d)\n",
+ p+1,
+ ioc->facts.NumberOfPorts);
+ if (ioc->bus_type == FC) {
+ if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
+ u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
+ seq_printf(m, " LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+ a[5], a[4], a[3], a[2], a[1], a[0]);
+ }
+ seq_printf(m, " WWN = %08X%08X:%08X%08X\n",
+ ioc->fc_port_page0[p].WWNN.High,
+ ioc->fc_port_page0[p].WWNN.Low,
+ ioc->fc_port_page0[p].WWPN.High,
+ ioc->fc_port_page0[p].WWPN.Low);
+ }
+ }
+
+ return 0;
+}
+
+static int mpt_iocinfo_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mpt_iocinfo_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations mpt_iocinfo_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = mpt_iocinfo_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_PROC_FS } */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static void
+mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc)
+{
+ buf[0] ='\0';
+ if ((ioc->facts.FWVersion.Word >> 24) == 0x0E) {
+ sprintf(buf, " (Exp %02d%02d)",
+ (ioc->facts.FWVersion.Word >> 16) & 0x00FF, /* Month */
+ (ioc->facts.FWVersion.Word >> 8) & 0x1F); /* Day */
+
+ /* insider hack! */
+ if ((ioc->facts.FWVersion.Word >> 8) & 0x80)
+ strcat(buf, " [MDBG]");
+ }
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_print_ioc_summary - Write ASCII summary of IOC to a buffer.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @buffer: Pointer to buffer where IOC summary info should be written
+ * @size: Pointer to number of bytes we wrote (set by this routine)
+ * @len: Offset at which to start writing in buffer
+ * @showlan: Display LAN stuff?
+ *
+ * This routine writes (English readable) ASCII text, which represents
+ * a summary of IOC information, to a buffer.
+ */
+void
+mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int showlan)
+{
+ char expVer[32];
+ int y;
+
+ mpt_get_fw_exp_ver(expVer, ioc);
+
+ /*
+ * Shorter summary of attached ioc's...
+ */
+ y = sprintf(buffer+len, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d",
+ ioc->name,
+ ioc->prod_name,
+ MPT_FW_REV_MAGIC_ID_STRING, /* "FwRev=" or somesuch */
+ ioc->facts.FWVersion.Word,
+ expVer,
+ ioc->facts.NumberOfPorts,
+ ioc->req_depth);
+
+ if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
+ u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
+ y += sprintf(buffer+len+y, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
+ a[5], a[4], a[3], a[2], a[1], a[0]);
+ }
+
+ y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq);
+
+ if (!ioc->active)
+ y += sprintf(buffer+len+y, " (disabled)");
+
+ y += sprintf(buffer+len+y, "\n");
+
+ *size = y;
+}
+
+static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan)
+{
+ char expVer[32];
+
+ mpt_get_fw_exp_ver(expVer, ioc);
+
+ /*
+ * Shorter summary of attached ioc's...
+ */
+ seq_printf(m, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d",
+ ioc->name,
+ ioc->prod_name,
+ MPT_FW_REV_MAGIC_ID_STRING, /* "FwRev=" or somesuch */
+ ioc->facts.FWVersion.Word,
+ expVer,
+ ioc->facts.NumberOfPorts,
+ ioc->req_depth);
+
+ if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
+ u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
+ seq_printf(m, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
+ a[5], a[4], a[3], a[2], a[1], a[0]);
+ }
+
+ seq_printf(m, ", IRQ=%d", ioc->pci_irq);
+
+ if (!ioc->active)
+ seq_printf(m, " (disabled)");
+
+ seq_putc(m, '\n');
+}
+
+/**
+ * mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ *
+ * If -1 is returned, it was not possible to set the flags.
+ **/
+int
+mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
+{
+ unsigned long flags;
+ int retval;
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress ||
+ (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) {
+ retval = -1;
+ goto out;
+ }
+ retval = 0;
+ ioc->taskmgmt_in_progress = 1;
+ ioc->taskmgmt_quiesce_io = 1;
+ if (ioc->alt_ioc) {
+ ioc->alt_ioc->taskmgmt_in_progress = 1;
+ ioc->alt_ioc->taskmgmt_quiesce_io = 1;
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return retval;
+}
+EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag);
+
+/**
+ * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+void
+mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ ioc->taskmgmt_in_progress = 0;
+ ioc->taskmgmt_quiesce_io = 0;
+ if (ioc->alt_ioc) {
+ ioc->alt_ioc->taskmgmt_in_progress = 0;
+ ioc->alt_ioc->taskmgmt_quiesce_io = 0;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+}
+EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag);
+
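+/*
+ * Editor's note -- illustrative usage sketch, not part of the original
+ * driver.  Protocol drivers bracket a task-management request with these
+ * two exported helpers so that task management and IOC resets are
+ * serialized.  "ioc" comes from the caller and issue_and_wait_for_taskmgmt()
+ * stands in for the driver's own request/wait logic (a hypothetical name):
+ *
+ *	if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
+ *		return -EBUSY;
+ *	issue_and_wait_for_taskmgmt(ioc);
+ *	mpt_clear_taskmgmt_in_progress_flag(ioc);
+ */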
+
+/**
+ * mpt_halt_firmware - Halts the firmware if it is operational and panics
+ * the kernel
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+void
+mpt_halt_firmware(MPT_ADAPTER *ioc)
+{
+ u32 ioc_raw_state;
+
+ ioc_raw_state = mpt_GetIocState(ioc, 0);
+
+ if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
+ printk(MYIOC_s_ERR_FMT "IOC is in FAULT state (%04xh)!!!\n",
+ ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
+ panic("%s: IOC Fault (%04xh)!!!\n", ioc->name,
+ ioc_raw_state & MPI_DOORBELL_DATA_MASK);
+ } else {
+ CHIPREG_WRITE32(&ioc->chip->Doorbell, 0xC0FFEE00);
+ panic("%s: Firmware is halted due to command timeout\n",
+ ioc->name);
+ }
+}
+EXPORT_SYMBOL(mpt_halt_firmware);
+
+/**
+ * mpt_SoftResetHandler - Issues a less expensive reset
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ *
+ * Message Unit Reset - instructs the IOC to reset the Reply Post and
+ * Free FIFOs. All the Message Frames on the Reply Free FIFO are discarded.
+ * All posted buffers are freed, and event notification is turned off.
+ * The IOC does not reply to any outstanding requests. This transfers the
+ * IOC to the READY state.
+ **/
+int
+mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
+{
+ int rc;
+ int ii;
+ u8 cb_idx;
+ unsigned long flags;
+ u32 ioc_state;
+ unsigned long time_count;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SoftResetHandler Entered!\n",
+ ioc->name));
+
+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ if (ioc_state == MPI_IOC_STATE_FAULT ||
+ ioc_state == MPI_IOC_STATE_RESET) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "skipping, either in FAULT or RESET state!\n", ioc->name));
+ return -1;
+ }
+
+ if (ioc->bus_type == FC) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "skipping, because the bus type is FC!\n", ioc->name));
+ return -1;
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -1;
+ }
+ ioc->ioc_reset_in_progress = 1;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ rc = -1;
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->taskmgmt_in_progress) {
+ ioc->ioc_reset_in_progress = 0;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return -1;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ /* Disable reply interrupts (also blocks FreeQ) */
+ CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+ ioc->active = 0;
+ time_count = jiffies;
+
+ rc = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET);
+ }
+
+ if (rc)
+ goto out;
+
+ ioc_state = mpt_GetIocState(ioc, 0) & MPI_IOC_STATE_MASK;
+ if (ioc_state != MPI_IOC_STATE_READY)
+ goto out;
+
+ for (ii = 0; ii < 5; ii++) {
+ /* Get IOC facts! Allow 5 retries */
+ rc = GetIocFacts(ioc, sleepFlag,
+ MPT_HOSTEVENT_IOC_RECOVER);
+ if (rc == 0)
+ break;
+ if (sleepFlag == CAN_SLEEP)
+ msleep(100);
+ else
+ mdelay(100);
+ }
+ if (ii == 5)
+ goto out;
+
+ rc = PrimeIocFifos(ioc);
+ if (rc != 0)
+ goto out;
+
+ rc = SendIocInit(ioc, sleepFlag);
+ if (rc != 0)
+ goto out;
+
+ rc = SendEventNotification(ioc, 1, sleepFlag);
+ if (rc != 0)
+ goto out;
+
+ if (ioc->hard_resets < -1)
+ ioc->hard_resets++;
+
+ /*
+ * At this point, we know soft reset succeeded.
+ */
+
+ ioc->active = 1;
+ CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
+
+ out:
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ ioc->ioc_reset_in_progress = 0;
+ ioc->taskmgmt_quiesce_io = 0;
+ ioc->taskmgmt_in_progress = 0;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ if (ioc->active) { /* otherwise, hard reset coming */
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx])
+ mpt_signal_reset(cb_idx, ioc,
+ MPT_IOC_POST_RESET);
+ }
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "SoftResetHandler: completed (%d seconds): %s\n",
+ ioc->name, jiffies_to_msecs(jiffies - time_count)/1000,
+ ((rc == 0) ? "SUCCESS" : "FAILED")));
+
+ return rc;
+}
+
+/**
+ * mpt_Soft_Hard_ResetHandler - Try less expensive reset
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ * Try a soft reset first; only if that fails, fall back to the more
+ * expensive hard reset.
+ **/
+int
+mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag) {
+ int ret = -1;
+
+ ret = mpt_SoftResetHandler(ioc, sleepFlag);
+ if (ret == 0)
+ return ret;
+ ret = mpt_HardResetHandler(ioc, sleepFlag);
+ return ret;
+}
+EXPORT_SYMBOL(mpt_Soft_Hard_ResetHandler);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Reset Handling
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_HardResetHandler - Generic reset handler
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ * Issues a diagnostic (hard) reset of the adapter, notifying each
+ * registered protocol driver before and after the recovery.
+ *
+ * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
+ * or a non-interrupt thread. In the former, must not call schedule().
+ *
+ * Note: A return of -1 is a FATAL error case, as it means a
+ * FW reload/initialization failed.
+ *
+ * Returns 0 for SUCCESS or -1 if FAILED.
+ */
+int
+mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
+{
+ int rc;
+ u8 cb_idx;
+ unsigned long flags;
+ unsigned long time_count;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name));
+#ifdef MFCNT
+ printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name);
+ printk("MF count 0x%x !\n", ioc->mfcnt);
+#endif
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ /* Reset the adapter. Prevent more than 1 call to
+ * mpt_do_ioc_recovery at any instant in time.
+ */
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return 0;
+ }
+ ioc->ioc_reset_in_progress = 1;
+ if (ioc->alt_ioc)
+ ioc->alt_ioc->ioc_reset_in_progress = 1;
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+
+ /* The SCSI driver needs to adjust timeouts on all current
+ * commands prior to the diagnostic reset being issued.
+ * Prevents timeouts occurring during a diagnostic reset...very bad.
+ * For all other protocol drivers, this is a no-op.
+ */
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx]) {
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
+ if (ioc->alt_ioc)
+ mpt_signal_reset(cb_idx, ioc->alt_ioc,
+ MPT_IOC_SETUP_RESET);
+ }
+ }
+
+ time_count = jiffies;
+ rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag);
+ if (rc != 0) {
+ printk(KERN_WARNING MYNAM
+ ": WARNING - (%d) Cannot recover %s, doorbell=0x%08x\n",
+ rc, ioc->name, mpt_GetIocState(ioc, 0));
+ } else {
+ if (ioc->hard_resets < -1)
+ ioc->hard_resets++;
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ ioc->ioc_reset_in_progress = 0;
+ ioc->taskmgmt_quiesce_io = 0;
+ ioc->taskmgmt_in_progress = 0;
+ if (ioc->alt_ioc) {
+ ioc->alt_ioc->ioc_reset_in_progress = 0;
+ ioc->alt_ioc->taskmgmt_quiesce_io = 0;
+ ioc->alt_ioc->taskmgmt_in_progress = 0;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptResetHandlers[cb_idx]) {
+ mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET);
+ if (ioc->alt_ioc)
+ mpt_signal_reset(cb_idx,
+ ioc->alt_ioc, MPT_IOC_POST_RESET);
+ }
+ }
+
+ dtmprintk(ioc,
+ printk(MYIOC_s_DEBUG_FMT
+ "HardResetHandler: completed (%d seconds): %s\n", ioc->name,
+ jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ?
+ "SUCCESS" : "FAILED")));
+
+ return rc;
+}
+
+#ifdef CONFIG_FUSION_LOGGING
+static void
+mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
+{
+ char *ds = NULL;
+ u32 evData0;
+ int ii;
+ u8 event;
+ char *evStr = ioc->evStr;
+
+ event = le32_to_cpu(pEventReply->Event) & 0xFF;
+ evData0 = le32_to_cpu(pEventReply->Data[0]);
+
+ switch(event) {
+ case MPI_EVENT_NONE:
+ ds = "None";
+ break;
+ case MPI_EVENT_LOG_DATA:
+ ds = "Log Data";
+ break;
+ case MPI_EVENT_STATE_CHANGE:
+ ds = "State Change";
+ break;
+ case MPI_EVENT_UNIT_ATTENTION:
+ ds = "Unit Attention";
+ break;
+ case MPI_EVENT_IOC_BUS_RESET:
+ ds = "IOC Bus Reset";
+ break;
+ case MPI_EVENT_EXT_BUS_RESET:
+ ds = "External Bus Reset";
+ break;
+ case MPI_EVENT_RESCAN:
+ ds = "Bus Rescan Event";
+ break;
+ case MPI_EVENT_LINK_STATUS_CHANGE:
+ if (evData0 == MPI_EVENT_LINK_STATUS_FAILURE)
+ ds = "Link Status(FAILURE) Change";
+ else
+ ds = "Link Status(ACTIVE) Change";
+ break;
+ case MPI_EVENT_LOOP_STATE_CHANGE:
+ if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
+ ds = "Loop State(LIP) Change";
+ else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
+ ds = "Loop State(LPE) Change";
+ else
+ ds = "Loop State(LPB) Change";
+ break;
+ case MPI_EVENT_LOGOUT:
+ ds = "Logout";
+ break;
+ case MPI_EVENT_EVENT_CHANGE:
+ if (evData0)
+ ds = "Events ON";
+ else
+ ds = "Events OFF";
+ break;
+ case MPI_EVENT_INTEGRATED_RAID:
+ {
+ u8 ReasonCode = (u8)(evData0 >> 16);
+ switch (ReasonCode) {
+ case MPI_EVENT_RAID_RC_VOLUME_CREATED :
+ ds = "Integrated Raid: Volume Created";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_DELETED :
+ ds = "Integrated Raid: Volume Deleted";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED :
+ ds = "Integrated Raid: Volume Settings Changed";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED :
+ ds = "Integrated Raid: Volume Status Changed";
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED :
+ ds = "Integrated Raid: Volume Physdisk Changed";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_CREATED :
+ ds = "Integrated Raid: Physdisk Created";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_DELETED :
+ ds = "Integrated Raid: Physdisk Deleted";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED :
+ ds = "Integrated Raid: Physdisk Settings Changed";
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED :
+ ds = "Integrated Raid: Physdisk Status Changed";
+ break;
+ case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED :
+ ds = "Integrated Raid: Domain Validation Needed";
+ break;
+ case MPI_EVENT_RAID_RC_SMART_DATA :
+			ds = "Integrated Raid: Smart Data";
+ break;
+ case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED :
+ ds = "Integrated Raid: Replace Action Started";
+ break;
+ default:
+ ds = "Integrated Raid";
+ break;
+ }
+ break;
+ }
+ case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE:
+ ds = "SCSI Device Status Change";
+ break;
+ case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ {
+ u8 id = (u8)(evData0);
+ u8 channel = (u8)(evData0 >> 8);
+ u8 ReasonCode = (u8)(evData0 >> 16);
+ switch (ReasonCode) {
+ case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Added: "
+ "id=%d channel=%d", id, channel);
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Deleted: "
+ "id=%d channel=%d", id, channel);
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: SMART Data: "
+ "id=%d channel=%d", id, channel);
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+			    "SAS Device Status Change: No Persistency: "
+ "id=%d channel=%d", id, channel);
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Unsupported Device "
+ "Discovered : id=%d channel=%d", id, channel);
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Internal Device "
+ "Reset : id=%d channel=%d", id, channel);
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Internal Task "
+ "Abort : id=%d channel=%d", id, channel);
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Internal Abort "
+ "Task Set : id=%d channel=%d", id, channel);
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Internal Clear "
+ "Task Set : id=%d channel=%d", id, channel);
+ break;
+ case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Internal Query "
+ "Task : id=%d channel=%d", id, channel);
+ break;
+ default:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Device Status Change: Unknown: "
+ "id=%d channel=%d", id, channel);
+ break;
+ }
+ break;
+ }
+ case MPI_EVENT_ON_BUS_TIMER_EXPIRED:
+ ds = "Bus Timer Expired";
+ break;
+ case MPI_EVENT_QUEUE_FULL:
+ {
+ u16 curr_depth = (u16)(evData0 >> 16);
+ u8 channel = (u8)(evData0 >> 8);
+ u8 id = (u8)(evData0);
+
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "Queue Full: channel=%d id=%d depth=%d",
+ channel, id, curr_depth);
+ break;
+ }
+ case MPI_EVENT_SAS_SES:
+ ds = "SAS SES Event";
+ break;
+ case MPI_EVENT_PERSISTENT_TABLE_FULL:
+ ds = "Persistent Table Full";
+ break;
+ case MPI_EVENT_SAS_PHY_LINK_STATUS:
+ {
+ u8 LinkRates = (u8)(evData0 >> 8);
+ u8 PhyNumber = (u8)(evData0);
+ LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >>
+ MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT;
+ switch (LinkRates) {
+ case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
+ " Rate Unknown",PhyNumber);
+ break;
+ case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
+ " Phy Disabled",PhyNumber);
+ break;
+ case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
+ " Failed Speed Nego",PhyNumber);
+ break;
+ case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
+ " Sata OOB Completed",PhyNumber);
+ break;
+ case MPI_EVENT_SAS_PLS_LR_RATE_1_5:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
+ " Rate 1.5 Gbps",PhyNumber);
+ break;
+ case MPI_EVENT_SAS_PLS_LR_RATE_3_0:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
+ " Rate 3.0 Gbps", PhyNumber);
+ break;
+ case MPI_EVENT_SAS_PLS_LR_RATE_6_0:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d:"
+ " Rate 6.0 Gbps", PhyNumber);
+ break;
+ default:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS PHY Link Status: Phy=%d", PhyNumber);
+ break;
+ }
+ break;
+ }
+ case MPI_EVENT_SAS_DISCOVERY_ERROR:
+ ds = "SAS Discovery Error";
+ break;
+ case MPI_EVENT_IR_RESYNC_UPDATE:
+ {
+ u8 resync_complete = (u8)(evData0 >> 16);
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR Resync Update: Complete = %d:",resync_complete);
+ break;
+ }
+ case MPI_EVENT_IR2:
+ {
+ u8 id = (u8)(evData0);
+ u8 channel = (u8)(evData0 >> 8);
+ u8 phys_num = (u8)(evData0 >> 24);
+ u8 ReasonCode = (u8)(evData0 >> 16);
+
+ switch (ReasonCode) {
+ case MPI_EVENT_IR2_RC_LD_STATE_CHANGED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: LD State Changed: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
+ break;
+ case MPI_EVENT_IR2_RC_PD_STATE_CHANGED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: PD State Changed "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
+ break;
+ case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: Bad Block Table Full: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
+ break;
+ case MPI_EVENT_IR2_RC_PD_INSERTED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: PD Inserted: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
+ break;
+ case MPI_EVENT_IR2_RC_PD_REMOVED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: PD Removed: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
+ break;
+ case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: Foreign CFG Detected: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
+ break;
+ case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: Rebuild Medium Error: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
+ break;
+ case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: Dual Port Added: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
+ break;
+ case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "IR2: Dual Port Removed: "
+ "id=%d channel=%d phys_num=%d",
+ id, channel, phys_num);
+ break;
+ default:
+ ds = "IR2";
+ break;
+ }
+ break;
+ }
+ case MPI_EVENT_SAS_DISCOVERY:
+ {
+ if (evData0)
+ ds = "SAS Discovery: Start";
+ else
+ ds = "SAS Discovery: Stop";
+ break;
+ }
+ case MPI_EVENT_LOG_ENTRY_ADDED:
+ ds = "SAS Log Entry Added";
+ break;
+
+ case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
+ {
+ u8 phy_num = (u8)(evData0);
+ u8 port_num = (u8)(evData0 >> 8);
+ u8 port_width = (u8)(evData0 >> 16);
+		u8 primitive = (u8)(evData0 >> 24);
+		snprintf(evStr, EVENT_DESCR_STR_SZ,
+		    "SAS Broadcast Primitive: phy=%d port=%d "
+		    "width=%d primitive=0x%02x",
+		    phy_num, port_num, port_width, primitive);
+ break;
+ }
+
+ case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
+ {
+ u8 reason = (u8)(evData0);
+
+ switch (reason) {
+ case MPI_EVENT_SAS_INIT_RC_ADDED:
+ ds = "SAS Initiator Status Change: Added";
+ break;
+ case MPI_EVENT_SAS_INIT_RC_REMOVED:
+ ds = "SAS Initiator Status Change: Deleted";
+ break;
+ default:
+ ds = "SAS Initiator Status Change";
+ break;
+ }
+ break;
+ }
+
+ case MPI_EVENT_SAS_INIT_TABLE_OVERFLOW:
+ {
+ u8 max_init = (u8)(evData0);
+ u8 current_init = (u8)(evData0 >> 8);
+
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS Initiator Device Table Overflow: max initiators=%02d "
+		    "current initiators=%02d",
+ max_init, current_init);
+ break;
+ }
+ case MPI_EVENT_SAS_SMP_ERROR:
+ {
+ u8 status = (u8)(evData0);
+ u8 port_num = (u8)(evData0 >> 8);
+ u8 result = (u8)(evData0 >> 16);
+
+ if (status == MPI_EVENT_SAS_SMP_FUNCTION_RESULT_VALID)
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS SMP Error: port=%d result=0x%02x",
+ port_num, result);
+ else if (status == MPI_EVENT_SAS_SMP_CRC_ERROR)
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS SMP Error: port=%d : CRC Error",
+ port_num);
+ else if (status == MPI_EVENT_SAS_SMP_TIMEOUT)
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS SMP Error: port=%d : Timeout",
+ port_num);
+ else if (status == MPI_EVENT_SAS_SMP_NO_DESTINATION)
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS SMP Error: port=%d : No Destination",
+ port_num);
+ else if (status == MPI_EVENT_SAS_SMP_BAD_DESTINATION)
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS SMP Error: port=%d : Bad Destination",
+ port_num);
+ else
+ snprintf(evStr, EVENT_DESCR_STR_SZ,
+ "SAS SMP Error: port=%d : status=0x%02x",
+ port_num, status);
+ break;
+ }
+
+ case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
+ {
+ u8 reason = (u8)(evData0);
+
+ switch (reason) {
+ case MPI_EVENT_SAS_EXP_RC_ADDED:
+ ds = "Expander Status Change: Added";
+ break;
+ case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING:
+ ds = "Expander Status Change: Deleted";
+ break;
+ default:
+ ds = "Expander Status Change";
+ break;
+ }
+ break;
+ }
+
+ /*
+ * MPT base "custom" events may be added here...
+ */
+ default:
+ ds = "Unknown";
+ break;
+ }
+ if (ds)
+ strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
+
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "MPT event:(%02Xh) : %s\n",
+ ioc->name, event, evStr));
+
+ devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM
+ ": Event data:\n"));
+ for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++)
+ devtverboseprintk(ioc, printk(" %08x",
+ le32_to_cpu(pEventReply->Data[ii])));
+ devtverboseprintk(ioc, printk(KERN_DEBUG "\n"));
+}
+#endif
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * ProcessEventNotification - Route EventNotificationReply to all event handlers
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @pEventReply: Pointer to EventNotification reply frame
+ * @evHandlers: Pointer to integer, number of event handlers
+ *
+ * Routes a received EventNotificationReply to all currently registered
+ * event handlers.
+ * Returns sum of event handlers return values.
+ */
+static int
+ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply, int *evHandlers)
+{
+ u16 evDataLen;
+ u32 evData0 = 0;
+ int ii;
+ u8 cb_idx;
+ int r = 0;
+ int handlers = 0;
+ u8 event;
+
+ /*
+ * Do platform normalization of values
+ */
+ event = le32_to_cpu(pEventReply->Event) & 0xFF;
+ evDataLen = le16_to_cpu(pEventReply->EventDataLength);
+ if (evDataLen) {
+ evData0 = le32_to_cpu(pEventReply->Data[0]);
+ }
+
+#ifdef CONFIG_FUSION_LOGGING
+ if (evDataLen)
+ mpt_display_event_info(ioc, pEventReply);
+#endif
+
+ /*
+ * Do general / base driver event processing
+ */
+ switch(event) {
+ case MPI_EVENT_EVENT_CHANGE: /* 0A */
+ if (evDataLen) {
+ u8 evState = evData0 & 0xFF;
+
+ /* CHECKME! What if evState unexpectedly says OFF (0)? */
+
+ /* Update EventState field in cached IocFacts */
+ if (ioc->facts.Function) {
+ ioc->facts.EventState = evState;
+ }
+ }
+ break;
+ case MPI_EVENT_INTEGRATED_RAID:
+ mptbase_raid_process_event_data(ioc,
+ (MpiEventDataRaid_t *)pEventReply->Data);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Should this event be logged? Events are written sequentially.
+ * When buffer is full, start again at the top.
+ */
+ if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
+ int idx;
+
+ idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
+
+ ioc->events[idx].event = event;
+ ioc->events[idx].eventContext = ioc->eventContext;
+
+ for (ii = 0; ii < 2; ii++) {
+ if (ii < evDataLen)
+ ioc->events[idx].data[ii] = le32_to_cpu(pEventReply->Data[ii]);
+ else
+ ioc->events[idx].data[ii] = 0;
+ }
+
+ ioc->eventContext++;
+ }
+
+
+ /*
+ * Call each currently registered protocol event handler.
+ */
+ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+ if (MptEvHandlers[cb_idx]) {
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Routing Event to event handler #%d\n",
+ ioc->name, cb_idx));
+ r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply);
+ handlers++;
+ }
+ }
+ /* FIXME? Examine results here? */
+
+ /*
+ * If needed, send (a single) EventAck.
+ */
+ if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) {
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "EventAck required\n",ioc->name));
+ if ((ii = SendEventAck(ioc, pEventReply)) != 0) {
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SendEventAck returned %d\n",
+ ioc->name, ii));
+ }
+ }
+
+ *evHandlers = handlers;
+ return r;
+}
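+
+/*
+ *	Illustrative sketch (not part of the original driver sources): how a
+ *	hypothetical protocol driver hooks into the routing done above.  It
+ *	obtains a callback index from mpt_register() (example_reply being its
+ *	own MPT_CALLBACK reply routine, and MPTSAS_DRIVER standing in for the
+ *	appropriate MPT_DRIVER_CLASS) and attaches an MPT_EVHANDLER to it:
+ *
+ *	static u8 example_cb_idx;
+ *
+ *	static int
+ *	example_event_handler(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
+ *	{
+ *		u8 event = le32_to_cpu(reply->Event) & 0xFF;
+ *
+ *		printk(MYIOC_s_INFO_FMT "event %02Xh received\n",
+ *		    ioc->name, event);
+ *		return 1;
+ *	}
+ *
+ *	example_cb_idx = mpt_register(example_reply, MPTSAS_DRIVER,
+ *					"example_reply");
+ *	mpt_event_register(example_cb_idx, example_event_handler);
+ */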
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_fc_log_info - Log information returned from Fibre Channel IOC.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @log_info: U32 LogInfo reply word from the IOC
+ *
+ * Refer to lsi/mpi_log_fc.h.
+ */
+static void
+mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
+{
+ char *desc = "unknown";
+
+ switch (log_info & 0xFF000000) {
+ case MPI_IOCLOGINFO_FC_INIT_BASE:
+ desc = "FCP Initiator";
+ break;
+ case MPI_IOCLOGINFO_FC_TARGET_BASE:
+ desc = "FCP Target";
+ break;
+ case MPI_IOCLOGINFO_FC_LAN_BASE:
+ desc = "LAN";
+ break;
+ case MPI_IOCLOGINFO_FC_MSG_BASE:
+ desc = "MPI Message Layer";
+ break;
+ case MPI_IOCLOGINFO_FC_LINK_BASE:
+ desc = "FC Link";
+ break;
+ case MPI_IOCLOGINFO_FC_CTX_BASE:
+ desc = "Context Manager";
+ break;
+ case MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET:
+ desc = "Invalid Field Offset";
+ break;
+ case MPI_IOCLOGINFO_FC_STATE_CHANGE:
+ desc = "State Change Info";
+ break;
+ }
+
+ printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): SubClass={%s}, Value=(0x%06x)\n",
+ ioc->name, log_info, desc, (log_info & 0xFFFFFF));
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_spi_log_info - Log information returned from SCSI Parallel IOC.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @log_info: U32 LogInfo word from the IOC
+ *
+ * Refer to lsi/sp_log.h.
+ */
+static void
+mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
+{
+ u32 info = log_info & 0x00FF0000;
+ char *desc = "unknown";
+
+ switch (info) {
+ case 0x00010000:
+ desc = "bug! MID not found";
+ break;
+
+ case 0x00020000:
+ desc = "Parity Error";
+ break;
+
+ case 0x00030000:
+ desc = "ASYNC Outbound Overrun";
+ break;
+
+ case 0x00040000:
+ desc = "SYNC Offset Error";
+ break;
+
+ case 0x00050000:
+ desc = "BM Change";
+ break;
+
+ case 0x00060000:
+ desc = "Msg In Overflow";
+ break;
+
+ case 0x00070000:
+ desc = "DMA Error";
+ break;
+
+ case 0x00080000:
+ desc = "Outbound DMA Overrun";
+ break;
+
+ case 0x00090000:
+ desc = "Task Management";
+ break;
+
+ case 0x000A0000:
+ desc = "Device Problem";
+ break;
+
+ case 0x000B0000:
+ desc = "Invalid Phase Change";
+ break;
+
+ case 0x000C0000:
+ desc = "Untagged Table Size";
+ break;
+
+ }
+
+ printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
+}
+
+/* strings for sas loginfo */
+ static char *originator_str[] = {
+ "IOP", /* 00h */
+ "PL", /* 01h */
+ "IR" /* 02h */
+ };
+ static char *iop_code_str[] = {
+ NULL, /* 00h */
+ "Invalid SAS Address", /* 01h */
+ NULL, /* 02h */
+ "Invalid Page", /* 03h */
+ "Diag Message Error", /* 04h */
+ "Task Terminated", /* 05h */
+ "Enclosure Management", /* 06h */
+ "Target Mode" /* 07h */
+ };
+ static char *pl_code_str[] = {
+ NULL, /* 00h */
+ "Open Failure", /* 01h */
+ "Invalid Scatter Gather List", /* 02h */
+ "Wrong Relative Offset or Frame Length", /* 03h */
+ "Frame Transfer Error", /* 04h */
+ "Transmit Frame Connected Low", /* 05h */
+ "SATA Non-NCQ RW Error Bit Set", /* 06h */
+ "SATA Read Log Receive Data Error", /* 07h */
+ "SATA NCQ Fail All Commands After Error", /* 08h */
+ "SATA Error in Receive Set Device Bit FIS", /* 09h */
+ "Receive Frame Invalid Message", /* 0Ah */
+ "Receive Context Message Valid Error", /* 0Bh */
+ "Receive Frame Current Frame Error", /* 0Ch */
+ "SATA Link Down", /* 0Dh */
+ "Discovery SATA Init W IOS", /* 0Eh */
+ "Config Invalid Page", /* 0Fh */
+ "Discovery SATA Init Timeout", /* 10h */
+ "Reset", /* 11h */
+ "Abort", /* 12h */
+ "IO Not Yet Executed", /* 13h */
+ "IO Executed", /* 14h */
+ "Persistent Reservation Out Not Affiliation "
+ "Owner", /* 15h */
+ "Open Transmit DMA Abort", /* 16h */
+ "IO Device Missing Delay Retry", /* 17h */
+ "IO Cancelled Due to Receive Error", /* 18h */
+ NULL, /* 19h */
+ NULL, /* 1Ah */
+ NULL, /* 1Bh */
+ NULL, /* 1Ch */
+ NULL, /* 1Dh */
+ NULL, /* 1Eh */
+ NULL, /* 1Fh */
+ "Enclosure Management" /* 20h */
+ };
+ static char *ir_code_str[] = {
+ "Raid Action Error", /* 00h */
+	NULL,					/* 01h */
+	NULL,					/* 02h */
+	NULL,					/* 03h */
+	NULL,					/* 04h */
+	NULL,					/* 05h */
+	NULL,					/* 06h */
+	NULL,					/* 07h */
+	NULL					/* 08h */
+ };
+ static char *raid_sub_code_str[] = {
+ NULL, /* 00h */
+ "Volume Creation Failed: Data Passed too "
+ "Large", /* 01h */
+ "Volume Creation Failed: Duplicate Volumes "
+ "Attempted", /* 02h */
+ "Volume Creation Failed: Max Number "
+ "Supported Volumes Exceeded", /* 03h */
+ "Volume Creation Failed: DMA Error", /* 04h */
+ "Volume Creation Failed: Invalid Volume Type", /* 05h */
+ "Volume Creation Failed: Error Reading "
+ "MFG Page 4", /* 06h */
+ "Volume Creation Failed: Creating Internal "
+ "Structures", /* 07h */
+ NULL, /* 08h */
+ NULL, /* 09h */
+ NULL, /* 0Ah */
+ NULL, /* 0Bh */
+ NULL, /* 0Ch */
+ NULL, /* 0Dh */
+ NULL, /* 0Eh */
+ NULL, /* 0Fh */
+ "Activation failed: Already Active Volume", /* 10h */
+ "Activation failed: Unsupported Volume Type", /* 11h */
+ "Activation failed: Too Many Active Volumes", /* 12h */
+ "Activation failed: Volume ID in Use", /* 13h */
+ "Activation failed: Reported Failure", /* 14h */
+ "Activation failed: Importing a Volume", /* 15h */
+ NULL, /* 16h */
+ NULL, /* 17h */
+ NULL, /* 18h */
+ NULL, /* 19h */
+ NULL, /* 1Ah */
+ NULL, /* 1Bh */
+ NULL, /* 1Ch */
+ NULL, /* 1Dh */
+ NULL, /* 1Eh */
+ NULL, /* 1Fh */
+ "Phys Disk failed: Too Many Phys Disks", /* 20h */
+ "Phys Disk failed: Data Passed too Large", /* 21h */
+ "Phys Disk failed: DMA Error", /* 22h */
+ "Phys Disk failed: Invalid <channel:id>", /* 23h */
+ "Phys Disk failed: Creating Phys Disk Config "
+ "Page", /* 24h */
+ NULL, /* 25h */
+ NULL, /* 26h */
+ NULL, /* 27h */
+ NULL, /* 28h */
+ NULL, /* 29h */
+ NULL, /* 2Ah */
+ NULL, /* 2Bh */
+ NULL, /* 2Ch */
+ NULL, /* 2Dh */
+ NULL, /* 2Eh */
+ NULL, /* 2Fh */
+ "Compatibility Error: IR Disabled", /* 30h */
+ "Compatibility Error: Inquiry Command Failed", /* 31h */
+ "Compatibility Error: Device not Direct Access "
+ "Device ", /* 32h */
+ "Compatibility Error: Removable Device Found", /* 33h */
+ "Compatibility Error: Device SCSI Version not "
+ "2 or Higher", /* 34h */
+ "Compatibility Error: SATA Device, 48 BIT LBA "
+ "not Supported", /* 35h */
+ "Compatibility Error: Device doesn't have "
+ "512 Byte Block Sizes", /* 36h */
+ "Compatibility Error: Volume Type Check Failed", /* 37h */
+ "Compatibility Error: Volume Type is "
+ "Unsupported by FW", /* 38h */
+ "Compatibility Error: Disk Drive too Small for "
+ "use in Volume", /* 39h */
+ "Compatibility Error: Phys Disk for Create "
+ "Volume not Found", /* 3Ah */
+ "Compatibility Error: Too Many or too Few "
+ "Disks for Volume Type", /* 3Bh */
+ "Compatibility Error: Disk stripe Sizes "
+ "Must be 64KB", /* 3Ch */
+ "Compatibility Error: IME Size Limited to < 2TB", /* 3Dh */
+ };
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_sas_log_info - Log information returned from SAS IOC.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @log_info: U32 LogInfo reply word from the IOC
+ * @cb_idx: callback function's handle
+ *
+ * Refer to lsi/mpi_log_sas.h.
+ **/
+static void
+mpt_sas_log_info(MPT_ADAPTER *ioc, u32 log_info, u8 cb_idx)
+{
+union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ }dw;
+};
+ union loginfo_type sas_loginfo;
+ char *originator_desc = NULL;
+ char *code_desc = NULL;
+ char *sub_code_desc = NULL;
+
+ sas_loginfo.loginfo = log_info;
+	if ((sas_loginfo.dw.bus_type != 3 /*SAS*/) ||
+	    (sas_loginfo.dw.originator >= ARRAY_SIZE(originator_str)))
+		return;
+
+ originator_desc = originator_str[sas_loginfo.dw.originator];
+
+ switch (sas_loginfo.dw.originator) {
+
+ case 0: /* IOP */
+ if (sas_loginfo.dw.code <
+ ARRAY_SIZE(iop_code_str))
+ code_desc = iop_code_str[sas_loginfo.dw.code];
+ break;
+ case 1: /* PL */
+ if (sas_loginfo.dw.code <
+ ARRAY_SIZE(pl_code_str))
+ code_desc = pl_code_str[sas_loginfo.dw.code];
+ break;
+ case 2: /* IR */
+ if (sas_loginfo.dw.code >=
+ ARRAY_SIZE(ir_code_str))
+ break;
+ code_desc = ir_code_str[sas_loginfo.dw.code];
+ if (sas_loginfo.dw.subcode >=
+ ARRAY_SIZE(raid_sub_code_str))
+ break;
+ if (sas_loginfo.dw.code == 0)
+ sub_code_desc =
+ raid_sub_code_str[sas_loginfo.dw.subcode];
+ break;
+ default:
+ return;
+ }
+
+ if (sub_code_desc != NULL)
+ printk(MYIOC_s_INFO_FMT
+ "LogInfo(0x%08x): Originator={%s}, Code={%s},"
+ " SubCode={%s} cb_idx %s\n",
+ ioc->name, log_info, originator_desc, code_desc,
+ sub_code_desc, MptCallbacksName[cb_idx]);
+ else if (code_desc != NULL)
+ printk(MYIOC_s_INFO_FMT
+ "LogInfo(0x%08x): Originator={%s}, Code={%s},"
+ " SubCode(0x%04x) cb_idx %s\n",
+ ioc->name, log_info, originator_desc, code_desc,
+ sas_loginfo.dw.subcode, MptCallbacksName[cb_idx]);
+ else
+ printk(MYIOC_s_INFO_FMT
+ "LogInfo(0x%08x): Originator={%s}, Code=(0x%02x),"
+ " SubCode(0x%04x) cb_idx %s\n",
+ ioc->name, log_info, originator_desc,
+ sas_loginfo.dw.code, sas_loginfo.dw.subcode,
+ MptCallbacksName[cb_idx]);
+}
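+
+/*
+ *	Worked example (illustrative, assuming the little-endian bit-field
+ *	layout the union above relies on): log_info 0x31120101 decodes as
+ *	bus_type=3 (SAS), originator=1 (PL), code=0x12 and subcode=0x0101,
+ *	so this routine reports Originator={PL}, Code={Abort},
+ *	SubCode(0x0101).
+ */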
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mpt_iocstatus_info_config - IOCSTATUS information for config pages
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @ioc_status: U32 IOCStatus word from IOC
+ * @mf: Pointer to MPT request frame
+ *
+ * Refer to lsi/mpi.h.
+ **/
+static void
+mpt_iocstatus_info_config(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
+{
+ Config_t *pReq = (Config_t *)mf;
+ char extend_desc[EVENT_DESCR_STR_SZ];
+ char *desc = NULL;
+ u32 form;
+ u8 page_type;
+
+ if (pReq->Header.PageType == MPI_CONFIG_PAGETYPE_EXTENDED)
+ page_type = pReq->ExtPageType;
+ else
+ page_type = pReq->Header.PageType;
+
+ /*
+ * ignore invalid page messages for GET_NEXT_HANDLE
+ */
+ form = le32_to_cpu(pReq->PageAddress);
+ if (ioc_status == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
+ if (page_type == MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE ||
+ page_type == MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER ||
+ page_type == MPI_CONFIG_EXTPAGETYPE_ENCLOSURE) {
+ if ((form >> MPI_SAS_DEVICE_PGAD_FORM_SHIFT) ==
+ MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE)
+ return;
+ }
+ if (page_type == MPI_CONFIG_PAGETYPE_FC_DEVICE)
+ if ((form & MPI_FC_DEVICE_PGAD_FORM_MASK) ==
+ MPI_FC_DEVICE_PGAD_FORM_NEXT_DID)
+ return;
+ }
+
+ snprintf(extend_desc, EVENT_DESCR_STR_SZ,
+ "type=%02Xh, page=%02Xh, action=%02Xh, form=%08Xh",
+ page_type, pReq->Header.PageNumber, pReq->Action, form);
+
+ switch (ioc_status) {
+
+ case MPI_IOCSTATUS_CONFIG_INVALID_ACTION: /* 0x0020 */
+ desc = "Config Page Invalid Action";
+ break;
+
+ case MPI_IOCSTATUS_CONFIG_INVALID_TYPE: /* 0x0021 */
+ desc = "Config Page Invalid Type";
+ break;
+
+ case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: /* 0x0022 */
+ desc = "Config Page Invalid Page";
+ break;
+
+ case MPI_IOCSTATUS_CONFIG_INVALID_DATA: /* 0x0023 */
+ desc = "Config Page Invalid Data";
+ break;
+
+ case MPI_IOCSTATUS_CONFIG_NO_DEFAULTS: /* 0x0024 */
+ desc = "Config Page No Defaults";
+ break;
+
+ case MPI_IOCSTATUS_CONFIG_CANT_COMMIT: /* 0x0025 */
+ desc = "Config Page Can't Commit";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOCStatus(0x%04X): %s: %s\n",
+ ioc->name, ioc_status, desc, extend_desc));
+}
+
+/**
+ * mpt_iocstatus_info - IOCSTATUS information returned from IOC.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @ioc_status: U32 IOCStatus word from IOC
+ * @mf: Pointer to MPT request frame
+ *
+ * Refer to lsi/mpi.h.
+ **/
+static void
+mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
+{
+ u32 status = ioc_status & MPI_IOCSTATUS_MASK;
+ char *desc = NULL;
+
+ switch (status) {
+
+/****************************************************************************/
+/* Common IOCStatus values for all replies */
+/****************************************************************************/
+
+ case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
+ desc = "Invalid Function";
+ break;
+
+ case MPI_IOCSTATUS_BUSY: /* 0x0002 */
+ desc = "Busy";
+ break;
+
+ case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */
+ desc = "Invalid SGL";
+ break;
+
+ case MPI_IOCSTATUS_INTERNAL_ERROR: /* 0x0004 */
+ desc = "Internal Error";
+ break;
+
+ case MPI_IOCSTATUS_RESERVED: /* 0x0005 */
+ desc = "Reserved";
+ break;
+
+ case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */
+ desc = "Insufficient Resources";
+ break;
+
+ case MPI_IOCSTATUS_INVALID_FIELD: /* 0x0007 */
+ desc = "Invalid Field";
+ break;
+
+ case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */
+ desc = "Invalid State";
+ break;
+
+/****************************************************************************/
+/* Config IOCStatus values */
+/****************************************************************************/
+
+ case MPI_IOCSTATUS_CONFIG_INVALID_ACTION: /* 0x0020 */
+ case MPI_IOCSTATUS_CONFIG_INVALID_TYPE: /* 0x0021 */
+ case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: /* 0x0022 */
+ case MPI_IOCSTATUS_CONFIG_INVALID_DATA: /* 0x0023 */
+ case MPI_IOCSTATUS_CONFIG_NO_DEFAULTS: /* 0x0024 */
+ case MPI_IOCSTATUS_CONFIG_CANT_COMMIT: /* 0x0025 */
+ mpt_iocstatus_info_config(ioc, status, mf);
+ break;
+
+/****************************************************************************/
+/* SCSIIO Reply (SPI, FCP, SAS) initiator values */
+/* */
+/* Look at mptscsih_iocstatus_info_scsiio in mptscsih.c */
+/* */
+/****************************************************************************/
+
+ case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
+ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
+ case MPI_IOCSTATUS_SCSI_INVALID_BUS: /* 0x0041 */
+ case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: /* 0x0042 */
+ case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
+ case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
+ case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
+ case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
+ case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
+ case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
+ case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: /* 0x004A */
+ case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
+ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
+ break;
+
+/****************************************************************************/
+/* SCSI Target values */
+/****************************************************************************/
+
+ case MPI_IOCSTATUS_TARGET_PRIORITY_IO: /* 0x0060 */
+ desc = "Target: Priority IO";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_INVALID_PORT: /* 0x0061 */
+ desc = "Target: Invalid Port";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX: /* 0x0062 */
+		desc = "Target: Invalid IO Index";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_ABORTED: /* 0x0063 */
+ desc = "Target: Aborted";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_NO_CONN_RETRYABLE: /* 0x0064 */
+ desc = "Target: No Conn Retryable";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_NO_CONNECTION: /* 0x0065 */
+ desc = "Target: No Connection";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH: /* 0x006A */
+ desc = "Target: Transfer Count Mismatch";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT: /* 0x006B */
+ desc = "Target: STS Data not Sent";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_DATA_OFFSET_ERROR: /* 0x006D */
+ desc = "Target: Data Offset Error";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA: /* 0x006E */
+ desc = "Target: Too Much Write Data";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_IU_TOO_SHORT: /* 0x006F */
+ desc = "Target: IU Too Short";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT: /* 0x0070 */
+ desc = "Target: ACK NAK Timeout";
+ break;
+
+ case MPI_IOCSTATUS_TARGET_NAK_RECEIVED: /* 0x0071 */
+ desc = "Target: Nak Received";
+ break;
+
+/****************************************************************************/
+/* Fibre Channel Direct Access values */
+/****************************************************************************/
+
+ case MPI_IOCSTATUS_FC_ABORTED: /* 0x0066 */
+ desc = "FC: Aborted";
+ break;
+
+ case MPI_IOCSTATUS_FC_RX_ID_INVALID: /* 0x0067 */
+ desc = "FC: RX ID Invalid";
+ break;
+
+ case MPI_IOCSTATUS_FC_DID_INVALID: /* 0x0068 */
+ desc = "FC: DID Invalid";
+ break;
+
+ case MPI_IOCSTATUS_FC_NODE_LOGGED_OUT: /* 0x0069 */
+ desc = "FC: Node Logged Out";
+ break;
+
+ case MPI_IOCSTATUS_FC_EXCHANGE_CANCELED: /* 0x006C */
+ desc = "FC: Exchange Canceled";
+ break;
+
+/****************************************************************************/
+/* LAN values */
+/****************************************************************************/
+
+ case MPI_IOCSTATUS_LAN_DEVICE_NOT_FOUND: /* 0x0080 */
+ desc = "LAN: Device not Found";
+ break;
+
+ case MPI_IOCSTATUS_LAN_DEVICE_FAILURE: /* 0x0081 */
+ desc = "LAN: Device Failure";
+ break;
+
+ case MPI_IOCSTATUS_LAN_TRANSMIT_ERROR: /* 0x0082 */
+ desc = "LAN: Transmit Error";
+ break;
+
+ case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED: /* 0x0083 */
+ desc = "LAN: Transmit Aborted";
+ break;
+
+ case MPI_IOCSTATUS_LAN_RECEIVE_ERROR: /* 0x0084 */
+ desc = "LAN: Receive Error";
+ break;
+
+ case MPI_IOCSTATUS_LAN_RECEIVE_ABORTED: /* 0x0085 */
+ desc = "LAN: Receive Aborted";
+ break;
+
+ case MPI_IOCSTATUS_LAN_PARTIAL_PACKET: /* 0x0086 */
+ desc = "LAN: Partial Packet";
+ break;
+
+ case MPI_IOCSTATUS_LAN_CANCELED: /* 0x0087 */
+ desc = "LAN: Canceled";
+ break;
+
+/****************************************************************************/
+/* Serial Attached SCSI values */
+/****************************************************************************/
+
+ case MPI_IOCSTATUS_SAS_SMP_REQUEST_FAILED: /* 0x0090 */
+ desc = "SAS: SMP Request Failed";
+ break;
+
+	case MPI_IOCSTATUS_SAS_SMP_DATA_OVERRUN:	/* 0x0091 */
+ desc = "SAS: SMP Data Overrun";
+ break;
+
+ default:
+ desc = "Others";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOCStatus(0x%04X): %s\n",
+ ioc->name, status, desc));
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+EXPORT_SYMBOL(mpt_attach);
+EXPORT_SYMBOL(mpt_detach);
+#ifdef CONFIG_PM
+EXPORT_SYMBOL(mpt_resume);
+EXPORT_SYMBOL(mpt_suspend);
+#endif
+EXPORT_SYMBOL(ioc_list);
+EXPORT_SYMBOL(mpt_register);
+EXPORT_SYMBOL(mpt_deregister);
+EXPORT_SYMBOL(mpt_event_register);
+EXPORT_SYMBOL(mpt_event_deregister);
+EXPORT_SYMBOL(mpt_reset_register);
+EXPORT_SYMBOL(mpt_reset_deregister);
+EXPORT_SYMBOL(mpt_device_driver_register);
+EXPORT_SYMBOL(mpt_device_driver_deregister);
+EXPORT_SYMBOL(mpt_get_msg_frame);
+EXPORT_SYMBOL(mpt_put_msg_frame);
+EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri);
+EXPORT_SYMBOL(mpt_free_msg_frame);
+EXPORT_SYMBOL(mpt_send_handshake_request);
+EXPORT_SYMBOL(mpt_verify_adapter);
+EXPORT_SYMBOL(mpt_GetIocState);
+EXPORT_SYMBOL(mpt_print_ioc_summary);
+EXPORT_SYMBOL(mpt_HardResetHandler);
+EXPORT_SYMBOL(mpt_config);
+EXPORT_SYMBOL(mpt_findImVolumes);
+EXPORT_SYMBOL(mpt_alloc_fw_memory);
+EXPORT_SYMBOL(mpt_free_fw_memory);
+EXPORT_SYMBOL(mptbase_sas_persist_operation);
+EXPORT_SYMBOL(mpt_raid_phys_disk_pg0);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * fusion_init - Fusion MPT base driver initialization routine.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int __init
+fusion_init(void)
+{
+ u8 cb_idx;
+
+ show_mptmod_ver(my_NAME, my_VERSION);
+ printk(KERN_INFO COPYRIGHT "\n");
+
+ for (cb_idx = 0; cb_idx < MPT_MAX_PROTOCOL_DRIVERS; cb_idx++) {
+ MptCallbacks[cb_idx] = NULL;
+ MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
+ MptEvHandlers[cb_idx] = NULL;
+ MptResetHandlers[cb_idx] = NULL;
+ }
+
+ /* Register ourselves (mptbase) in order to facilitate
+ * EventNotification handling.
+ */
+ mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER,
+ "mptbase_reply");
+
+ /* Register for hard reset handling callbacks.
+ */
+ mpt_reset_register(mpt_base_index, mpt_ioc_reset);
+
+#ifdef CONFIG_PROC_FS
+ (void) procmpt_create();
+#endif
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * fusion_exit - Perform driver unload cleanup.
+ *
+ * This routine frees all resources associated with each MPT adapter
+ * and removes all %MPT_PROCFS_MPTBASEDIR entries.
+ */
+static void __exit
+fusion_exit(void)
+{
+
+ mpt_reset_deregister(mpt_base_index);
+
+#ifdef CONFIG_PROC_FS
+ procmpt_destroy();
+#endif
+}
+
+module_init(fusion_init);
+module_exit(fusion_exit);
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
new file mode 100644
index 00000000..fe902338
--- /dev/null
+++ b/drivers/message/fusion/mptbase.h
@@ -0,0 +1,995 @@
+/*
+ * linux/drivers/message/fusion/mptbase.h
+ * High performance SCSI + LAN / Fibre Channel device drivers.
+ * For use with PCI chip/adapter(s):
+ * LSIFC9xx/LSI409xx Fibre Channel
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef MPTBASE_H_INCLUDED
+#define MPTBASE_H_INCLUDED
+/*{-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/mutex.h>
+
+#include "lsi/mpi_type.h"
+#include "lsi/mpi.h" /* Fusion MPI(nterface) basic defs */
+#include "lsi/mpi_ioc.h" /* Fusion MPT IOC(ontroller) defs */
+#include "lsi/mpi_cnfg.h" /* IOC configuration support */
+#include "lsi/mpi_init.h" /* SCSI Host (initiator) protocol support */
+#include "lsi/mpi_lan.h" /* LAN over FC protocol support */
+#include "lsi/mpi_raid.h" /* Integrated Mirroring support */
+
+#include "lsi/mpi_fc.h" /* Fibre Channel (lowlevel) support */
+#include "lsi/mpi_targ.h" /* SCSI/FCP Target protcol support */
+#include "lsi/mpi_tool.h" /* Tools support */
+#include "lsi/mpi_sas.h" /* SAS support */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#ifndef MODULEAUTHOR
+#define MODULEAUTHOR "LSI Corporation"
+#endif
+
+#ifndef COPYRIGHT
+#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
+#endif
+
+#define MPT_LINUX_VERSION_COMMON "3.04.19"
+#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.19"
+#define WHAT_MAGIC_STRING "@" "(" "#" ")"
+
+#define show_mptmod_ver(s,ver) \
+ printk(KERN_INFO "%s %s\n", s, ver);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Fusion MPT(linux) driver configurable stuff...
+ */
+#define MPT_MAX_ADAPTERS 18
+#define MPT_MAX_PROTOCOL_DRIVERS 16
+#define MPT_MAX_BUS 1 /* Do not change */
+#define MPT_MAX_FC_DEVICES 255
+#define MPT_MAX_SCSI_DEVICES 16
+#define MPT_LAST_LUN 255
+#define MPT_SENSE_BUFFER_ALLOC 64
+ /* allow for 256 max sense alloc, but only 255 max request */
+#if MPT_SENSE_BUFFER_ALLOC >= 256
+# undef MPT_SENSE_BUFFER_ALLOC
+# define MPT_SENSE_BUFFER_ALLOC 256
+# define MPT_SENSE_BUFFER_SIZE 255
+#else
+# define MPT_SENSE_BUFFER_SIZE MPT_SENSE_BUFFER_ALLOC
+#endif
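+/*
+ * Worked example (illustrative): with the default MPT_SENSE_BUFFER_ALLOC of
+ * 64 the #else branch applies and MPT_SENSE_BUFFER_SIZE is also 64; only an
+ * allocation of 256 or more is clamped to a 256-byte buffer with a 255-byte
+ * maximum request size.
+ */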
+
+#define MPT_NAME_LENGTH 32
+#define MPT_KOBJ_NAME_LEN 20
+
+#define MPT_PROCFS_MPTBASEDIR "mpt"
+ /* chg it to "driver/fusion" ? */
+#define MPT_PROCFS_SUMMARY_ALL_NODE MPT_PROCFS_MPTBASEDIR "/summary"
+#define MPT_PROCFS_SUMMARY_ALL_PATHNAME "/proc/" MPT_PROCFS_SUMMARY_ALL_NODE
+#define MPT_FW_REV_MAGIC_ID_STRING "FwRev="
+
+#define MPT_MAX_REQ_DEPTH 1023
+#define MPT_DEFAULT_REQ_DEPTH 256
+#define MPT_MIN_REQ_DEPTH 128
+
+#define MPT_MAX_REPLY_DEPTH MPT_MAX_REQ_DEPTH
+#define MPT_DEFAULT_REPLY_DEPTH 128
+#define MPT_MIN_REPLY_DEPTH 8
+#define MPT_MAX_REPLIES_PER_ISR 32
+
+#define MPT_MAX_FRAME_SIZE 128
+#define MPT_DEFAULT_FRAME_SIZE 128
+
+#define MPT_REPLY_FRAME_SIZE 0x50 /* Must be a multiple of 8 */
+
+#define MPT_SG_REQ_128_SCALE 1
+#define MPT_SG_REQ_96_SCALE 2
+#define MPT_SG_REQ_64_SCALE 4
+
+#define CAN_SLEEP 1
+#define NO_SLEEP 0
+
+#define MPT_COALESCING_TIMEOUT 0x10
+
+
+/*
+ * SCSI transfer rate defines.
+ */
+#define MPT_ULTRA320 0x08
+#define MPT_ULTRA160 0x09
+#define MPT_ULTRA2 0x0A
+#define MPT_ULTRA 0x0C
+#define MPT_FAST 0x19
+#define MPT_SCSI 0x32
+#define MPT_ASYNC 0xFF
+
+#define MPT_NARROW 0
+#define MPT_WIDE 1
+
+#define C0_1030 0x08
+#define XL_929 0x01
+
+
+/*
+ * Try to keep these at 2^N-1
+ */
+#define MPT_FC_CAN_QUEUE 1024
+#define MPT_SCSI_CAN_QUEUE 127
+#define MPT_SAS_CAN_QUEUE 127
+
+/*
+ * Set the MAX_SGE value based on user input.
+ */
+#ifdef CONFIG_FUSION_MAX_SGE
+#if CONFIG_FUSION_MAX_SGE < 16
+#define MPT_SCSI_SG_DEPTH 16
+#elif CONFIG_FUSION_MAX_SGE > 128
+#define MPT_SCSI_SG_DEPTH 128
+#else
+#define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE
+#endif
+#else
+#define MPT_SCSI_SG_DEPTH 40
+#endif
+
+#ifdef CONFIG_FUSION_MAX_FC_SGE
+#if CONFIG_FUSION_MAX_FC_SGE < 16
+#define MPT_SCSI_FC_SG_DEPTH 16
+#elif CONFIG_FUSION_MAX_FC_SGE > 256
+#define MPT_SCSI_FC_SG_DEPTH 256
+#else
+#define MPT_SCSI_FC_SG_DEPTH CONFIG_FUSION_MAX_FC_SGE
+#endif
+#else
+#define MPT_SCSI_FC_SG_DEPTH 40
+#endif
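+/*
+ * Worked example (illustrative): CONFIG_FUSION_MAX_SGE=40 gives
+ * MPT_SCSI_SG_DEPTH 40, while out-of-range values are clamped, e.g.
+ * CONFIG_FUSION_MAX_SGE=200 gives 128 and CONFIG_FUSION_MAX_FC_SGE=300
+ * gives MPT_SCSI_FC_SG_DEPTH 256.
+ */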
+
+/* debug print string length used for events and iocstatus */
+# define EVENT_DESCR_STR_SZ 100
+
+#define MPT_POLLING_INTERVAL 1000 /* in milliseconds */
+
+#ifdef __KERNEL__ /* { */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/proc_fs.h>
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Attempt semi-consistent error & warning msgs across
+ * MPT drivers. NOTE: Users of these macro defs must
+ * themselves define their own MYNAM.
+ */
+#define MYIOC_s_FMT MYNAM ": %s: "
+#define MYIOC_s_DEBUG_FMT KERN_DEBUG MYNAM ": %s: "
+#define MYIOC_s_INFO_FMT KERN_INFO MYNAM ": %s: "
+#define MYIOC_s_NOTE_FMT KERN_NOTICE MYNAM ": %s: "
+#define MYIOC_s_WARN_FMT KERN_WARNING MYNAM ": %s: WARNING - "
+#define MYIOC_s_ERR_FMT KERN_ERR MYNAM ": %s: ERROR - "
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * ATTO UL4D associated structures and defines
+ */
+#define ATTOFLAG_DISC 0x0001
+#define ATTOFLAG_TAGGED 0x0002
+#define ATTOFLAG_WIDE_ENB 0x0008
+#define ATTOFLAG_ID_ENB 0x0010
+#define ATTOFLAG_LUN_ENB 0x0060
+
+typedef struct _ATTO_DEVICE_INFO
+{
+ u8 Offset; /* 00h */
+ u8 Period; /* 01h */
+ u16 ATTOFlags; /* 02h */
+} ATTO_DEVICE_INFO, MPI_POINTER PTR_ATTO_DEVICE_INFO,
+ ATTODeviceInfo_t, MPI_POINTER pATTODeviceInfo_t;
+
+typedef struct _ATTO_CONFIG_PAGE_SCSI_PORT_2
+{
+ CONFIG_PAGE_HEADER Header; /* 00h */
+ u16 PortFlags; /* 04h */
+ u16 Unused1; /* 06h */
+ u32 Unused2; /* 08h */
+ ATTO_DEVICE_INFO DeviceSettings[16]; /* 0Ch */
+} fATTO_CONFIG_PAGE_SCSI_PORT_2, MPI_POINTER PTR_ATTO_CONFIG_PAGE_SCSI_PORT_2,
+ ATTO_SCSIPortPage2_t, MPI_POINTER pATTO_SCSIPortPage2_t;
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * MPT protocol driver defs...
+ */
+typedef enum {
+ MPTBASE_DRIVER, /* MPT base class */
+ MPTCTL_DRIVER, /* MPT ioctl class */
+ MPTSPI_DRIVER, /* MPT SPI host class */
+ MPTFC_DRIVER, /* MPT FC host class */
+ MPTSAS_DRIVER, /* MPT SAS host class */
+ MPTLAN_DRIVER, /* MPT LAN class */
+ MPTSTM_DRIVER, /* MPT SCSI target mode class */
+ MPTUNKNOWN_DRIVER
+} MPT_DRIVER_CLASS;
+
+struct mpt_pci_driver{
+ int (*probe) (struct pci_dev *dev, const struct pci_device_id *id);
+ void (*remove) (struct pci_dev *dev);
+};
+
+/*
+ * MPT adapter / port / bus / device info structures...
+ */
+
+typedef union _MPT_FRAME_TRACKER {
+ struct {
+ struct list_head list;
+ u32 arg1;
+ u32 pad;
+ void *argp1;
+ } linkage;
+ /*
+	 * NOTE: When request frames are free, only the linkage structure
+	 * contents are valid. All other values are invalid.
+	 * In particular, do NOT rely on offset [2]
+	 * (in words) being the message context.
+ * The message context must be reset (computed via base address
+ * + an offset) prior to issuing any command.
+ *
+ * NOTE2: On non-32-bit systems, where pointers are LARGE,
+ * using the linkage pointers destroys our sacred MsgContext
+ * field contents. But we don't care anymore because these
+ * are now reset in mpt_put_msg_frame() just prior to sending
+ * a request off to the IOC.
+ */
+ struct {
+ u32 __hdr[2];
+ /*
+ * The following _MUST_ match the location of the
+ * MsgContext field in the MPT message headers.
+ */
+ union {
+ u32 MsgContext;
+ struct {
+ u16 req_idx; /* Request index */
+ u8 cb_idx; /* callback function index */
+ u8 rsvd;
+ } fld;
+ } msgctxu;
+ } hwhdr;
+ /*
+ * Remark: 32 bit identifier:
+ * 31-24: reserved
+ * 23-16: call back index
+ * 15-0 : request index
+ */
+} MPT_FRAME_TRACKER;
+
+/*
+ * We might want to view/access a frame as:
+ * 1) generic request header
+ * 2) SCSIIORequest
+ * 3) SCSIIOReply
+ * 4) MPIDefaultReply
+ * 5) frame tracker
+ */
+typedef struct _MPT_FRAME_HDR {
+ union {
+ MPIHeader_t hdr;
+ SCSIIORequest_t scsireq;
+ SCSIIOReply_t sreply;
+ ConfigReply_t configreply;
+ MPIDefaultReply_t reply;
+ MPT_FRAME_TRACKER frame;
+ } u;
+} MPT_FRAME_HDR;
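+
+/*
+ *	Illustrative sketch (not part of the original driver sources): on a
+ *	little-endian host the MsgContext layout noted above means a request
+ *	posted by the protocol driver registered at cb_idx 0x0A with request
+ *	index 0x0123 carries the 32-bit identifier 0x000A0123:
+ *
+ *	mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(0x0123);
+ *	mf->u.frame.hwhdr.msgctxu.fld.cb_idx = 0x0A;
+ */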
+
+#define MPT_REQ_MSGFLAGS_DROPME 0x80
+
+typedef struct _MPT_SGL_HDR {
+ SGESimple32_t sge[1];
+} MPT_SGL_HDR;
+
+typedef struct _MPT_SGL64_HDR {
+ SGESimple64_t sge[1];
+} MPT_SGL64_HDR;
+
+/*
+ * System interface register set
+ */
+
+typedef struct _SYSIF_REGS
+{
+ u32 Doorbell; /* 00 System<->IOC Doorbell reg */
+ u32 WriteSequence; /* 04 Write Sequence register */
+ u32 Diagnostic; /* 08 Diagnostic register */
+ u32 TestBase; /* 0C Test Base Address */
+ u32 DiagRwData; /* 10 Read Write Data (fw download) */
+ u32 DiagRwAddress; /* 14 Read Write Address (fw download)*/
+ u32 Reserved1[6]; /* 18-2F reserved for future use */
+ u32 IntStatus; /* 30 Interrupt Status */
+ u32 IntMask; /* 34 Interrupt Mask */
+ u32 Reserved2[2]; /* 38-3F reserved for future use */
+ u32 RequestFifo; /* 40 Request Post/Free FIFO */
+ u32 ReplyFifo; /* 44 Reply Post/Free FIFO */
+ u32 RequestHiPriFifo; /* 48 Hi Priority Request FIFO */
+ u32 Reserved3; /* 4C-4F reserved for future use */
+ u32 HostIndex; /* 50 Host Index register */
+ u32 Reserved4[15]; /* 54-8F */
+ u32 Fubar; /* 90 For Fubar usage */
+ u32 Reserved5[1050];/* 94-10F8 */
+ u32 Reset_1078; /* 10FC Reset 1078 */
+} SYSIF_REGS;
+
+/*
+ * NOTE: Use MPI_{DOORBELL,WRITESEQ,DIAG}_xxx defs in lsi/mpi.h
+ * in conjunction with SYSIF_REGS accesses!
+ */
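+
+/*
+ *	Illustrative sketch (not part of the original driver sources): this
+ *	register block is reached through the ioremap'd ioc->chip pointer,
+ *	normally via the CHIPREG_READ32()/CHIPREG_WRITE32() wrappers used in
+ *	mptbase.c, e.g. sampling the doorbell for the current IOC state:
+ *
+ *	u32 doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
+ *	u32 ioc_state = doorbell & MPI_IOC_STATE_MASK;
+ */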
+
+
+/*
+ * Dynamic Multi-Pathing specific stuff...
+ */
+
+/* VirtTarget negoFlags field */
+#define MPT_TARGET_NO_NEGO_WIDE 0x01
+#define MPT_TARGET_NO_NEGO_SYNC 0x02
+#define MPT_TARGET_NO_NEGO_QAS 0x04
+#define MPT_TAPE_NEGO_IDP 0x08
+
+/*
+ * VirtDevice - FC LUN device or SCSI target device
+ */
+typedef struct _VirtTarget {
+ struct scsi_target *starget;
+ u8 tflags;
+ u8 ioc_id;
+ u8 id;
+ u8 channel;
+ u8 minSyncFactor; /* 0xFF is async */
+ u8 maxOffset; /* 0 if async */
+ u8 maxWidth; /* 0 if narrow, 1 if wide */
+ u8 negoFlags; /* bit field, see above */
+ u8 raidVolume; /* set, if RAID Volume */
+ u8 type; /* byte 0 of Inquiry data */
+ u8 deleted; /* target in process of being removed */
+ u8 inDMD; /* currently in the device
+ removal delay timer */
+ u32 num_luns;
+} VirtTarget;
+
+typedef struct _VirtDevice {
+ VirtTarget *vtarget;
+ u8 configured_lun;
+ int lun;
+} VirtDevice;
+
+/*
+ * Fibre Channel (SCSI) target device and associated defines...
+ */
+#define MPT_TARGET_DEFAULT_DV_STATUS 0x00
+#define MPT_TARGET_FLAGS_VALID_NEGO 0x01
+#define MPT_TARGET_FLAGS_VALID_INQUIRY 0x02
+#define MPT_TARGET_FLAGS_Q_YES 0x08
+#define MPT_TARGET_FLAGS_VALID_56 0x10
+#define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20
+#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x40
+#define MPT_TARGET_FLAGS_LED_ON 0x80
+
+/*
+ * IOCTL structure and associated defines
+ */
+
+#define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */
+
+#define MPT_MGMT_STATUS_RF_VALID 0x01 /* The Reply Frame is VALID */
+#define MPT_MGMT_STATUS_COMMAND_GOOD 0x02 /* Command Status GOOD */
+#define MPT_MGMT_STATUS_PENDING 0x04 /* command is pending */
+#define MPT_MGMT_STATUS_DID_IOCRESET 0x08 /* IOC Reset occurred
+						   on the current cmd */
+#define MPT_MGMT_STATUS_SENSE_VALID 0x10 /* valid sense info */
+#define MPT_MGMT_STATUS_TIMER_ACTIVE 0x20 /* obsolete */
+#define MPT_MGMT_STATUS_FREE_MF 0x40 /* free the mf from
+ complete routine */
+
+#define INITIALIZE_MGMT_STATUS(status) \
+ status = MPT_MGMT_STATUS_PENDING;
+#define CLEAR_MGMT_STATUS(status) \
+ status = 0;
+#define CLEAR_MGMT_PENDING_STATUS(status) \
+ status &= ~MPT_MGMT_STATUS_PENDING;
+#define SET_MGMT_MSG_CONTEXT(msg_context, value) \
+ msg_context = value;
+
+typedef struct _MPT_MGMT {
+ struct mutex mutex;
+ struct completion done;
+ u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
+ u8 sense[MPT_SENSE_BUFFER_ALLOC];
+ u8 status; /* current command status */
+ int completion_code;
+ u32 msg_context;
+} MPT_MGMT;
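+
+/*
+ *	Illustrative sketch (not part of the original driver sources): the
+ *	usual pattern for a synchronous internal command tracked through one
+ *	of the MPT_MGMT objects (here ioc->internal_cmds, as a hypothetical
+ *	caller might use it); the reply callback is expected to set
+ *	MPT_MGMT_STATUS_COMMAND_GOOD and complete &mgmt->done.
+ *
+ *	MPT_MGMT *mgmt = &ioc->internal_cmds;
+ *
+ *	mutex_lock(&mgmt->mutex);
+ *	INITIALIZE_MGMT_STATUS(mgmt->status)
+ *	... build and post the request frame ...
+ *	wait_for_completion_timeout(&mgmt->done, 30 * HZ);
+ *	if (!(mgmt->status & MPT_MGMT_STATUS_COMMAND_GOOD))
+ *		... handle the failure (possibly an IOC reset) ...
+ *	CLEAR_MGMT_STATUS(mgmt->status)
+ *	mutex_unlock(&mgmt->mutex);
+ */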
+
+/*
+ * Event Structure and define
+ */
+#define MPTCTL_EVENT_LOG_SIZE (0x000000032)
+typedef struct _mpt_ioctl_events {
+ u32 event; /* Specified by define above */
+ u32 eventContext; /* Index or counter */
+ u32 data[2]; /* First 8 bytes of Event Data */
+} MPT_IOCTL_EVENTS;
+
+/*
+ * CONFIGPARM status defines
+ */
+#define MPT_CONFIG_GOOD MPI_IOCSTATUS_SUCCESS
+#define MPT_CONFIG_ERROR 0x002F
+
+/*
+ * Substructure to store SCSI specific configuration page data
+ */
+ /* dvStatus defines: */
+#define MPT_SCSICFG_USE_NVRAM 0x01 /* WriteSDP1 using NVRAM */
+#define MPT_SCSICFG_ALL_IDS 0x02 /* WriteSDP1 to all IDS */
+/* #define MPT_SCSICFG_BLK_NEGO 0x10 WriteSDP1 with WDTR and SDTR disabled */
+
+typedef struct _SpiCfgData {
+ u32 PortFlags;
+ int *nvram; /* table of device NVRAM values */
+ IOCPage4_t *pIocPg4; /* SEP devices addressing */
+ dma_addr_t IocPg4_dma; /* Phys Addr of IOCPage4 data */
+ int IocPg4Sz; /* IOCPage4 size */
+ u8 minSyncFactor; /* 0xFF if async */
+ u8 maxSyncOffset; /* 0 if async */
+ u8 maxBusWidth; /* 0 if narrow, 1 if wide */
+ u8 busType; /* SE, LVD, HD */
+ u8 sdp1version; /* SDP1 version */
+ u8 sdp1length; /* SDP1 length */
+ u8 sdp0version; /* SDP0 version */
+ u8 sdp0length; /* SDP0 length */
+ u8 dvScheduled; /* 1 if scheduled */
+ u8 noQas; /* Disable QAS for this adapter */
+ u8 Saf_Te; /* 1 to force all Processors as
+ * SAF-TE if Inquiry data length
+ * is too short to check for SAF-TE
+ */
+ u8 bus_reset; /* 1 to allow bus reset */
+ u8 rsvd[1];
+}SpiCfgData;
+
+typedef struct _SasCfgData {
+ u8 ptClear; /* 1 to automatically clear the
+ * persistent table.
+ * 0 to disable
+ * automatic clearing.
+ */
+}SasCfgData;
+
+/*
+ * Inactive volume link list of raid component data
+ * @inactive_list
+ */
+struct inactive_raid_component_info {
+ struct list_head list;
+ u8 volumeID; /* volume target id */
+ u8 volumeBus; /* volume channel */
+ IOC_3_PHYS_DISK d; /* phys disk info */
+};
+
+typedef struct _RaidCfgData {
+ IOCPage2_t *pIocPg2; /* table of Raid Volumes */
+ IOCPage3_t *pIocPg3; /* table of physical disks */
+ struct mutex inactive_list_mutex;
+ struct list_head inactive_list; /* link list for physical
+ disk that belong in
+ inactive volumes */
+}RaidCfgData;
+
+typedef struct _FcCfgData {
+ /* will ultimately hold fc_port_page0 also */
+ struct {
+ FCPortPage1_t *data;
+ dma_addr_t dma;
+ int pg_sz;
+ } fc_port_page1[2];
+} FcCfgData;
+
+#define MPT_RPORT_INFO_FLAGS_REGISTERED 0x01 /* rport registered */
+#define MPT_RPORT_INFO_FLAGS_MISSING 0x02 /* missing from DevPage0 scan */
+
+/*
+ * data allocated for each fc rport device
+ */
+struct mptfc_rport_info
+{
+ struct list_head list;
+ struct fc_rport *rport;
+ struct scsi_target *starget;
+ FCDevicePage0_t pg0;
+ u8 flags;
+};
+
+typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
+typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
+ dma_addr_t dma_addr);
+typedef void (*MPT_SCHEDULE_TARGET_RESET)(void *ioc);
+
+/*
+ * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
+ */
+typedef struct _MPT_ADAPTER
+{
+ int id; /* Unique adapter id N {0,1,2,...} */
+ int pci_irq; /* This irq */
+ char name[MPT_NAME_LENGTH]; /* "iocN" */
+ char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */
+#ifdef CONFIG_FUSION_LOGGING
+ /* used in mpt_display_event_info */
+ char evStr[EVENT_DESCR_STR_SZ];
+#endif
+ char board_name[16];
+ char board_assembly[16];
+ char board_tracer[16];
+ u16 nvdata_version_persistent;
+ u16 nvdata_version_default;
+ int debug_level;
+ u8 io_missing_delay;
+ u16 device_missing_delay;
+ SYSIF_REGS __iomem *chip; /* == c8817000 (mmap) */
+ SYSIF_REGS __iomem *pio_chip; /* Programmed IO (downloadboot) */
+ u8 bus_type;
+ u32 mem_phys; /* == f4020000 (mmap) */
+ u32 pio_mem_phys; /* Programmed IO (downloadboot) */
+ int mem_size; /* mmap memory size */
+ int number_of_buses;
+ int devices_per_bus;
+ int alloc_total;
+ u32 last_state;
+ int active;
+ u8 *alloc; /* frames alloc ptr */
+ dma_addr_t alloc_dma;
+ u32 alloc_sz;
+ MPT_FRAME_HDR *reply_frames; /* Reply msg frames - rounded up! */
+ u32 reply_frames_low_dma;
+ int reply_depth; /* Num Allocated reply frames */
+ int reply_sz; /* Reply frame size */
+ int num_chain; /* Number of chain buffers */
+ MPT_ADD_SGE add_sge; /* Pointer to add_sge
+ function */
+ MPT_ADD_CHAIN add_chain; /* Pointer to add_chain
+ function */
+ /* Pool of buffers for chaining. ReqToChain
+ * and ChainToChain track index of chain buffers.
+ * ChainBuffer (DMA) virt/phys addresses.
+ * FreeChainQ (lock) locking mechanisms.
+ */
+ int *ReqToChain;
+ int *RequestNB;
+ int *ChainToChain;
+ u8 *ChainBuffer;
+ dma_addr_t ChainBufferDMA;
+ struct list_head FreeChainQ;
+ spinlock_t FreeChainQlock;
+ /* We (host driver) get to manage our own RequestQueue! */
+ dma_addr_t req_frames_dma;
+ MPT_FRAME_HDR *req_frames; /* Request msg frames - rounded up! */
+ u32 req_frames_low_dma;
+ int req_depth; /* Number of request frames */
+ int req_sz; /* Request frame size (bytes) */
+ spinlock_t FreeQlock;
+ struct list_head FreeQ;
+ /* Pool of SCSI sense buffers for commands coming from
+ * the SCSI mid-layer. We have one 256 byte sense buffer
+ * for each REQ entry.
+ */
+ u8 *sense_buf_pool;
+ dma_addr_t sense_buf_pool_dma;
+ u32 sense_buf_low_dma;
+ u8 *HostPageBuffer; /* SAS - host page buffer support */
+ u32 HostPageBuffer_sz;
+ dma_addr_t HostPageBuffer_dma;
+ int mtrr_reg;
+ struct pci_dev *pcidev; /* struct pci_dev pointer */
+ int bars; /* bitmask of BAR's that must be configured */
+ int msi_enable;
+ u8 __iomem *memmap; /* mmap address */
+ struct Scsi_Host *sh; /* Scsi Host pointer */
+ SpiCfgData spi_data; /* Scsi config. data */
+ RaidCfgData raid_data; /* Raid config. data */
+ SasCfgData sas_data; /* Sas config. data */
+ FcCfgData fc_data; /* Fc config. data */
+ struct proc_dir_entry *ioc_dentry;
+ struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */
+ u32 biosVersion; /* BIOS version from IO Unit Page 2 */
+ int eventTypes; /* Event logging parameters */
+ int eventContext; /* Next event context */
+ int eventLogSize; /* Max number of cached events */
+ struct _mpt_ioctl_events *events; /* pointer to event log */
+ u8 *cached_fw; /* Pointer to FW */
+ dma_addr_t cached_fw_dma;
+ int hs_reply_idx;
+#ifndef MFCNT
+ u32 pad0;
+#else
+ u32 mfcnt;
+#endif
+ u32 NB_for_64_byte_frame;
+ u32 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)];
+ u16 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)];
+ IOCFactsReply_t facts;
+ PortFactsReply_t pfacts[2];
+ FCPortPage0_t fc_port_page0[2];
+ LANPage0_t lan_cnfg_page0;
+ LANPage1_t lan_cnfg_page1;
+
+ u8 ir_firmware; /* =1 if IR firmware detected */
+ /*
+ * Description: errata_flag_1064
+ * If a PCIX read occurs within 1 or 2 cycles after the chip receives
+ * a split completion for read data, an internal address pointer incorrectly
+ * increments by 32 bytes.
+ */
+ int errata_flag_1064;
+ int aen_event_read_flag; /* flag to indicate event log was read*/
+ u8 FirstWhoInit;
+ u8 upload_fw; /* If set, do a fw upload */
+ u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */
+ u8 pad1[4];
+ u8 DoneCtx;
+ u8 TaskCtx;
+ u8 InternalCtx;
+ struct list_head list;
+ struct net_device *netdev;
+ struct list_head sas_topology;
+ struct mutex sas_topology_mutex;
+
+ struct workqueue_struct *fw_event_q;
+ struct list_head fw_event_list;
+ spinlock_t fw_event_lock;
+ u8 fw_events_off; /* if '1', then ignore events */
+ char fw_event_q_name[MPT_KOBJ_NAME_LEN];
+
+ struct mutex sas_discovery_mutex;
+ u8 sas_discovery_runtime;
+ u8 sas_discovery_ignore_events;
+
+ /* port_info object for the host */
+ struct mptsas_portinfo *hba_port_info;
+ u64 hba_port_sas_addr;
+ u16 hba_port_num_phy;
+ struct list_head sas_device_info_list;
+ struct mutex sas_device_info_mutex;
+ u8 old_sas_discovery_protocal;
+ u8 sas_discovery_quiesce_io;
+	int			 sas_index; /* index referencing */
+ MPT_MGMT sas_mgmt;
+ MPT_MGMT mptbase_cmds; /* for sending config pages */
+ MPT_MGMT internal_cmds;
+ MPT_MGMT taskmgmt_cmds;
+ MPT_MGMT ioctl_cmds;
+ spinlock_t taskmgmt_lock; /* diagnostic reset lock */
+ int taskmgmt_in_progress;
+ u8 taskmgmt_quiesce_io;
+ u8 ioc_reset_in_progress;
+ MPT_SCHEDULE_TARGET_RESET schedule_target_reset;
+ struct work_struct sas_persist_task;
+
+ struct work_struct fc_setup_reset_work;
+ struct list_head fc_rports;
+ struct work_struct fc_lsc_work;
+ u8 fc_link_speed[2];
+ spinlock_t fc_rescan_work_lock;
+ struct work_struct fc_rescan_work;
+ char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
+ struct workqueue_struct *fc_rescan_work_q;
+
+ /* driver forced bus resets count */
+ unsigned long hard_resets;
+ /* fw/external bus resets count */
+ unsigned long soft_resets;
+ /* cmd timeouts */
+ unsigned long timeouts;
+
+ struct scsi_cmnd **ScsiLookup;
+ spinlock_t scsi_lookup_lock;
+ u64 dma_mask;
+ u32 broadcast_aen_busy;
+ char reset_work_q_name[MPT_KOBJ_NAME_LEN];
+ struct workqueue_struct *reset_work_q;
+ struct delayed_work fault_reset_work;
+
+ u8 sg_addr_size;
+ u8 in_rescan;
+ u8 SGE_size;
+
+} MPT_ADAPTER;
+
+/*
+ * New return value convention:
+ * 1 = Ok to free associated request frame
+ * 0 = not Ok ...
+ */
+typedef int (*MPT_CALLBACK)(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
+typedef int (*MPT_EVHANDLER)(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply);
+typedef int (*MPT_RESETHANDLER)(MPT_ADAPTER *ioc, int reset_phase);
+/* reset_phase defs */
+#define MPT_IOC_PRE_RESET 0
+#define MPT_IOC_POST_RESET 1
+#define MPT_IOC_SETUP_RESET 2
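+
+/*
+ * Minimal sketch (illustrative only, not driver code) of the convention
+ * above: a protocol driver registers its reply callback with
+ * mpt_register() and returns 1 when mptbase may free the request frame.
+ * MPTCTL_DRIVER is assumed to be one of the MPT_DRIVER_CLASS values
+ * defined earlier in this header.
+ */
+#if 0	/* example only - not compiled */
+static u8 example_cb_idx;
+
+static int
+example_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
+{
+	/* A real handler would inspect reply->u.reply.IOCStatus here. */
+	return 1;	/* Ok to free the associated request frame */
+}
+
+/*
+ * At init:  example_cb_idx = mpt_register(example_reply, MPTCTL_DRIVER,
+ *					    "example_reply");
+ * At exit:  mpt_deregister(example_cb_idx);
+ */
+#endif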
+
+/*
+ * Invent MPT host event (super-set of MPI Events)
+ * Fitted to 1030's 64-byte [max] request frame size
+ */
+typedef struct _MPT_HOST_EVENT {
+ EventNotificationReply_t MpiEvent; /* 8 32-bit words! */
+ u32 pad[6];
+ void *next;
+} MPT_HOST_EVENT;
+
+#define MPT_HOSTEVENT_IOC_BRINGUP 0x91
+#define MPT_HOSTEVENT_IOC_RECOVER 0x92
+
+/* Define the generic types based on the size
+ * of the dma_addr_t type.
+ */
+typedef struct _mpt_sge {
+ u32 FlagsLength;
+ dma_addr_t Address;
+} MptSge_t;
+
+
+#define mpt_msg_flags(ioc) \
+ (ioc->sg_addr_size == sizeof(u64)) ? \
+ MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
+ MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32
+
+#define MPT_SGE_FLAGS_64_BIT_ADDRESSING \
+ (MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT)
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Funky (private) macros...
+ */
+#include "mptdebug.h"
+
+#define MPT_INDEX_2_MFPTR(ioc,idx) \
+ (MPT_FRAME_HDR*)( (u8*)(ioc)->req_frames + (ioc)->req_sz * (idx) )
+
+#define MFPTR_2_MPT_INDEX(ioc,mf) \
+ (int)( ((u8*)mf - (u8*)(ioc)->req_frames) / (ioc)->req_sz )
+
+#define MPT_INDEX_2_RFPTR(ioc,idx) \
+ (MPT_FRAME_HDR*)( (u8*)(ioc)->reply_frames + (ioc)->req_sz * (idx) )
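+
+/*
+ * Illustrative only: with req_sz == 128, MPT_INDEX_2_MFPTR(ioc, 3) yields
+ * the frame at req_frames + 384, and MFPTR_2_MPT_INDEX() maps that frame
+ * pointer back to index 3.
+ */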
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#define SCSI_STD_SENSE_BYTES 18
+#define SCSI_STD_INQUIRY_BYTES 36
+#define SCSI_MAX_INQUIRY_BYTES 96
+
+/*
+ * MPT_SCSI_HOST defines - Used by the IOCTL and the SCSI drivers
+ * Private to the driver.
+ */
+/* LOCAL structure and fields used when processing
+ * internally generated commands. These include:
+ * bus scan, dv and config requests.
+ */
+typedef struct _MPT_LOCAL_REPLY {
+ ConfigPageHeader_t header;
+ int completion;
+ u8 sense[SCSI_STD_SENSE_BYTES];
+ u8 scsiStatus;
+ u8 skip;
+ u32 pad;
+} MPT_LOCAL_REPLY;
+
+#define MPT_HOST_BUS_UNKNOWN (0xFF)
+#define MPT_HOST_TOO_MANY_TM (0x05)
+#define MPT_HOST_NVRAM_INVALID (0xFFFFFFFF)
+#define MPT_HOST_NO_CHAIN (0xFFFFFFFF)
+#define MPT_NVRAM_MASK_TIMEOUT (0x000000FF)
+#define MPT_NVRAM_SYNC_MASK (0x0000FF00)
+#define MPT_NVRAM_SYNC_SHIFT (8)
+#define MPT_NVRAM_DISCONNECT_ENABLE (0x00010000)
+#define MPT_NVRAM_ID_SCAN_ENABLE (0x00020000)
+#define MPT_NVRAM_LUN_SCAN_ENABLE (0x00040000)
+#define MPT_NVRAM_TAG_QUEUE_ENABLE (0x00080000)
+#define MPT_NVRAM_WIDE_DISABLE (0x00100000)
+#define MPT_NVRAM_BOOT_CHOICE (0x00200000)
+
+/* The TM_STATE variable is used to provide strict single threading of TM
+ * requests as well as communicate TM error conditions.
+ */
+#define TM_STATE_NONE (0)
+#define TM_STATE_IN_PROGRESS (1)
+#define TM_STATE_ERROR (2)
+
+typedef enum {
+ FC,
+ SPI,
+ SAS
+} BUS_TYPE;
+
+typedef struct _MPT_SCSI_HOST {
+ MPT_ADAPTER *ioc;
+ ushort sel_timeout[MPT_MAX_FC_DEVICES];
+ char *info_kbuf;
+ long last_queue_full;
+ u16 spi_pending;
+ struct list_head target_reset_list;
+} MPT_SCSI_HOST;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * More Dynamic Multi-Pathing stuff...
+ */
+
+/* Forward decl, a strange C thing, to prevent gcc compiler warnings */
+struct scsi_cmnd;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Generic structure passed to the base mpt_config function.
+ */
+typedef struct _x_config_parms {
+ union {
+ ConfigExtendedPageHeader_t *ehdr;
+ ConfigPageHeader_t *hdr;
+ } cfghdr;
+ dma_addr_t physAddr;
+ u32 pageAddr; /* properly formatted */
+ u16 status;
+ u8 action;
+ u8 dir;
+ u8 timeout; /* seconds */
+} CONFIGPARMS;
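+
+/*
+ * Minimal sketch (illustrative only, not driver code) of the typical
+ * two-step use of CONFIGPARMS with mpt_config(): fetch the page header
+ * first, then read the full page into a DMA buffer.  The MPI_CONFIG_*
+ * constants are assumed from the lsi/ MPI headers.
+ */
+#if 0	/* example only - not compiled */
+static int
+example_read_ioc_page(MPT_ADAPTER *ioc, u8 page_number)
+{
+	CONFIGPARMS		cfg;
+	ConfigPageHeader_t	hdr;
+	dma_addr_t		dma;
+	void			*buf;
+	int			rc;
+
+	memset(&hdr, 0, sizeof(hdr));
+	hdr.PageType   = MPI_CONFIG_PAGETYPE_IOC;
+	hdr.PageNumber = page_number;
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.cfghdr.hdr = &hdr;
+	cfg.action     = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.physAddr   = -1;			/* no page buffer yet */
+	cfg.timeout    = 10;			/* seconds */
+	rc = mpt_config(ioc, &cfg);
+	if (rc || !hdr.PageLength)
+		return rc ? rc : -ENODEV;
+
+	buf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &dma);
+	if (!buf)
+		return -ENOMEM;
+
+	cfg.action   = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+	cfg.physAddr = dma;
+	rc = mpt_config(ioc, &cfg);		/* page data lands in buf */
+
+	pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buf, dma);
+	return rc;
+}
+#endif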
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Public entry points...
+ */
+extern int mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id);
+extern void mpt_detach(struct pci_dev *pdev);
+#ifdef CONFIG_PM
+extern int mpt_suspend(struct pci_dev *pdev, pm_message_t state);
+extern int mpt_resume(struct pci_dev *pdev);
+#endif
+extern u8 mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass,
+ char *func_name);
+extern void mpt_deregister(u8 cb_idx);
+extern int mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc);
+extern void mpt_event_deregister(u8 cb_idx);
+extern int mpt_reset_register(u8 cb_idx, MPT_RESETHANDLER reset_func);
+extern void mpt_reset_deregister(u8 cb_idx);
+extern int mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, u8 cb_idx);
+extern void mpt_device_driver_deregister(u8 cb_idx);
+extern MPT_FRAME_HDR *mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc);
+extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
+extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
+extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
+
+extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
+extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
+extern u32 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
+extern void mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
+extern int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
+extern int mpt_Soft_Hard_ResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
+extern int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
+extern int mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
+extern void mpt_free_fw_memory(MPT_ADAPTER *ioc);
+extern int mpt_findImVolumes(MPT_ADAPTER *ioc);
+extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
+extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
+extern int mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
+ pRaidPhysDiskPage1_t phys_disk);
+extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc,
+ u8 phys_disk_num);
+extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
+extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
+extern void mpt_halt_firmware(MPT_ADAPTER *ioc);
+
+
+/*
+ * Public data decl's...
+ */
+extern struct list_head ioc_list;
+extern int mpt_fwfault_debug;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#endif /* } __KERNEL__ */
+
+#ifdef CONFIG_64BIT
+#define CAST_U32_TO_PTR(x) ((void *)(u64)x)
+#define CAST_PTR_TO_U32(x) ((u32)(u64)x)
+#else
+#define CAST_U32_TO_PTR(x) ((void *)x)
+#define CAST_PTR_TO_U32(x) ((u32)x)
+#endif
+
+#define MPT_PROTOCOL_FLAGS_c_c_c_c(pflags) \
+ ((pflags) & MPI_PORTFACTS_PROTOCOL_INITIATOR) ? 'I' : 'i', \
+ ((pflags) & MPI_PORTFACTS_PROTOCOL_TARGET) ? 'T' : 't', \
+ ((pflags) & MPI_PORTFACTS_PROTOCOL_LAN) ? 'L' : 'l', \
+ ((pflags) & MPI_PORTFACTS_PROTOCOL_LOGBUSADDR) ? 'B' : 'b'
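+
+/*
+ * Illustrative only: the macro above expands to four comma-separated char
+ * arguments intended for a "%c%c%c%c" format, e.g.
+ *
+ *	printk("ProtocolFlags = {%c%c%c%c}\n",
+ *	       MPT_PROTOCOL_FLAGS_c_c_c_c(pflags));
+ *
+ * prints an upper-case letter for each protocol the port supports
+ * (Initiator, Target, LAN, LogBusAddr) and the lower-case letter otherwise.
+ */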
+
+/*
+ * Shifted SGE Defines - Use in SGE with FlagsLength member.
+ * Otherwise, use MPI_xxx defines (refer to "lsi/mpi.h" header).
+ * Defaults: 32 bit SGE, SYSTEM_ADDRESS if direction bit is 0, read
+ */
+#define MPT_TRANSFER_IOC_TO_HOST (0x00000000)
+#define MPT_TRANSFER_HOST_TO_IOC (0x04000000)
+#define MPT_SGE_FLAGS_LAST_ELEMENT (0x80000000)
+#define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000)
+#define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000)
+#define MPT_SGE_FLAGS_DIRECTION (0x04000000)
+#define MPT_SGE_FLAGS_END_OF_LIST (0x01000000)
+
+#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000)
+#define MPT_SGE_FLAGS_SIMPLE_ELEMENT (0x10000000)
+#define MPT_SGE_FLAGS_CHAIN_ELEMENT (0x30000000)
+#define MPT_SGE_FLAGS_ELEMENT_MASK (0x30000000)
+
+#define MPT_SGE_FLAGS_SSIMPLE_READ \
+ (MPT_SGE_FLAGS_LAST_ELEMENT | \
+ MPT_SGE_FLAGS_END_OF_BUFFER | \
+ MPT_SGE_FLAGS_END_OF_LIST | \
+ MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPT_TRANSFER_IOC_TO_HOST)
+#define MPT_SGE_FLAGS_SSIMPLE_WRITE \
+ (MPT_SGE_FLAGS_LAST_ELEMENT | \
+ MPT_SGE_FLAGS_END_OF_BUFFER | \
+ MPT_SGE_FLAGS_END_OF_LIST | \
+ MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPT_TRANSFER_HOST_TO_IOC)
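+
+/*
+ * Illustrative only: the two composites above work out to 0xD1000000
+ * (read) and 0xD5000000 (write); callers OR in the byte count to form a
+ * complete FlagsLength for a single simple SGE.
+ */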
+
+/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#endif
+
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
new file mode 100644
index 00000000..6e6e16aa
--- /dev/null
+++ b/drivers/message/fusion/mptctl.c
@@ -0,0 +1,3087 @@
+/*
+ * linux/drivers/message/fusion/mptctl.c
+ * mpt Ioctl driver.
+ * For use with LSI PCI chip/adapters
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h> /* for mdelay */
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/compat.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#define COPYRIGHT "Copyright (c) 1999-2008 LSI Corporation"
+#define MODULEAUTHOR "LSI Corporation"
+#include "mptbase.h"
+#include "mptctl.h"
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#define my_NAME "Fusion MPT misc device (ioctl) driver"
+#define my_VERSION MPT_LINUX_VERSION_COMMON
+#define MYNAM "mptctl"
+
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(my_VERSION);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+static DEFINE_MUTEX(mpctl_mutex);
+static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS;
+
+static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait );
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+struct buflist {
+ u8 *kptr;
+ int len;
+};
+
+/*
+ * Function prototypes. Called from OS entry point mptctl_ioctl.
+ * arg contents specific to function.
+ */
+static int mptctl_fw_download(unsigned long arg);
+static int mptctl_getiocinfo(unsigned long arg, unsigned int cmd);
+static int mptctl_gettargetinfo(unsigned long arg);
+static int mptctl_readtest(unsigned long arg);
+static int mptctl_mpt_command(unsigned long arg);
+static int mptctl_eventquery(unsigned long arg);
+static int mptctl_eventenable(unsigned long arg);
+static int mptctl_eventreport(unsigned long arg);
+static int mptctl_replace_fw(unsigned long arg);
+
+static int mptctl_do_reset(unsigned long arg);
+static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd);
+static int mptctl_hp_targetinfo(unsigned long arg);
+
+static int mptctl_probe(struct pci_dev *, const struct pci_device_id *);
+static void mptctl_remove(struct pci_dev *);
+
+#ifdef CONFIG_COMPAT
+static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg);
+#endif
+/*
+ * Private function calls.
+ */
+static int mptctl_do_mpt_command(struct mpt_ioctl_command karg, void __user *mfPtr);
+static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen);
+static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags,
+ struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
+static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
+ struct buflist *buflist, MPT_ADAPTER *ioc);
+
+/*
+ * Reset Handler cleanup function
+ */
+static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
+
+/*
+ * Event Handler function
+ */
+static int mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
+static struct fasync_struct *async_queue=NULL;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Scatter gather list (SGL) sizes and limits...
+ */
+//#define MAX_SCSI_FRAGS 9
+#define MAX_FRAGS_SPILL1 9
+#define MAX_FRAGS_SPILL2 15
+#define FRAGS_PER_BUCKET (MAX_FRAGS_SPILL2 + 1)
+
+//#define MAX_CHAIN_FRAGS 64
+//#define MAX_CHAIN_FRAGS (15+15+15+16)
+#define MAX_CHAIN_FRAGS (4 * MAX_FRAGS_SPILL2 + 1)
+
+// Define max sg LIST bytes ( == (#frags + #chains) * 8 bytes each)
+// Works out to: 592d bytes! (9+1)*8 + 4*(15+1)*8
+// ^----------------- 80 + 512
+#define MAX_SGL_BYTES ((MAX_FRAGS_SPILL1 + 1 + (4 * FRAGS_PER_BUCKET)) * 8)
+
+/* linux only seems to ever give 128kB MAX contiguous (GFP_USER) mem bytes */
+#define MAX_KMALLOC_SZ (128*1024)
+
+#define MPT_IOCTL_DEFAULT_TIMEOUT 10 /* Default timeout value (seconds) */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptctl_syscall_down - Down the MPT adapter syscall semaphore.
+ * @ioc: Pointer to MPT adapter
+ * @nonblock: boolean, non-zero if O_NONBLOCK is set
+ *
+ * All of the ioctl commands can potentially sleep, which is illegal
+ * while holding a spinlock, so mutual exclusion is provided here by the
+ * per-IOC ioctl_cmds mutex instead.
+ *
+ * Returns negative errno on error, or zero for success.
+ */
+static inline int
+mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
+{
+ int rc = 0;
+
+ if (nonblock) {
+ if (!mutex_trylock(&ioc->ioctl_cmds.mutex))
+ rc = -EAGAIN;
+ } else {
+ if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex))
+ rc = -ERESTARTSYS;
+ }
+ return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This is the callback for any message we have posted. The message itself
+ * will be returned to the message pool when we return from the IRQ
+ *
+ * This runs in irq context so be short and sweet.
+ */
+static int
+mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
+{
+ char *sense_data;
+ int req_index;
+ int sz;
+
+ if (!req)
+ return 0;
+
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function "
+ "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function,
+ req, reply));
+
+ /*
+ * Handling continuation of the same reply. Processing the first
+	 * reply, and eating the other replies that come later.
+ */
+ if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext)
+ goto out_continuation;
+
+ ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+
+ if (!reply)
+ goto out;
+
+ ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength);
+ memcpy(ioc->ioctl_cmds.reply, reply, sz);
+
+ if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo)
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name,
+ le16_to_cpu(reply->u.reply.IOCStatus),
+ le32_to_cpu(reply->u.reply.IOCLogInfo)));
+
+ if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+ (req->u.hdr.Function ==
+ MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+
+ if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "scsi_status (0x%02x), scsi_state (0x%02x), "
+ "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
+ reply->u.sreply.SCSIStatus,
+ reply->u.sreply.SCSIState,
+ le16_to_cpu(reply->u.sreply.TaskTag),
+ le32_to_cpu(reply->u.sreply.TransferCount)));
+
+ if (reply->u.sreply.SCSIState &
+ MPI_SCSI_STATE_AUTOSENSE_VALID) {
+ sz = req->u.scsireq.SenseBufferLength;
+ req_index =
+ le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
+ sense_data = ((u8 *)ioc->sense_buf_pool +
+ (req_index * MPT_SENSE_BUFFER_ALLOC));
+ memcpy(ioc->ioctl_cmds.sense, sense_data, sz);
+ ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID;
+ }
+ }
+
+ out:
+ /* We are done, issue wake up
+ */
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->ioctl_cmds.done);
+ if (ioc->bus_type == SAS)
+ ioc->schedule_target_reset(ioc);
+ } else {
+ ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->ioctl_cmds.done);
+ }
+ }
+
+ out_continuation:
+ if (reply && (reply->u.reply.MsgFlags &
+ MPI_MSGFLAGS_CONTINUATION_REPLY))
+ return 0;
+ return 1;
+}
+
+
+static int
+mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+{
+ if (!mf)
+ return 0;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt completed (mf=%p, mr=%p)\n",
+ ioc->name, mf, mr));
+
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+
+ if (!mr)
+ goto out;
+
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->taskmgmt_cmds.reply, mr,
+ min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
+ out:
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->taskmgmt_cmds.done);
+ if (ioc->bus_type == SAS)
+ ioc->schedule_target_reset(ioc);
+ return 1;
+ }
+ return 0;
+}
+
+static int
+mptctl_do_taskmgmt(MPT_ADAPTER *ioc, u8 tm_type, u8 bus_id, u8 target_id)
+{
+ MPT_FRAME_HDR *mf;
+ SCSITaskMgmt_t *pScsiTm;
+ SCSITaskMgmtReply_t *pScsiTmReply;
+ int ii;
+ int retval;
+ unsigned long timeout;
+ unsigned long time_count;
+ u16 iocstatus;
+
+
+ mutex_lock(&ioc->taskmgmt_cmds.mutex);
+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ return -EPERM;
+ }
+
+ retval = 0;
+
+ mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
+ if (mf == NULL) {
+ dtmprintk(ioc,
+ printk(MYIOC_s_WARN_FMT "TaskMgmt, no msg frames!!\n",
+ ioc->name));
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ retval = -ENOMEM;
+ goto tm_done;
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
+ ioc->name, mf));
+
+ pScsiTm = (SCSITaskMgmt_t *) mf;
+ memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
+ pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
+ pScsiTm->TaskType = tm_type;
+ if ((tm_type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) &&
+ (ioc->bus_type == FC))
+ pScsiTm->MsgFlags =
+ MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
+ pScsiTm->TargetID = target_id;
+ pScsiTm->Bus = bus_id;
+ pScsiTm->ChainOffset = 0;
+ pScsiTm->Reserved = 0;
+ pScsiTm->Reserved1 = 0;
+ pScsiTm->TaskMsgContext = 0;
+ for (ii= 0; ii < 8; ii++)
+ pScsiTm->LUN[ii] = 0;
+ for (ii=0; ii < 7; ii++)
+ pScsiTm->Reserved2[ii] = 0;
+
+ switch (ioc->bus_type) {
+ case FC:
+ timeout = 40;
+ break;
+ case SAS:
+ timeout = 30;
+ break;
+ case SPI:
+ default:
+ timeout = 10;
+ break;
+ }
+
+ dtmprintk(ioc,
+ printk(MYIOC_s_DEBUG_FMT "TaskMgmt type=%d timeout=%ld\n",
+ ioc->name, tm_type, timeout));
+
+ INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ time_count = jiffies;
+ if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
+ (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
+ mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf);
+ else {
+ retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
+ sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
+ if (retval != 0) {
+ dfailprintk(ioc,
+ printk(MYIOC_s_ERR_FMT
+ "TaskMgmt send_handshake FAILED!"
+ " (ioc %p, mf %p, rc=%d) \n", ioc->name,
+ ioc, mf, retval));
+ mpt_free_msg_frame(ioc, mf);
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ goto tm_done;
+ }
+ }
+
+ /* Now wait for the command to complete */
+ ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
+
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt failed\n", ioc->name));
+ mpt_free_msg_frame(ioc, mf);
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ retval = 0;
+ else
+ retval = -1; /* return failure */
+ goto tm_done;
+ }
+
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt failed\n", ioc->name));
+ retval = -1; /* return failure */
+ goto tm_done;
+ }
+
+ pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
+ "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
+ "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
+ pScsiTmReply->TargetID, tm_type,
+ le16_to_cpu(pScsiTmReply->IOCStatus),
+ le32_to_cpu(pScsiTmReply->IOCLogInfo),
+ pScsiTmReply->ResponseCode,
+ le32_to_cpu(pScsiTmReply->TerminationCount)));
+
+ iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+
+ if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
+ iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED ||
+ iocstatus == MPI_IOCSTATUS_SUCCESS)
+ retval = 0;
+ else {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt failed\n", ioc->name));
+ retval = -1; /* return failure */
+ }
+
+ tm_done:
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ return retval;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* mptctl_timeout_expired
+ *
+ * Called when a posted command that expected a reply interrupt has timed out.
+ *
+ */
+static void
+mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
+{
+ unsigned long flags;
+ int ret_val = -1;
+ SCSIIORequest_t *scsi_req = (SCSIIORequest_t *) mf;
+ u8 function = mf->u.hdr.Function;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
+ ioc->name, __func__));
+
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+ mpt_free_msg_frame(ioc, mf);
+ return;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+
+ CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+
+ if (ioc->bus_type == SAS) {
+ if (function == MPI_FUNCTION_SCSI_IO_REQUEST)
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ scsi_req->Bus, scsi_req->TargetID);
+ else if (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ scsi_req->Bus, 0);
+ if (!ret_val)
+ return;
+ } else {
+ if ((function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+ (function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH))
+ ret_val = mptctl_do_taskmgmt(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ scsi_req->Bus, 0);
+ if (!ret_val)
+ return;
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling Reset! \n",
+ ioc->name));
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+}
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* mptctl_ioc_reset
+ *
+ * Clean-up functionality. Used only if the FW has been reloaded
+ * as part of an IOC reset.
+ *
+ */
+static int
+mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+ switch(reset_phase) {
+ case MPT_IOC_SETUP_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+ break;
+ case MPT_IOC_PRE_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT_IOC_POST_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET;
+ complete(&ioc->ioctl_cmds.done);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* ASYNC Event Notification Support */
+static int
+mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
+{
+ u8 event;
+
+ event = le32_to_cpu(pEvReply->Event) & 0xFF;
+
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s() called\n",
+ ioc->name, __func__));
+ if(async_queue == NULL)
+ return 1;
+
+ /* Raise SIGIO for persistent events.
+ * TODO - this define is not in MPI spec yet,
+ * but they plan to set it to 0x21
+ */
+ if (event == 0x21 ) {
+ ioc->aen_event_read_flag=1;
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Raised SIGIO to application\n",
+ ioc->name));
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Raised SIGIO to application\n", ioc->name));
+ kill_fasync(&async_queue, SIGIO, POLL_IN);
+ return 1;
+ }
+
+ /* This flag is set after SIGIO was raised, and
+ * remains set until the application has read
+ * the event log via ioctl=MPTEVENTREPORT
+ */
+ if(ioc->aen_event_read_flag)
+ return 1;
+
+ /* Signal only for the events that are
+ * requested for by the application
+ */
+ if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
+ ioc->aen_event_read_flag=1;
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Raised SIGIO to application\n", ioc->name));
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Raised SIGIO to application\n", ioc->name));
+ kill_fasync(&async_queue, SIGIO, POLL_IN);
+ }
+ return 1;
+}
+
+static int
+mptctl_release(struct inode *inode, struct file *filep)
+{
+ fasync_helper(-1, filep, 0, &async_queue);
+ return 0;
+}
+
+static int
+mptctl_fasync(int fd, struct file *filep, int mode)
+{
+ MPT_ADAPTER *ioc;
+ int ret;
+
+ mutex_lock(&mpctl_mutex);
+ list_for_each_entry(ioc, &ioc_list, list)
+ ioc->aen_event_read_flag=0;
+
+ ret = fasync_helper(fd, filep, mode, &async_queue);
+ mutex_unlock(&mpctl_mutex);
+ return ret;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * MPT ioctl handler
+ * cmd - specify the particular IOCTL command to be issued
+ * arg - data specific to the command. Must not be null.
+ */
+static long
+__mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ mpt_ioctl_header __user *uhdr = (void __user *) arg;
+ mpt_ioctl_header khdr;
+ int iocnum;
+ unsigned iocnumX;
+ int nonblock = (file->f_flags & O_NONBLOCK);
+ int ret;
+ MPT_ADAPTER *iocp = NULL;
+
+ if (copy_from_user(&khdr, uhdr, sizeof(khdr))) {
+ printk(KERN_ERR MYNAM "%s::mptctl_ioctl() @%d - "
+ "Unable to copy mpt_ioctl_header data @ %p\n",
+ __FILE__, __LINE__, uhdr);
+ return -EFAULT;
+ }
+ ret = -ENXIO; /* (-6) No such device or address */
+
+ /* Verify intended MPT adapter - set iocnum and the adapter
+ * pointer (iocp)
+ */
+ iocnumX = khdr.iocnum & 0xFF;
+ if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
+ (iocp == NULL))
+ return -ENODEV;
+
+ if (!iocp->active) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_ioctl() @%d - Controller disabled.\n",
+ __FILE__, __LINE__);
+ return -EFAULT;
+ }
+
+ /* Handle those commands that are just returning
+ * information stored in the driver.
+ * These commands should never time out and are unaffected
+ * by TM and FW reloads.
+ */
+ if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
+ return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
+ } else if (cmd == MPTTARGETINFO) {
+ return mptctl_gettargetinfo(arg);
+ } else if (cmd == MPTTEST) {
+ return mptctl_readtest(arg);
+ } else if (cmd == MPTEVENTQUERY) {
+ return mptctl_eventquery(arg);
+ } else if (cmd == MPTEVENTENABLE) {
+ return mptctl_eventenable(arg);
+ } else if (cmd == MPTEVENTREPORT) {
+ return mptctl_eventreport(arg);
+ } else if (cmd == MPTFWREPLACE) {
+ return mptctl_replace_fw(arg);
+ }
+
+ /* All of these commands require an interrupt or
+ * are unknown/illegal.
+ */
+ if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
+ return ret;
+
+ if (cmd == MPTFWDOWNLOAD)
+ ret = mptctl_fw_download(arg);
+ else if (cmd == MPTCOMMAND)
+ ret = mptctl_mpt_command(arg);
+ else if (cmd == MPTHARDRESET)
+ ret = mptctl_do_reset(arg);
+ else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
+ ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
+ else if (cmd == HP_GETTARGETINFO)
+ ret = mptctl_hp_targetinfo(arg);
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&iocp->ioctl_cmds.mutex);
+
+ return ret;
+}
+
+static long
+mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+ mutex_lock(&mpctl_mutex);
+ ret = __mptctl_ioctl(file, cmd, arg);
+ mutex_unlock(&mpctl_mutex);
+ return ret;
+}
+
+static int mptctl_do_reset(unsigned long arg)
+{
+ struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
+ struct mpt_ioctl_diag_reset krinfo;
+ MPT_ADAPTER *iocp;
+
+ if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_do_reset - "
+ "Unable to copy mpt_ioctl_diag_reset struct @ %p\n",
+ __FILE__, __LINE__, urinfo);
+ return -EFAULT;
+ }
+
+ if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
+ printk(KERN_DEBUG MYNAM "%s@%d::mptctl_do_reset - ioc%d not found!\n",
+ __FILE__, __LINE__, krinfo.hdr.iocnum);
+ return -ENODEV; /* (-6) No such device or address */
+ }
+
+ dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "mptctl_do_reset called.\n",
+ iocp->name));
+
+ if (mpt_HardResetHandler(iocp, CAN_SLEEP) != 0) {
+ printk (MYIOC_s_ERR_FMT "%s@%d::mptctl_do_reset - reset failed.\n",
+ iocp->name, __FILE__, __LINE__);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * MPT FW download function. Cast the arg into the mpt_fw_xfer structure.
+ * This structure contains: iocnum, firmware length (bytes),
+ * pointer to user space memory where the fw image is stored.
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EFAULT if data unavailable
+ * -ENXIO if no such device
+ * -EAGAIN if resource problem
+ * -ENOMEM if no memory for SGE
+ * -EMLINK if too many chain buffers required
+ * -EBADRQC if adapter does not support FW download
+ * -EBUSY if adapter is busy
+ * -ENOMSG if FW upload returned bad status
+ */
+static int
+mptctl_fw_download(unsigned long arg)
+{
+ struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
+ struct mpt_fw_xfer kfwdl;
+
+ if (copy_from_user(&kfwdl, ufwdl, sizeof(struct mpt_fw_xfer))) {
+ printk(KERN_ERR MYNAM "%s@%d::_ioctl_fwdl - "
+ "Unable to copy mpt_fw_xfer struct @ %p\n",
+ __FILE__, __LINE__, ufwdl);
+ return -EFAULT;
+ }
+
+ return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
+}
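+
+/*
+ * Illustrative sketch only (not part of this driver): a user-space tool
+ * might issue MPTFWDOWNLOAD roughly as below.  The "/dev/mptctl" node name
+ * is assumed, and the application is assumed to include the same mptctl.h
+ * ioctl definitions used here.
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *	#include <sys/ioctl.h>
+ *
+ *	int fw_download(int iocnum, void *fw_image, unsigned int fw_len)
+ *	{
+ *		struct mpt_fw_xfer xfer;
+ *		int fd, rc;
+ *
+ *		fd = open("/dev/mptctl", O_RDWR);
+ *		if (fd < 0)
+ *			return -1;
+ *		xfer.iocnum = iocnum;
+ *		xfer.fwlen  = fw_len;
+ *		xfer.bufp   = fw_image;
+ *		rc = ioctl(fd, MPTFWDOWNLOAD, &xfer);
+ *		close(fd);
+ *		return rc;
+ *	}
+ */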
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * FW Download engine.
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EFAULT if data unavailable
+ * -ENXIO if no such device
+ * -EAGAIN if resource problem
+ * -ENOMEM if no memory for SGE
+ * -EMLINK if too many chain buffers required
+ * -EBADRQC if adapter does not support FW download
+ * -EBUSY if adapter is busy
+ * -ENOMSG if FW upload returned bad status
+ */
+static int
+mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
+{
+ FWDownload_t *dlmsg;
+ MPT_FRAME_HDR *mf;
+ MPT_ADAPTER *iocp;
+ FWDownloadTCSGE_t *ptsge;
+ MptSge_t *sgl, *sgIn;
+ char *sgOut;
+ struct buflist *buflist;
+ struct buflist *bl;
+ dma_addr_t sgl_dma;
+ int ret;
+ int numfrags = 0;
+ int maxfrags;
+ int n = 0;
+ u32 sgdir;
+ u32 nib;
+ int fw_bytes_copied = 0;
+ int i;
+ int sge_offset = 0;
+ u16 iocstat;
+ pFWDownloadReply_t ReplyMsg = NULL;
+ unsigned long timeleft;
+
+ if (mpt_verify_adapter(ioc, &iocp) < 0) {
+ printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
+ ioc);
+ return -ENODEV; /* (-6) No such device or address */
+ } else {
+
+ /* Valid device. Get a message frame and construct the FW download message.
+ */
+ if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
+ return -EAGAIN;
+ }
+
+ dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT
+ "mptctl_do_fwdl called. mptctl_id = %xh.\n", iocp->name, mptctl_id));
+ dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.bufp = %p\n",
+ iocp->name, ufwbuf));
+ dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.fwlen = %d\n",
+ iocp->name, (int)fwlen));
+ dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: kfwdl.ioc = %04xh\n",
+ iocp->name, ioc));
+
+ dlmsg = (FWDownload_t*) mf;
+ ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
+ sgOut = (char *) (ptsge + 1);
+
+ /*
+ * Construct f/w download request
+ */
+ dlmsg->ImageType = MPI_FW_DOWNLOAD_ITYPE_FW;
+ dlmsg->Reserved = 0;
+ dlmsg->ChainOffset = 0;
+ dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD;
+ dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0;
+ if (iocp->facts.MsgVersion >= MPI_VERSION_01_05)
+ dlmsg->MsgFlags = MPI_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT;
+ else
+ dlmsg->MsgFlags = 0;
+
+
+ /* Set up the Transaction SGE.
+ */
+ ptsge->Reserved = 0;
+ ptsge->ContextSize = 0;
+ ptsge->DetailsLength = 12;
+ ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
+ ptsge->Reserved_0100_Checksum = 0;
+ ptsge->ImageOffset = 0;
+ ptsge->ImageSize = cpu_to_le32(fwlen);
+
+ /* Add the SGL
+ */
+
+ /*
+ * Need to kmalloc area(s) for holding firmware image bytes.
+	 * But we need to do it piecemeal, using a proper
+ * scatter gather list (with 128kB MAX hunks).
+ *
+ * A practical limit here might be # of sg hunks that fit into
+ * a single IOC request frame; 12 or 8 (see below), so:
+	 *	For FC9xx: 12 x 128kB == 1.5 MB (max)
+	 *	For C1030:  8 x 128kB == 1 MB (max)
+ * We could support chaining, but things get ugly(ier:)
+ *
+ * Set the sge_offset to the start of the sgl (bytes).
+ */
+ sgdir = 0x04000000; /* IOC will READ from sys mem */
+ sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t);
+ if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset,
+ &numfrags, &buflist, &sgl_dma, iocp)) == NULL)
+ return -ENOMEM;
+
+ /*
+ * We should only need SGL with 2 simple_32bit entries (up to 256 kB)
+ * for FC9xx f/w image, but calculate max number of sge hunks
+ * we can fit into a request frame, and limit ourselves to that.
+ * (currently no chain support)
+ * maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE
+ * Request maxfrags
+ * 128 12
+ * 96 8
+ * 64 4
+ */
+ maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) -
+ sizeof(FWDownloadTCSGE_t))
+ / iocp->SGE_size;
+ if (numfrags > maxfrags) {
+ ret = -EMLINK;
+ goto fwdl_out;
+ }
+
+ dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "DbG: sgl buffer = %p, sgfrags = %d\n",
+ iocp->name, sgl, numfrags));
+
+ /*
+ * Parse SG list, copying sgl itself,
+ * plus f/w image hunks from user space as we go...
+ */
+ ret = -EFAULT;
+ sgIn = sgl;
+ bl = buflist;
+ for (i=0; i < numfrags; i++) {
+
+ /* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE
+ * Skip everything but Simple. If simple, copy from
+ * user space into kernel space.
+ * Note: we should not have anything but Simple as
+ * Chain SGE are illegal.
+ */
+ nib = (sgIn->FlagsLength & 0x30000000) >> 28;
+ if (nib == 0 || nib == 3) {
+ ;
+ } else if (sgIn->Address) {
+ iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
+ n++;
+ if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - "
+ "Unable to copy f/w buffer hunk#%d @ %p\n",
+ iocp->name, __FILE__, __LINE__, n, ufwbuf);
+ goto fwdl_out;
+ }
+ fw_bytes_copied += bl->len;
+ }
+ sgIn++;
+ bl++;
+ sgOut += iocp->SGE_size;
+ }
+
+ DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags);
+
+ /*
+ * Finally, perform firmware download.
+ */
+ ReplyMsg = NULL;
+ SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext);
+ INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status)
+ mpt_put_msg_frame(mptctl_id, iocp, mf);
+
+ /* Now wait for the command to complete */
+retry_wait:
+ timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60);
+ if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
+ if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+ mpt_free_msg_frame(iocp, mf);
+ goto fwdl_out;
+ }
+ if (!timeleft) {
+ printk(MYIOC_s_WARN_FMT
+ "FW download timeout, doorbell=0x%08x\n",
+ iocp->name, mpt_GetIocState(iocp, 0));
+ mptctl_timeout_expired(iocp, mf);
+ } else
+ goto retry_wait;
+ goto fwdl_out;
+ }
+
+ if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
+ mpt_free_msg_frame(iocp, mf);
+ ret = -ENODATA;
+ goto fwdl_out;
+ }
+
+ if (sgl)
+ kfree_sgl(sgl, sgl_dma, buflist, iocp);
+
+ ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply;
+ iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
+ if (iocstat == MPI_IOCSTATUS_SUCCESS) {
+ printk(MYIOC_s_INFO_FMT "F/W update successful!\n", iocp->name);
+ return 0;
+ } else if (iocstat == MPI_IOCSTATUS_INVALID_FUNCTION) {
+ printk(MYIOC_s_WARN_FMT "Hmmm... F/W download not supported!?!\n",
+ iocp->name);
+		printk(MYIOC_s_WARN_FMT "(time to go bang on somebody's door)\n",
+ iocp->name);
+ return -EBADRQC;
+ } else if (iocstat == MPI_IOCSTATUS_BUSY) {
+ printk(MYIOC_s_WARN_FMT "IOC_BUSY!\n", iocp->name);
+ printk(MYIOC_s_WARN_FMT "(try again later?)\n", iocp->name);
+ return -EBUSY;
+ } else {
+ printk(MYIOC_s_WARN_FMT "ioctl_fwdl() returned [bad] status = %04xh\n",
+ iocp->name, iocstat);
+ printk(MYIOC_s_WARN_FMT "(bad VooDoo)\n", iocp->name);
+ return -ENOMSG;
+ }
+ return 0;
+
+fwdl_out:
+
+ CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status);
+ SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0);
+ kfree_sgl(sgl, sgl_dma, buflist, iocp);
+ return ret;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * SGE Allocation routine
+ *
+ * Inputs: bytes - number of bytes to be transferred
+ * sgdir - data direction
+ * sge_offset - offset (in bytes) from the start of the request
+ * frame to the first SGE
+ * ioc - pointer to the mptadapter
+ * Outputs: frags - number of scatter gather elements
+ *		blp - pointer to the buflist pointer
+ *		sglbuf_dma - pointer to the (dma) sgl
+ * Returns: NULL on failure,
+ * pointer to the (virtual) sgl if successful.
+ */
+static MptSge_t *
+kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
+ struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc)
+{
+ MptSge_t *sglbuf = NULL; /* pointer to array of SGE */
+ /* and chain buffers */
+	struct buflist	 *buflist = NULL;	/* kernel-side bookkeeping */
+ MptSge_t *sgl;
+ int numfrags = 0;
+ int fragcnt = 0;
+ int alloc_sz = min(bytes,MAX_KMALLOC_SZ); // avoid kernel warning msg!
+ int bytes_allocd = 0;
+ int this_alloc;
+ dma_addr_t pa; // phys addr
+ int i, buflist_ent;
+ int sg_spill = MAX_FRAGS_SPILL1;
+ int dir;
+ /* initialization */
+ *frags = 0;
+ *blp = NULL;
+
+ /* Allocate and initialize an array of kernel
+ * structures for the SG elements.
+ */
+ i = MAX_SGL_BYTES / 8;
+ buflist = kzalloc(i, GFP_USER);
+ if (!buflist)
+ return NULL;
+ buflist_ent = 0;
+
+ /* Allocate a single block of memory to store the sg elements and
+ * the chain buffers. The calling routine is responsible for
+ * copying the data in this array into the correct place in the
+ * request and chain buffers.
+ */
+ sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma);
+ if (sglbuf == NULL)
+ goto free_and_fail;
+
+ if (sgdir & 0x04000000)
+ dir = PCI_DMA_TODEVICE;
+ else
+ dir = PCI_DMA_FROMDEVICE;
+
+ /* At start:
+ * sgl = sglbuf = point to beginning of sg buffer
+ * buflist_ent = 0 = first kernel structure
+ * sg_spill = number of SGE that can be written before the first
+ * chain element.
+ *
+ */
+ sgl = sglbuf;
+ sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1;
+ while (bytes_allocd < bytes) {
+ this_alloc = min(alloc_sz, bytes-bytes_allocd);
+ buflist[buflist_ent].len = this_alloc;
+ buflist[buflist_ent].kptr = pci_alloc_consistent(ioc->pcidev,
+ this_alloc,
+ &pa);
+ if (buflist[buflist_ent].kptr == NULL) {
+ alloc_sz = alloc_sz / 2;
+ if (alloc_sz == 0) {
+ printk(MYIOC_s_WARN_FMT "-SG: No can do - "
+ "not enough memory! :-(\n", ioc->name);
+ printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n",
+ ioc->name, numfrags);
+ goto free_and_fail;
+ }
+ continue;
+ } else {
+ dma_addr_t dma_addr;
+
+ bytes_allocd += this_alloc;
+ sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
+ dma_addr = pci_map_single(ioc->pcidev,
+ buflist[buflist_ent].kptr, this_alloc, dir);
+ sgl->Address = dma_addr;
+
+ fragcnt++;
+ numfrags++;
+ sgl++;
+ buflist_ent++;
+ }
+
+ if (bytes_allocd >= bytes)
+ break;
+
+ /* Need to chain? */
+ if (fragcnt == sg_spill) {
+ printk(MYIOC_s_WARN_FMT
+ "-SG: No can do - " "Chain required! :-(\n", ioc->name);
+ printk(MYIOC_s_WARN_FMT "(freeing %d frags)\n", ioc->name, numfrags);
+ goto free_and_fail;
+ }
+
+ /* overflow check... */
+ if (numfrags*8 > MAX_SGL_BYTES){
+ /* GRRRRR... */
+ printk(MYIOC_s_WARN_FMT "-SG: No can do - "
+ "too many SG frags! :-(\n", ioc->name);
+ printk(MYIOC_s_WARN_FMT "-SG: (freeing %d frags)\n",
+ ioc->name, numfrags);
+ goto free_and_fail;
+ }
+ }
+
+ /* Last sge fixup: set LE+eol+eob bits */
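+	/* (0xC1000000 == MPT_SGE_FLAGS_LAST_ELEMENT | MPT_SGE_FLAGS_END_OF_BUFFER
+	 *  | MPT_SGE_FLAGS_END_OF_LIST - see the shifted SGE defines in mptbase.h) */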
+ sgl[-1].FlagsLength |= 0xC1000000;
+
+ *frags = numfrags;
+ *blp = buflist;
+
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: kbuf_alloc_2_sgl() - "
+ "%d SG frags generated!\n", ioc->name, numfrags));
+
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: kbuf_alloc_2_sgl() - "
+ "last (big) alloc_sz=%d\n", ioc->name, alloc_sz));
+
+ return sglbuf;
+
+free_and_fail:
+ if (sglbuf != NULL) {
+ for (i = 0; i < numfrags; i++) {
+ dma_addr_t dma_addr;
+ u8 *kptr;
+ int len;
+
+ if ((sglbuf[i].FlagsLength >> 24) == 0x30)
+ continue;
+
+ dma_addr = sglbuf[i].Address;
+ kptr = buflist[i].kptr;
+ len = buflist[i].len;
+
+ pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+ }
+ pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf, *sglbuf_dma);
+ }
+ kfree(buflist);
+ return NULL;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Routine to free the SGL elements.
+ */
+static void
+kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc)
+{
+ MptSge_t *sg = sgl;
+ struct buflist *bl = buflist;
+ u32 nib;
+ int dir;
+ int n = 0;
+
+ if (sg->FlagsLength & 0x04000000)
+ dir = PCI_DMA_TODEVICE;
+ else
+ dir = PCI_DMA_FROMDEVICE;
+
+ nib = (sg->FlagsLength & 0xF0000000) >> 28;
+ while (! (nib & 0x4)) { /* eob */
+ /* skip ignore/chain. */
+ if (nib == 0 || nib == 3) {
+ ;
+ } else if (sg->Address) {
+ dma_addr_t dma_addr;
+ void *kptr;
+ int len;
+
+ dma_addr = sg->Address;
+ kptr = bl->kptr;
+ len = bl->len;
+ pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
+ pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+ n++;
+ }
+ sg++;
+ bl++;
+ nib = (le32_to_cpu(sg->FlagsLength) & 0xF0000000) >> 28;
+ }
+
+ /* we're at eob! */
+ if (sg->Address) {
+ dma_addr_t dma_addr;
+ void *kptr;
+ int len;
+
+ dma_addr = sg->Address;
+ kptr = bl->kptr;
+ len = bl->len;
+ pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
+ pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+ n++;
+ }
+
+ pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma);
+ kfree(buflist);
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: Free'd 1 SGL buf + %d kbufs!\n",
+ ioc->name, n));
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptctl_getiocinfo - Query the host adapter for IOC information.
+ * @arg: User space argument
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EFAULT if data unavailable
+ * -ENODEV if no such device/adapter
+ */
+static int
+mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
+{
+ struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg;
+ struct mpt_ioctl_iocinfo *karg;
+ MPT_ADAPTER *ioc;
+ struct pci_dev *pdev;
+ int iocnum;
+ unsigned int port;
+ int cim_rev;
+ u8 revision;
+ struct scsi_device *sdev;
+ VirtDevice *vdevice;
+
+ /* Add of PCI INFO results in unaligned access for
+ * IA64 and Sparc. Reset long to int. Return no PCI
+ * data for obsolete format.
+ */
+ if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev0))
+ cim_rev = 0;
+ else if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev1))
+ cim_rev = 1;
+ else if (data_size == sizeof(struct mpt_ioctl_iocinfo))
+ cim_rev = 2;
+ else if (data_size == (sizeof(struct mpt_ioctl_iocinfo_rev0)+12))
+ cim_rev = 0; /* obsolete */
+ else
+ return -EFAULT;
+
+ karg = kmalloc(data_size, GFP_KERNEL);
+ if (karg == NULL) {
+ printk(KERN_ERR MYNAM "%s::mpt_ioctl_iocinfo() @%d - no memory available!\n",
+ __FILE__, __LINE__);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(karg, uarg, data_size)) {
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_getiocinfo - "
+ "Unable to read in mpt_ioctl_iocinfo struct @ %p\n",
+ __FILE__, __LINE__, uarg);
+ kfree(karg);
+ return -EFAULT;
+ }
+
+ if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) ||
+ (ioc == NULL)) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n",
+ __FILE__, __LINE__, iocnum);
+ kfree(karg);
+ return -ENODEV;
+ }
+
+ /* Verify the data transfer size is correct. */
+ if (karg->hdr.maxDataSize != data_size) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - "
+ "Structure size mismatch. Command not completed.\n",
+ ioc->name, __FILE__, __LINE__);
+ kfree(karg);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_getiocinfo called.\n",
+ ioc->name));
+
+ /* Fill in the data and return the structure to the calling
+ * program
+ */
+ if (ioc->bus_type == SAS)
+ karg->adapterType = MPT_IOCTL_INTERFACE_SAS;
+ else if (ioc->bus_type == FC)
+ karg->adapterType = MPT_IOCTL_INTERFACE_FC;
+ else
+ karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
+
+ if (karg->hdr.port > 1) {
+ kfree(karg);
+ return -EINVAL;
+ }
+ port = karg->hdr.port;
+
+ karg->port = port;
+ pdev = (struct pci_dev *) ioc->pcidev;
+
+ karg->pciId = pdev->device;
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+ karg->hwRev = revision;
+ karg->subSystemDevice = pdev->subsystem_device;
+ karg->subSystemVendor = pdev->subsystem_vendor;
+
+ if (cim_rev == 1) {
+ /* Get the PCI bus, device, and function numbers for the IOC
+ */
+ karg->pciInfo.u.bits.busNumber = pdev->bus->number;
+ karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
+ karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
+ } else if (cim_rev == 2) {
+ /* Get the PCI bus, device, function and segment ID numbers
+ for the IOC */
+ karg->pciInfo.u.bits.busNumber = pdev->bus->number;
+ karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
+ karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
+ karg->pciInfo.segmentID = pci_domain_nr(pdev->bus);
+ }
+
+ /* Get number of devices
+ */
+ karg->numDevices = 0;
+ if (ioc->sh) {
+ shost_for_each_device(sdev, ioc->sh) {
+ vdevice = sdev->hostdata;
+ if (vdevice == NULL || vdevice->vtarget == NULL)
+ continue;
+ if (vdevice->vtarget->tflags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ continue;
+ karg->numDevices++;
+ }
+ }
+
+ /* Set the BIOS and FW Version
+ */
+ karg->FWVersion = ioc->facts.FWVersion.Word;
+ karg->BIOSVersion = ioc->biosVersion;
+
+ /* Set the Version Strings.
+ */
+ strncpy (karg->driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH);
+ karg->driverVersion[MPT_IOCTL_VERSION_LENGTH-1]='\0';
+
+ karg->busChangeEvent = 0;
+ karg->hostId = ioc->pfacts[port].PortSCSIID;
+ karg->rsvd[0] = karg->rsvd[1] = 0;
+
+ /* Copy the data from kernel memory to user memory
+ */
+ if (copy_to_user((char __user *)arg, karg, data_size)) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_getiocinfo - "
+ "Unable to write out mpt_ioctl_iocinfo struct @ %p\n",
+ ioc->name, __FILE__, __LINE__, uarg);
+ kfree(karg);
+ return -EFAULT;
+ }
+
+ kfree(karg);
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptctl_gettargetinfo - Query the host adapter for target information.
+ * @arg: User space argument
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EFAULT if data unavailable
+ * -ENODEV if no such device/adapter
+ */
+static int
+mptctl_gettargetinfo (unsigned long arg)
+{
+ struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
+ struct mpt_ioctl_targetinfo karg;
+ MPT_ADAPTER *ioc;
+ VirtDevice *vdevice;
+ char *pmem;
+ int *pdata;
+ int iocnum;
+ int numDevices = 0;
+ int lun;
+ int maxWordsLeft;
+ int numBytes;
+ u8 port;
+ struct scsi_device *sdev;
+
+ if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) {
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_gettargetinfo - "
+ "Unable to read in mpt_ioctl_targetinfo struct @ %p\n",
+ __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+ (ioc == NULL)) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n",
+ __FILE__, __LINE__, iocnum);
+ return -ENODEV;
+ }
+
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_gettargetinfo called.\n",
+ ioc->name));
+ /* Get the port number and set the maximum number of bytes
+ * in the returned structure.
+ * Ignore the port setting.
+ */
+ numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
+ maxWordsLeft = numBytes/sizeof(int);
+ port = karg.hdr.port;
+
+ if (maxWordsLeft <= 0) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n",
+ ioc->name, __FILE__, __LINE__);
+ return -ENOMEM;
+ }
+
+ /* Fill in the data and return the structure to the calling
+ * program
+ */
+
+	/* struct mpt_ioctl_targetinfo does not reserve space for the
+	 * per-target entries, and the full list would not fit on the
+	 * stack anyway.  Allocate kernel memory, populate it, copy it
+	 * back to the user, then free the memory.
+ * targetInfo format:
+ * bits 31-24: reserved
+ * 23-16: LUN
+ * 15- 8: Bus Number
+ * 7- 0: Target ID
+ */
+ pmem = kzalloc(numBytes, GFP_KERNEL);
+ if (!pmem) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo() - no memory available!\n",
+ ioc->name, __FILE__, __LINE__);
+ return -ENOMEM;
+ }
+ pdata = (int *) pmem;
+
+ /* Get number of devices
+ */
+ if (ioc->sh){
+ shost_for_each_device(sdev, ioc->sh) {
+ if (!maxWordsLeft)
+ continue;
+ vdevice = sdev->hostdata;
+ if (vdevice == NULL || vdevice->vtarget == NULL)
+ continue;
+ if (vdevice->vtarget->tflags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ continue;
+ lun = (vdevice->vtarget->raidVolume) ? 0x80 : vdevice->lun;
+ *pdata = (((u8)lun << 16) + (vdevice->vtarget->channel << 8) +
+ (vdevice->vtarget->id ));
+ pdata++;
+ numDevices++;
+ --maxWordsLeft;
+ }
+ }
+ karg.numDevices = numDevices;
+
+ /* Copy part of the data from kernel memory to user memory
+ */
+ if (copy_to_user((char __user *)arg, &karg,
+ sizeof(struct mpt_ioctl_targetinfo))) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - "
+ "Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
+ ioc->name, __FILE__, __LINE__, uarg);
+ kfree(pmem);
+ return -EFAULT;
+ }
+
+ /* Copy the remaining data from kernel memory to user memory
+ */
+ if (copy_to_user(uarg->targetInfo, pmem, numBytes)) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_gettargetinfo - "
+ "Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
+ ioc->name, __FILE__, __LINE__, pdata);
+ kfree(pmem);
+ return -EFAULT;
+ }
+
+ kfree(pmem);
+
+ return 0;
+}
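+
+/*
+ * Illustrative only: a user-space consumer of the MPTTARGETINFO data can
+ * unpack each 32-bit targetInfo word using the layout documented in
+ * mptctl_gettargetinfo() above, e.g.
+ *
+ *	unsigned int word   = target_info[i];
+ *	unsigned int target =  word        & 0xFF;
+ *	unsigned int bus    = (word >> 8)  & 0xFF;
+ *	unsigned int lun    = (word >> 16) & 0xFF;
+ */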
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* MPT IOCTL Test function.
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EFAULT if data unavailable
+ * -ENODEV if no such device/adapter
+ */
+static int
+mptctl_readtest (unsigned long arg)
+{
+ struct mpt_ioctl_test __user *uarg = (void __user *) arg;
+ struct mpt_ioctl_test karg;
+ MPT_ADAPTER *ioc;
+ int iocnum;
+
+ if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_readtest - "
+ "Unable to read in mpt_ioctl_test struct @ %p\n",
+ __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+ (ioc == NULL)) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_readtest() @%d - ioc%d not found!\n",
+ __FILE__, __LINE__, iocnum);
+ return -ENODEV;
+ }
+
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_readtest called.\n",
+ ioc->name));
+ /* Fill in the data and return the structure to the calling
+ * program
+ */
+
+#ifdef MFCNT
+ karg.chip_type = ioc->mfcnt;
+#else
+ karg.chip_type = ioc->pcidev->device;
+#endif
+ strncpy (karg.name, ioc->name, MPT_MAX_NAME);
+ karg.name[MPT_MAX_NAME-1]='\0';
+ strncpy (karg.product, ioc->prod_name, MPT_PRODUCT_LENGTH);
+ karg.product[MPT_PRODUCT_LENGTH-1]='\0';
+
+ /* Copy the data from kernel memory to user memory
+ */
+ if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_test))) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_readtest - "
+ "Unable to write out mpt_ioctl_test struct @ %p\n",
+ ioc->name, __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptctl_eventquery - Query the host adapter for the event types
+ * that are being logged.
+ * @arg: User space argument
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EFAULT if data unavailable
+ * -ENODEV if no such device/adapter
+ */
+static int
+mptctl_eventquery (unsigned long arg)
+{
+ struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
+ struct mpt_ioctl_eventquery karg;
+ MPT_ADAPTER *ioc;
+ int iocnum;
+
+ if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_eventquery - "
+ "Unable to read in mpt_ioctl_eventquery struct @ %p\n",
+ __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+ (ioc == NULL)) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
+ __FILE__, __LINE__, iocnum);
+ return -ENODEV;
+ }
+
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventquery called.\n",
+ ioc->name));
+ karg.eventEntries = MPTCTL_EVENT_LOG_SIZE;
+ karg.eventTypes = ioc->eventTypes;
+
+ /* Copy the data from kernel memory to user memory
+ */
+ if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventquery - "
+ "Unable to write out mpt_ioctl_eventquery struct @ %p\n",
+ ioc->name, __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mptctl_eventenable (unsigned long arg)
+{
+ struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
+ struct mpt_ioctl_eventenable karg;
+ MPT_ADAPTER *ioc;
+ int iocnum;
+
+ if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_eventenable - "
+ "Unable to read in mpt_ioctl_eventenable struct @ %p\n",
+ __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+ (ioc == NULL)) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
+ __FILE__, __LINE__, iocnum);
+ return -ENODEV;
+ }
+
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventenable called.\n",
+ ioc->name));
+ if (ioc->events == NULL) {
+ /* Have not yet allocated memory - do so now.
+ */
+ int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
+ ioc->events = kzalloc(sz, GFP_KERNEL);
+ if (!ioc->events) {
+ printk(MYIOC_s_ERR_FMT
+ ": ERROR - Insufficient memory to add adapter!\n",
+ ioc->name);
+ return -ENOMEM;
+ }
+ ioc->alloc_total += sz;
+
+ ioc->eventContext = 0;
+ }
+
+ /* Update the IOC event logging flag.
+ */
+ ioc->eventTypes = karg.eventTypes;
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mptctl_eventreport (unsigned long arg)
+{
+ struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
+ struct mpt_ioctl_eventreport karg;
+ MPT_ADAPTER *ioc;
+ int iocnum;
+ int numBytes, maxEvents, max;
+
+ if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_eventreport - "
+ "Unable to read in mpt_ioctl_eventreport struct @ %p\n",
+ __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+ (ioc == NULL)) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
+ __FILE__, __LINE__, iocnum);
+ return -ENODEV;
+ }
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_eventreport called.\n",
+ ioc->name));
+
+ numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
+ maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);
+
+
+ max = MPTCTL_EVENT_LOG_SIZE < maxEvents ? MPTCTL_EVENT_LOG_SIZE : maxEvents;
+
+ /* If fewer than 1 event is requested, there must have
+ * been some type of error.
+ */
+ if ((max < 1) || !ioc->events)
+ return -ENODATA;
+
+ /* reset this flag so SIGIO can restart */
+ ioc->aen_event_read_flag=0;
+
+ /* Copy the data from kernel memory to user memory
+ */
+ numBytes = max * sizeof(MPT_IOCTL_EVENTS);
+ if (copy_to_user(uarg->eventData, ioc->events, numBytes)) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_eventreport - "
+ "Unable to write out mpt_ioctl_eventreport struct @ %p\n",
+ ioc->name, __FILE__, __LINE__, ioc->events);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mptctl_replace_fw (unsigned long arg)
+{
+ struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
+ struct mpt_ioctl_replace_fw karg;
+ MPT_ADAPTER *ioc;
+ int iocnum;
+ int newFwSize;
+
+ if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_replace_fw - "
+ "Unable to read in mpt_ioctl_replace_fw struct @ %p\n",
+ __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+ (ioc == NULL)) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
+ __FILE__, __LINE__, iocnum);
+ return -ENODEV;
+ }
+
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_replace_fw called.\n",
+ ioc->name));
+ /* If caching FW, Free the old FW image
+ */
+ if (ioc->cached_fw == NULL)
+ return 0;
+
+ mpt_free_fw_memory(ioc);
+
+ /* Allocate memory for the new FW image
+ */
+ newFwSize = karg.newImageSize;
+
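+	/* Round the new image size up to a multiple of 4 bytes */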
+ if (newFwSize & 0x01)
+ newFwSize += 1;
+ if (newFwSize & 0x02)
+ newFwSize += 2;
+
+ mpt_alloc_fw_memory(ioc, newFwSize);
+ if (ioc->cached_fw == NULL)
+ return -ENOMEM;
+
+ /* Copy the data from user memory to kernel space
+ */
+ if (copy_from_user(ioc->cached_fw, uarg->newImage, newFwSize)) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_replace_fw - "
+ "Unable to read in mpt_ioctl_replace_fw image "
+ "@ %p\n", ioc->name, __FILE__, __LINE__, uarg);
+ mpt_free_fw_memory(ioc);
+ return -EFAULT;
+ }
+
+ /* Update IOCFactsReply
+ */
+ ioc->facts.FWImageSize = newFwSize;
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* MPT IOCTL MPTCOMMAND function.
+ * Cast the arg into the mpt_ioctl_command structure.
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EBUSY if previous command timeout and IOC reset is not complete.
+ * -EFAULT if data unavailable
+ * -ENODEV if no such device/adapter
+ * -ETIME if timer expires
+ * -ENOMEM if memory allocation error
+ */
+static int
+mptctl_mpt_command (unsigned long arg)
+{
+ struct mpt_ioctl_command __user *uarg = (void __user *) arg;
+ struct mpt_ioctl_command karg;
+ MPT_ADAPTER *ioc;
+ int iocnum;
+ int rc;
+
+
+ if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_command))) {
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_mpt_command - "
+ "Unable to read in mpt_ioctl_command struct @ %p\n",
+ __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+ (ioc == NULL)) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
+ __FILE__, __LINE__, iocnum);
+ return -ENODEV;
+ }
+
+ rc = mptctl_do_mpt_command (karg, &uarg->MF);
+
+ return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Worker routine for the IOCTL MPTCOMMAND and MPTCOMMAND32 (32-bit compat) commands.
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EBUSY if previous command timeout and IOC reset is not complete.
+ * -EFAULT if data unavailable
+ * -ENODEV if no such device/adapter
+ * -ETIME if timer expires
+ * -ENOMEM if memory allocation error
+ * -EPERM if SCSI I/O and target is untagged
+ */
+static int
+mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
+{
+ MPT_ADAPTER *ioc;
+ MPT_FRAME_HDR *mf = NULL;
+ MPIHeader_t *hdr;
+ char *psge;
+ struct buflist bufIn; /* data In buffer */
+ struct buflist bufOut; /* data Out buffer */
+ dma_addr_t dma_addr_in;
+ dma_addr_t dma_addr_out;
+ int sgSize = 0; /* Num SG elements */
+ int iocnum, flagsLength;
+ int sz, rc = 0;
+ int msgContext;
+ u16 req_idx;
+ ulong timeout;
+ unsigned long timeleft;
+ struct scsi_device *sdev;
+ unsigned long flags;
+ u8 function;
+
+ /* bufIn and bufOut are used for user to kernel space transfers
+ */
+ bufIn.kptr = bufOut.kptr = NULL;
+ bufIn.len = bufOut.len = 0;
+
+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+ (ioc == NULL)) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
+ __FILE__, __LINE__, iocnum);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
+ "Busy with diagnostic reset\n", __FILE__, __LINE__);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ /* Verify that the final request frame will not be too large.
+ */
+ sz = karg.dataSgeOffset * 4;
+ if (karg.dataInSize > 0)
+ sz += ioc->SGE_size;
+ if (karg.dataOutSize > 0)
+ sz += ioc->SGE_size;
+
+ if (sz > ioc->req_sz) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
+ "Request frame too large (%d) maximum (%d)\n",
+ ioc->name, __FILE__, __LINE__, sz, ioc->req_sz);
+ return -EFAULT;
+ }
+
+ /* Get a free request frame and save the message context.
+ */
+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL)
+ return -EAGAIN;
+
+ hdr = (MPIHeader_t *) mf;
+ msgContext = le32_to_cpu(hdr->MsgContext);
+ req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+
+ /* Copy the request frame
+ * Reset the saved message context.
+ * Request frame in user space
+ */
+ if (copy_from_user(mf, mfPtr, karg.dataSgeOffset * 4)) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
+ "Unable to read MF from mpt_ioctl_command struct @ %p\n",
+ ioc->name, __FILE__, __LINE__, mfPtr);
+ function = -1;
+ rc = -EFAULT;
+ goto done_free_mem;
+ }
+ hdr->MsgContext = cpu_to_le32(msgContext);
+ function = hdr->Function;
+
+
+ /* Verify that this request is allowed.
+ */
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n",
+ ioc->name, hdr->Function, mf));
+
+ switch (function) {
+ case MPI_FUNCTION_IOC_FACTS:
+ case MPI_FUNCTION_PORT_FACTS:
+ karg.dataOutSize = karg.dataInSize = 0;
+ break;
+
+ case MPI_FUNCTION_CONFIG:
+ {
+ Config_t *config_frame;
+ config_frame = (Config_t *)mf;
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\ttype=0x%02x ext_type=0x%02x "
+ "number=0x%02x action=0x%02x\n", ioc->name,
+ config_frame->Header.PageType,
+ config_frame->ExtPageType,
+ config_frame->Header.PageNumber,
+ config_frame->Action));
+ break;
+ }
+
+ case MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND:
+ case MPI_FUNCTION_FC_EX_LINK_SRVC_SEND:
+ case MPI_FUNCTION_FW_UPLOAD:
+ case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
+ case MPI_FUNCTION_FW_DOWNLOAD:
+ case MPI_FUNCTION_FC_PRIMITIVE_SEND:
+ case MPI_FUNCTION_TOOLBOX:
+ case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
+ break;
+
+ case MPI_FUNCTION_SCSI_IO_REQUEST:
+ if (ioc->sh) {
+ SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
+ int qtag = MPI_SCSIIO_CONTROL_UNTAGGED;
+ int scsidir = 0;
+ int dataSize;
+ u32 id;
+
+ id = (ioc->devices_per_bus == 0) ? 256 : ioc->devices_per_bus;
+ if (pScsiReq->TargetID > id) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
+ "Target ID out of bounds. \n",
+ ioc->name, __FILE__, __LINE__);
+ rc = -ENODEV;
+ goto done_free_mem;
+ }
+
+ if (pScsiReq->Bus >= ioc->number_of_buses) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
+ "Target Bus out of bounds. \n",
+ ioc->name, __FILE__, __LINE__);
+ rc = -ENODEV;
+ goto done_free_mem;
+ }
+
+ pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
+ pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
+
+
+ /* verify that app has not requested
+ * more sense data than driver
+ * can provide, if so, reset this parameter
+ * set the sense buffer pointer low address
+ * update the control field to specify Q type
+ */
+ if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
+ pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
+ else
+ pScsiReq->SenseBufferLength = karg.maxSenseBytes;
+
+ pScsiReq->SenseBufferLowAddr =
+ cpu_to_le32(ioc->sense_buf_low_dma
+ + (req_idx * MPT_SENSE_BUFFER_ALLOC));
+
+ shost_for_each_device(sdev, ioc->sh) {
+ struct scsi_target *starget = scsi_target(sdev);
+ VirtTarget *vtarget = starget->hostdata;
+
+ if (vtarget == NULL)
+ continue;
+
+ if ((pScsiReq->TargetID == vtarget->id) &&
+ (pScsiReq->Bus == vtarget->channel) &&
+ (vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
+ qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
+ }
+
+ /* Have the IOCTL driver set the direction based
+ * on the dataOutSize (ordering issue with Sparc).
+ */
+ if (karg.dataOutSize > 0) {
+ scsidir = MPI_SCSIIO_CONTROL_WRITE;
+ dataSize = karg.dataOutSize;
+ } else {
+ scsidir = MPI_SCSIIO_CONTROL_READ;
+ dataSize = karg.dataInSize;
+ }
+
+ pScsiReq->Control = cpu_to_le32(scsidir | qtag);
+ pScsiReq->DataLength = cpu_to_le32(dataSize);
+
+
+ } else {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
+ "SCSI driver is not loaded. \n",
+ ioc->name, __FILE__, __LINE__);
+ rc = -EFAULT;
+ goto done_free_mem;
+ }
+ break;
+
+ case MPI_FUNCTION_SMP_PASSTHROUGH:
+ /* Check mf->PassthruFlags to determine if
+ * transfer is ImmediateMode or not.
+ * Immediate mode returns data in the ReplyFrame.
+ * Else, we are sending request and response data
+ * in two SGLs at the end of the mf.
+ */
+ break;
+
+ case MPI_FUNCTION_SATA_PASSTHROUGH:
+ if (!ioc->sh) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
+ "SCSI driver is not loaded. \n",
+ ioc->name, __FILE__, __LINE__);
+ rc = -EFAULT;
+ goto done_free_mem;
+ }
+ break;
+
+ case MPI_FUNCTION_RAID_ACTION:
+ /* Just add a SGE
+ */
+ break;
+
+ case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ if (ioc->sh) {
+ SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
+ int qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
+ int scsidir = MPI_SCSIIO_CONTROL_READ;
+ int dataSize;
+
+ pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
+ pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
+
+
+ /* verify that app has not requested
+ * more sense data than driver
+ * can provide, if so, reset this parameter
+ * set the sense buffer pointer low address
+ * update the control field to specify Q type
+ */
+ if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
+ pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
+ else
+ pScsiReq->SenseBufferLength = karg.maxSenseBytes;
+
+ pScsiReq->SenseBufferLowAddr =
+ cpu_to_le32(ioc->sense_buf_low_dma
+ + (req_idx * MPT_SENSE_BUFFER_ALLOC));
+
+ /* All commands to physical devices are tagged
+ */
+
+ /* Have the IOCTL driver set the direction based
+ * on the dataOutSize (ordering issue with Sparc).
+ */
+ if (karg.dataOutSize > 0) {
+ scsidir = MPI_SCSIIO_CONTROL_WRITE;
+ dataSize = karg.dataOutSize;
+ } else {
+ scsidir = MPI_SCSIIO_CONTROL_READ;
+ dataSize = karg.dataInSize;
+ }
+
+ pScsiReq->Control = cpu_to_le32(scsidir | qtag);
+ pScsiReq->DataLength = cpu_to_le32(dataSize);
+
+ } else {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
+ "SCSI driver is not loaded. \n",
+ ioc->name, __FILE__, __LINE__);
+ rc = -EFAULT;
+ goto done_free_mem;
+ }
+ break;
+
+ case MPI_FUNCTION_SCSI_TASK_MGMT:
+ {
+ SCSITaskMgmt_t *pScsiTm;
+ pScsiTm = (SCSITaskMgmt_t *)mf;
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "\tTaskType=0x%x MsgFlags=0x%x "
+ "TaskMsgContext=0x%x id=%d channel=%d\n",
+			ioc->name, pScsiTm->TaskType, pScsiTm->MsgFlags,
+			le32_to_cpu(pScsiTm->TaskMsgContext),
+			pScsiTm->TargetID, pScsiTm->Bus));
+ break;
+ }
+
+ case MPI_FUNCTION_IOC_INIT:
+ {
+ IOCInit_t *pInit = (IOCInit_t *) mf;
+ u32 high_addr, sense_high;
+
+ /* Verify that all entries in the IOC INIT match
+ * existing setup (and in LE format).
+ */
+ if (sizeof(dma_addr_t) == sizeof(u64)) {
+ high_addr = cpu_to_le32((u32)((u64)ioc->req_frames_dma >> 32));
+ sense_high= cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
+ } else {
+ high_addr = 0;
+ sense_high= 0;
+ }
+
+ if ((pInit->Flags != 0) || (pInit->MaxDevices != ioc->facts.MaxDevices) ||
+ (pInit->MaxBuses != ioc->facts.MaxBuses) ||
+ (pInit->ReplyFrameSize != cpu_to_le16(ioc->reply_sz)) ||
+ (pInit->HostMfaHighAddr != high_addr) ||
+ (pInit->SenseBufferHighAddr != sense_high)) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
+ "IOC_INIT issued with 1 or more incorrect parameters. Rejected.\n",
+ ioc->name, __FILE__, __LINE__);
+ rc = -EFAULT;
+ goto done_free_mem;
+ }
+ }
+ break;
+ default:
+ /*
+ * MPI_FUNCTION_PORT_ENABLE
+ * MPI_FUNCTION_TARGET_CMD_BUFFER_POST
+ * MPI_FUNCTION_TARGET_ASSIST
+ * MPI_FUNCTION_TARGET_STATUS_SEND
+ * MPI_FUNCTION_TARGET_MODE_ABORT
+ * MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET
+ * MPI_FUNCTION_IO_UNIT_RESET
+ * MPI_FUNCTION_HANDSHAKE
+ * MPI_FUNCTION_REPLY_FRAME_REMOVAL
+ * MPI_FUNCTION_EVENT_NOTIFICATION
+ * (driver handles event notification)
+ * MPI_FUNCTION_EVENT_ACK
+ */
+
+ /* What to do with these??? CHECK ME!!!
+ MPI_FUNCTION_FC_LINK_SRVC_BUF_POST
+ MPI_FUNCTION_FC_LINK_SRVC_RSP
+ MPI_FUNCTION_FC_ABORT
+ MPI_FUNCTION_LAN_SEND
+ MPI_FUNCTION_LAN_RECEIVE
+ MPI_FUNCTION_LAN_RESET
+ */
+
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
+ "Illegal request (function 0x%x) \n",
+ ioc->name, __FILE__, __LINE__, hdr->Function);
+ rc = -EFAULT;
+ goto done_free_mem;
+ }
+
+	/* Add the SGL (at most one data in SGE and one data out SGE).
+	 * In the case of two SGEs, the data out (write) SGE will always
+	 * precede the data in (read) SGE. bufOut and bufIn track the
+	 * allocated buffers so they can be freed on exit.
+	 */
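+	/* dataSgeOffset is in 32-bit words from the start of the request frame */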
+ psge = (char *) (((int *) mf) + karg.dataSgeOffset);
+ flagsLength = 0;
+
+ if (karg.dataOutSize > 0)
+ sgSize ++;
+
+ if (karg.dataInSize > 0)
+ sgSize ++;
+
+ if (sgSize > 0) {
+
+ /* Set up the dataOut memory allocation */
+ if (karg.dataOutSize > 0) {
+ if (karg.dataInSize > 0) {
+ flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_END_OF_BUFFER |
+ MPI_SGE_FLAGS_DIRECTION)
+ << MPI_SGE_FLAGS_SHIFT;
+ } else {
+ flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
+ }
+ flagsLength |= karg.dataOutSize;
+ bufOut.len = karg.dataOutSize;
+ bufOut.kptr = pci_alloc_consistent(
+ ioc->pcidev, bufOut.len, &dma_addr_out);
+
+ if (bufOut.kptr == NULL) {
+ rc = -ENOMEM;
+ goto done_free_mem;
+ } else {
+ /* Set up this SGE.
+ * Copy to MF and to sglbuf
+ */
+ ioc->add_sge(psge, flagsLength, dma_addr_out);
+ psge += ioc->SGE_size;
+
+ /* Copy user data to kernel space.
+ */
+ if (copy_from_user(bufOut.kptr,
+ karg.dataOutBufPtr,
+ bufOut.len)) {
+ printk(MYIOC_s_ERR_FMT
+ "%s@%d::mptctl_do_mpt_command - Unable "
+ "to read user data "
+ "struct @ %p\n",
+ ioc->name, __FILE__, __LINE__,karg.dataOutBufPtr);
+ rc = -EFAULT;
+ goto done_free_mem;
+ }
+ }
+ }
+
+ if (karg.dataInSize > 0) {
+ flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
+ flagsLength |= karg.dataInSize;
+
+ bufIn.len = karg.dataInSize;
+ bufIn.kptr = pci_alloc_consistent(ioc->pcidev,
+ bufIn.len, &dma_addr_in);
+
+ if (bufIn.kptr == NULL) {
+ rc = -ENOMEM;
+ goto done_free_mem;
+ } else {
+ /* Set up this SGE
+ * Copy to MF and to sglbuf
+ */
+ ioc->add_sge(psge, flagsLength, dma_addr_in);
+ }
+ }
+ } else {
+ /* Add a NULL SGE
+ */
+ ioc->add_sge(psge, flagsLength, (dma_addr_t) -1);
+ }
+
+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext);
+ INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
+ if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
+
+ mutex_lock(&ioc->taskmgmt_cmds.mutex);
+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ goto done_free_mem;
+ }
+
+ DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
+
+ if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
+ (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
+ mpt_put_msg_frame_hi_pri(mptctl_id, ioc, mf);
+ else {
+ rc =mpt_send_handshake_request(mptctl_id, ioc,
+ sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP);
+ if (rc != 0) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "send_handshake FAILED! (ioc %p, mf %p)\n",
+ ioc->name, ioc, mf));
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ rc = -ENODATA;
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ goto done_free_mem;
+ }
+ }
+
+ } else
+ mpt_put_msg_frame(mptctl_id, ioc, mf);
+
+ /* Now wait for the command to complete */
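+	/* karg.timeout is in seconds; 0 selects the driver default */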
+ timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
+retry_wait:
+ timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
+ HZ*timeout);
+ if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ rc = -ETIME;
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n",
+ ioc->name, __func__));
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+ if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ goto done_free_mem;
+ }
+ if (!timeleft) {
+ printk(MYIOC_s_WARN_FMT
+ "mpt cmd timeout, doorbell=0x%08x"
+ " function=0x%x\n",
+ ioc->name, mpt_GetIocState(ioc, 0), function);
+ if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ mptctl_timeout_expired(ioc, mf);
+ mf = NULL;
+ } else
+ goto retry_wait;
+ goto done_free_mem;
+ }
+
+ if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+
+
+ mf = NULL;
+
+ /* If a valid reply frame, copy to the user.
+ * Offset 2: reply length in U32's
+ */
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) {
+ if (karg.maxReplyBytes < ioc->reply_sz) {
+ sz = min(karg.maxReplyBytes,
+ 4*ioc->ioctl_cmds.reply[2]);
+ } else {
+ sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]);
+ }
+ if (sz > 0) {
+ if (copy_to_user(karg.replyFrameBufPtr,
+ ioc->ioctl_cmds.reply, sz)){
+ printk(MYIOC_s_ERR_FMT
+ "%s@%d::mptctl_do_mpt_command - "
+ "Unable to write out reply frame %p\n",
+ ioc->name, __FILE__, __LINE__, karg.replyFrameBufPtr);
+ rc = -ENODATA;
+ goto done_free_mem;
+ }
+ }
+ }
+
+ /* If valid sense data, copy to user.
+ */
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) {
+ sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
+ if (sz > 0) {
+ if (copy_to_user(karg.senseDataPtr,
+ ioc->ioctl_cmds.sense, sz)) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
+ "Unable to write sense data to user %p\n",
+ ioc->name, __FILE__, __LINE__,
+ karg.senseDataPtr);
+ rc = -ENODATA;
+ goto done_free_mem;
+ }
+ }
+ }
+
+ /* If the overall status is _GOOD and data in, copy data
+ * to user.
+ */
+ if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) &&
+ (karg.dataInSize > 0) && (bufIn.kptr)) {
+
+ if (copy_to_user(karg.dataInBufPtr,
+ bufIn.kptr, karg.dataInSize)) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
+ "Unable to write data to user %p\n",
+ ioc->name, __FILE__, __LINE__,
+ karg.dataInBufPtr);
+ rc = -ENODATA;
+ }
+ }
+
+done_free_mem:
+
+ CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
+
+ /* Free the allocated memory.
+ */
+ if (bufOut.kptr != NULL) {
+ pci_free_consistent(ioc->pcidev,
+ bufOut.len, (void *) bufOut.kptr, dma_addr_out);
+ }
+
+ if (bufIn.kptr != NULL) {
+ pci_free_consistent(ioc->pcidev,
+ bufIn.len, (void *) bufIn.kptr, dma_addr_in);
+ }
+
+	/* mf is NULL if the command was issued successfully;
+	 * otherwise a failure occurred after the message frame was acquired.
+	 */
+ if (mf)
+ mpt_free_msg_frame(ioc, mf);
+
+ return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Prototype Routine for the HOST INFO command.
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EFAULT if data unavailable
+ * -EBUSY if previous command timeout and IOC reset is not complete.
+ * -ENODEV if no such device/adapter
+ * -ETIME if timer expires
+ * -ENOMEM if memory allocation error
+ */
+static int
+mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
+{
+ hp_host_info_t __user *uarg = (void __user *) arg;
+ MPT_ADAPTER *ioc;
+ struct pci_dev *pdev;
+ char *pbuf=NULL;
+ dma_addr_t buf_dma;
+ hp_host_info_t karg;
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ int iocnum;
+ int rc, cim_rev;
+ ToolboxIstwiReadWriteRequest_t *IstwiRWRequest;
+ MPT_FRAME_HDR *mf = NULL;
+ MPIHeader_t *mpi_hdr;
+ unsigned long timeleft;
+ int retval;
+
+	/* Distinguish the current structure from the obsolete rev0 layout;
+	 * their sizes differ only on 64-bit platforms (e.g. IA64, SPARC64).
+	 */
+ if (data_size == sizeof(hp_host_info_t))
+ cim_rev = 1;
+ else if (data_size == sizeof(hp_host_info_rev0_t))
+ cim_rev = 0; /* obsolete */
+ else
+ return -EFAULT;
+
+ if (copy_from_user(&karg, uarg, sizeof(hp_host_info_t))) {
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_host_info - "
+ "Unable to read in hp_host_info struct @ %p\n",
+ __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+ (ioc == NULL)) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
+ __FILE__, __LINE__, iocnum);
+ return -ENODEV;
+ }
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": mptctl_hp_hostinfo called.\n",
+ ioc->name));
+
+ /* Fill in the data and return the structure to the calling
+ * program
+ */
+ pdev = (struct pci_dev *) ioc->pcidev;
+
+ karg.vendor = pdev->vendor;
+ karg.device = pdev->device;
+ karg.subsystem_id = pdev->subsystem_device;
+ karg.subsystem_vendor = pdev->subsystem_vendor;
+ karg.devfn = pdev->devfn;
+ karg.bus = pdev->bus->number;
+
+ /* Save the SCSI host no. if
+ * SCSI driver loaded
+ */
+ if (ioc->sh != NULL)
+ karg.host_no = ioc->sh->host_no;
+ else
+ karg.host_no = -1;
+
+ /* Reformat the fw_version into a string
+ */
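+	/* e.g. FW version 1.3.27.0 is reported as the string "01.03.27.00" */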
+ karg.fw_version[0] = ioc->facts.FWVersion.Struct.Major >= 10 ?
+ ((ioc->facts.FWVersion.Struct.Major / 10) + '0') : '0';
+ karg.fw_version[1] = (ioc->facts.FWVersion.Struct.Major % 10 ) + '0';
+ karg.fw_version[2] = '.';
+ karg.fw_version[3] = ioc->facts.FWVersion.Struct.Minor >= 10 ?
+ ((ioc->facts.FWVersion.Struct.Minor / 10) + '0') : '0';
+ karg.fw_version[4] = (ioc->facts.FWVersion.Struct.Minor % 10 ) + '0';
+ karg.fw_version[5] = '.';
+ karg.fw_version[6] = ioc->facts.FWVersion.Struct.Unit >= 10 ?
+ ((ioc->facts.FWVersion.Struct.Unit / 10) + '0') : '0';
+ karg.fw_version[7] = (ioc->facts.FWVersion.Struct.Unit % 10 ) + '0';
+ karg.fw_version[8] = '.';
+ karg.fw_version[9] = ioc->facts.FWVersion.Struct.Dev >= 10 ?
+ ((ioc->facts.FWVersion.Struct.Dev / 10) + '0') : '0';
+ karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0';
+ karg.fw_version[11] = '\0';
+
+ /* Issue a config request to get the device serial number
+ */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = 0;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = 10;
+
+ strncpy(karg.serial_number, " ", 24);
+ if (mpt_config(ioc, &cfg) == 0) {
+ if (cfg.cfghdr.hdr->PageLength > 0) {
+ /* Issue the second config page request */
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
+ if (pbuf) {
+ cfg.physAddr = buf_dma;
+ if (mpt_config(ioc, &cfg) == 0) {
+ ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf;
+ if (strlen(pdata->BoardTracerNumber) > 1) {
+ strncpy(karg.serial_number, pdata->BoardTracerNumber, 24);
+ karg.serial_number[24-1]='\0';
+ }
+ }
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
+ pbuf = NULL;
+ }
+ }
+ }
+ rc = mpt_GetIocState(ioc, 1);
+ switch (rc) {
+ case MPI_IOC_STATE_OPERATIONAL:
+ karg.ioc_status = HP_STATUS_OK;
+ break;
+
+ case MPI_IOC_STATE_FAULT:
+ karg.ioc_status = HP_STATUS_FAILED;
+ break;
+
+ case MPI_IOC_STATE_RESET:
+ case MPI_IOC_STATE_READY:
+ default:
+ karg.ioc_status = HP_STATUS_OTHER;
+ break;
+ }
+
+ karg.base_io_addr = pci_resource_start(pdev, 0);
+
+ if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
+ karg.bus_phys_width = HP_BUS_WIDTH_UNK;
+ else
+ karg.bus_phys_width = HP_BUS_WIDTH_16;
+
+ karg.hard_resets = 0;
+ karg.soft_resets = 0;
+ karg.timeouts = 0;
+ if (ioc->sh != NULL) {
+ MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+
+ if (hd && (cim_rev == 1)) {
+ karg.hard_resets = ioc->hard_resets;
+ karg.soft_resets = ioc->soft_resets;
+ karg.timeouts = ioc->timeouts;
+ }
+ }
+
+ /*
+	 * Gather ISTWI (Industry Standard Two Wire Interface) data
+ */
+ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "%s, no msg frames!!\n", ioc->name, __func__));
+ goto out;
+ }
+
+ IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf;
+ mpi_hdr = (MPIHeader_t *) mf;
+ memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t));
+ IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX;
+ IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
+ IstwiRWRequest->MsgContext = mpi_hdr->MsgContext;
+ IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ;
+ IstwiRWRequest->NumAddressBytes = 0x01;
+ IstwiRWRequest->DataLength = cpu_to_le16(0x04);
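+	/* Odd PCI function numbers use ISTWI device address 0xB2, even ones 0xB0 */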
+ if (pdev->devfn & 1)
+ IstwiRWRequest->DeviceAddr = 0xB2;
+ else
+ IstwiRWRequest->DeviceAddr = 0xB0;
+
+ pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
+ if (!pbuf)
+ goto out;
+ ioc->add_sge((char *)&IstwiRWRequest->SGL,
+ (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
+
+ retval = 0;
+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
+ IstwiRWRequest->MsgContext);
+ INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
+ mpt_put_msg_frame(mptctl_id, ioc, mf);
+
+retry_wait:
+ timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
+ HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
+ if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ retval = -ETIME;
+ printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+ mpt_free_msg_frame(ioc, mf);
+ goto out;
+ }
+ if (!timeleft) {
+ printk(MYIOC_s_WARN_FMT
+ "HOST INFO command timeout, doorbell=0x%08x\n",
+ ioc->name, mpt_GetIocState(ioc, 0));
+ mptctl_timeout_expired(ioc, mf);
+ } else
+ goto retry_wait;
+ goto out;
+ }
+
+	/*
+	 * ISTWI Data Definition
+	 * pbuf[0] = FW_VERSION = 0x4
+	 * pbuf[1] = Bay Count = 6, 4 or 2, depending on the config;
+	 *  one of these three values should be seen
+	 * pbuf[2] = Drive Installed Map = bit pattern depending on which
+	 *  bays have drives in them
+	 * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3))
+	 */
+ if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)
+ karg.rsvd = *(u32 *)pbuf;
+
+ out:
+ CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
+ SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
+
+ if (pbuf)
+ pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
+
+ /* Copy the data from kernel memory to user memory
+ */
+ if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hpgethostinfo - "
+ "Unable to write out hp_host_info @ %p\n",
+ ioc->name, __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ return 0;
+
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Prototype Routine for the TARGET INFO command.
+ *
+ * Outputs: None.
+ * Return: 0 if successful
+ * -EFAULT if data unavailable
+ * -EBUSY if previous command timeout and IOC reset is not complete.
+ * -ENODEV if no such device/adapter
+ * -ETIME if timer expires
+ * -ENOMEM if memory allocation error
+ */
+static int
+mptctl_hp_targetinfo(unsigned long arg)
+{
+ hp_target_info_t __user *uarg = (void __user *) arg;
+ SCSIDevicePage0_t *pg0_alloc;
+ SCSIDevicePage3_t *pg3_alloc;
+ MPT_ADAPTER *ioc;
+ MPT_SCSI_HOST *hd = NULL;
+ hp_target_info_t karg;
+ int iocnum;
+ int data_sz;
+ dma_addr_t page_dma;
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ int tmp, np, rc = 0;
+
+ if (copy_from_user(&karg, uarg, sizeof(hp_target_info_t))) {
+ printk(KERN_ERR MYNAM "%s@%d::mptctl_hp_targetinfo - "
+ "Unable to read in hp_host_targetinfo struct @ %p\n",
+ __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+ (ioc == NULL)) {
+ printk(KERN_DEBUG MYNAM "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
+ __FILE__, __LINE__, iocnum);
+ return -ENODEV;
+ }
+ dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
+ ioc->name));
+
+	/* There is nothing to do for SAS or FC parts.
+ */
+ if ((ioc->bus_type == SAS) || (ioc->bus_type == FC))
+ return 0;
+
+ if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL))
+ return 0;
+
+ if (ioc->sh->host_no != karg.hdr.host)
+ return -ENODEV;
+
+ /* Get the data transfer speeds
+ */
+ data_sz = ioc->spi_data.sdp0length * 4;
+ pg0_alloc = (SCSIDevicePage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+ if (pg0_alloc) {
+ hdr.PageVersion = ioc->spi_data.sdp0version;
+ hdr.PageLength = data_sz;
+ hdr.PageNumber = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+
+ cfg.cfghdr.hdr = &hdr;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ cfg.dir = 0;
+ cfg.timeout = 0;
+ cfg.physAddr = page_dma;
+
+ cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
+
+ if ((rc = mpt_config(ioc, &cfg)) == 0) {
+ np = le32_to_cpu(pg0_alloc->NegotiatedParameters);
+ karg.negotiated_width = np & MPI_SCSIDEVPAGE0_NP_WIDE ?
+ HP_BUS_WIDTH_16 : HP_BUS_WIDTH_8;
+
+ if (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) {
+ tmp = (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
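+				/* Map the negotiated sync period factor to a
+				 * speed: < 0x09 Ultra320, 0x09 Ultra160,
+				 * 0x0A Ultra2, 0x0B-0x0C Ultra,
+				 * 0x0D-0x25 Fast, otherwise async.
+				 */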
+ if (tmp < 0x09)
+ karg.negotiated_speed = HP_DEV_SPEED_ULTRA320;
+ else if (tmp <= 0x09)
+ karg.negotiated_speed = HP_DEV_SPEED_ULTRA160;
+ else if (tmp <= 0x0A)
+ karg.negotiated_speed = HP_DEV_SPEED_ULTRA2;
+ else if (tmp <= 0x0C)
+ karg.negotiated_speed = HP_DEV_SPEED_ULTRA;
+ else if (tmp <= 0x25)
+ karg.negotiated_speed = HP_DEV_SPEED_FAST;
+ else
+ karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
+ } else
+ karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
+ }
+
+ pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg0_alloc, page_dma);
+ }
+
+ /* Set defaults
+ */
+ karg.message_rejects = -1;
+ karg.phase_errors = -1;
+ karg.parity_errors = -1;
+ karg.select_timeouts = -1;
+
+ /* Get the target error parameters
+ */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 3;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+
+ cfg.cfghdr.hdr = &hdr;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.timeout = 0;
+ cfg.physAddr = -1;
+ if ((mpt_config(ioc, &cfg) == 0) && (cfg.cfghdr.hdr->PageLength > 0)) {
+ /* Issue the second config page request */
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
+ pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent(
+ ioc->pcidev, data_sz, &page_dma);
+ if (pg3_alloc) {
+ cfg.physAddr = page_dma;
+ cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
+ if ((rc = mpt_config(ioc, &cfg)) == 0) {
+ karg.message_rejects = (u32) le16_to_cpu(pg3_alloc->MsgRejectCount);
+ karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount);
+ karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount);
+ }
+ pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg3_alloc, page_dma);
+ }
+ }
+ hd = shost_priv(ioc->sh);
+ if (hd != NULL)
+ karg.select_timeouts = hd->sel_timeout[karg.hdr.id];
+
+ /* Copy the data from kernel memory to user memory
+ */
+ if (copy_to_user((char __user *)arg, &karg, sizeof(hp_target_info_t))) {
+ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_hp_target_info - "
+			"Unable to write out hp_target_info struct @ %p\n",
+ ioc->name, __FILE__, __LINE__, uarg);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+static const struct file_operations mptctl_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .fasync = mptctl_fasync,
+ .unlocked_ioctl = mptctl_ioctl,
+ .release = mptctl_release,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_mpctl_ioctl,
+#endif
+};
+
+static struct miscdevice mptctl_miscdev = {
+ MPT_MINOR,
+ MYNAM,
+ &mptctl_fops
+};
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#ifdef CONFIG_COMPAT
+
+static int
+compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mpt_fw_xfer32 kfw32;
+ struct mpt_fw_xfer kfw;
+ MPT_ADAPTER *iocp = NULL;
+ int iocnum, iocnumX;
+ int nonblock = (filp->f_flags & O_NONBLOCK);
+ int ret;
+
+
+ if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32)))
+ return -EFAULT;
+
+ /* Verify intended MPT adapter */
+ iocnumX = kfw32.iocnum & 0xFF;
+ if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
+ (iocp == NULL)) {
+ printk(KERN_DEBUG MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n",
+ __LINE__, iocnumX);
+ return -ENODEV;
+ }
+
+ if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
+ return ret;
+
+ dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mptfwxfer_ioctl() called\n",
+ iocp->name));
+ kfw.iocnum = iocnum;
+ kfw.fwlen = kfw32.fwlen;
+ kfw.bufp = compat_ptr(kfw32.bufp);
+
+ ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
+
+ mutex_unlock(&iocp->ioctl_cmds.mutex);
+
+ return ret;
+}
+
+static int
+compat_mpt_command(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct mpt_ioctl_command32 karg32;
+ struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg;
+ struct mpt_ioctl_command karg;
+ MPT_ADAPTER *iocp = NULL;
+ int iocnum, iocnumX;
+ int nonblock = (filp->f_flags & O_NONBLOCK);
+ int ret;
+
+ if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32)))
+ return -EFAULT;
+
+ /* Verify intended MPT adapter */
+ iocnumX = karg32.hdr.iocnum & 0xFF;
+ if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
+ (iocp == NULL)) {
+ printk(KERN_DEBUG MYNAM "::compat_mpt_command @%d - ioc%d not found!\n",
+ __LINE__, iocnumX);
+ return -ENODEV;
+ }
+
+ if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
+ return ret;
+
+ dctlprintk(iocp, printk(MYIOC_s_DEBUG_FMT "compat_mpt_command() called\n",
+ iocp->name));
+ /* Copy data to karg */
+ karg.hdr.iocnum = karg32.hdr.iocnum;
+ karg.hdr.port = karg32.hdr.port;
+ karg.timeout = karg32.timeout;
+ karg.maxReplyBytes = karg32.maxReplyBytes;
+
+ karg.dataInSize = karg32.dataInSize;
+ karg.dataOutSize = karg32.dataOutSize;
+ karg.maxSenseBytes = karg32.maxSenseBytes;
+ karg.dataSgeOffset = karg32.dataSgeOffset;
+
+ karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr;
+ karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr;
+ karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr;
+ karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr;
+
+ /* Pass new structure to do_mpt_command
+ */
+ ret = mptctl_do_mpt_command (karg, &uarg->MF);
+
+ mutex_unlock(&iocp->ioctl_cmds.mutex);
+
+ return ret;
+}
+
+static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+ mutex_lock(&mpctl_mutex);
+ switch (cmd) {
+ case MPTIOCINFO:
+ case MPTIOCINFO1:
+ case MPTIOCINFO2:
+ case MPTTARGETINFO:
+ case MPTEVENTQUERY:
+ case MPTEVENTENABLE:
+ case MPTEVENTREPORT:
+ case MPTHARDRESET:
+ case HP_GETHOSTINFO:
+ case HP_GETTARGETINFO:
+ case MPTTEST:
+ ret = __mptctl_ioctl(f, cmd, arg);
+ break;
+ case MPTCOMMAND32:
+ ret = compat_mpt_command(f, cmd, arg);
+ break;
+ case MPTFWDOWNLOAD32:
+ ret = compat_mptfwxfer_ioctl(f, cmd, arg);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+ mutex_unlock(&mpctl_mutex);
+ return ret;
+}
+
+#endif
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptctl_probe - Installs ioctl devices per bus.
+ * @pdev: Pointer to pci_dev structure
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+
+static int
+mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+
+ mutex_init(&ioc->ioctl_cmds.mutex);
+ init_completion(&ioc->ioctl_cmds.done);
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptctl_remove - Remove ioctl devices
+ * @pdev: Pointer to pci_dev structure
+ *
+ *
+ */
+static void
+mptctl_remove(struct pci_dev *pdev)
+{
+}
+
+static struct mpt_pci_driver mptctl_driver = {
+ .probe = mptctl_probe,
+ .remove = mptctl_remove,
+};
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int __init mptctl_init(void)
+{
+ int err;
+ int where = 1;
+
+ show_mptmod_ver(my_NAME, my_VERSION);
+
+ mpt_device_driver_register(&mptctl_driver, MPTCTL_DRIVER);
+
+ /* Register this device */
+ err = misc_register(&mptctl_miscdev);
+ if (err < 0) {
+ printk(KERN_ERR MYNAM ": Can't register misc device [minor=%d].\n", MPT_MINOR);
+ goto out_fail;
+ }
+ printk(KERN_INFO MYNAM ": Registered with Fusion MPT base driver\n");
+ printk(KERN_INFO MYNAM ": /dev/%s @ (major,minor=%d,%d)\n",
+ mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
+
+ /*
+ * Install our handler
+ */
+ ++where;
+ mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER,
+ "mptctl_reply");
+ if (!mptctl_id || mptctl_id >= MPT_MAX_PROTOCOL_DRIVERS) {
+ printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
+ misc_deregister(&mptctl_miscdev);
+ err = -EBUSY;
+ goto out_fail;
+ }
+
+ mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER,
+ "mptctl_taskmgmt_reply");
+ if (!mptctl_taskmgmt_id || mptctl_taskmgmt_id >= MPT_MAX_PROTOCOL_DRIVERS) {
+ printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
+ mpt_deregister(mptctl_id);
+ misc_deregister(&mptctl_miscdev);
+ err = -EBUSY;
+ goto out_fail;
+ }
+
+ mpt_reset_register(mptctl_id, mptctl_ioc_reset);
+ mpt_event_register(mptctl_id, mptctl_event_process);
+
+ return 0;
+
+out_fail:
+
+ mpt_device_driver_deregister(MPTCTL_DRIVER);
+
+ return err;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static void mptctl_exit(void)
+{
+ misc_deregister(&mptctl_miscdev);
+ printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n",
+ mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
+
+ /* De-register event handler from base module */
+ mpt_event_deregister(mptctl_id);
+
+ /* De-register reset handler from base module */
+ mpt_reset_deregister(mptctl_id);
+
+ /* De-register callback handler from base module */
+ mpt_deregister(mptctl_taskmgmt_id);
+ mpt_deregister(mptctl_id);
+
+ mpt_device_driver_deregister(MPTCTL_DRIVER);
+
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+module_init(mptctl_init);
+module_exit(mptctl_exit);
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
new file mode 100644
index 00000000..d564cc9a
--- /dev/null
+++ b/drivers/message/fusion/mptctl.h
@@ -0,0 +1,467 @@
+/*
+ * linux/drivers/message/fusion/mptctl.h
+ * Fusion MPT misc device (ioctl) driver.
+ * For use with PCI chip/adapter(s):
+ * LSIFC9xx/LSI409xx Fibre Channel
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef MPTCTL_H_INCLUDED
+#define MPTCTL_H_INCLUDED
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *
+ */
+#define MPT_MISCDEV_BASENAME "mptctl"
+#define MPT_MISCDEV_PATHNAME "/dev/" MPT_MISCDEV_BASENAME
+
+#define MPT_PRODUCT_LENGTH 12
+
+/*
+ * Generic MPT Control IOCTLs and structures
+ */
+#define MPT_MAGIC_NUMBER 'm'
+
+#define MPTRWPERF _IOWR(MPT_MAGIC_NUMBER,0,struct mpt_raw_r_w)
+
+#define MPTFWDOWNLOAD _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer)
+#define MPTCOMMAND _IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command)
+
+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+#define MPTFWDOWNLOAD32 _IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer32)
+#define MPTCOMMAND32 _IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command32)
+#endif
+
+#define MPTIOCINFO _IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo)
+#define MPTIOCINFO1 _IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo_rev0)
+#define MPTIOCINFO2 _IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo_rev1)
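+/* The three MPTIOCINFO variants share ioctl command number 17 and differ
+ * only in the structure size encoded in the request.
+ */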
+#define MPTTARGETINFO _IOWR(MPT_MAGIC_NUMBER,18,struct mpt_ioctl_targetinfo)
+#define MPTTEST _IOWR(MPT_MAGIC_NUMBER,19,struct mpt_ioctl_test)
+#define MPTEVENTQUERY _IOWR(MPT_MAGIC_NUMBER,21,struct mpt_ioctl_eventquery)
+#define MPTEVENTENABLE _IOWR(MPT_MAGIC_NUMBER,22,struct mpt_ioctl_eventenable)
+#define MPTEVENTREPORT _IOWR(MPT_MAGIC_NUMBER,23,struct mpt_ioctl_eventreport)
+#define MPTHARDRESET _IOWR(MPT_MAGIC_NUMBER,24,struct mpt_ioctl_diag_reset)
+#define MPTFWREPLACE _IOWR(MPT_MAGIC_NUMBER,25,struct mpt_ioctl_replace_fw)
+
+/*
+ * SPARC PLATFORM REMARKS:
+ * IOCTL data structures that contain pointers
+ * will have different sizes in the driver and applications
+ * (as the app. will not use 8-byte pointers).
+ * Apps should use MPTFWDOWNLOAD and MPTCOMMAND.
+ * The driver will convert data from
+ * mpt_fw_xfer32 (mpt_ioctl_command32) to mpt_fw_xfer (mpt_ioctl_command)
+ * internally.
+ *
+ * If data structures change size, must handle as in IOCGETINFO.
+ */
+struct mpt_fw_xfer {
+ unsigned int iocnum; /* IOC unit number */
+ unsigned int fwlen;
+ void __user *bufp; /* Pointer to firmware buffer */
+};
+
+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+struct mpt_fw_xfer32 {
+ unsigned int iocnum;
+ unsigned int fwlen;
+ u32 bufp;
+};
+#endif /*}*/
+
+/*
+ * IOCTL header structure.
+ * iocnum - must be defined.
+ * port - must be defined for all IOCTL commands other than MPTIOCINFO
+ * maxDataSize - ignored on MPTCOMMAND commands
+ * - ignored on MPTFWREPLACE commands
+ * - on query commands, reports the maximum number of bytes to be returned
+ * to the host driver (count includes the header).
+ * That is, set to sizeof(struct mpt_ioctl_iocinfo) for fixed sized commands.
+ * Set to sizeof(struct mpt_ioctl_targetinfo) + datasize for variable
+ * sized commands. (MPTTARGETINFO, MPTEVENTREPORT)
+ */
+typedef struct _mpt_ioctl_header {
+ unsigned int iocnum; /* IOC unit number */
+ unsigned int port; /* IOC port number */
+ int maxDataSize; /* Maximum Num. bytes to transfer on read */
+} mpt_ioctl_header;
+
+/*
+ * Issue a diagnostic reset
+ */
+struct mpt_ioctl_diag_reset {
+ mpt_ioctl_header hdr;
+};
+
+
+/*
+ * PCI bus/device/function information structure.
+ */
+struct mpt_ioctl_pci_info {
+ union {
+ struct {
+ unsigned int deviceNumber : 5;
+ unsigned int functionNumber : 3;
+ unsigned int busNumber : 24;
+ } bits;
+ unsigned int asUlong;
+ } u;
+};
+
+struct mpt_ioctl_pci_info2 {
+ union {
+ struct {
+ unsigned int deviceNumber : 5;
+ unsigned int functionNumber : 3;
+ unsigned int busNumber : 24;
+ } bits;
+ unsigned int asUlong;
+ } u;
+ int segmentID;
+};
+
+/*
+ * Adapter Information Page
+ * Read only.
+ * Data starts at offset 0xC
+ */
+#define MPT_IOCTL_INTERFACE_SCSI (0x00)
+#define MPT_IOCTL_INTERFACE_FC (0x01)
+#define MPT_IOCTL_INTERFACE_FC_IP (0x02)
+#define MPT_IOCTL_INTERFACE_SAS (0x03)
+#define MPT_IOCTL_VERSION_LENGTH (32)
+
+struct mpt_ioctl_iocinfo {
+ mpt_ioctl_header hdr;
+ int adapterType; /* SCSI or FCP */
+ int port; /* port number */
+ int pciId; /* PCI Id. */
+ int hwRev; /* hardware revision */
+ int subSystemDevice; /* PCI subsystem Device ID */
+ int subSystemVendor; /* PCI subsystem Vendor ID */
+ int numDevices; /* number of devices */
+ int FWVersion; /* FW Version (integer) */
+ int BIOSVersion; /* BIOS Version (integer) */
+ char driverVersion[MPT_IOCTL_VERSION_LENGTH]; /* Driver Version (string) */
+ char busChangeEvent;
+ char hostId;
+ char rsvd[2];
+ struct mpt_ioctl_pci_info2 pciInfo; /* Added Rev 2 */
+};
+
+struct mpt_ioctl_iocinfo_rev1 {
+ mpt_ioctl_header hdr;
+ int adapterType; /* SCSI or FCP */
+ int port; /* port number */
+ int pciId; /* PCI Id. */
+ int hwRev; /* hardware revision */
+ int subSystemDevice; /* PCI subsystem Device ID */
+ int subSystemVendor; /* PCI subsystem Vendor ID */
+ int numDevices; /* number of devices */
+ int FWVersion; /* FW Version (integer) */
+ int BIOSVersion; /* BIOS Version (integer) */
+ char driverVersion[MPT_IOCTL_VERSION_LENGTH]; /* Driver Version (string) */
+ char busChangeEvent;
+ char hostId;
+ char rsvd[2];
+ struct mpt_ioctl_pci_info pciInfo; /* Added Rev 1 */
+};
+
+/* Original structure; these IOCTLs must always be accepted.
+ * 4-byte pads can occur in the structures above depending on the
+ * architecture. We would like to re-align, but cannot.
+ */
+struct mpt_ioctl_iocinfo_rev0 {
+ mpt_ioctl_header hdr;
+ int adapterType; /* SCSI or FCP */
+ int port; /* port number */
+ int pciId; /* PCI Id. */
+ int hwRev; /* hardware revision */
+ int subSystemDevice; /* PCI subsystem Device ID */
+ int subSystemVendor; /* PCI subsystem Vendor ID */
+ int numDevices; /* number of devices */
+ int FWVersion; /* FW Version (integer) */
+ int BIOSVersion; /* BIOS Version (integer) */
+ char driverVersion[MPT_IOCTL_VERSION_LENGTH]; /* Driver Version (string) */
+ char busChangeEvent;
+ char hostId;
+ char rsvd[2];
+};
+
+/*
+ * Device Information Page
+ * Report the number of, and ids of, all targets
+ * on this IOC. The ids array is a packed structure
+ * of the known targetInfo.
+ * bits 31-24: reserved
+ * 23-16: LUN
+ * 15- 8: Bus Number
+ * 7- 0: Target ID
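+ *	 e.g. 0x00010203 = LUN 1, bus 2, target 3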
+ */
+struct mpt_ioctl_targetinfo {
+ mpt_ioctl_header hdr;
+ int numDevices; /* Num targets on this ioc */
+ int targetInfo[1];
+};
+
+
+/*
+ * Event reporting IOCTLs. These IOCTLs use
+ * the following structures:
+ */
+struct mpt_ioctl_eventquery {
+ mpt_ioctl_header hdr;
+ unsigned short eventEntries;
+ unsigned short reserved;
+ unsigned int eventTypes;
+};
+
+struct mpt_ioctl_eventenable {
+ mpt_ioctl_header hdr;
+ unsigned int eventTypes;
+};
+
+#ifndef __KERNEL__
+typedef struct {
+ uint event;
+ uint eventContext;
+ uint data[2];
+} MPT_IOCTL_EVENTS;
+#endif
+
+struct mpt_ioctl_eventreport {
+ mpt_ioctl_header hdr;
+ MPT_IOCTL_EVENTS eventData[1];
+};
+
+#define MPT_MAX_NAME 32
+struct mpt_ioctl_test {
+ mpt_ioctl_header hdr;
+ u8 name[MPT_MAX_NAME];
+ int chip_type;
+ u8 product [MPT_PRODUCT_LENGTH];
+};
+
+/* Replace the FW image cached in host driver memory
+ * newImageSize - image size in bytes
+ * newImage - first byte of the new image
+ */
+typedef struct mpt_ioctl_replace_fw {
+ mpt_ioctl_header hdr;
+ int newImageSize;
+ u8 newImage[1];
+} mpt_ioctl_replace_fw_t;
+
+/* General MPT Pass through data structure
+ *
+ * iocnum
+ * timeout - in seconds, command timeout. If 0, set by driver to
+ * default value.
+ * replyFrameBufPtr - reply location
+ * dataInBufPtr - destination for read
+ * dataOutBufPtr - data source for write
+ * senseDataPtr - sense data location
+ * maxReplyBytes - maximum number of reply bytes to be sent to app.
+ * dataInSize - num bytes for data transfer in (read)
+ * dataOutSize - num bytes for data transfer out (write)
+ * dataSgeOffset - offset in words from the start of the request message
+ * to the first SGL
+ * MF[1];
+ *
+ * Remark: Some config pages have bi-directional transfer,
+ * both a read and a write. The basic structure allows for
+ * a bidirectional set up. Normal messages will have one or
+ * both of these buffers NULL.
+ */
+struct mpt_ioctl_command {
+ mpt_ioctl_header hdr;
+ int timeout; /* optional (seconds) */
+ char __user *replyFrameBufPtr;
+ char __user *dataInBufPtr;
+ char __user *dataOutBufPtr;
+ char __user *senseDataPtr;
+ int maxReplyBytes;
+ int dataInSize;
+ int dataOutSize;
+ int maxSenseBytes;
+ int dataSgeOffset;
+ char MF[1];
+};
+
+/*
+ * SPARC PLATFORM: See earlier remark.
+ */
+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+struct mpt_ioctl_command32 {
+ mpt_ioctl_header hdr;
+ int timeout;
+ u32 replyFrameBufPtr;
+ u32 dataInBufPtr;
+ u32 dataOutBufPtr;
+ u32 senseDataPtr;
+ int maxReplyBytes;
+ int dataInSize;
+ int dataOutSize;
+ int maxSenseBytes;
+ int dataSgeOffset;
+ char MF[1];
+};
+#endif /*}*/
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#define CPQFCTS_IOC_MAGIC 'Z'
+#define HP_IOC_MAGIC 'Z'
+#define HP_GETHOSTINFO _IOR(HP_IOC_MAGIC, 20, hp_host_info_t)
+#define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t)
+#define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t)
+
+typedef struct _hp_header {
+ unsigned int iocnum;
+ unsigned int host;
+ unsigned int channel;
+ unsigned int id;
+ unsigned int lun;
+} hp_header_t;
+
+/*
+ * Header:
+ * iocnum required (input)
+ * host ignored
+ *	channel		ignored
+ * id ignored
+ * lun ignored
+ */
+typedef struct _hp_host_info {
+ hp_header_t hdr;
+ u16 vendor;
+ u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem_id;
+ u8 devfn;
+ u8 bus;
+	ushort	 host_no;		/* SCSI Host number; -1 if SCSI driver not loaded */
+ u8 fw_version[16]; /* string */
+ u8 serial_number[24]; /* string */
+ u32 ioc_status;
+ u32 bus_phys_width;
+ u32 base_io_addr;
+ u32 rsvd;
+ unsigned int hard_resets; /* driver initiated resets */
+ unsigned int soft_resets; /* ioc, external resets */
+ unsigned int timeouts; /* num timeouts */
+} hp_host_info_t;
+
+/* replace ulongs with uints, need to preserve backwards
+ * compatibility.
+ */
+typedef struct _hp_host_info_rev0 {
+ hp_header_t hdr;
+ u16 vendor;
+ u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem_id;
+ u8 devfn;
+ u8 bus;
+	ushort	 host_no;		/* SCSI Host number; -1 if SCSI driver not loaded */
+ u8 fw_version[16]; /* string */
+ u8 serial_number[24]; /* string */
+ u32 ioc_status;
+ u32 bus_phys_width;
+ u32 base_io_addr;
+ u32 rsvd;
+ unsigned long hard_resets; /* driver initiated resets */
+ unsigned long soft_resets; /* ioc, external resets */
+ unsigned long timeouts; /* num timeouts */
+} hp_host_info_rev0_t;
+
+/*
+ * Header:
+ * iocnum required (input)
+ * host required
+ * channel required (bus number)
+ * id required
+ * lun ignored
+ *
+ * All error counters are in the range 0 to 0xFFFF.
+ */
+typedef struct _hp_target_info {
+ hp_header_t hdr;
+ u32 parity_errors;
+ u32 phase_errors;
+ u32 select_timeouts;
+ u32 message_rejects;
+ u32 negotiated_speed;
+ u8 negotiated_width;
+ u8 rsvd[7]; /* 8 byte alignment */
+} hp_target_info_t;
+
+#define HP_STATUS_OTHER 1
+#define HP_STATUS_OK 2
+#define HP_STATUS_FAILED 3
+
+#define HP_BUS_WIDTH_UNK 1
+#define HP_BUS_WIDTH_8 2
+#define HP_BUS_WIDTH_16 3
+#define HP_BUS_WIDTH_32 4
+
+#define HP_DEV_SPEED_ASYNC 2
+#define HP_DEV_SPEED_FAST 3
+#define HP_DEV_SPEED_ULTRA 4
+#define HP_DEV_SPEED_ULTRA2 5
+#define HP_DEV_SPEED_ULTRA160 6
+#define HP_DEV_SPEED_SCSI1 7
+#define HP_DEV_SPEED_ULTRA320 8
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#endif
+
diff --git a/drivers/message/fusion/mptdebug.h b/drivers/message/fusion/mptdebug.h
new file mode 100644
index 00000000..28e47887
--- /dev/null
+++ b/drivers/message/fusion/mptdebug.h
@@ -0,0 +1,291 @@
+/*
+ * linux/drivers/message/fusion/mptdebug.h
+ * For use with LSI PCI chip/adapter(s)
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#ifndef MPTDEBUG_H_INCLUDED
+#define MPTDEBUG_H_INCLUDED
+
+/*
+ * debug level can be programmed on the fly via SysFS (hex values)
+ *
+ * Example: (programming for MPT_DEBUG_EVENTS on host 5)
+ *
+ * echo 8 > /sys/class/scsi_host/host5/debug_level
+ *
+ * --------------------------------------------------------
+ * mpt_debug_level - command line parameter
+ *	this allows enabling debug at driver load time (for all IOCs)
+ *
+ * Example (programming for MPT_DEBUG_EVENTS)
+ *
+ * insmod mptbase.ko mpt_debug_level=8
+ *
+ * --------------------------------------------------------
+ * CONFIG_FUSION_LOGGING - enables compiling debug into driver
+ * this can be enabled in the driver Makefile
+ *
+ *
+ * --------------------------------------------------------
+ * Please note most debug prints are issued at logging priority = debug.
+ * This is the lowest and most verbose level. Please refer to the manual
+ * pages for syslogd or syslog-ng on how to configure this.
+ */
+
+#define MPT_DEBUG 0x00000001
+#define MPT_DEBUG_MSG_FRAME 0x00000002
+#define MPT_DEBUG_SG 0x00000004
+#define MPT_DEBUG_EVENTS 0x00000008
+#define MPT_DEBUG_VERBOSE_EVENTS 0x00000010
+#define MPT_DEBUG_INIT 0x00000020
+#define MPT_DEBUG_EXIT 0x00000040
+#define MPT_DEBUG_FAIL 0x00000080
+#define MPT_DEBUG_TM 0x00000100
+#define MPT_DEBUG_DV 0x00000200
+#define MPT_DEBUG_REPLY 0x00000400
+#define MPT_DEBUG_HANDSHAKE 0x00000800
+#define MPT_DEBUG_CONFIG 0x00001000
+#define MPT_DEBUG_DL 0x00002000
+#define MPT_DEBUG_RESET 0x00008000
+#define MPT_DEBUG_SCSI 0x00010000
+#define MPT_DEBUG_IOCTL 0x00020000
+#define MPT_DEBUG_FC 0x00080000
+#define MPT_DEBUG_SAS 0x00100000
+#define MPT_DEBUG_SAS_WIDE 0x00200000
+#define MPT_DEBUG_36GB_MEM 0x00400000
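+
+/*
+ * Illustrative note: these bits may be OR'd together.  For example,
+ * enabling both MPT_DEBUG_EVENTS (0x8) and MPT_DEBUG_CONFIG (0x1000)
+ * uses the combined value 0x1008:
+ *
+ *	echo 1008 > /sys/class/scsi_host/host5/debug_level
+ *
+ * (assuming, per the comment above, that the attribute takes hex values;
+ * host5 is the hypothetical host from the earlier example)
+ */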
+
+/*
+ * CONFIG_FUSION_LOGGING - enabled in Kconfig
+ */
+
+#ifdef CONFIG_FUSION_LOGGING
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \
+{ \
+ if (IOC->debug_level & BITS) \
+ CMD; \
+}
+#else
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
+#endif
+
+
+/*
+ * debug macros
+ */
+
+#define dprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG)
+
+#define dsgprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SG)
+
+#define devtprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENTS)
+
+#define devtverboseprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_VERBOSE_EVENTS)
+
+#define dinitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_INIT)
+
+#define dexitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EXIT)
+
+#define dfailprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FAIL)
+
+#define dtmprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TM)
+
+#define ddvprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DV)
+
+#define dreplyprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_REPLY)
+
+#define dhsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_HANDSHAKE)
+
+#define dcprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CONFIG)
+
+#define ddlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DL)
+
+#define drsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_RESET)
+
+#define dsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SCSI)
+
+#define dctlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL)
+
+#define dfcprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FC)
+
+#define dsasprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS)
+
+#define dsaswideprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
+
+#define d36memprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM)
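+
+/*
+ * Usage sketch (illustrative only): each wrapper takes the adapter pointer
+ * plus a complete printk() statement; the statement is compiled in only
+ * when CONFIG_FUSION_LOGGING is set and emitted only when the matching bit
+ * is set in ioc->debug_level, e.g.
+ *
+ *	dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ *	    "bringup complete\n", ioc->name));
+ *
+ * The message text is hypothetical; MYIOC_s_DEBUG_FMT is the per-IOC
+ * prefix format used throughout these drivers.
+ */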
+
+
+/*
+ * Verbose logging
+ */
+#if defined(MPT_DEBUG_VERBOSE) && defined(CONFIG_FUSION_LOGGING)
+static inline void
+DBG_DUMP_FW_DOWNLOAD(MPT_ADAPTER *ioc, u32 *mfp, int numfrags)
+{
+ int i;
+
+ if (!(ioc->debug_level & MPT_DEBUG))
+ return;
+ printk(KERN_DEBUG "F/W download request:\n");
+ for (i=0; i < 7+numfrags*2; i++)
+ printk(" %08x", le32_to_cpu(mfp[i]));
+ printk("\n");
+}
+
+static inline void
+DBG_DUMP_PUT_MSG_FRAME(MPT_ADAPTER *ioc, u32 *mfp)
+{
+ int ii, n;
+
+ if (!(ioc->debug_level & MPT_DEBUG_MSG_FRAME))
+ return;
+ printk(KERN_DEBUG "%s: About to Put msg frame @ %p:\n",
+ ioc->name, mfp);
+ n = ioc->req_sz/4 - 1;
+ while (mfp[n] == 0)
+ n--;
+ for (ii=0; ii<=n; ii++) {
+ if (ii && ((ii%8)==0))
+ printk("\n");
+ printk(" %08x", le32_to_cpu(mfp[ii]));
+ }
+ printk("\n");
+}
+
+static inline void
+DBG_DUMP_FW_REQUEST_FRAME(MPT_ADAPTER *ioc, u32 *mfp)
+{
+ int i, n;
+
+ if (!(ioc->debug_level & MPT_DEBUG_MSG_FRAME))
+ return;
+ n = 10;
+ printk(KERN_INFO " ");
+ for (i = 0; i < n; i++)
+ printk(" %08x", le32_to_cpu(mfp[i]));
+ printk("\n");
+}
+
+static inline void
+DBG_DUMP_REQUEST_FRAME(MPT_ADAPTER *ioc, u32 *mfp)
+{
+ int i, n;
+
+ if (!(ioc->debug_level & MPT_DEBUG_MSG_FRAME))
+ return;
+ n = 24;
+ for (i=0; i<n; i++) {
+ if (i && ((i%8)==0))
+ printk("\n");
+ printk("%08x ", le32_to_cpu(mfp[i]));
+ }
+ printk("\n");
+}
+
+static inline void
+DBG_DUMP_REPLY_FRAME(MPT_ADAPTER *ioc, u32 *mfp)
+{
+ int i, n;
+
+ if (!(ioc->debug_level & MPT_DEBUG_MSG_FRAME))
+ return;
+ n = (le32_to_cpu(mfp[0]) & 0x00FF0000) >> 16;
+ printk(KERN_INFO " ");
+ for (i=0; i<n; i++)
+ printk(" %08x", le32_to_cpu(mfp[i]));
+ printk("\n");
+}
+
+static inline void
+DBG_DUMP_REQUEST_FRAME_HDR(MPT_ADAPTER *ioc, u32 *mfp)
+{
+ int i, n;
+
+ if (!(ioc->debug_level & MPT_DEBUG_MSG_FRAME))
+ return;
+ n = 3;
+ printk(KERN_INFO " ");
+ for (i=0; i<n; i++)
+ printk(" %08x", le32_to_cpu(mfp[i]));
+ printk("\n");
+}
+
+static inline void
+DBG_DUMP_TM_REQUEST_FRAME(MPT_ADAPTER *ioc, u32 *mfp)
+{
+ int i, n;
+
+ if (!(ioc->debug_level & MPT_DEBUG_TM))
+ return;
+ n = 13;
+ printk(KERN_DEBUG "TM_REQUEST:\n");
+ for (i=0; i<n; i++) {
+ if (i && ((i%8)==0))
+ printk("\n");
+ printk("%08x ", le32_to_cpu(mfp[i]));
+ }
+ printk("\n");
+}
+
+static inline void
+DBG_DUMP_TM_REPLY_FRAME(MPT_ADAPTER *ioc, u32 *mfp)
+{
+ int i, n;
+
+ if (!(ioc->debug_level & MPT_DEBUG_TM))
+ return;
+ n = (le32_to_cpu(mfp[0]) & 0x00FF0000) >> 16;
+ printk(KERN_DEBUG "TM_REPLY MessageLength=%d:\n", n);
+ for (i=0; i<n; i++) {
+ if (i && ((i%8)==0))
+ printk("\n");
+ printk(" %08x", le32_to_cpu(mfp[i]));
+ }
+ printk("\n");
+}
+
+#define dmfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME)
+
+#else /* !(defined(MPT_DEBUG_VERBOSE) && defined(CONFIG_FUSION_LOGGING)) */
+
+#define DBG_DUMP_FW_DOWNLOAD(IOC, mfp, numfrags)
+#define DBG_DUMP_PUT_MSG_FRAME(IOC, mfp)
+#define DBG_DUMP_FW_REQUEST_FRAME(IOC, mfp)
+#define DBG_DUMP_REQUEST_FRAME(IOC, mfp)
+#define DBG_DUMP_REPLY_FRAME(IOC, mfp)
+#define DBG_DUMP_REQUEST_FRAME_HDR(IOC, mfp)
+#define DBG_DUMP_TM_REQUEST_FRAME(IOC, mfp)
+#define DBG_DUMP_TM_REPLY_FRAME(IOC, mfp)
+
+#define dmfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME)
+
+#endif /* defined(MPT_DEBUG_VERBOSE) && defined(CONFIG_FUSION_LOGGING) */
+
+#endif /* ifndef MPTDEBUG_H_INCLUDED */
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
new file mode 100644
index 00000000..d784c367
--- /dev/null
+++ b/drivers/message/fusion/mptfc.c
@@ -0,0 +1,1557 @@
+/*
+ * linux/drivers/message/fusion/mptfc.c
+ * For use with LSI PCI chip/adapter(s)
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h> /* for mdelay */
+#include <linux/interrupt.h> /* needed for in_interrupt() proto */
+#include <linux/reboot.h> /* notifier code */
+#include <linux/workqueue.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "mptbase.h"
+#include "mptscsih.h"
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#define my_NAME "Fusion MPT FC Host driver"
+#define my_VERSION MPT_LINUX_VERSION_COMMON
+#define MYNAM "mptfc"
+
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(my_VERSION);
+
+/* Command line args */
+#define MPTFC_DEV_LOSS_TMO (60)
+static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */
+module_param(mptfc_dev_loss_tmo, int, 0);
+MODULE_PARM_DESC(mptfc_dev_loss_tmo, " Initial time the driver programs the "
+ " transport to wait for an rport to "
+ " return following a device loss event."
+ " Default=60.");
+
+/* SCSI mid-layer global parameter is max_report_luns, which is 511 */
+#define MPTFC_MAX_LUN (16895)
+static int max_lun = MPTFC_MAX_LUN;
+module_param(max_lun, int, 0);
+MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
+
+static u8 mptfcDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptfcTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptfcInternalCtx = MPT_MAX_PROTOCOL_DRIVERS;
+
+static int mptfc_target_alloc(struct scsi_target *starget);
+static int mptfc_slave_alloc(struct scsi_device *sdev);
+static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt);
+static void mptfc_target_destroy(struct scsi_target *starget);
+static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout);
+static void __devexit mptfc_remove(struct pci_dev *pdev);
+static int mptfc_abort(struct scsi_cmnd *SCpnt);
+static int mptfc_dev_reset(struct scsi_cmnd *SCpnt);
+static int mptfc_bus_reset(struct scsi_cmnd *SCpnt);
+static int mptfc_host_reset(struct scsi_cmnd *SCpnt);
+
+static struct scsi_host_template mptfc_driver_template = {
+ .module = THIS_MODULE,
+ .proc_name = "mptfc",
+ .proc_info = mptscsih_proc_info,
+ .name = "MPT FC Host",
+ .info = mptscsih_info,
+ .queuecommand = mptfc_qcmd,
+ .target_alloc = mptfc_target_alloc,
+ .slave_alloc = mptfc_slave_alloc,
+ .slave_configure = mptscsih_slave_configure,
+ .target_destroy = mptfc_target_destroy,
+ .slave_destroy = mptscsih_slave_destroy,
+ .change_queue_depth = mptscsih_change_queue_depth,
+ .eh_abort_handler = mptfc_abort,
+ .eh_device_reset_handler = mptfc_dev_reset,
+ .eh_bus_reset_handler = mptfc_bus_reset,
+ .eh_host_reset_handler = mptfc_host_reset,
+ .bios_param = mptscsih_bios_param,
+ .can_queue = MPT_FC_CAN_QUEUE,
+ .this_id = -1,
+ .sg_tablesize = MPT_SCSI_SG_DEPTH,
+ .max_sectors = 8192,
+ .cmd_per_lun = 7,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = mptscsih_host_attrs,
+};
+
+/****************************************************************************
+ * Supported hardware
+ */
+
+static struct pci_device_id mptfc_pci_table[] = {
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC909,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC919X,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC929X,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC939X,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949X,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVICEID_FC949E,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_BROCADE, MPI_MANUFACTPAGE_DEVICEID_FC949E,
+ PCI_ANY_ID, PCI_ANY_ID },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, mptfc_pci_table);
+
+static struct scsi_transport_template *mptfc_transport_template = NULL;
+
+static struct fc_function_template mptfc_transport_functions = {
+ .dd_fcrport_size = 8,
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_port_id = 1,
+ .show_rport_supported_classes = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = mptfc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .show_host_supported_speeds = 1,
+ .show_host_maxframe_size = 1,
+ .show_host_speed = 1,
+ .show_host_fabric_name = 1,
+ .show_host_port_type = 1,
+ .show_host_port_state = 1,
+ .show_host_symbolic_name = 1,
+};
+
+static int
+mptfc_block_error_handler(struct scsi_cmnd *SCpnt,
+ int (*func)(struct scsi_cmnd *SCpnt),
+ const char *caller)
+{
+ MPT_SCSI_HOST *hd;
+ struct scsi_device *sdev = SCpnt->device;
+ struct Scsi_Host *shost = sdev->host;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ unsigned long flags;
+ int ready;
+ MPT_ADAPTER *ioc;
+ int loops = 40; /* seconds */
+
+ hd = shost_priv(SCpnt->device->host);
+ ioc = hd->ioc;
+ spin_lock_irqsave(shost->host_lock, flags);
+ while ((ready = fc_remote_port_chkready(rport) >> 16) == DID_IMM_RETRY
+ || (loops > 0 && ioc->active == 0)) {
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
+ "mptfc_block_error_handler.%d: %d:%d, port status is "
+ "%x, active flag %d, deferring %s recovery.\n",
+ ioc->name, ioc->sh->host_no,
+ SCpnt->device->id, SCpnt->device->lun,
+ ready, ioc->active, caller));
+ msleep(1000);
+ spin_lock_irqsave(shost->host_lock, flags);
+ loops --;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (ready == DID_NO_CONNECT || !SCpnt->device->hostdata
+ || ioc->active == 0) {
+ dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s.%d: %d:%d, failing recovery, "
+ "port state %x, active %d, vdevice %p.\n", caller,
+ ioc->name, ioc->sh->host_no,
+ SCpnt->device->id, SCpnt->device->lun, ready,
+ ioc->active, SCpnt->device->hostdata));
+ return FAILED;
+ }
+ dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s.%d: %d:%d, executing recovery.\n", caller,
+ ioc->name, ioc->sh->host_no,
+ SCpnt->device->id, SCpnt->device->lun));
+ return (*func)(SCpnt);
+}
+
+static int
+mptfc_abort(struct scsi_cmnd *SCpnt)
+{
+ return
+ mptfc_block_error_handler(SCpnt, mptscsih_abort, __func__);
+}
+
+static int
+mptfc_dev_reset(struct scsi_cmnd *SCpnt)
+{
+ return
+ mptfc_block_error_handler(SCpnt, mptscsih_dev_reset, __func__);
+}
+
+static int
+mptfc_bus_reset(struct scsi_cmnd *SCpnt)
+{
+ return
+ mptfc_block_error_handler(SCpnt, mptscsih_bus_reset, __func__);
+}
+
+static int
+mptfc_host_reset(struct scsi_cmnd *SCpnt)
+{
+ return
+ mptfc_block_error_handler(SCpnt, mptscsih_host_reset, __func__);
+}
+
+static void
+mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+ if (timeout > 0)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = mptfc_dev_loss_tmo;
+}
+
+static int
+mptfc_FcDevPage0_cmp_func(const void *a, const void *b)
+{
+ FCDevicePage0_t **aa = (FCDevicePage0_t **)a;
+ FCDevicePage0_t **bb = (FCDevicePage0_t **)b;
+
+ if ((*aa)->CurrentBus == (*bb)->CurrentBus) {
+ if ((*aa)->CurrentTargetID == (*bb)->CurrentTargetID)
+ return 0;
+ if ((*aa)->CurrentTargetID < (*bb)->CurrentTargetID)
+ return -1;
+ return 1;
+ }
+ if ((*aa)->CurrentBus < (*bb)->CurrentBus)
+ return -1;
+ return 1;
+}
+
+static int
+mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
+ void(*func)(MPT_ADAPTER *ioc,int channel, FCDevicePage0_t *arg))
+{
+ ConfigPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ FCDevicePage0_t *ppage0_alloc, *fc;
+ dma_addr_t page0_dma;
+ int data_sz;
+ int ii;
+
+ FCDevicePage0_t *p0_array=NULL, *p_p0;
+ FCDevicePage0_t **pp0_array=NULL, **p_pp0;
+
+ int rc = -ENOMEM;
+ U32 port_id = 0xffffff;
+ int num_targ = 0;
+ int max_bus = ioc->facts.MaxBuses;
+ int max_targ;
+
+ max_targ = (ioc->facts.MaxDevices == 0) ? 256 : ioc->facts.MaxDevices;
+
+ data_sz = sizeof(FCDevicePage0_t) * max_bus * max_targ;
+ p_p0 = p0_array = kzalloc(data_sz, GFP_KERNEL);
+ if (!p0_array)
+ goto out;
+
+ data_sz = sizeof(FCDevicePage0_t *) * max_bus * max_targ;
+ p_pp0 = pp0_array = kzalloc(data_sz, GFP_KERNEL);
+ if (!pp0_array)
+ goto out;
+
+ do {
+ /* Get FC Device Page 0 header */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_FC_DEVICE;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.pageAddr = port_id;
+ cfg.timeout = 0;
+
+ if ((rc = mpt_config(ioc, &cfg)) != 0)
+ break;
+
+ if (hdr.PageLength <= 0)
+ break;
+
+ data_sz = hdr.PageLength * 4;
+ ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz,
+ &page0_dma);
+ rc = -ENOMEM;
+ if (!ppage0_alloc)
+ break;
+
+ cfg.physAddr = page0_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if ((rc = mpt_config(ioc, &cfg)) == 0) {
+ ppage0_alloc->PortIdentifier =
+ le32_to_cpu(ppage0_alloc->PortIdentifier);
+
+ ppage0_alloc->WWNN.Low =
+ le32_to_cpu(ppage0_alloc->WWNN.Low);
+
+ ppage0_alloc->WWNN.High =
+ le32_to_cpu(ppage0_alloc->WWNN.High);
+
+ ppage0_alloc->WWPN.Low =
+ le32_to_cpu(ppage0_alloc->WWPN.Low);
+
+ ppage0_alloc->WWPN.High =
+ le32_to_cpu(ppage0_alloc->WWPN.High);
+
+ ppage0_alloc->BBCredit =
+ le16_to_cpu(ppage0_alloc->BBCredit);
+
+ ppage0_alloc->MaxRxFrameSize =
+ le16_to_cpu(ppage0_alloc->MaxRxFrameSize);
+
+ port_id = ppage0_alloc->PortIdentifier;
+ num_targ++;
+ *p_p0 = *ppage0_alloc; /* save data */
+ *p_pp0++ = p_p0++; /* save addr */
+ }
+ pci_free_consistent(ioc->pcidev, data_sz,
+ (u8 *) ppage0_alloc, page0_dma);
+ if (rc != 0)
+ break;
+
+ } while (port_id <= 0xff0000);
+
+ if (num_targ) {
+ /* sort array */
+ if (num_targ > 1)
+ sort (pp0_array, num_targ, sizeof(FCDevicePage0_t *),
+ mptfc_FcDevPage0_cmp_func, NULL);
+ /* call caller's func for each targ */
+ for (ii = 0; ii < num_targ; ii++) {
+ fc = *(pp0_array+ii);
+ func(ioc, ioc_port, fc);
+ }
+ }
+
+ out:
+ kfree(pp0_array);
+ kfree(p0_array);
+ return rc;
+}
+
+static int
+mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid)
+{
+ /* not currently usable */
+ if (pg0->Flags & (MPI_FC_DEVICE_PAGE0_FLAGS_PLOGI_INVALID |
+ MPI_FC_DEVICE_PAGE0_FLAGS_PRLI_INVALID))
+ return -1;
+
+ if (!(pg0->Flags & MPI_FC_DEVICE_PAGE0_FLAGS_TARGETID_BUS_VALID))
+ return -1;
+
+ if (!(pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_TARGET))
+ return -1;
+
+ /*
+ * board data structure already normalized to platform endianness
+ * shifted to avoid unaligned access on 64 bit architecture
+ */
+ rid->node_name = ((u64)pg0->WWNN.High) << 32 | (u64)pg0->WWNN.Low;
+ rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low;
+ rid->port_id = pg0->PortIdentifier;
+ rid->roles = FC_RPORT_ROLE_UNKNOWN;
+
+ return 0;
+}
+
+static void
+mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0)
+{
+ struct fc_rport_identifiers rport_ids;
+ struct fc_rport *rport;
+ struct mptfc_rport_info *ri;
+ int new_ri = 1;
+ u64 pn, nn;
+ VirtTarget *vtarget;
+ u32 roles = FC_RPORT_ROLE_UNKNOWN;
+
+ if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0)
+ return;
+
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+ if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+
+ /* scan list looking for a match */
+ list_for_each_entry(ri, &ioc->fc_rports, list) {
+ pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
+ if (pn == rport_ids.port_name) { /* match */
+ list_move_tail(&ri->list, &ioc->fc_rports);
+ new_ri = 0;
+ break;
+ }
+ }
+ if (new_ri) { /* allocate one */
+ ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL);
+ if (!ri)
+ return;
+ list_add_tail(&ri->list, &ioc->fc_rports);
+ }
+
+ ri->pg0 = *pg0; /* add/update pg0 data */
+ ri->flags &= ~MPT_RPORT_INFO_FLAGS_MISSING;
+
+ /* MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */
+ if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) {
+ ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED;
+ rport = fc_remote_port_add(ioc->sh, channel, &rport_ids);
+ if (rport) {
+ ri->rport = rport;
+ if (new_ri) /* may have been reset by user */
+ rport->dev_loss_tmo = mptfc_dev_loss_tmo;
+ /*
+ * if already mapped, remap here. If not mapped,
+ * target_alloc will allocate vtarget and map,
+ * slave_alloc will fill in vdevice from vtarget.
+ */
+ if (ri->starget) {
+ vtarget = ri->starget->hostdata;
+ if (vtarget) {
+ vtarget->id = pg0->CurrentTargetID;
+ vtarget->channel = pg0->CurrentBus;
+ vtarget->deleted = 0;
+ }
+ }
+ *((struct mptfc_rport_info **)rport->dd_data) = ri;
+ /* scan will be scheduled once rport becomes a target */
+ fc_remote_port_rolechg(rport,roles);
+
+ pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
+ nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
+ dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
+ "mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, "
+ "rport tid %d, tmo %d\n",
+ ioc->name,
+ ioc->sh->host_no,
+ pg0->PortIdentifier,
+ (unsigned long long)nn,
+ (unsigned long long)pn,
+ pg0->CurrentTargetID,
+ ri->rport->scsi_target_id,
+ ri->rport->dev_loss_tmo));
+ } else {
+ list_del(&ri->list);
+ kfree(ri);
+ ri = NULL;
+ }
+ }
+}
+
+/*
+ *	OS entry point to allow the host driver to free allocated memory.
+ *	Called if no device is present or the device is being unloaded.
+ */
+static void
+mptfc_target_destroy(struct scsi_target *starget)
+{
+ struct fc_rport *rport;
+ struct mptfc_rport_info *ri;
+
+ rport = starget_to_rport(starget);
+ if (rport) {
+ ri = *((struct mptfc_rport_info **)rport->dd_data);
+ if (ri) /* better be! */
+ ri->starget = NULL;
+ }
+ if (starget->hostdata)
+ kfree(starget->hostdata);
+ starget->hostdata = NULL;
+}
+
+/*
+ * OS entry point to allow host driver to alloc memory
+ *	for each scsi target. Called once per device during the bus scan.
+ * Return non-zero if allocation fails.
+ */
+static int
+mptfc_target_alloc(struct scsi_target *starget)
+{
+ VirtTarget *vtarget;
+ struct fc_rport *rport;
+ struct mptfc_rport_info *ri;
+ int rc;
+
+ vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
+ if (!vtarget)
+ return -ENOMEM;
+ starget->hostdata = vtarget;
+
+ rc = -ENODEV;
+ rport = starget_to_rport(starget);
+ if (rport) {
+ ri = *((struct mptfc_rport_info **)rport->dd_data);
+ if (ri) { /* better be! */
+ vtarget->id = ri->pg0.CurrentTargetID;
+ vtarget->channel = ri->pg0.CurrentBus;
+ ri->starget = starget;
+ rc = 0;
+ }
+ }
+ if (rc != 0) {
+ kfree(vtarget);
+ starget->hostdata = NULL;
+ }
+
+ return rc;
+}
+/*
+ *	mptfc_dump_lun_info - log rport/LUN details at FC debug level
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@rport: FC remote port for the target
+ *	@sdev: SCSI device being added
+ *	@vtarget: virtual target bound to @sdev's scsi_target
+ */
+static void
+mptfc_dump_lun_info(MPT_ADAPTER *ioc, struct fc_rport *rport, struct scsi_device *sdev,
+ VirtTarget *vtarget)
+{
+ u64 nn, pn;
+ struct mptfc_rport_info *ri;
+
+ ri = *((struct mptfc_rport_info **)rport->dd_data);
+ pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low;
+ nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low;
+ dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
+ "mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, "
+ "CurrentTargetID %d, %x %llx %llx\n",
+ ioc->name,
+ sdev->host->host_no,
+ vtarget->num_luns,
+ sdev->id, ri->pg0.CurrentTargetID,
+ ri->pg0.PortIdentifier,
+ (unsigned long long)pn,
+ (unsigned long long)nn));
+}
+
+
+/*
+ * OS entry point to allow host driver to alloc memory
+ *	for each scsi device. Called once per device during the bus scan.
+ * Return non-zero if allocation fails.
+ * Init memory once per LUN.
+ */
+static int
+mptfc_slave_alloc(struct scsi_device *sdev)
+{
+ MPT_SCSI_HOST *hd;
+ VirtTarget *vtarget;
+ VirtDevice *vdevice;
+ struct scsi_target *starget;
+ struct fc_rport *rport;
+ MPT_ADAPTER *ioc;
+
+ starget = scsi_target(sdev);
+ rport = starget_to_rport(starget);
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ hd = shost_priv(sdev->host);
+ ioc = hd->ioc;
+
+ vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
+ if (!vdevice) {
+ printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
+ ioc->name, sizeof(VirtDevice));
+ return -ENOMEM;
+ }
+
+
+ sdev->hostdata = vdevice;
+ vtarget = starget->hostdata;
+
+ if (vtarget->num_luns == 0) {
+ vtarget->ioc_id = ioc->id;
+ vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
+ }
+
+ vdevice->vtarget = vtarget;
+ vdevice->lun = sdev->lun;
+
+ vtarget->num_luns++;
+
+
+ mptfc_dump_lun_info(ioc, rport, sdev, vtarget);
+
+ return 0;
+}
+
+static int
+mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+ struct mptfc_rport_info *ri;
+ struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
+ int err;
+ VirtDevice *vdevice = SCpnt->device->hostdata;
+
+ if (!vdevice || !vdevice->vtarget) {
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ return 0;
+ }
+
+ err = fc_remote_port_chkready(rport);
+ if (unlikely(err)) {
+ SCpnt->result = err;
+ done(SCpnt);
+ return 0;
+ }
+
+ /* dd_data is null until finished adding target */
+ ri = *((struct mptfc_rport_info **)rport->dd_data);
+ if (unlikely(!ri)) {
+ SCpnt->result = DID_IMM_RETRY << 16;
+ done(SCpnt);
+ return 0;
+ }
+
+ return mptscsih_qcmd(SCpnt,done);
+}
+
+static DEF_SCSI_QCMD(mptfc_qcmd)
+
+/*
+ *	mptfc_display_port_link_speed - display the port link speed
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @portnum: IOC Port number
+ * @pp0dest: port page0 data payload
+ *
+ */
+static void
+mptfc_display_port_link_speed(MPT_ADAPTER *ioc, int portnum, FCPortPage0_t *pp0dest)
+{
+ u8 old_speed, new_speed, state;
+ char *old, *new;
+
+ if (portnum >= 2)
+ return;
+
+ old_speed = ioc->fc_link_speed[portnum];
+ new_speed = pp0dest->CurrentSpeed;
+ state = pp0dest->PortState;
+
+ if (state != MPI_FCPORTPAGE0_PORTSTATE_OFFLINE &&
+ new_speed != MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN) {
+
+ old = old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" :
+ old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" :
+ old_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT ? "4 Gbps" :
+ "Unknown";
+ new = new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT ? "1 Gbps" :
+ new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT ? "2 Gbps" :
+ new_speed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT ? "4 Gbps" :
+ "Unknown";
+ if (old_speed == 0)
+ printk(MYIOC_s_NOTE_FMT
+ "FC Link Established, Speed = %s\n",
+ ioc->name, new);
+ else if (old_speed != new_speed)
+ printk(MYIOC_s_WARN_FMT
+ "FC Link Speed Change, Old Speed = %s, New Speed = %s\n",
+ ioc->name, old, new);
+
+ ioc->fc_link_speed[portnum] = new_speed;
+ }
+}
+
+/*
+ * mptfc_GetFcPortPage0 - Fetch FCPort config Page0.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @portnum: IOC Port number
+ *
+ * Return: 0 for success
+ * -ENOMEM if no memory available
+ * -EPERM if not allowed due to ISR context
+ * -EAGAIN if no msg frames currently available
+ * -EFAULT for non-successful reply or no reply (timeout)
+ * -EINVAL portnum arg out of range (hardwired to two elements)
+ */
+static int
+mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
+{
+ ConfigPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ FCPortPage0_t *ppage0_alloc;
+ FCPortPage0_t *pp0dest;
+ dma_addr_t page0_dma;
+ int data_sz;
+ int copy_sz;
+ int rc;
+ int count = 400;
+
+ if (portnum > 1)
+ return -EINVAL;
+
+ /* Get FCPort Page 0 header */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.pageAddr = portnum;
+ cfg.timeout = 0;
+
+ if ((rc = mpt_config(ioc, &cfg)) != 0)
+ return rc;
+
+ if (hdr.PageLength == 0)
+ return 0;
+
+ data_sz = hdr.PageLength * 4;
+ rc = -ENOMEM;
+ ppage0_alloc = (FCPortPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+ if (ppage0_alloc) {
+
+ try_again:
+ memset((u8 *)ppage0_alloc, 0, data_sz);
+ cfg.physAddr = page0_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if ((rc = mpt_config(ioc, &cfg)) == 0) {
+ /* save the data */
+ pp0dest = &ioc->fc_port_page0[portnum];
+ copy_sz = min_t(int, sizeof(FCPortPage0_t), data_sz);
+ memcpy(pp0dest, ppage0_alloc, copy_sz);
+
+ /*
+ * Normalize endianness of structure data,
+ * by byte-swapping all > 1 byte fields!
+ */
+ pp0dest->Flags = le32_to_cpu(pp0dest->Flags);
+ pp0dest->PortIdentifier = le32_to_cpu(pp0dest->PortIdentifier);
+ pp0dest->WWNN.Low = le32_to_cpu(pp0dest->WWNN.Low);
+ pp0dest->WWNN.High = le32_to_cpu(pp0dest->WWNN.High);
+ pp0dest->WWPN.Low = le32_to_cpu(pp0dest->WWPN.Low);
+ pp0dest->WWPN.High = le32_to_cpu(pp0dest->WWPN.High);
+ pp0dest->SupportedServiceClass = le32_to_cpu(pp0dest->SupportedServiceClass);
+ pp0dest->SupportedSpeeds = le32_to_cpu(pp0dest->SupportedSpeeds);
+ pp0dest->CurrentSpeed = le32_to_cpu(pp0dest->CurrentSpeed);
+ pp0dest->MaxFrameSize = le32_to_cpu(pp0dest->MaxFrameSize);
+ pp0dest->FabricWWNN.Low = le32_to_cpu(pp0dest->FabricWWNN.Low);
+ pp0dest->FabricWWNN.High = le32_to_cpu(pp0dest->FabricWWNN.High);
+ pp0dest->FabricWWPN.Low = le32_to_cpu(pp0dest->FabricWWPN.Low);
+ pp0dest->FabricWWPN.High = le32_to_cpu(pp0dest->FabricWWPN.High);
+ pp0dest->DiscoveredPortsCount = le32_to_cpu(pp0dest->DiscoveredPortsCount);
+ pp0dest->MaxInitiators = le32_to_cpu(pp0dest->MaxInitiators);
+
+ /*
+ * if still doing discovery,
+ * hang loose a while until finished
+ */
+ if ((pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN) ||
+ (pp0dest->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE &&
+ (pp0dest->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK)
+ == MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT)) {
+ if (count-- > 0) {
+ msleep(100);
+ goto try_again;
+ }
+ printk(MYIOC_s_INFO_FMT "Firmware discovery not"
+ " complete.\n",
+ ioc->name);
+ }
+ mptfc_display_port_link_speed(ioc, portnum, pp0dest);
+ }
+
+ pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
+ }
+
+ return rc;
+}
+
+static int
+mptfc_WriteFcPortPage1(MPT_ADAPTER *ioc, int portnum)
+{
+ ConfigPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ int rc;
+
+ if (portnum > 1)
+ return -EINVAL;
+
+ if (!(ioc->fc_data.fc_port_page1[portnum].data))
+ return -EINVAL;
+
+ /* get fcport page 1 header */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 1;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.pageAddr = portnum;
+ cfg.timeout = 0;
+
+ if ((rc = mpt_config(ioc, &cfg)) != 0)
+ return rc;
+
+ if (hdr.PageLength == 0)
+ return -ENODEV;
+
+ if (hdr.PageLength*4 != ioc->fc_data.fc_port_page1[portnum].pg_sz)
+ return -EINVAL;
+
+ cfg.physAddr = ioc->fc_data.fc_port_page1[portnum].dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ cfg.dir = 1;
+
+ rc = mpt_config(ioc, &cfg);
+
+ return rc;
+}
+
+static int
+mptfc_GetFcPortPage1(MPT_ADAPTER *ioc, int portnum)
+{
+ ConfigPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ FCPortPage1_t *page1_alloc;
+ dma_addr_t page1_dma;
+ int data_sz;
+ int rc;
+
+ if (portnum > 1)
+ return -EINVAL;
+
+ /* get fcport page 1 header */
+ hdr.PageVersion = 0;
+ hdr.PageLength = 0;
+ hdr.PageNumber = 1;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0;
+ cfg.pageAddr = portnum;
+ cfg.timeout = 0;
+
+ if ((rc = mpt_config(ioc, &cfg)) != 0)
+ return rc;
+
+ if (hdr.PageLength == 0)
+ return -ENODEV;
+
+start_over:
+
+ if (ioc->fc_data.fc_port_page1[portnum].data == NULL) {
+ data_sz = hdr.PageLength * 4;
+ if (data_sz < sizeof(FCPortPage1_t))
+ data_sz = sizeof(FCPortPage1_t);
+
+ page1_alloc = (FCPortPage1_t *) pci_alloc_consistent(ioc->pcidev,
+ data_sz,
+ &page1_dma);
+ if (!page1_alloc)
+ return -ENOMEM;
+ }
+ else {
+ page1_alloc = ioc->fc_data.fc_port_page1[portnum].data;
+ page1_dma = ioc->fc_data.fc_port_page1[portnum].dma;
+ data_sz = ioc->fc_data.fc_port_page1[portnum].pg_sz;
+ if (hdr.PageLength * 4 > data_sz) {
+ ioc->fc_data.fc_port_page1[portnum].data = NULL;
+ pci_free_consistent(ioc->pcidev, data_sz, (u8 *)
+ page1_alloc, page1_dma);
+ goto start_over;
+ }
+ }
+
+ memset(page1_alloc,0,data_sz);
+
+ cfg.physAddr = page1_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if ((rc = mpt_config(ioc, &cfg)) == 0) {
+ ioc->fc_data.fc_port_page1[portnum].data = page1_alloc;
+ ioc->fc_data.fc_port_page1[portnum].pg_sz = data_sz;
+ ioc->fc_data.fc_port_page1[portnum].dma = page1_dma;
+ }
+ else {
+ ioc->fc_data.fc_port_page1[portnum].data = NULL;
+ pci_free_consistent(ioc->pcidev, data_sz, (u8 *)
+ page1_alloc, page1_dma);
+ }
+
+ return rc;
+}
+
+static void
+mptfc_SetFcPortPage1_defaults(MPT_ADAPTER *ioc)
+{
+ int ii;
+ FCPortPage1_t *pp1;
+
+ #define MPTFC_FW_DEVICE_TIMEOUT (1)
+ #define MPTFC_FW_IO_PEND_TIMEOUT (1)
+ #define ON_FLAGS (MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY)
+ #define OFF_FLAGS (MPI_FCPORTPAGE1_FLAGS_VERBOSE_RESCAN_EVENTS)
+
+ for (ii=0; ii<ioc->facts.NumberOfPorts; ii++) {
+ if (mptfc_GetFcPortPage1(ioc, ii) != 0)
+ continue;
+ pp1 = ioc->fc_data.fc_port_page1[ii].data;
+ if ((pp1->InitiatorDeviceTimeout == MPTFC_FW_DEVICE_TIMEOUT)
+ && (pp1->InitiatorIoPendTimeout == MPTFC_FW_IO_PEND_TIMEOUT)
+ && ((pp1->Flags & ON_FLAGS) == ON_FLAGS)
+ && ((pp1->Flags & OFF_FLAGS) == 0))
+ continue;
+ pp1->InitiatorDeviceTimeout = MPTFC_FW_DEVICE_TIMEOUT;
+ pp1->InitiatorIoPendTimeout = MPTFC_FW_IO_PEND_TIMEOUT;
+ pp1->Flags &= ~OFF_FLAGS;
+ pp1->Flags |= ON_FLAGS;
+ mptfc_WriteFcPortPage1(ioc, ii);
+ }
+}
+
+
+static void
+mptfc_init_host_attr(MPT_ADAPTER *ioc,int portnum)
+{
+ unsigned class = 0;
+ unsigned cos = 0;
+ unsigned speed;
+ unsigned port_type;
+ unsigned port_state;
+ FCPortPage0_t *pp0;
+ struct Scsi_Host *sh;
+ char *sn;
+
+ /* don't know what to do as only one scsi (fc) host was allocated */
+ if (portnum != 0)
+ return;
+
+ pp0 = &ioc->fc_port_page0[portnum];
+ sh = ioc->sh;
+
+ sn = fc_host_symbolic_name(sh);
+ snprintf(sn, FC_SYMBOLIC_NAME_SIZE, "%s %s%08xh",
+ ioc->prod_name,
+ MPT_FW_REV_MAGIC_ID_STRING,
+ ioc->facts.FWVersion.Word);
+
+ fc_host_tgtid_bind_type(sh) = FC_TGTID_BIND_BY_WWPN;
+
+ fc_host_maxframe_size(sh) = pp0->MaxFrameSize;
+
+ fc_host_node_name(sh) =
+ (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low;
+
+ fc_host_port_name(sh) =
+ (u64)pp0->WWPN.High << 32 | (u64)pp0->WWPN.Low;
+
+ fc_host_port_id(sh) = pp0->PortIdentifier;
+
+ class = pp0->SupportedServiceClass;
+ if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_1)
+ cos |= FC_COS_CLASS1;
+ if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_2)
+ cos |= FC_COS_CLASS2;
+ if (class & MPI_FCPORTPAGE0_SUPPORT_CLASS_3)
+ cos |= FC_COS_CLASS3;
+ fc_host_supported_classes(sh) = cos;
+
+ if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT)
+ speed = FC_PORTSPEED_1GBIT;
+ else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT)
+ speed = FC_PORTSPEED_2GBIT;
+ else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT)
+ speed = FC_PORTSPEED_4GBIT;
+ else if (pp0->CurrentSpeed == MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT)
+ speed = FC_PORTSPEED_10GBIT;
+ else
+ speed = FC_PORTSPEED_UNKNOWN;
+ fc_host_speed(sh) = speed;
+
+ speed = 0;
+ if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED)
+ speed |= FC_PORTSPEED_1GBIT;
+ if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED)
+ speed |= FC_PORTSPEED_2GBIT;
+ if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED)
+ speed |= FC_PORTSPEED_4GBIT;
+ if (pp0->SupportedSpeeds & MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED)
+ speed |= FC_PORTSPEED_10GBIT;
+ fc_host_supported_speeds(sh) = speed;
+
+ port_state = FC_PORTSTATE_UNKNOWN;
+ if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_ONLINE)
+ port_state = FC_PORTSTATE_ONLINE;
+ else if (pp0->PortState == MPI_FCPORTPAGE0_PORTSTATE_OFFLINE)
+ port_state = FC_PORTSTATE_LINKDOWN;
+ fc_host_port_state(sh) = port_state;
+
+ port_type = FC_PORTTYPE_UNKNOWN;
+ if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT)
+ port_type = FC_PORTTYPE_PTP;
+ else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP)
+ port_type = FC_PORTTYPE_LPORT;
+ else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP)
+ port_type = FC_PORTTYPE_NLPORT;
+ else if (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT)
+ port_type = FC_PORTTYPE_NPORT;
+ fc_host_port_type(sh) = port_type;
+
+ fc_host_fabric_name(sh) =
+ (pp0->Flags & MPI_FCPORTPAGE0_FLAGS_FABRIC_WWN_VALID) ?
+ (u64) pp0->FabricWWNN.High << 32 | (u64) pp0->FabricWWPN.Low :
+ (u64)pp0->WWNN.High << 32 | (u64)pp0->WWNN.Low;
+
+}
+
+static void
+mptfc_link_status_change(struct work_struct *work)
+{
+ MPT_ADAPTER *ioc =
+ container_of(work, MPT_ADAPTER, fc_rescan_work);
+ int ii;
+
+ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++)
+ (void) mptfc_GetFcPortPage0(ioc, ii);
+
+}
+
+static void
+mptfc_setup_reset(struct work_struct *work)
+{
+ MPT_ADAPTER *ioc =
+ container_of(work, MPT_ADAPTER, fc_setup_reset_work);
+ u64 pn;
+ struct mptfc_rport_info *ri;
+ struct scsi_target *starget;
+ VirtTarget *vtarget;
+
+ /* reset about to happen, delete (block) all rports */
+ list_for_each_entry(ri, &ioc->fc_rports, list) {
+ if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
+ ri->flags &= ~MPT_RPORT_INFO_FLAGS_REGISTERED;
+ fc_remote_port_delete(ri->rport); /* won't sleep */
+ ri->rport = NULL;
+ starget = ri->starget;
+ if (starget) {
+ vtarget = starget->hostdata;
+ if (vtarget)
+ vtarget->deleted = 1;
+ }
+
+ pn = (u64)ri->pg0.WWPN.High << 32 |
+ (u64)ri->pg0.WWPN.Low;
+ dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
+ "mptfc_setup_reset.%d: %llx deleted\n",
+ ioc->name,
+ ioc->sh->host_no,
+ (unsigned long long)pn));
+ }
+ }
+}
+
+static void
+mptfc_rescan_devices(struct work_struct *work)
+{
+ MPT_ADAPTER *ioc =
+ container_of(work, MPT_ADAPTER, fc_rescan_work);
+ int ii;
+ u64 pn;
+ struct mptfc_rport_info *ri;
+ struct scsi_target *starget;
+ VirtTarget *vtarget;
+
+ /* start by tagging all ports as missing */
+ list_for_each_entry(ri, &ioc->fc_rports, list) {
+ if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) {
+ ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING;
+ }
+ }
+
+ /*
+ * now rescan devices known to adapter,
+ * will reregister existing rports
+ */
+ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
+ (void) mptfc_GetFcPortPage0(ioc, ii);
+ mptfc_init_host_attr(ioc, ii); /* refresh */
+ mptfc_GetFcDevPage0(ioc, ii, mptfc_register_dev);
+ }
+
+ /* delete devices still missing */
+ list_for_each_entry(ri, &ioc->fc_rports, list) {
+ /* if newly missing, delete it */
+ if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) {
+
+ ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED|
+ MPT_RPORT_INFO_FLAGS_MISSING);
+ fc_remote_port_delete(ri->rport); /* won't sleep */
+ ri->rport = NULL;
+ starget = ri->starget;
+ if (starget) {
+ vtarget = starget->hostdata;
+ if (vtarget)
+ vtarget->deleted = 1;
+ }
+
+ pn = (u64)ri->pg0.WWPN.High << 32 |
+ (u64)ri->pg0.WWPN.Low;
+ dfcprintk (ioc, printk(MYIOC_s_DEBUG_FMT
+ "mptfc_rescan.%d: %llx deleted\n",
+ ioc->name,
+ ioc->sh->host_no,
+ (unsigned long long)pn));
+ }
+ }
+}
+
+static int
+mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *sh;
+ MPT_SCSI_HOST *hd;
+ MPT_ADAPTER *ioc;
+ unsigned long flags;
+ int ii;
+ int numSGE = 0;
+ int scale;
+ int ioc_cap;
+ int error=0;
+ int r;
+
+ if ((r = mpt_attach(pdev,id)) != 0)
+ return r;
+
+ ioc = pci_get_drvdata(pdev);
+ ioc->DoneCtx = mptfcDoneCtx;
+ ioc->TaskCtx = mptfcTaskCtx;
+ ioc->InternalCtx = mptfcInternalCtx;
+
+ /* Added sanity check on readiness of the MPT adapter.
+ */
+ if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
+ printk(MYIOC_s_WARN_FMT
+ "Skipping because it's not operational!\n",
+ ioc->name);
+ error = -ENODEV;
+ goto out_mptfc_probe;
+ }
+
+ if (!ioc->active) {
+ printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
+ ioc->name);
+ error = -ENODEV;
+ goto out_mptfc_probe;
+ }
+
+ /* Sanity check - ensure at least 1 port is INITIATOR capable
+ */
+ ioc_cap = 0;
+ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
+ if (ioc->pfacts[ii].ProtocolFlags &
+ MPI_PORTFACTS_PROTOCOL_INITIATOR)
+ ioc_cap ++;
+ }
+
+ if (!ioc_cap) {
+ printk(MYIOC_s_WARN_FMT
+ "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
+ ioc->name, ioc);
+ return 0;
+ }
+
+ sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST));
+
+ if (!sh) {
+ printk(MYIOC_s_WARN_FMT
+ "Unable to register controller with SCSI subsystem\n",
+ ioc->name);
+ error = -1;
+ goto out_mptfc_probe;
+ }
+
+ spin_lock_init(&ioc->fc_rescan_work_lock);
+ INIT_WORK(&ioc->fc_rescan_work, mptfc_rescan_devices);
+ INIT_WORK(&ioc->fc_setup_reset_work, mptfc_setup_reset);
+ INIT_WORK(&ioc->fc_lsc_work, mptfc_link_status_change);
+
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+
+ /* Attach the SCSI Host to the IOC structure
+ */
+ ioc->sh = sh;
+
+ sh->io_port = 0;
+ sh->n_io_port = 0;
+ sh->irq = 0;
+
+ /* set 16 byte cdb's */
+ sh->max_cmd_len = 16;
+
+ sh->max_id = ioc->pfacts->MaxDevices;
+ sh->max_lun = max_lun;
+
+ /* Required entry.
+ */
+ sh->unique_id = ioc->id;
+
+ /* Verify that we won't exceed the maximum
+ * number of chain buffers
+ * We can optimize: ZZ = req_sz/sizeof(SGE)
+ * For 32bit SGE's:
+ * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
+ * + (req_sz - 64)/sizeof(SGE)
+ * A slightly different algorithm is required for
+ * 64bit SGEs.
+ */
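+	/* Worked example (hypothetical numbers, for illustration only):
+	 * with req_sz = 128, 64-bit SGEs (SGE_size = 12) and
+	 * MaxChainDepth = 128:
+	 *	scale  = 128/12 = 10
+	 *	numSGE = (10-1)*(128-1) + 10 + (128-60)/12
+	 *	       = 1143 + 10 + 5 = 1158
+	 */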
+ scale = ioc->req_sz/ioc->SGE_size;
+ if (ioc->sg_addr_size == sizeof(u64)) {
+ numSGE = (scale - 1) *
+ (ioc->facts.MaxChainDepth-1) + scale +
+ (ioc->req_sz - 60) / ioc->SGE_size;
+ } else {
+ numSGE = 1 + (scale - 1) *
+ (ioc->facts.MaxChainDepth-1) + scale +
+ (ioc->req_sz - 64) / ioc->SGE_size;
+ }
+
+ if (numSGE < sh->sg_tablesize) {
+ /* Reset this value */
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Resetting sg_tablesize to %d from %d\n",
+ ioc->name, numSGE, sh->sg_tablesize));
+ sh->sg_tablesize = numSGE;
+ }
+
+ spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+ hd = shost_priv(sh);
+ hd->ioc = ioc;
+
+ /* SCSI needs scsi_cmnd lookup table!
+ * (with size equal to req_depth*PtrSz!)
+ */
+ ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
+ if (!ioc->ScsiLookup) {
+ error = -ENOMEM;
+ goto out_mptfc_probe;
+ }
+ spin_lock_init(&ioc->scsi_lookup_lock);
+
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
+ ioc->name, ioc->ScsiLookup));
+
+ hd->last_queue_full = 0;
+
+ sh->transportt = mptfc_transport_template;
+ error = scsi_add_host (sh, &ioc->pcidev->dev);
+ if(error) {
+ dprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "scsi_add_host failed\n", ioc->name));
+ goto out_mptfc_probe;
+ }
+
+ /* initialize workqueue */
+
+ snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name),
+ "mptfc_wq_%d", sh->host_no);
+ ioc->fc_rescan_work_q =
+ create_singlethread_workqueue(ioc->fc_rescan_work_q_name);
+ if (!ioc->fc_rescan_work_q)
+ goto out_mptfc_probe;
+
+ /*
+ * Pre-fetch FC port WWN and stuff...
+ * (FCPortPage0_t stuff)
+ */
+ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
+ (void) mptfc_GetFcPortPage0(ioc, ii);
+ }
+ mptfc_SetFcPortPage1_defaults(ioc);
+
+ /*
+ * scan for rports -
+ * by doing it via the workqueue, some locking is eliminated
+ */
+
+ queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work);
+ flush_workqueue(ioc->fc_rescan_work_q);
+
+ return 0;
+
+out_mptfc_probe:
+
+ mptscsih_remove(pdev);
+ return error;
+}
+
+static struct pci_driver mptfc_driver = {
+ .name = "mptfc",
+ .id_table = mptfc_pci_table,
+ .probe = mptfc_probe,
+ .remove = __devexit_p(mptfc_remove),
+ .shutdown = mptscsih_shutdown,
+#ifdef CONFIG_PM
+ .suspend = mptscsih_suspend,
+ .resume = mptscsih_resume,
+#endif
+};
+
+static int
+mptfc_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
+{
+ MPT_SCSI_HOST *hd;
+ u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
+ unsigned long flags;
+ int rc=1;
+
+ if (ioc->bus_type != FC)
+ return 0;
+
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
+ ioc->name, event));
+
+ if (ioc->sh == NULL ||
+ ((hd = shost_priv(ioc->sh)) == NULL))
+ return 1;
+
+ switch (event) {
+ case MPI_EVENT_RESCAN:
+ spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
+ if (ioc->fc_rescan_work_q) {
+ queue_work(ioc->fc_rescan_work_q,
+ &ioc->fc_rescan_work);
+ }
+ spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
+ break;
+ case MPI_EVENT_LINK_STATUS_CHANGE:
+ spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
+ if (ioc->fc_rescan_work_q) {
+ queue_work(ioc->fc_rescan_work_q,
+ &ioc->fc_lsc_work);
+ }
+ spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
+ break;
+ default:
+ rc = mptscsih_event_process(ioc,pEvReply);
+ break;
+ }
+ return rc;
+}
+
+static int
+mptfc_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+ int rc;
+ unsigned long flags;
+
+ rc = mptscsih_ioc_reset(ioc,reset_phase);
+ if ((ioc->bus_type != FC) || (!rc))
+ return rc;
+
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ ": IOC %s_reset routed to FC host driver!\n",ioc->name,
+ reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
+ reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
+
+ if (reset_phase == MPT_IOC_SETUP_RESET) {
+ spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
+ if (ioc->fc_rescan_work_q) {
+ queue_work(ioc->fc_rescan_work_q,
+ &ioc->fc_setup_reset_work);
+ }
+ spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
+ }
+
+ else if (reset_phase == MPT_IOC_PRE_RESET) {
+ }
+
+ else { /* MPT_IOC_POST_RESET */
+ mptfc_SetFcPortPage1_defaults(ioc);
+ spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
+ if (ioc->fc_rescan_work_q) {
+ queue_work(ioc->fc_rescan_work_q,
+ &ioc->fc_rescan_work);
+ }
+ spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
+ }
+ return 1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptfc_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int __init
+mptfc_init(void)
+{
+ int error;
+
+ show_mptmod_ver(my_NAME, my_VERSION);
+
+ /* sanity check module parameters */
+ if (mptfc_dev_loss_tmo <= 0)
+ mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO;
+
+ mptfc_transport_template =
+ fc_attach_transport(&mptfc_transport_functions);
+
+ if (!mptfc_transport_template)
+ return -ENODEV;
+
+ mptfcDoneCtx = mpt_register(mptscsih_io_done, MPTFC_DRIVER,
+ "mptscsih_scandv_complete");
+ mptfcTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTFC_DRIVER,
+ "mptscsih_scandv_complete");
+ mptfcInternalCtx = mpt_register(mptscsih_scandv_complete, MPTFC_DRIVER,
+ "mptscsih_scandv_complete");
+
+ mpt_event_register(mptfcDoneCtx, mptfc_event_process);
+ mpt_reset_register(mptfcDoneCtx, mptfc_ioc_reset);
+
+ error = pci_register_driver(&mptfc_driver);
+ if (error)
+ fc_release_transport(mptfc_transport_template);
+
+ return error;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptfc_remove - Remove fc infrastructure for devices
+ * @pdev: Pointer to pci_dev structure
+ *
+ */
+static void __devexit
+mptfc_remove(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct mptfc_rport_info *p, *n;
+ struct workqueue_struct *work_q;
+ unsigned long flags;
+ int ii;
+
+ /* destroy workqueue */
+ if ((work_q=ioc->fc_rescan_work_q)) {
+ spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
+ ioc->fc_rescan_work_q = NULL;
+ spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);
+ destroy_workqueue(work_q);
+ }
+
+ fc_remove_host(ioc->sh);
+
+ list_for_each_entry_safe(p, n, &ioc->fc_rports, list) {
+ list_del(&p->list);
+ kfree(p);
+ }
+
+ for (ii=0; ii<ioc->facts.NumberOfPorts; ii++) {
+ if (ioc->fc_data.fc_port_page1[ii].data) {
+ pci_free_consistent(ioc->pcidev,
+ ioc->fc_data.fc_port_page1[ii].pg_sz,
+ (u8 *) ioc->fc_data.fc_port_page1[ii].data,
+ ioc->fc_data.fc_port_page1[ii].dma);
+ ioc->fc_data.fc_port_page1[ii].data = NULL;
+ }
+ }
+
+ mptscsih_remove(pdev);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptfc_exit - Unregisters MPT adapter(s)
+ *
+ */
+static void __exit
+mptfc_exit(void)
+{
+ pci_unregister_driver(&mptfc_driver);
+ fc_release_transport(mptfc_transport_template);
+
+ mpt_reset_deregister(mptfcDoneCtx);
+ mpt_event_deregister(mptfcDoneCtx);
+
+ mpt_deregister(mptfcInternalCtx);
+ mpt_deregister(mptfcTaskCtx);
+ mpt_deregister(mptfcDoneCtx);
+}
+
+module_init(mptfc_init);
+module_exit(mptfc_exit);
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
new file mode 100644
index 00000000..cbe96072
--- /dev/null
+++ b/drivers/message/fusion/mptlan.c
@@ -0,0 +1,1544 @@
+/*
+ * linux/drivers/message/fusion/mptlan.c
+ * IP Over Fibre Channel device driver.
+ * For use with LSI Fibre Channel PCI chip/adapters
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 2000-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Define statements used for debugging
+ */
+//#define MPT_LAN_IO_DEBUG
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "mptlan.h"
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#define my_VERSION MPT_LINUX_VERSION_COMMON
+#define MYNAM "mptlan"
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION(my_VERSION);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * MPT LAN message sizes without variable part.
+ */
+#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
+ (sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
+
+#define MPT_LAN_TRANSACTION32_SIZE \
+ (sizeof(SGETransaction32_t) - sizeof(u32))
+
+/*
+ * Fusion MPT LAN private structures
+ */
+
+struct BufferControl {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned int len;
+};
+
+struct mpt_lan_priv {
+ MPT_ADAPTER *mpt_dev;
+ u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
+
+ atomic_t buckets_out; /* number of unused buckets on IOC */
+ int bucketthresh; /* Send more when this many left */
+
+ int *mpt_txfidx; /* Free Tx Context list */
+ int mpt_txfidx_tail;
+ spinlock_t txfidx_lock;
+
+ int *mpt_rxfidx; /* Free Rx Context list */
+ int mpt_rxfidx_tail;
+ spinlock_t rxfidx_lock;
+
+ struct BufferControl *RcvCtl; /* Receive BufferControl structs */
+ struct BufferControl *SendCtl; /* Send BufferControl structs */
+
+ int max_buckets_out; /* Max buckets to send to IOC */
+ int tx_max_out; /* IOC's Tx queue len */
+
+ u32 total_posted;
+ u32 total_received;
+
+ struct delayed_work post_buckets_task;
+ struct net_device *dev;
+ unsigned long post_buckets_active;
+};
+
+struct mpt_lan_ohdr {
+ u16 dtype;
+ u8 daddr[FC_ALEN];
+ u16 stype;
+ u8 saddr[FC_ALEN];
+};
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+/*
+ * Forward protos...
+ */
+static int lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
+ MPT_FRAME_HDR *reply);
+static int mpt_lan_open(struct net_device *dev);
+static int mpt_lan_reset(struct net_device *dev);
+static int mpt_lan_close(struct net_device *dev);
+static void mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv);
+static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
+ int priority);
+static int mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
+static int mpt_lan_receive_post_reply(struct net_device *dev,
+ LANReceivePostReply_t *pRecvRep);
+static int mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
+static int mpt_lan_send_reply(struct net_device *dev,
+ LANSendReply_t *pSendRep);
+static int mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
+static int mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
+static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
+ struct net_device *dev);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Fusion MPT LAN private data
+ */
+static u8 LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
+
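+/*
+ * Defaults sized for the 909-class parts, whose Tx queue is 127 deep;
+ * mpt_register_lan_device() additionally caps these against the IOC's
+ * reported MaxLanBuckets and MPT_TX_MAX_OUT_LIM.
+ */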
+static u32 max_buckets_out = 127;
+static u32 tx_max_out_p = 127 - 16;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * lan_reply - Handle all data sent from the hardware.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @mf: Pointer to original MPT request frame (NULL if TurboReply)
+ * @reply: Pointer to MPT reply frame
+ *
+ * Returns 1 indicating original alloc'd request frame ptr
+ * should be freed, or 0 if it shouldn't.
+ */
+static int
+lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
+{
+ struct net_device *dev = ioc->netdev;
+ int FreeReqFrame = 0;
+
+ dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev)));
+
+// dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
+// mf, reply));
+
+ if (mf == NULL) {
+ u32 tmsg = CAST_PTR_TO_U32(reply);
+
+ dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ tmsg));
+
+ switch (GET_LAN_FORM(tmsg)) {
+
+ // NOTE! (Optimization) First case here is now caught in
+			//  mptbase.c::mpt_interrupt() routine and callback here
+ // is now skipped for this case!
+#if 0
+ case LAN_REPLY_FORM_MESSAGE_CONTEXT:
+// dioprintk((KERN_INFO MYNAM "/lan_reply: "
+// "MessageContext turbo reply received\n"));
+ FreeReqFrame = 1;
+ break;
+#endif
+
+ case LAN_REPLY_FORM_SEND_SINGLE:
+// dioprintk((MYNAM "/lan_reply: "
+// "calling mpt_lan_send_reply (turbo)\n"));
+
+ // Potential BUG here?
+ // FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
+ // If/when mpt_lan_send_turbo would return 1 here,
+ // calling routine (mptbase.c|mpt_interrupt)
+ // would Oops because mf has already been set
+ // to NULL. So after return from this func,
+ // mpt_interrupt() will attempt to put (NULL) mf ptr
+ // item back onto its adapter FreeQ - Oops!:-(
+ // It's Ok, since mpt_lan_send_turbo() *currently*
+ // always returns 0, but..., just in case:
+
+ (void) mpt_lan_send_turbo(dev, tmsg);
+ FreeReqFrame = 0;
+
+ break;
+
+ case LAN_REPLY_FORM_RECEIVE_SINGLE:
+// dioprintk((KERN_INFO MYNAM "@lan_reply: "
+// "rcv-Turbo = %08x\n", tmsg));
+ mpt_lan_receive_post_turbo(dev, tmsg);
+ break;
+
+ default:
+ printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
+ "that I don't know what to do with\n");
+
+ /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
+
+ break;
+ }
+
+ return FreeReqFrame;
+ }
+
+// msg = (u32 *) reply;
+// dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
+// le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
+// le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
+// dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
+// reply->u.hdr.Function));
+
+ switch (reply->u.hdr.Function) {
+
+ case MPI_FUNCTION_LAN_SEND:
+ {
+ LANSendReply_t *pSendRep;
+
+ pSendRep = (LANSendReply_t *) reply;
+ FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
+ break;
+ }
+
+ case MPI_FUNCTION_LAN_RECEIVE:
+ {
+ LANReceivePostReply_t *pRecvRep;
+
+ pRecvRep = (LANReceivePostReply_t *) reply;
+ if (pRecvRep->NumberOfContexts) {
+ mpt_lan_receive_post_reply(dev, pRecvRep);
+ if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
+ FreeReqFrame = 1;
+ } else
+ dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
+ "ReceivePostReply received.\n"));
+ break;
+ }
+
+ case MPI_FUNCTION_LAN_RESET:
+ /* Just a default reply. Might want to check it to
+ * make sure that everything went ok.
+ */
+ FreeReqFrame = 1;
+ break;
+
+ case MPI_FUNCTION_EVENT_NOTIFICATION:
+ case MPI_FUNCTION_EVENT_ACK:
+ /* _EVENT_NOTIFICATION should NOT come down this path any more.
+ * Should be routed to mpt_lan_event_process(), but just in case...
+ */
+ FreeReqFrame = 1;
+ break;
+
+ default:
+ printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
+ "reply that I don't know what to do with\n");
+
+ /* CHECKME! Hmmm... FreeReqFrame is 0 here; is that right? */
+ FreeReqFrame = 1;
+
+ break;
+ }
+
+ return FreeReqFrame;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
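+/*
+ * IOC reset callback.  Phase handling, in brief: MPT_IOC_SETUP_RESET is a
+ * no-op, MPT_IOC_PRE_RESET stops the queue, forgets all outstanding buckets
+ * and rebuilds the Rx free-index stack, and the post-reset phase reposts
+ * receive buckets and wakes the queue again.
+ */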
+static int
+mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+ struct net_device *dev = ioc->netdev;
+ struct mpt_lan_priv *priv;
+
+ if (dev == NULL)
+ return(1);
+ else
+ priv = netdev_priv(dev);
+
+ dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
+ reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
+ reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
+
+ if (priv->mpt_rxfidx == NULL)
+ return (1);
+
+ if (reset_phase == MPT_IOC_SETUP_RESET) {
+ ;
+ } else if (reset_phase == MPT_IOC_PRE_RESET) {
+ int i;
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+
+ dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
+
+ atomic_set(&priv->buckets_out, 0);
+
+ /* Reset Rx Free Tail index and re-populate the queue. */
+ spin_lock_irqsave(&priv->rxfidx_lock, flags);
+ priv->mpt_rxfidx_tail = -1;
+ for (i = 0; i < priv->max_buckets_out; i++)
+ priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
+ spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+ } else {
+ mpt_lan_post_receive_buckets(priv);
+ netif_wake_queue(dev);
+ }
+
+ return 1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
+{
+ dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
+
+ switch (le32_to_cpu(pEvReply->Event)) {
+ case MPI_EVENT_NONE: /* 00 */
+ case MPI_EVENT_LOG_DATA: /* 01 */
+ case MPI_EVENT_STATE_CHANGE: /* 02 */
+ case MPI_EVENT_UNIT_ATTENTION: /* 03 */
+ case MPI_EVENT_IOC_BUS_RESET: /* 04 */
+ case MPI_EVENT_EXT_BUS_RESET: /* 05 */
+ case MPI_EVENT_RESCAN: /* 06 */
+ /* Ok, do we need to do anything here? As far as
+ I can tell, this is when a new device gets added
+ to the loop. */
+ case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */
+ case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */
+ case MPI_EVENT_LOGOUT: /* 09 */
+ case MPI_EVENT_EVENT_CHANGE: /* 0A */
+ default:
+ break;
+ }
+
+ /*
+ * NOTE: pEvent->AckRequired handling now done in mptbase.c;
+ * Do NOT do it here now!
+ */
+
+ return 1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_open(struct net_device *dev)
+{
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (mpt_lan_reset(dev) != 0) {
+ MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+
+ printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
+
+ if (mpt_dev->active)
+ printk ("The ioc is active. Perhaps it needs to be"
+ " reset?\n");
+ else
+			printk ("The ioc is inactive, most likely in the "
+ "process of being reset. Please try again in "
+ "a moment.\n");
+ }
+
+ priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
+ if (priv->mpt_txfidx == NULL)
+ goto out;
+ priv->mpt_txfidx_tail = -1;
+
+ priv->SendCtl = kcalloc(priv->tx_max_out, sizeof(struct BufferControl),
+ GFP_KERNEL);
+ if (priv->SendCtl == NULL)
+ goto out_mpt_txfidx;
+ for (i = 0; i < priv->tx_max_out; i++)
+ priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
+
+ dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
+
+ priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
+ GFP_KERNEL);
+ if (priv->mpt_rxfidx == NULL)
+ goto out_SendCtl;
+ priv->mpt_rxfidx_tail = -1;
+
+ priv->RcvCtl = kcalloc(priv->max_buckets_out,
+ sizeof(struct BufferControl),
+ GFP_KERNEL);
+ if (priv->RcvCtl == NULL)
+ goto out_mpt_rxfidx;
+ for (i = 0; i < priv->max_buckets_out; i++)
+ priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
+
+/**/ dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
+/**/ for (i = 0; i < priv->tx_max_out; i++)
+/**/ dlprintk((" %xh", priv->mpt_txfidx[i]));
+/**/ dlprintk(("\n"));
+
+ dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
+
+ mpt_lan_post_receive_buckets(priv);
+ printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev));
+
+ if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
+ printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
+			" Notifications. This is a bad thing! We're going to "
+			"go ahead anyway, but I'd be leery of system stability "
+			"at this point.\n");
+ }
+
+ netif_start_queue(dev);
+ dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
+
+ return 0;
+out_mpt_rxfidx:
+ kfree(priv->mpt_rxfidx);
+ priv->mpt_rxfidx = NULL;
+out_SendCtl:
+ kfree(priv->SendCtl);
+ priv->SendCtl = NULL;
+out_mpt_txfidx:
+ kfree(priv->mpt_txfidx);
+ priv->mpt_txfidx = NULL;
+out: return -ENOMEM;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Send a LanReset message to the FW. This should result in the FW returning
+ any buckets it still has. */
+static int
+mpt_lan_reset(struct net_device *dev)
+{
+ MPT_FRAME_HDR *mf;
+ LANResetRequest_t *pResetReq;
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+
+ mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
+
+ if (mf == NULL) {
+/* dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
+ "Unable to allocate a request frame.\n"));
+*/
+ return -1;
+ }
+
+ pResetReq = (LANResetRequest_t *) mf;
+
+ pResetReq->Function = MPI_FUNCTION_LAN_RESET;
+ pResetReq->ChainOffset = 0;
+ pResetReq->Reserved = 0;
+ pResetReq->PortNumber = priv->pnum;
+ pResetReq->MsgFlags = 0;
+ pResetReq->Reserved2 = 0;
+
+ mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_close(struct net_device *dev)
+{
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+ MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+ unsigned long timeout;
+ int i;
+
+ dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
+
+ mpt_event_deregister(LanCtx);
+
+ dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
+ "since driver was loaded, %d still out\n",
+ priv->total_posted,atomic_read(&priv->buckets_out)));
+
+ netif_stop_queue(dev);
+
+ mpt_lan_reset(dev);
+
+ timeout = jiffies + 2 * HZ;
+ while (atomic_read(&priv->buckets_out) && time_before(jiffies, timeout))
+ schedule_timeout_interruptible(1);
+
+ for (i = 0; i < priv->max_buckets_out; i++) {
+ if (priv->RcvCtl[i].skb != NULL) {
+/**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
+/**/ "is still out\n", i));
+ pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
+ priv->RcvCtl[i].len,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(priv->RcvCtl[i].skb);
+ }
+ }
+
+ kfree(priv->RcvCtl);
+ kfree(priv->mpt_rxfidx);
+
+ for (i = 0; i < priv->tx_max_out; i++) {
+ if (priv->SendCtl[i].skb != NULL) {
+ pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
+ priv->SendCtl[i].len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb(priv->SendCtl[i].skb);
+ }
+ }
+
+ kfree(priv->SendCtl);
+ kfree(priv->mpt_txfidx);
+
+ atomic_set(&priv->buckets_out, 0);
+
+ printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev));
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Tx timeout handler. */
+static void
+mpt_lan_tx_timeout(struct net_device *dev)
+{
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+ MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+
+ if (mpt_dev->active) {
+ dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
+ netif_wake_queue(dev);
+ }
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+//static inline int
+static int
+mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
+{
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+ MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+ struct sk_buff *sent;
+ unsigned long flags;
+ u32 ctx;
+
+ ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
+ sent = priv->SendCtl[ctx].skb;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += sent->len;
+
+ dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ __func__, sent));
+
+ priv->SendCtl[ctx].skb = NULL;
+ pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
+ priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(sent);
+
+ spin_lock_irqsave(&priv->txfidx_lock, flags);
+ priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
+ spin_unlock_irqrestore(&priv->txfidx_lock, flags);
+
+ netif_wake_queue(dev);
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
+{
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+ MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+ struct sk_buff *sent;
+ unsigned long flags;
+ int FreeReqFrame = 0;
+ u32 *pContext;
+ u32 ctx;
+ u8 count;
+
+ count = pSendRep->NumberOfContexts;
+
+ dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
+ le16_to_cpu(pSendRep->IOCStatus)));
+
+ /* Add check for Loginfo Flag in IOCStatus */
+
+ switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
+ case MPI_IOCSTATUS_SUCCESS:
+ dev->stats.tx_packets += count;
+ break;
+
+ case MPI_IOCSTATUS_LAN_CANCELED:
+ case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
+ break;
+
+ case MPI_IOCSTATUS_INVALID_SGL:
+ dev->stats.tx_errors += count;
+ printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev));
+ goto out;
+
+ default:
+ dev->stats.tx_errors += count;
+ break;
+ }
+
+ pContext = &pSendRep->BufferContext;
+
+ spin_lock_irqsave(&priv->txfidx_lock, flags);
+ while (count > 0) {
+ ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
+
+ sent = priv->SendCtl[ctx].skb;
+ dev->stats.tx_bytes += sent->len;
+
+ dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ __func__, sent));
+
+ priv->SendCtl[ctx].skb = NULL;
+ pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
+ priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq(sent);
+
+ priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
+
+ pContext++;
+ count--;
+ }
+ spin_unlock_irqrestore(&priv->txfidx_lock, flags);
+
+out:
+ if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
+ FreeReqFrame = 1;
+
+ netif_wake_queue(dev);
+ return FreeReqFrame;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
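+/*
+ * Transmit path, in outline: check for a free Tx context and a request
+ * frame (stopping the queue and returning NETDEV_TX_BUSY if either is
+ * unavailable), pop a context, DMA-map the skb, then build a LANSendRequest
+ * carrying one SGETransaction32 (our context plus the NAA/MAC destination
+ * in the transaction details) followed by a single 64-bit Simple SGE
+ * covering the payload, and post the frame to the IOC.
+ */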
+static int
+mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
+{
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+ MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+ MPT_FRAME_HDR *mf;
+ LANSendRequest_t *pSendReq;
+ SGETransaction32_t *pTrans;
+ SGESimple64_t *pSimple;
+ const unsigned char *mac;
+ dma_addr_t dma;
+ unsigned long flags;
+ int ctx;
+ u16 cur_naa = 0x1000;
+
+ dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
+ __func__, skb));
+
+ spin_lock_irqsave(&priv->txfidx_lock, flags);
+ if (priv->mpt_txfidx_tail < 0) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&priv->txfidx_lock, flags);
+
+		printk (KERN_ERR "%s: no tx context available: %d\n",
+ __func__, priv->mpt_txfidx_tail);
+ return NETDEV_TX_BUSY;
+ }
+
+ mf = mpt_get_msg_frame(LanCtx, mpt_dev);
+ if (mf == NULL) {
+ netif_stop_queue(dev);
+ spin_unlock_irqrestore(&priv->txfidx_lock, flags);
+
+ printk (KERN_ERR "%s: Unable to alloc request frame\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
+ spin_unlock_irqrestore(&priv->txfidx_lock, flags);
+
+// dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
+// IOC_AND_NETDEV_NAMES_s_s(dev)));
+
+ pSendReq = (LANSendRequest_t *) mf;
+
+ /* Set the mac.raw pointer, since this apparently isn't getting
+ * done before we get the skb. Pull the data pointer past the mac data.
+ */
+ skb_reset_mac_header(skb);
+ skb_pull(skb, 12);
+
+ dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+
+ priv->SendCtl[ctx].skb = skb;
+ priv->SendCtl[ctx].dma = dma;
+ priv->SendCtl[ctx].len = skb->len;
+
+ /* Message Header */
+ pSendReq->Reserved = 0;
+ pSendReq->Function = MPI_FUNCTION_LAN_SEND;
+ pSendReq->ChainOffset = 0;
+ pSendReq->Reserved2 = 0;
+ pSendReq->MsgFlags = 0;
+ pSendReq->PortNumber = priv->pnum;
+
+ /* Transaction Context Element */
+ pTrans = (SGETransaction32_t *) pSendReq->SG_List;
+
+ /* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
+ pTrans->ContextSize = sizeof(u32);
+ pTrans->DetailsLength = 2 * sizeof(u32);
+ pTrans->Flags = 0;
+ pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+
+// dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
+// IOC_AND_NETDEV_NAMES_s_s(dev),
+// ctx, skb, skb->data));
+
+ mac = skb_mac_header(skb);
+
+ pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa << 16) |
+ (mac[0] << 8) |
+ (mac[1] << 0));
+ pTrans->TransactionDetails[1] = cpu_to_le32((mac[2] << 24) |
+ (mac[3] << 16) |
+ (mac[4] << 8) |
+ (mac[5] << 0));
+
+ pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
+
+ /* If we ever decide to send more than one Simple SGE per LANSend, then
+ we will need to make sure that LAST_ELEMENT only gets set on the
+ last one. Otherwise, bad voodoo and evil funkiness will commence. */
+ pSimple->FlagsLength = cpu_to_le32(
+ ((MPI_SGE_FLAGS_LAST_ELEMENT |
+ MPI_SGE_FLAGS_END_OF_BUFFER |
+ MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+ MPI_SGE_FLAGS_HOST_TO_IOC |
+ MPI_SGE_FLAGS_64_BIT_ADDRESSING |
+ MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
+ skb->len);
+ pSimple->Address.Low = cpu_to_le32((u32) dma);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
+ else
+ pSimple->Address.High = 0;
+
+ mpt_put_msg_frame (LanCtx, mpt_dev, mf);
+ dev->trans_start = jiffies;
+
+ dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ le32_to_cpu(pSimple->FlagsLength)));
+
+ return NETDEV_TX_OK;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
+ */
+static void
+mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
+{
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+
+ if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
+ if (priority) {
+ schedule_delayed_work(&priv->post_buckets_task, 0);
+ } else {
+ schedule_delayed_work(&priv->post_buckets_task, 1);
+ dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
+ "timer.\n"));
+ }
+ dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev) ));
+ }
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+
+ skb->protocol = mpt_lan_type_trans(skb, dev);
+
+ dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
+ "delivered to upper level.\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
+
+ dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+
+ skb->dev = dev;
+ netif_rx(skb);
+
+ dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
+ atomic_read(&priv->buckets_out)));
+
+ if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
+ mpt_lan_wake_post_buckets_task(dev, 1);
+
+ dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
+ "remaining, %d received back since sod\n",
+ atomic_read(&priv->buckets_out), priv->total_received));
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+//static inline int
+static int
+mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
+{
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+ MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+ struct sk_buff *skb, *old_skb;
+ unsigned long flags;
+ u32 ctx, len;
+
+ ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
+ skb = priv->RcvCtl[ctx].skb;
+
+ len = GET_LAN_PACKET_LENGTH(tmsg);
+
+ if (len < MPT_LAN_RX_COPYBREAK) {
+ old_skb = skb;
+
+ skb = (struct sk_buff *)dev_alloc_skb(len);
+ if (!skb) {
+ printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ __FILE__, __LINE__);
+ return -ENOMEM;
+ }
+
+ pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+
+ skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
+
+ pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ goto out;
+ }
+
+ skb_put(skb, len);
+
+ priv->RcvCtl[ctx].skb = NULL;
+
+ pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+
+out:
+ spin_lock_irqsave(&priv->rxfidx_lock, flags);
+ priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+ spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+ atomic_dec(&priv->buckets_out);
+ priv->total_received++;
+
+ return mpt_lan_receive_skb(dev, skb);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_receive_post_free(struct net_device *dev,
+ LANReceivePostReply_t *pRecvRep)
+{
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+ MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+ unsigned long flags;
+ struct sk_buff *skb;
+ u32 ctx;
+ int count;
+ int i;
+
+ count = pRecvRep->NumberOfContexts;
+
+/**/ dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
+ "IOC returned %d buckets, freeing them...\n", count));
+
+ spin_lock_irqsave(&priv->rxfidx_lock, flags);
+ for (i = 0; i < count; i++) {
+ ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
+
+ skb = priv->RcvCtl[ctx].skb;
+
+// dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
+// IOC_AND_NETDEV_NAMES_s_s(dev)));
+// dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
+// priv, &(priv->buckets_out)));
+// dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
+
+ priv->RcvCtl[ctx].skb = NULL;
+ pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+
+ priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+ }
+ spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+ atomic_sub(count, &priv->buckets_out);
+
+// for (i = 0; i < priv->max_buckets_out; i++)
+// if (priv->RcvCtl[i].skb != NULL)
+// dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
+// "is still out\n", i));
+
+/* dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
+ count));
+*/
+/**/ dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
+/**/ "remaining, %d received back since sod.\n",
+/**/ atomic_read(&priv->buckets_out), priv->total_received));
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_receive_post_reply(struct net_device *dev,
+ LANReceivePostReply_t *pRecvRep)
+{
+ struct mpt_lan_priv *priv = netdev_priv(dev);
+ MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+ struct sk_buff *skb, *old_skb;
+ unsigned long flags;
+ u32 len, ctx, offset;
+ u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
+ int count;
+ int i, l;
+
+ dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
+ dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
+ le16_to_cpu(pRecvRep->IOCStatus)));
+
+ if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
+ MPI_IOCSTATUS_LAN_CANCELED)
+ return mpt_lan_receive_post_free(dev, pRecvRep);
+
+ len = le32_to_cpu(pRecvRep->PacketLength);
+ if (len == 0) {
+ printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
+ "ReceivePostReply w/ PacketLength zero!\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev));
+ printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
+ pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
+ return -1;
+ }
+
+ ctx = le32_to_cpu(pRecvRep->BucketContext[0]);
+ count = pRecvRep->NumberOfContexts;
+ skb = priv->RcvCtl[ctx].skb;
+
+ offset = le32_to_cpu(pRecvRep->PacketOffset);
+// if (offset != 0) {
+// printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
+// "w/ PacketOffset %u\n",
+// IOC_AND_NETDEV_NAMES_s_s(dev),
+// offset);
+// }
+
+ dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ offset, len));
+
+ if (count > 1) {
+ int szrem = len;
+
+// dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
+// "for single packet, concatenating...\n",
+// IOC_AND_NETDEV_NAMES_s_s(dev)));
+
+ skb = (struct sk_buff *)dev_alloc_skb(len);
+ if (!skb) {
+ printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ __FILE__, __LINE__);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&priv->rxfidx_lock, flags);
+ for (i = 0; i < count; i++) {
+
+ ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
+ old_skb = priv->RcvCtl[ctx].skb;
+
+ l = priv->RcvCtl[ctx].len;
+ if (szrem < l)
+ l = szrem;
+
+// dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
+// IOC_AND_NETDEV_NAMES_s_s(dev),
+// i, l));
+
+ pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ PCI_DMA_FROMDEVICE);
+ skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
+
+ pci_dma_sync_single_for_device(mpt_dev->pcidev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ PCI_DMA_FROMDEVICE);
+
+ priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+ szrem -= l;
+ }
+ spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+ } else if (len < MPT_LAN_RX_COPYBREAK) {
+
+ old_skb = skb;
+
+ skb = (struct sk_buff *)dev_alloc_skb(len);
+ if (!skb) {
+ printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ __FILE__, __LINE__);
+ return -ENOMEM;
+ }
+
+ pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ PCI_DMA_FROMDEVICE);
+
+ skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
+
+ pci_dma_sync_single_for_device(mpt_dev->pcidev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ PCI_DMA_FROMDEVICE);
+
+ spin_lock_irqsave(&priv->rxfidx_lock, flags);
+ priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+ spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+ } else {
+ spin_lock_irqsave(&priv->rxfidx_lock, flags);
+
+ priv->RcvCtl[ctx].skb = NULL;
+
+ pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ priv->RcvCtl[ctx].dma = 0;
+
+ priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+ spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+ skb_put(skb,len);
+ }
+
+ atomic_sub(count, &priv->buckets_out);
+ priv->total_received += count;
+
+ if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
+ printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
+ "MPT_LAN_MAX_BUCKETS_OUT = %d\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ priv->mpt_rxfidx_tail,
+ MPT_LAN_MAX_BUCKETS_OUT);
+
+ return -1;
+ }
+
+ if (remaining == 0)
+ printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
+ "(priv->buckets_out = %d)\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ atomic_read(&priv->buckets_out));
+ else if (remaining < 10)
+ printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
+ "(priv->buckets_out = %d)\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ remaining, atomic_read(&priv->buckets_out));
+
+ if ((remaining < priv->bucketthresh) &&
+ ((atomic_read(&priv->buckets_out) - remaining) >
+ MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
+
+ printk (KERN_WARNING MYNAM " Mismatch between driver's "
+ "buckets_out count and fw's BucketsRemaining "
+ "count has crossed the threshold, issuing a "
+ "LanReset to clear the fw's hashtable. You may "
+ "want to check your /var/log/messages for \"CRC "
+ "error\" event notifications.\n");
+
+ mpt_lan_reset(dev);
+ mpt_lan_wake_post_buckets_task(dev, 0);
+ }
+
+ return mpt_lan_receive_skb(dev, skb);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Simple SGE's only at the moment */
+
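+/*
+ * Top the IOC back up to max_buckets_out receive buckets.  Each request
+ * frame carries up to 'max' Transaction32 + 64-bit Simple SGE pairs, one
+ * per bucket, each pointing at a DMA-mapped skb of
+ * (dev->mtu + dev->hard_header_len + 4) bytes; the last SGE in a frame is
+ * flagged END_OF_LIST and BucketCount records how many buckets were
+ * actually posted.
+ */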
+static void
+mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
+{
+ struct net_device *dev = priv->dev;
+ MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+ MPT_FRAME_HDR *mf;
+ LANReceivePostRequest_t *pRecvReq;
+ SGETransaction32_t *pTrans;
+ SGESimple64_t *pSimple;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ u32 curr, buckets, count, max;
+ u32 len = (dev->mtu + dev->hard_header_len + 4);
+ unsigned long flags;
+ int i;
+
+ curr = atomic_read(&priv->buckets_out);
+ buckets = (priv->max_buckets_out - curr);
+
+ dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ __func__, buckets, curr));
+
+ max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
+ (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
+
+ while (buckets) {
+ mf = mpt_get_msg_frame(LanCtx, mpt_dev);
+ if (mf == NULL) {
+ printk (KERN_ERR "%s: Unable to alloc request frame\n",
+ __func__);
+ dioprintk((KERN_ERR "%s: %u buckets remaining\n",
+ __func__, buckets));
+ goto out;
+ }
+ pRecvReq = (LANReceivePostRequest_t *) mf;
+
+ i = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+ mpt_dev->RequestNB[i] = 0;
+ count = buckets;
+ if (count > max)
+ count = max;
+
+ pRecvReq->Function = MPI_FUNCTION_LAN_RECEIVE;
+ pRecvReq->ChainOffset = 0;
+ pRecvReq->MsgFlags = 0;
+ pRecvReq->PortNumber = priv->pnum;
+
+ pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
+ pSimple = NULL;
+
+ for (i = 0; i < count; i++) {
+ int ctx;
+
+ spin_lock_irqsave(&priv->rxfidx_lock, flags);
+ if (priv->mpt_rxfidx_tail < 0) {
+ printk (KERN_ERR "%s: Can't alloc context\n",
+ __func__);
+ spin_unlock_irqrestore(&priv->rxfidx_lock,
+ flags);
+ break;
+ }
+
+ ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
+
+ skb = priv->RcvCtl[ctx].skb;
+ if (skb && (priv->RcvCtl[ctx].len != len)) {
+ pci_unmap_single(mpt_dev->pcidev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(priv->RcvCtl[ctx].skb);
+ skb = priv->RcvCtl[ctx].skb = NULL;
+ }
+
+ if (skb == NULL) {
+ skb = dev_alloc_skb(len);
+ if (skb == NULL) {
+ printk (KERN_WARNING
+ MYNAM "/%s: Can't alloc skb\n",
+ __func__);
+ priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+ spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+ break;
+ }
+
+ dma = pci_map_single(mpt_dev->pcidev, skb->data,
+ len, PCI_DMA_FROMDEVICE);
+
+ priv->RcvCtl[ctx].skb = skb;
+ priv->RcvCtl[ctx].dma = dma;
+ priv->RcvCtl[ctx].len = len;
+ }
+
+ spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+ pTrans->ContextSize = sizeof(u32);
+ pTrans->DetailsLength = 0;
+ pTrans->Flags = 0;
+ pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+
+ pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
+
+ pSimple->FlagsLength = cpu_to_le32(
+ ((MPI_SGE_FLAGS_END_OF_BUFFER |
+ MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
+ pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
+ else
+ pSimple->Address.High = 0;
+
+ pTrans = (SGETransaction32_t *) (pSimple + 1);
+ }
+
+ if (pSimple == NULL) {
+/**/ printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
+/**/ __func__);
+ mpt_free_msg_frame(mpt_dev, mf);
+ goto out;
+ }
+
+ pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
+
+ pRecvReq->BucketCount = cpu_to_le32(i);
+
+/* printk(KERN_INFO MYNAM ": posting buckets\n ");
+ * for (i = 0; i < j + 2; i ++)
+ * printk (" %08x", le32_to_cpu(msg[i]));
+ * printk ("\n");
+ */
+
+ mpt_put_msg_frame(LanCtx, mpt_dev, mf);
+
+ priv->total_posted += i;
+ buckets -= i;
+ atomic_add(i, &priv->buckets_out);
+ }
+
+out:
+ dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
+ __func__, buckets, atomic_read(&priv->buckets_out)));
+ dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
+ __func__, priv->total_posted, priv->total_received));
+
+ clear_bit(0, &priv->post_buckets_active);
+}
+
+static void
+mpt_lan_post_receive_buckets_work(struct work_struct *work)
+{
+ mpt_lan_post_receive_buckets(container_of(work, struct mpt_lan_priv,
+ post_buckets_task.work));
+}
+
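+/*
+ * Standard net_device_ops wiring; mpt_register_lan_device() below attaches
+ * these along with the MPT_LAN_TX_TIMEOUT watchdog.
+ */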
+static const struct net_device_ops mpt_netdev_ops = {
+ .ndo_open = mpt_lan_open,
+ .ndo_stop = mpt_lan_close,
+ .ndo_start_xmit = mpt_lan_sdu_send,
+ .ndo_change_mtu = mpt_lan_change_mtu,
+ .ndo_tx_timeout = mpt_lan_tx_timeout,
+};
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static struct net_device *
+mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
+{
+ struct net_device *dev;
+ struct mpt_lan_priv *priv;
+ u8 HWaddr[FC_ALEN], *a;
+
+ dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
+ if (!dev)
+ return NULL;
+
+ dev->mtu = MPT_LAN_MTU;
+
+ priv = netdev_priv(dev);
+
+ priv->dev = dev;
+ priv->mpt_dev = mpt_dev;
+ priv->pnum = pnum;
+
+ INIT_DELAYED_WORK(&priv->post_buckets_task,
+ mpt_lan_post_receive_buckets_work);
+ priv->post_buckets_active = 0;
+
+ dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
+ __LINE__, dev->mtu + dev->hard_header_len + 4));
+
+ atomic_set(&priv->buckets_out, 0);
+ priv->total_posted = 0;
+ priv->total_received = 0;
+ priv->max_buckets_out = max_buckets_out;
+ if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
+ priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
+
+ dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
+ __LINE__,
+ mpt_dev->pfacts[0].MaxLanBuckets,
+ max_buckets_out,
+ priv->max_buckets_out));
+
+ priv->bucketthresh = priv->max_buckets_out * 2 / 3;
+ spin_lock_init(&priv->txfidx_lock);
+ spin_lock_init(&priv->rxfidx_lock);
+
+ /* Grab pre-fetched LANPage1 stuff. :-) */
+ a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
+
+ HWaddr[0] = a[5];
+ HWaddr[1] = a[4];
+ HWaddr[2] = a[3];
+ HWaddr[3] = a[2];
+ HWaddr[4] = a[1];
+ HWaddr[5] = a[0];
+
+ dev->addr_len = FC_ALEN;
+ memcpy(dev->dev_addr, HWaddr, FC_ALEN);
+ memset(dev->broadcast, 0xff, FC_ALEN);
+
+ /* The Tx queue is 127 deep on the 909.
+ * Give ourselves some breathing room.
+ */
+ priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
+ tx_max_out_p : MPT_TX_MAX_OUT_LIM;
+
+ dev->netdev_ops = &mpt_netdev_ops;
+ dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
+
+ dlprintk((KERN_INFO MYNAM ": Finished registering dev "
+ "and setting initial values\n"));
+
+ if (register_netdev(dev) != 0) {
+ free_netdev(dev);
+ dev = NULL;
+ }
+ return dev;
+}
+
+static int
+mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct net_device *dev;
+ int i;
+
+ for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
+ printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
+ "ProtocolFlags=%02Xh (%c%c%c%c)\n",
+ ioc->name, ioc->pfacts[i].PortNumber,
+ ioc->pfacts[i].ProtocolFlags,
+ MPT_PROTOCOL_FLAGS_c_c_c_c(
+ ioc->pfacts[i].ProtocolFlags));
+
+ if (!(ioc->pfacts[i].ProtocolFlags &
+ MPI_PORTFACTS_PROTOCOL_LAN)) {
+ printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
+ "seems to be disabled on this adapter port!\n",
+ ioc->name);
+ continue;
+ }
+
+ dev = mpt_register_lan_device(ioc, i);
+ if (!dev) {
+ printk(KERN_ERR MYNAM ": %s: Unable to register "
+ "port%d as a LAN device\n", ioc->name,
+ ioc->pfacts[i].PortNumber);
+ continue;
+ }
+
+ printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
+ "registered as '%s'\n", ioc->name, dev->name);
+ printk(KERN_INFO MYNAM ": %s/%s: "
+ "LanAddr = %pM\n",
+ IOC_AND_NETDEV_NAMES_s_s(dev),
+ dev->dev_addr);
+
+ ioc->netdev = dev;
+
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+static void
+mptlan_remove(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct net_device *dev = ioc->netdev;
+
+ if(dev != NULL) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+}
+
+static struct mpt_pci_driver mptlan_driver = {
+ .probe = mptlan_probe,
+ .remove = mptlan_remove,
+};
+
+static int __init mpt_lan_init (void)
+{
+ show_mptmod_ver(LANAME, LANVER);
+
+ LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER,
+ "lan_reply");
+ if (LanCtx <= 0) {
+ printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
+ return -EBUSY;
+ }
+
+ dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
+
+ if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
+ printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
+ "handler with mptbase! The world is at an end! "
+ "Everything is fading to black! Goodbye.\n");
+ return -EBUSY;
+ }
+
+ dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
+
+ mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER);
+ return 0;
+}
+
+static void __exit mpt_lan_exit(void)
+{
+ mpt_device_driver_deregister(MPTLAN_DRIVER);
+ mpt_reset_deregister(LanCtx);
+
+ if (LanCtx) {
+ mpt_deregister(LanCtx);
+ LanCtx = MPT_MAX_PROTOCOL_DRIVERS;
+ }
+}
+
+module_init(mpt_lan_init);
+module_exit(mpt_lan_exit);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
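+/*
+ * Receive-side type resolution for the FC encapsulation (roughly what
+ * eth_type_trans() does for Ethernet): set the mac header, work around the
+ * broadcast byte-swap F/W bug, classify the packet as
+ * broadcast/multicast/otherhost/host from the destination address, and
+ * strip the LLC/SNAP header from IP/ARP frames, returning their ethertype;
+ * anything else is handed up as ETH_P_802_2.
+ */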
+static unsigned short
+mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
+ struct fcllc *fcllc;
+
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(struct mpt_lan_ohdr));
+
+ if (fch->dtype == htons(0xffff)) {
+ u32 *p = (u32 *) fch;
+
+ swab32s(p + 0);
+ swab32s(p + 1);
+ swab32s(p + 2);
+ swab32s(p + 3);
+
+ printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
+ NETDEV_PTR_TO_IOC_NAME_s(dev));
+ printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %pM\n",
+ fch->saddr);
+ }
+
+ if (*fch->daddr & 1) {
+ if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
+ skb->pkt_type = PACKET_BROADCAST;
+ } else {
+ skb->pkt_type = PACKET_MULTICAST;
+ }
+ } else {
+ if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
+ skb->pkt_type = PACKET_OTHERHOST;
+ } else {
+ skb->pkt_type = PACKET_HOST;
+ }
+ }
+
+ fcllc = (struct fcllc *)skb->data;
+
+ /* Strip the SNAP header from ARP packets since we don't
+ * pass them through to the 802.2/SNAP layers.
+ */
+ if (fcllc->dsap == EXTENDED_SAP &&
+ (fcllc->ethertype == htons(ETH_P_IP) ||
+ fcllc->ethertype == htons(ETH_P_ARP))) {
+ skb_pull(skb, sizeof(struct fcllc));
+ return fcllc->ethertype;
+ }
+
+ return htons(ETH_P_802_2);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
new file mode 100644
index 00000000..c171afa9
--- /dev/null
+++ b/drivers/message/fusion/mptlan.h
@@ -0,0 +1,131 @@
+/*
+ * linux/drivers/message/fusion/mptlan.h
+ * IP Over Fibre Channel device driver.
+ * For use with LSI Fibre Channel PCI chip/adapters
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 2000-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+/* mptlan.h */
+
+#ifndef LINUX_MPTLAN_H_INCLUDED
+#define LINUX_MPTLAN_H_INCLUDED
+/*****************************************************************************/
+
+#if !defined(__GENKSYMS__)
+#include <linux/module.h>
+#endif
+
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+// #include <linux/etherdevice.h>
+#include <linux/fcdevice.h>
+// #include <linux/fddidevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+// #include <linux/trdevice.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+ /* Override mptbase.h by pre-defining these! */
+#define MODULEAUTHOR "LSI Corporation"
+
+#include "mptbase.h"
+
+/*****************************************************************************/
+#define LANAME "Fusion MPT LAN driver"
+#define LANVER MPT_LINUX_VERSION_COMMON
+
+#ifdef MODULE
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(LANAME);
+#endif
+/*****************************************************************************/
+
+#define MPT_LAN_MAX_BUCKETS_OUT 256
+#define MPT_LAN_BUCKET_THRESH 18 /* 9 buckets in one message */
+#define MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH 10
+#define MPT_LAN_RX_COPYBREAK 200
+#define MPT_LAN_TX_TIMEOUT (1*HZ)
+#define MPT_TX_MAX_OUT_LIM 127
+
+#define MPT_LAN_MIN_MTU 96 /* RFC2625 */
+#define MPT_LAN_MAX_MTU 65280 /* RFC2625 */
+#define MPT_LAN_MTU 13312 /* Max perf range + lower mem
+ usage than 16128 */
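+/*
+ * The MTU limits above are enforced by mpt_lan_change_mtu(); the receive
+ * bucket size actually posted to the IOC is
+ * dev->mtu + dev->hard_header_len + 4 (see mpt_lan_post_receive_buckets()
+ * in mptlan.c).
+ */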
+
+#define MPT_LAN_NAA_RFC2625 0x1
+#define MPT_LAN_NAA_QLOGIC 0x2
+
+/* MPT LAN Reset and Suspend Resource Flags Defines */
+
+#define MPT_LAN_RESOURCE_FLAG_RETURN_POSTED_BUCKETS 0x01
+#define MPT_LAN_RESOURCE_FLAG_RETURN_PEND_TRANSMITS 0x02
+
+/*****************************************************************************/
+#ifdef MPT_LAN_IO_DEBUG
+#define dioprintk(x) printk x
+#else
+#define dioprintk(x)
+#endif
+
+#ifdef MPT_LAN_DEBUG
+#define dlprintk(x) printk x
+#else
+#define dlprintk(x)
+#endif
+
+#define NETDEV_TO_LANPRIV_PTR(d) ((struct mpt_lan_priv *)netdev_priv(d))
+#define NETDEV_PTR_TO_IOC_NAME_s(d) (NETDEV_TO_LANPRIV_PTR(d)->mpt_dev->name)
+#define IOC_AND_NETDEV_NAMES_s_s(d) NETDEV_PTR_TO_IOC_NAME_s(d), (d)->name
+
+/*****************************************************************************/
+#endif
+
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
new file mode 100644
index 00000000..7596aecd
--- /dev/null
+++ b/drivers/message/fusion/mptsas.c
@@ -0,0 +1,5416 @@
+/*
+ * linux/drivers/message/fusion/mptsas.c
+ * For use with LSI PCI chip/adapter(s)
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h> /* for mdelay */
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_dbg.h>
+
+#include "mptbase.h"
+#include "mptscsih.h"
+#include "mptsas.h"
+
+
+#define my_NAME "Fusion MPT SAS Host driver"
+#define my_VERSION MPT_LINUX_VERSION_COMMON
+#define MYNAM "mptsas"
+
+/*
+ * Reserved channel for integrated raid
+ */
+#define MPTSAS_RAID_CHANNEL 1
+
+#define SAS_CONFIG_PAGE_TIMEOUT 30
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(my_VERSION);
+
+static int mpt_pt_clear;
+module_param(mpt_pt_clear, int, 0);
+MODULE_PARM_DESC(mpt_pt_clear,
+ " Clear persistency table: enable=1 "
+ "(default=MPTSCSIH_PT_CLEAR=0)");
+
+/* SCSI mid-layer global parameter is max_report_luns, which is 511 */
+#define MPTSAS_MAX_LUN (16895)
+static int max_lun = MPTSAS_MAX_LUN;
+module_param(max_lun, int, 0);
+MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
+
+static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
+static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS;
+
+static void mptsas_firmware_event_work(struct work_struct *work);
+static void mptsas_send_sas_event(struct fw_event_work *fw_event);
+static void mptsas_send_raid_event(struct fw_event_work *fw_event);
+static void mptsas_send_ir2_event(struct fw_event_work *fw_event);
+static void mptsas_parse_device_info(struct sas_identify *identify,
+ struct mptsas_devinfo *device_info);
+static inline void mptsas_set_rphy(MPT_ADAPTER *ioc,
+ struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy);
+static struct mptsas_phyinfo *mptsas_find_phyinfo_by_sas_address
+ (MPT_ADAPTER *ioc, u64 sas_address);
+static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc,
+ struct mptsas_devinfo *device_info, u32 form, u32 form_specific);
+static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc,
+ struct mptsas_enclosure *enclosure, u32 form, u32 form_specific);
+static int mptsas_add_end_device(MPT_ADAPTER *ioc,
+ struct mptsas_phyinfo *phy_info);
+static void mptsas_del_end_device(MPT_ADAPTER *ioc,
+ struct mptsas_phyinfo *phy_info);
+static void mptsas_send_link_status_event(struct fw_event_work *fw_event);
+static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address
+ (MPT_ADAPTER *ioc, u64 sas_address);
+static void mptsas_expander_delete(MPT_ADAPTER *ioc,
+ struct mptsas_portinfo *port_info, u8 force);
+static void mptsas_send_expander_event(struct fw_event_work *fw_event);
+static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
+static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
+static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
+static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
+static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
+void mptsas_schedule_target_reset(void *ioc);
+
+static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
+ MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
+{
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "---- IO UNIT PAGE 0 ------------\n", ioc->name));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handle=0x%X\n",
+ ioc->name, le16_to_cpu(phy_data->AttachedDeviceHandle)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Controller Handle=0x%X\n",
+ ioc->name, le16_to_cpu(phy_data->ControllerDevHandle)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Port=0x%X\n",
+ ioc->name, phy_data->Port));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Port Flags=0x%X\n",
+ ioc->name, phy_data->PortFlags));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Flags=0x%X\n",
+ ioc->name, phy_data->PhyFlags));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Negotiated Link Rate=0x%X\n",
+ ioc->name, phy_data->NegotiatedLinkRate));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Controller PHY Device Info=0x%X\n", ioc->name,
+ le32_to_cpu(phy_data->ControllerPhyDeviceInfo)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "DiscoveryStatus=0x%X\n\n",
+ ioc->name, le32_to_cpu(phy_data->DiscoveryStatus)));
+}
+
+static void mptsas_print_phy_pg0(MPT_ADAPTER *ioc, SasPhyPage0_t *pg0)
+{
+ __le64 sas_address;
+
+ memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
+
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "---- SAS PHY PAGE 0 ------------\n", ioc->name));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Attached Device Handle=0x%X\n", ioc->name,
+ le16_to_cpu(pg0->AttachedDevHandle)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SAS Address=0x%llX\n",
+ ioc->name, (unsigned long long)le64_to_cpu(sas_address)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Attached PHY Identifier=0x%X\n", ioc->name,
+ pg0->AttachedPhyIdentifier));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Attached Device Info=0x%X\n",
+ ioc->name, le32_to_cpu(pg0->AttachedDeviceInfo)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Programmed Link Rate=0x%X\n",
+ ioc->name, pg0->ProgrammedLinkRate));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Change Count=0x%X\n",
+ ioc->name, pg0->ChangeCount));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Info=0x%X\n\n",
+ ioc->name, le32_to_cpu(pg0->PhyInfo)));
+}
+
+static void mptsas_print_phy_pg1(MPT_ADAPTER *ioc, SasPhyPage1_t *pg1)
+{
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "---- SAS PHY PAGE 1 ------------\n", ioc->name));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Invalid Dword Count=0x%x\n",
+ ioc->name, pg1->InvalidDwordCount));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Running Disparity Error Count=0x%x\n", ioc->name,
+ pg1->RunningDisparityErrorCount));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Loss Dword Synch Count=0x%x\n", ioc->name,
+ pg1->LossDwordSynchCount));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "PHY Reset Problem Count=0x%x\n\n", ioc->name,
+ pg1->PhyResetProblemCount));
+}
+
+static void mptsas_print_device_pg0(MPT_ADAPTER *ioc, SasDevicePage0_t *pg0)
+{
+ __le64 sas_address;
+
+ memcpy(&sas_address, &pg0->SASAddress, sizeof(__le64));
+
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "---- SAS DEVICE PAGE 0 ---------\n", ioc->name));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Handle=0x%X\n",
+ ioc->name, le16_to_cpu(pg0->DevHandle)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Parent Handle=0x%X\n",
+ ioc->name, le16_to_cpu(pg0->ParentDevHandle)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Enclosure Handle=0x%X\n",
+ ioc->name, le16_to_cpu(pg0->EnclosureHandle)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Slot=0x%X\n",
+ ioc->name, le16_to_cpu(pg0->Slot)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SAS Address=0x%llX\n",
+ ioc->name, (unsigned long long)le64_to_cpu(sas_address)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Target ID=0x%X\n",
+ ioc->name, pg0->TargetID));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Bus=0x%X\n",
+ ioc->name, pg0->Bus));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Parent Phy Num=0x%X\n",
+ ioc->name, pg0->PhyNum));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Access Status=0x%X\n",
+ ioc->name, le16_to_cpu(pg0->AccessStatus)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Device Info=0x%X\n",
+ ioc->name, le32_to_cpu(pg0->DeviceInfo)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Flags=0x%X\n",
+ ioc->name, le16_to_cpu(pg0->Flags)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Physical Port=0x%X\n\n",
+ ioc->name, pg0->PhysicalPort));
+}
+
+static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1)
+{
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "---- SAS EXPANDER PAGE 1 ------------\n", ioc->name));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Physical Port=0x%X\n",
+ ioc->name, pg1->PhysicalPort));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "PHY Identifier=0x%X\n",
+ ioc->name, pg1->PhyIdentifier));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Negotiated Link Rate=0x%X\n",
+ ioc->name, pg1->NegotiatedLinkRate));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Programmed Link Rate=0x%X\n",
+ ioc->name, pg1->ProgrammedLinkRate));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Hardware Link Rate=0x%X\n",
+ ioc->name, pg1->HwLinkRate));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Owner Device Handle=0x%X\n",
+ ioc->name, le16_to_cpu(pg1->OwnerDevHandle)));
+ dsasprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Attached Device Handle=0x%X\n\n", ioc->name,
+ le16_to_cpu(pg1->AttachedDevHandle)));
+}
+
+/* inhibit sas firmware event handling */
+static void
+mptsas_fw_event_off(MPT_ADAPTER *ioc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ ioc->fw_events_off = 1;
+ ioc->sas_discovery_quiesce_io = 0;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+
+}
+
+/* enable sas firmware event handling */
+static void
+mptsas_fw_event_on(MPT_ADAPTER *ioc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ ioc->fw_events_off = 0;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
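+/*
+ * Firmware-event bookkeeping: events are queued on ioc->fw_event_q under
+ * fw_event_lock and serviced by mptsas_firmware_event_work(); the helpers
+ * below requeue an event (bumping ->retries) or free it, and
+ * mptsas_cleanup_fw_event_q() cancels whatever is still pending.
+ */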
+/* queue a sas firmware event */
+static void
+mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
+ unsigned long delay)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_add_tail(&fw_event->list, &ioc->fw_event_list);
+ INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n",
+ ioc->name, __func__, fw_event));
+ queue_delayed_work(ioc->fw_event_q, &fw_event->work,
+ delay);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/* requeue a sas firmware event */
+static void
+mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
+ unsigned long delay)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
+ "(fw_event=0x%p)\n", ioc->name, __func__, fw_event));
+ fw_event->retries++;
+ queue_delayed_work(ioc->fw_event_q, &fw_event->work,
+ msecs_to_jiffies(delay));
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/* free memory associated to a sas firmware event */
+static void
+mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
+ ioc->name, __func__, fw_event));
+ list_del(&fw_event->list);
+ kfree(fw_event);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/* walk the firmware event queue, and either stop or wait for
+ * outstanding events to complete */
+static void
+mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event, *next;
+ struct mptsas_target_reset_event *target_reset_list, *n;
+ MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+
+ /* flush the target_reset_list */
+ if (!list_empty(&hd->target_reset_list)) {
+ list_for_each_entry_safe(target_reset_list, n,
+ &hd->target_reset_list, list) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: removing target reset for id=%d\n",
+ ioc->name, __func__,
+ target_reset_list->sas_event_data.TargetID));
+ list_del(&target_reset_list->list);
+ kfree(target_reset_list);
+ }
+ }
+
+ if (list_empty(&ioc->fw_event_list) ||
+ !ioc->fw_event_q || in_interrupt())
+ return;
+
+ list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+ if (cancel_delayed_work(&fw_event->work))
+ mptsas_free_fw_event(ioc, fw_event);
+ }
+}
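+
+/*
+ * Only events whose delayed work could still be cancelled are freed here;
+ * work that is already running is expected to free its own event via
+ * mptsas_free_fw_event() once it finishes.
+ */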
+
+
+static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
+}
+
+static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+ return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
+}
+
+/*
+ * mptsas_find_portinfo_by_handle
+ *
+ * This function should be called with the sas_topology_mutex already held
+ */
+static struct mptsas_portinfo *
+mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
+{
+ struct mptsas_portinfo *port_info, *rc=NULL;
+ int i;
+
+ list_for_each_entry(port_info, &ioc->sas_topology, list)
+ for (i = 0; i < port_info->num_phys; i++)
+ if (port_info->phy_info[i].identify.handle == handle) {
+ rc = port_info;
+ goto out;
+ }
+ out:
+ return rc;
+}
+
+/**
+ * mptsas_find_portinfo_by_sas_address - find port info by SAS address
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sas_address: SAS address of the attached device to look up
+ *
+ * Unlike mptsas_find_portinfo_by_handle(), this helper takes the
+ * sas_topology_mutex itself, so it must not be called with that
+ * mutex already held.
+ **/
+static struct mptsas_portinfo *
+mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
+{
+ struct mptsas_portinfo *port_info, *rc = NULL;
+ int i;
+
+ if (sas_address >= ioc->hba_port_sas_addr &&
+ sas_address < (ioc->hba_port_sas_addr +
+ ioc->hba_port_num_phy))
+ return ioc->hba_port_info;
+
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_for_each_entry(port_info, &ioc->sas_topology, list)
+ for (i = 0; i < port_info->num_phys; i++)
+ if (port_info->phy_info[i].identify.sas_address ==
+ sas_address) {
+ rc = port_info;
+ goto out;
+ }
+ out:
+ mutex_unlock(&ioc->sas_topology_mutex);
+ return rc;
+}
+
+/*
+ * Returns true if the attached device is an end device that exposes an
+ * SSP, STP or SATA target, i.e. a SCSI end device we want to report.
+ */
+static inline int
+mptsas_is_end_device(struct mptsas_devinfo * attached)
+{
+ if ((attached->sas_address) &&
+ (attached->device_info &
+ MPI_SAS_DEVICE_INFO_END_DEVICE) &&
+ ((attached->device_info &
+ MPI_SAS_DEVICE_INFO_SSP_TARGET) |
+ (attached->device_info &
+ MPI_SAS_DEVICE_INFO_STP_TARGET) |
+ (attached->device_info &
+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)))
+ return 1;
+ else
+ return 0;
+}
+
+/* no mutex */
+static void
+mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
+{
+ struct mptsas_portinfo *port_info;
+ struct mptsas_phyinfo *phy_info;
+ u8 i;
+
+ if (!port_details)
+ return;
+
+ port_info = port_details->port_info;
+ phy_info = port_info->phy_info;
+
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: [%p]: num_phys=%02d "
+ "bitmask=0x%016llX\n", ioc->name, __func__, port_details,
+ port_details->num_phys, (unsigned long long)
+ port_details->phy_bitmask));
+
+ for (i = 0; i < port_info->num_phys; i++, phy_info++) {
+ if(phy_info->port_details != port_details)
+ continue;
+ memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
+ mptsas_set_rphy(ioc, phy_info, NULL);
+ phy_info->port_details = NULL;
+ }
+ kfree(port_details);
+}
+
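+/*
+ * Accessors for the rphy, port and starget hanging off a phy's
+ * port_details; they tolerate a NULL port_details so callers need not
+ * check whether the phy has been grouped into a port yet.
+ */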
+static inline struct sas_rphy *
+mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
+{
+ if (phy_info->port_details)
+ return phy_info->port_details->rphy;
+ else
+ return NULL;
+}
+
+static inline void
+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
+{
+ if (phy_info->port_details) {
+ phy_info->port_details->rphy = rphy;
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
+ ioc->name, rphy));
+ }
+
+ if (rphy) {
+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
+ ioc->name, rphy, rphy->dev.release));
+ }
+}
+
+static inline struct sas_port *
+mptsas_get_port(struct mptsas_phyinfo *phy_info)
+{
+ if (phy_info->port_details)
+ return phy_info->port_details->port;
+ else
+ return NULL;
+}
+
+static inline void
+mptsas_set_port(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_port *port)
+{
+ if (phy_info->port_details)
+ phy_info->port_details->port = port;
+
+ if (port) {
+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
+ &port->dev, MYIOC_s_FMT "add:", ioc->name));
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "port=%p release=%p\n",
+ ioc->name, port, port->dev.release));
+ }
+}
+
+static inline struct scsi_target *
+mptsas_get_starget(struct mptsas_phyinfo *phy_info)
+{
+ if (phy_info->port_details)
+ return phy_info->port_details->starget;
+ else
+ return NULL;
+}
+
+static inline void
+mptsas_set_starget(struct mptsas_phyinfo *phy_info, struct scsi_target *
+starget)
+{
+ if (phy_info->port_details)
+ phy_info->port_details->starget = starget;
+}
+
+/**
+ * mptsas_add_device_component - add a device to the device info list
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: fw mapped channel
+ * @id: fw mapped id
+ * @sas_address: SAS address of the attached device
+ * @device_info: SAS device info bits from Device Page 0
+ * @slot: enclosure slot number
+ * @enclosure_logical_id: logical id of the enclosure
+ **/
+static void
+mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
+ u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id)
+{
+ struct mptsas_device_info *sas_info, *next;
+ struct scsi_device *sdev;
+ struct scsi_target *starget;
+ struct sas_rphy *rphy;
+
+ /*
+ * Delete all matching devices out of the list
+ */
+ mutex_lock(&ioc->sas_device_info_mutex);
+ list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+ list) {
+ if (!sas_info->is_logical_volume &&
+ (sas_info->sas_address == sas_address ||
+ (sas_info->fw.channel == channel &&
+ sas_info->fw.id == id))) {
+ list_del(&sas_info->list);
+ kfree(sas_info);
+ }
+ }
+
+ sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
+ if (!sas_info)
+ goto out;
+
+ /*
+ * Set Firmware mapping
+ */
+ sas_info->fw.id = id;
+ sas_info->fw.channel = channel;
+
+ sas_info->sas_address = sas_address;
+ sas_info->device_info = device_info;
+ sas_info->slot = slot;
+ sas_info->enclosure_logical_id = enclosure_logical_id;
+ INIT_LIST_HEAD(&sas_info->list);
+ list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
+
+ /*
+ * Set OS mapping
+ */
+ shost_for_each_device(sdev, ioc->sh) {
+ starget = scsi_target(sdev);
+ rphy = dev_to_rphy(starget->dev.parent);
+ if (rphy->identify.sas_address == sas_address) {
+ sas_info->os.id = starget->id;
+ sas_info->os.channel = starget->channel;
+ }
+ }
+
+ out:
+ mutex_unlock(&ioc->sas_device_info_mutex);
+ return;
+}
+
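+/*
+ * The SAS Device Page 0 lookup below addresses the device by its firmware
+ * bus/target mapping: the BUS_TARGET_ID form goes in the upper page-address
+ * bits and (channel << 8) + id in the lower bits, per the MPI config
+ * page-address convention.
+ */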
+/**
+ * mptsas_add_device_component_by_fw - add a device by its fw mapping
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: fw mapped channel
+ * @id: fw mapped id
+ *
+ **/
+static void
+mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+ struct mptsas_devinfo sas_device;
+ struct mptsas_enclosure enclosure_info;
+ int rc;
+
+ rc = mptsas_sas_device_pg0(ioc, &sas_device,
+ (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ (channel << 8) + id);
+ if (rc)
+ return;
+
+ memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
+ mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
+ (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
+ MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
+ sas_device.handle_enclosure);
+
+ mptsas_add_device_component(ioc, sas_device.channel,
+ sas_device.id, sas_device.sas_address, sas_device.device_info,
+ sas_device.slot, enclosure_info.enclosure_logical_id);
+}
+
+/**
+ * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to the list
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @starget: SCSI target of the RAID volume
+ *
+ **/
+static void
+mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
+ struct scsi_target *starget)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ dma_addr_t dma_handle;
+ pRaidVolumePage0_t buffer = NULL;
+ int i;
+ RaidPhysDiskPage0_t phys_disk;
+ struct mptsas_device_info *sas_info, *next;
+
+ memset(&cfg, 0 , sizeof(CONFIGPARMS));
+ memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+ hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
+ /* assumption that all volumes on channel = 0 */
+ cfg.pageAddr = starget->id;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ if (!hdr.PageLength)
+ goto out;
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+ &dma_handle);
+
+ if (!buffer)
+ goto out;
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ if (!buffer->NumPhysDisks)
+ goto out;
+
+ /*
+ * Adding entry for hidden components
+ */
+ for (i = 0; i < buffer->NumPhysDisks; i++) {
+
+ if (mpt_raid_phys_disk_pg0(ioc,
+ buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
+ continue;
+
+ mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus,
+ phys_disk.PhysDiskID);
+
+ mutex_lock(&ioc->sas_device_info_mutex);
+ list_for_each_entry(sas_info, &ioc->sas_device_info_list,
+ list) {
+ if (!sas_info->is_logical_volume &&
+ (sas_info->fw.channel == phys_disk.PhysDiskBus &&
+ sas_info->fw.id == phys_disk.PhysDiskID)) {
+ sas_info->is_hidden_raid_component = 1;
+ sas_info->volume_id = starget->id;
+ }
+ }
+ mutex_unlock(&ioc->sas_device_info_mutex);
+
+ }
+
+ /*
+ * Delete all matching devices out of the list
+ */
+ mutex_lock(&ioc->sas_device_info_mutex);
+ list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+ list) {
+ if (sas_info->is_logical_volume && sas_info->fw.id ==
+ starget->id) {
+ list_del(&sas_info->list);
+ kfree(sas_info);
+ }
+ }
+
+ sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
+ if (sas_info) {
+ sas_info->fw.id = starget->id;
+ sas_info->os.id = starget->id;
+ sas_info->os.channel = starget->channel;
+ sas_info->is_logical_volume = 1;
+ INIT_LIST_HEAD(&sas_info->list);
+ list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
+ }
+ mutex_unlock(&ioc->sas_device_info_mutex);
+
+ out:
+ if (buffer)
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+ dma_handle);
+}
+
+/**
+ * mptsas_add_device_component_starget - add a device by its SCSI target
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @starget: SCSI target of the attached end device
+ *
+ **/
+static void
+mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
+ struct scsi_target *starget)
+{
+ VirtTarget *vtarget;
+ struct sas_rphy *rphy;
+ struct mptsas_phyinfo *phy_info = NULL;
+ struct mptsas_enclosure enclosure_info;
+
+ rphy = dev_to_rphy(starget->dev.parent);
+ vtarget = starget->hostdata;
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (!phy_info)
+ return;
+
+ memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
+ mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
+ (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
+ MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
+ phy_info->attached.handle_enclosure);
+
+ mptsas_add_device_component(ioc, phy_info->attached.channel,
+ phy_info->attached.id, phy_info->attached.sas_address,
+ phy_info->attached.device_info,
+ phy_info->attached.slot, enclosure_info.enclosure_logical_id);
+}
+
+/**
+ * mptsas_del_device_component_by_os - Once a device has been removed, mark its list entry as cached
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: os mapped channel
+ * @id: os mapped id
+ *
+ **/
+static void
+mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+ struct mptsas_device_info *sas_info, *next;
+
+ /*
+ * Set is_cached flag
+ */
+ list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+ list) {
+ if (sas_info->os.channel == channel && sas_info->os.id == id)
+ sas_info->is_cached = 1;
+ }
+}
+
+/**
+ * mptsas_del_device_components - empty the device info list
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+static void
+mptsas_del_device_components(MPT_ADAPTER *ioc)
+{
+ struct mptsas_device_info *sas_info, *next;
+
+ mutex_lock(&ioc->sas_device_info_mutex);
+ list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+ list) {
+ list_del(&sas_info->list);
+ kfree(sas_info);
+ }
+ mutex_unlock(&ioc->sas_device_info_mutex);
+}
+
+
+/*
+ * mptsas_setup_wide_ports
+ *
+ * Updates narrow/wide port configuration in the sas_topology: first,
+ * phys whose attached device is gone are dropped from their port, then
+ * phys sharing the same attached SAS address are grouped under one
+ * mptsas_portinfo_details, with phy_bitmask recording the member phys.
+ */
+static void
+mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
+{
+ struct mptsas_portinfo_details * port_details;
+ struct mptsas_phyinfo *phy_info, *phy_info_cmp;
+ u64 sas_address;
+ int i, j;
+
+ mutex_lock(&ioc->sas_topology_mutex);
+
+ phy_info = port_info->phy_info;
+ for (i = 0 ; i < port_info->num_phys ; i++, phy_info++) {
+ if (phy_info->attached.handle)
+ continue;
+ port_details = phy_info->port_details;
+ if (!port_details)
+ continue;
+ if (port_details->num_phys < 2)
+ continue;
+ /*
+ * Removing a phy from a port, letting the last
+ * phy be removed by firmware events.
+ */
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: [%p]: deleting phy = %d\n",
+ ioc->name, __func__, port_details, i));
+ port_details->num_phys--;
+ port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
+ memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
+ if (phy_info->phy) {
+ devtprintk(ioc, dev_printk(KERN_DEBUG,
+ &phy_info->phy->dev, MYIOC_s_FMT
+ "delete phy %d, phy-obj (0x%p)\n", ioc->name,
+ phy_info->phy_id, phy_info->phy));
+ sas_port_delete_phy(port_details->port, phy_info->phy);
+ }
+ phy_info->port_details = NULL;
+ }
+
+ /*
+ * Populate and refresh the tree
+ */
+ phy_info = port_info->phy_info;
+ for (i = 0 ; i < port_info->num_phys ; i++, phy_info++) {
+ sas_address = phy_info->attached.sas_address;
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "phy_id=%d sas_address=0x%018llX\n",
+ ioc->name, i, (unsigned long long)sas_address));
+ if (!sas_address)
+ continue;
+ port_details = phy_info->port_details;
+ /*
+ * Forming a port
+ */
+ if (!port_details) {
+ port_details = kzalloc(sizeof(struct
+ mptsas_portinfo_details), GFP_KERNEL);
+ if (!port_details)
+ goto out;
+ port_details->num_phys = 1;
+ port_details->port_info = port_info;
+ if (phy_info->phy_id < 64 )
+ port_details->phy_bitmask |=
+ (1 << phy_info->phy_id);
+ phy_info->sas_port_add_phy=1;
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tForming port\n\t\t"
+ "phy_id=%d sas_address=0x%018llX\n",
+ ioc->name, i, (unsigned long long)sas_address));
+ phy_info->port_details = port_details;
+ }
+
+ if (i == port_info->num_phys - 1)
+ continue;
+ phy_info_cmp = &port_info->phy_info[i + 1];
+ for (j = i + 1 ; j < port_info->num_phys ; j++,
+ phy_info_cmp++) {
+ if (!phy_info_cmp->attached.sas_address)
+ continue;
+ if (sas_address != phy_info_cmp->attached.sas_address)
+ continue;
+ if (phy_info_cmp->port_details == port_details )
+ continue;
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "\t\tphy_id=%d sas_address=0x%018llX\n",
+ ioc->name, j, (unsigned long long)
+ phy_info_cmp->attached.sas_address));
+ if (phy_info_cmp->port_details) {
+ port_details->rphy =
+ mptsas_get_rphy(phy_info_cmp);
+ port_details->port =
+ mptsas_get_port(phy_info_cmp);
+ port_details->starget =
+ mptsas_get_starget(phy_info_cmp);
+ port_details->num_phys =
+ phy_info_cmp->port_details->num_phys;
+ if (!phy_info_cmp->port_details->num_phys)
+ kfree(phy_info_cmp->port_details);
+ } else
+ phy_info_cmp->sas_port_add_phy=1;
+ /*
+ * Adding a phy to a port
+ */
+ phy_info_cmp->port_details = port_details;
+ if (phy_info_cmp->phy_id < 64 )
+ port_details->phy_bitmask |=
+ (1 << phy_info_cmp->phy_id);
+ port_details->num_phys++;
+ }
+ }
+
+ out:
+
+ for (i = 0; i < port_info->num_phys; i++) {
+ port_details = port_info->phy_info[i].port_details;
+ if (!port_details)
+ continue;
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: [%p]: phy_id=%02d num_phys=%02d "
+ "bitmask=0x%016llX\n", ioc->name, __func__,
+ port_details, i, port_details->num_phys,
+ (unsigned long long)port_details->phy_bitmask));
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\t\tport = %p rphy=%p\n",
+ ioc->name, port_details->port, port_details->rphy));
+ }
+ dsaswideprintk(ioc, printk("\n"));
+ mutex_unlock(&ioc->sas_topology_mutex);
+}
+
+/**
+ * mptsas_find_vtarget - find the VirtTarget for a fw channel/id
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: fw mapped channel
+ * @id: fw mapped id
+ *
+ **/
+static VirtTarget *
+mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+ struct scsi_device *sdev;
+ VirtDevice *vdevice;
+ VirtTarget *vtarget = NULL;
+
+ shost_for_each_device(sdev, ioc->sh) {
+ vdevice = sdev->hostdata;
+ if ((vdevice == NULL) ||
+ (vdevice->vtarget == NULL))
+ continue;
+ if ((vdevice->vtarget->tflags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT ||
+ vdevice->vtarget->raidVolume))
+ continue;
+ if (vdevice->vtarget->id == id &&
+ vdevice->vtarget->channel == channel)
+ vtarget = vdevice->vtarget;
+ }
+ return vtarget;
+}
+
+static void
+mptsas_queue_device_delete(MPT_ADAPTER *ioc,
+ MpiEventDataSasDeviceStatusChange_t *sas_event_data)
+{
+ struct fw_event_work *fw_event;
+ int sz;
+
+ sz = offsetof(struct fw_event_work, event_data) +
+ sizeof(MpiEventDataSasDeviceStatusChange_t);
+ fw_event = kzalloc(sz, GFP_ATOMIC);
+ if (!fw_event) {
+ printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
+ ioc->name, __func__, __LINE__);
+ return;
+ }
+ memcpy(fw_event->event_data, sas_event_data,
+ sizeof(MpiEventDataSasDeviceStatusChange_t));
+ fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE;
+ fw_event->ioc = ioc;
+ mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
+}
+
+static void
+mptsas_queue_rescan(MPT_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+ int sz;
+
+ sz = offsetof(struct fw_event_work, event_data);
+ fw_event = kzalloc(sz, GFP_ATOMIC);
+ if (!fw_event) {
+ printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
+ ioc->name, __func__, __LINE__);
+ return;
+ }
+ fw_event->event = -1;
+ fw_event->ioc = ioc;
+ mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
+}
+
+
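+/*
+ * Target reset flow: mptsas_target_reset_queue() queues a reset request on
+ * hd->target_reset_list and tries to issue it via mptsas_target_reset();
+ * mptsas_taskmgmt_complete() retries an unissued reset, otherwise removes
+ * the entry, queues the device-delete work and kicks off the next reset.
+ */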
+/**
+ * mptsas_target_reset - issue TARGET_RESET to an end device
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: fw mapped channel
+ * @id: fw mapped id
+ *
+ * Issues TARGET_RESET to the end device using the handshaking method.
+ *
+ * Returns 1 on success, 0 on failure.
+ *
+ **/
+static int
+mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+ MPT_FRAME_HDR *mf;
+ SCSITaskMgmt_t *pScsiTm;
+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
+ return 0;
+
+
+ mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
+ if (mf == NULL) {
+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "%s, no msg frames @%d!!\n", ioc->name,
+ __func__, __LINE__));
+ goto out_fail;
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
+ ioc->name, mf));
+
+ /* Format the Request
+ */
+ pScsiTm = (SCSITaskMgmt_t *) mf;
+ memset (pScsiTm, 0, sizeof(SCSITaskMgmt_t));
+ pScsiTm->TargetID = id;
+ pScsiTm->Bus = channel;
+ pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
+ pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
+
+ DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n",
+ ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id));
+
+ mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
+
+ return 1;
+
+ out_fail:
+
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ return 0;
+}
+
+static void
+mptsas_block_io_sdev(struct scsi_device *sdev, void *data)
+{
+ scsi_device_set_state(sdev, SDEV_BLOCK);
+}
+
+static void
+mptsas_block_io_starget(struct scsi_target *starget)
+{
+ if (starget)
+ starget_for_each_device(starget, NULL, mptsas_block_io_sdev);
+}
+
+/**
+ * mptsas_target_reset_queue - queue a TARGET_RESET for a not-responding device
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sas_event_data: SAS device status change event data
+ *
+ * Called after a firmware NOT_RESPONDING device status change event:
+ * blocks I/O to the target, adds the request to the target_reset_list
+ * and tries to issue the TARGET_RESET right away; if task management is
+ * already in progress, the reset is issued later from completion context.
+ *
+ **/
+static void
+mptsas_target_reset_queue(MPT_ADAPTER *ioc,
+ EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
+{
+ MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+ VirtTarget *vtarget = NULL;
+ struct mptsas_target_reset_event *target_reset_list;
+ u8 id, channel;
+
+ id = sas_event_data->TargetID;
+ channel = sas_event_data->Bus;
+
+ vtarget = mptsas_find_vtarget(ioc, channel, id);
+ if (vtarget) {
+ mptsas_block_io_starget(vtarget->starget);
+ vtarget->deleted = 1; /* block IO */
+ }
+
+ target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
+ GFP_ATOMIC);
+ if (!target_reset_list) {
+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "%s, failed to allocate mem @%d..!!\n",
+ ioc->name, __func__, __LINE__));
+ return;
+ }
+
+ memcpy(&target_reset_list->sas_event_data, sas_event_data,
+ sizeof(*sas_event_data));
+ list_add_tail(&target_reset_list->list, &hd->target_reset_list);
+
+ target_reset_list->time_count = jiffies;
+
+ if (mptsas_target_reset(ioc, channel, id)) {
+ target_reset_list->target_reset_issued = 1;
+ }
+}
+
+/**
+ * mptsas_schedule_target_reset - send the next pending target reset
+ * @iocp: per adapter object
+ *
+ * Picks the next scheduled target reset off the list and tries to issue
+ * it. Called from the completion context of any task management command.
+ */
+
+void
+mptsas_schedule_target_reset(void *iocp)
+{
+ MPT_ADAPTER *ioc = (MPT_ADAPTER *)(iocp);
+ MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+ struct list_head *head = &hd->target_reset_list;
+ struct mptsas_target_reset_event *target_reset_list;
+ u8 id, channel;
+ /*
+ * issue target reset to next device in the queue
+ */
+
+ head = &hd->target_reset_list;
+ if (list_empty(head))
+ return;
+
+ target_reset_list = list_entry(head->next,
+ struct mptsas_target_reset_event, list);
+
+ id = target_reset_list->sas_event_data.TargetID;
+ channel = target_reset_list->sas_event_data.Bus;
+ target_reset_list->time_count = jiffies;
+
+ if (mptsas_target_reset(ioc, channel, id))
+ target_reset_list->target_reset_issued = 1;
+ return;
+}
+
+
+/**
+ * mptsas_taskmgmt_complete - complete SAS task management function
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @mf: request message frame
+ * @mr: reply message frame
+ *
+ * Completion for TARGET_RESET after a NOT_RESPONDING event: queues the
+ * work that finishes removing the device from the upper layers, then
+ * sends the next TARGET_RESET in the queue.
+ **/
+static int
+mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+{
+ MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+ struct list_head *head = &hd->target_reset_list;
+ u8 id, channel;
+ struct mptsas_target_reset_event *target_reset_list;
+ SCSITaskMgmtReply_t *pScsiTmReply;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: "
+ "(mf = %p, mr = %p)\n", ioc->name, mf, mr));
+
+ pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
+ if (pScsiTmReply) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
+ "\ttask_type = 0x%02X, iocstatus = 0x%04X "
+ "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
+ "term_cmnds = %d\n", ioc->name,
+ pScsiTmReply->Bus, pScsiTmReply->TargetID,
+ pScsiTmReply->TaskType,
+ le16_to_cpu(pScsiTmReply->IOCStatus),
+ le32_to_cpu(pScsiTmReply->IOCLogInfo),
+ pScsiTmReply->ResponseCode,
+ le32_to_cpu(pScsiTmReply->TerminationCount)));
+
+ if (pScsiTmReply->ResponseCode)
+ mptscsih_taskmgmt_response_code(ioc,
+ pScsiTmReply->ResponseCode);
+ }
+
+ if (pScsiTmReply && (pScsiTmReply->TaskType ==
+ MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
+ MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) {
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->taskmgmt_cmds.reply, mr,
+ min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->taskmgmt_cmds.done);
+ return 1;
+ }
+ return 0;
+ }
+
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+
+ if (list_empty(head))
+ return 1;
+
+ target_reset_list = list_entry(head->next,
+ struct mptsas_target_reset_event, list);
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt: completed (%d seconds)\n",
+ ioc->name, jiffies_to_msecs(jiffies -
+ target_reset_list->time_count)/1000));
+
+ id = pScsiTmReply->TargetID;
+ channel = pScsiTmReply->Bus;
+ target_reset_list->time_count = jiffies;
+
+ /*
+ * retry target reset
+ */
+ if (!target_reset_list->target_reset_issued) {
+ if (mptsas_target_reset(ioc, channel, id))
+ target_reset_list->target_reset_issued = 1;
+ return 1;
+ }
+
+ /*
+ * enable work queue to remove device from upper layers
+ */
+ list_del(&target_reset_list->list);
+ if (!ioc->fw_events_off)
+ mptsas_queue_device_delete(ioc,
+ &target_reset_list->sas_event_data);
+
+
+ ioc->schedule_target_reset(ioc);
+
+ return 1;
+}
+
+/**
+ * mptsas_ioc_reset - SAS-specific IOC reset callout
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @reset_phase: MPT_IOC_SETUP_RESET, MPT_IOC_PRE_RESET or MPT_IOC_POST_RESET
+ *
+ **/
+static int
+mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+ MPT_SCSI_HOST *hd;
+ int rc;
+
+ rc = mptscsih_ioc_reset(ioc, reset_phase);
+ if ((ioc->bus_type != SAS) || (!rc))
+ return rc;
+
+ hd = shost_priv(ioc->sh);
+ if (!hd->ioc)
+ goto out;
+
+ switch (reset_phase) {
+ case MPT_IOC_SETUP_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+ mptsas_fw_event_off(ioc);
+ break;
+ case MPT_IOC_PRE_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT_IOC_POST_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET;
+ complete(&ioc->sas_mgmt.done);
+ }
+ mptsas_cleanup_fw_event_q(ioc);
+ mptsas_queue_rescan(ioc);
+ break;
+ default:
+ break;
+ }
+
+ out:
+ return rc;
+}
+
+
+/**
+ * enum device_state - result of probing a device with TEST UNIT READY
+ * @DEVICE_RETRY: need to retry the TUR
+ * @DEVICE_ERROR: TUR returned an error, don't add the device
+ * @DEVICE_READY: device can be added
+ *
+ */
+enum device_state{
+ DEVICE_RETRY,
+ DEVICE_ERROR,
+ DEVICE_READY,
+};
+
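+/*
+ * The config page readers below all follow the same two-step MPT pattern:
+ * a PAGE_HEADER action to learn the page length, then a READ_CURRENT action
+ * into a DMA-coherent buffer of ExtPageLength * 4 (or PageLength * 4) bytes.
+ */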
+static int
+mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasEnclosurePage0_t *buffer;
+ dma_addr_t dma_handle;
+ int error;
+ __le64 le_identifier;
+
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.PageVersion = MPI_SASENCLOSURE0_PAGEVERSION;
+ hdr.PageNumber = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_ENCLOSURE;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = form + form_specific;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ /* save config data */
+ memcpy(&le_identifier, &buffer->EnclosureLogicalID, sizeof(__le64));
+ enclosure->enclosure_logical_id = le64_to_cpu(le_identifier);
+ enclosure->enclosure_handle = le16_to_cpu(buffer->EnclosureHandle);
+ enclosure->flags = le16_to_cpu(buffer->Flags);
+ enclosure->num_slot = le16_to_cpu(buffer->NumSlots);
+ enclosure->start_slot = le16_to_cpu(buffer->StartSlot);
+ enclosure->start_id = buffer->StartTargetID;
+ enclosure->start_channel = buffer->StartBus;
+ enclosure->sep_id = buffer->SEPTargetID;
+ enclosure->sep_channel = buffer->SEPBus;
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+/**
+ * mptsas_add_end_device - report a new end device to sas transport layer
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @phy_info: describes attached device
+ *
+ * Returns 0 on success, a non-zero failure code otherwise.
+ *
+ **/
+static int
+mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
+{
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct sas_identify identify;
+ char *ds = NULL;
+ u8 fw_id;
+
+ if (!phy_info) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: exit at line=%d\n", ioc->name,
+ __func__, __LINE__));
+ return 1;
+ }
+
+ fw_id = phy_info->attached.id;
+
+ if (mptsas_get_rphy(phy_info)) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return 2;
+ }
+
+ port = mptsas_get_port(phy_info);
+ if (!port) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return 3;
+ }
+
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_SSP_TARGET)
+ ds = "ssp";
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_STP_TARGET)
+ ds = "stp";
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "sata";
+
+ printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d,"
+ " phy %d, sas_addr 0x%llx\n", ioc->name, ds,
+ phy_info->attached.channel, phy_info->attached.id,
+ phy_info->attached.phy_id, (unsigned long long)
+ phy_info->attached.sas_address);
+
+ mptsas_parse_device_info(&identify, &phy_info->attached);
+ rphy = sas_end_device_alloc(port);
+ if (!rphy) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return 5; /* non-fatal: an rphy can be added later */
+ }
+
+ rphy->identify = identify;
+ if (sas_rphy_add(rphy)) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ sas_rphy_free(rphy);
+ return 6;
+ }
+ mptsas_set_rphy(ioc, phy_info, rphy);
+ return 0;
+}
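+
+/*
+ * The distinct non-zero return codes above appear to exist only so the
+ * dfailprintk line numbers identify which step failed; callers presumably
+ * treat any non-zero value as failure.
+ */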
+
+/**
+ * mptsas_del_end_device - report a deleted end device to sas transport layer
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @phy_info: describes attached device
+ *
+ **/
+static void
+mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
+{
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct mptsas_portinfo *port_info;
+ struct mptsas_phyinfo *phy_info_parent;
+ int i;
+ char *ds = NULL;
+ u8 fw_id;
+ u64 sas_address;
+
+ if (!phy_info)
+ return;
+
+ fw_id = phy_info->attached.id;
+ sas_address = phy_info->attached.sas_address;
+
+ if (!phy_info->port_details) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return;
+ }
+ rphy = mptsas_get_rphy(phy_info);
+ if (!rphy) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return;
+ }
+
+ if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR
+ || phy_info->attached.device_info
+ & MPI_SAS_DEVICE_INFO_SMP_INITIATOR
+ || phy_info->attached.device_info
+ & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
+ ds = "initiator";
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_SSP_TARGET)
+ ds = "ssp";
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_STP_TARGET)
+ ds = "stp";
+ if (phy_info->attached.device_info &
+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "sata";
+
+ dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT
+ "removing %s device: fw_channel %d, fw_id %d, phy %d,"
+ "sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel,
+ phy_info->attached.id, phy_info->attached.phy_id,
+ (unsigned long long) sas_address);
+
+ port = mptsas_get_port(phy_info);
+ if (!port) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, fw_id, __LINE__));
+ return;
+ }
+ port_info = phy_info->portinfo;
+ phy_info_parent = port_info->phy_info;
+ for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) {
+ if (!phy_info_parent->phy)
+ continue;
+ if (phy_info_parent->attached.sas_address !=
+ sas_address)
+ continue;
+ dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev,
+ MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n",
+ ioc->name, phy_info_parent->phy_id,
+ phy_info_parent->phy);
+ sas_port_delete_phy(port, phy_info_parent->phy);
+ }
+
+ dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
+ "delete port %d, sas_addr (0x%llx)\n", ioc->name,
+ port->port_identifier, (unsigned long long)sas_address);
+ sas_port_delete(port);
+ mptsas_set_port(ioc, phy_info, NULL);
+ mptsas_port_delete(ioc, phy_info->port_details);
+}
+
+struct mptsas_phyinfo *
+mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
+ struct mptsas_devinfo *sas_device)
+{
+ struct mptsas_phyinfo *phy_info;
+ struct mptsas_portinfo *port_info;
+ int i;
+
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ sas_device->sas_address);
+ if (!phy_info)
+ goto out;
+ port_info = phy_info->portinfo;
+ if (!port_info)
+ goto out;
+ mutex_lock(&ioc->sas_topology_mutex);
+ for (i = 0; i < port_info->num_phys; i++) {
+ if (port_info->phy_info[i].attached.sas_address !=
+ sas_device->sas_address)
+ continue;
+ port_info->phy_info[i].attached.channel = sas_device->channel;
+ port_info->phy_info[i].attached.id = sas_device->id;
+ port_info->phy_info[i].attached.sas_address =
+ sas_device->sas_address;
+ port_info->phy_info[i].attached.handle = sas_device->handle;
+ port_info->phy_info[i].attached.handle_parent =
+ sas_device->handle_parent;
+ port_info->phy_info[i].attached.handle_enclosure =
+ sas_device->handle_enclosure;
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+ out:
+ return phy_info;
+}
+
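+/*
+ * A fw_event with event == -1 is an internal sentinel queued by
+ * mptsas_queue_rescan() from the post-reset path; it triggers a full
+ * topology rescan rather than dispatching a firmware event handler.
+ */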
+/**
+ * mptsas_firmware_event_work - work thread for processing fw events
+ * @work: work queue payload containing info describing the event
+ * Context: user
+ *
+ */
+static void
+mptsas_firmware_event_work(struct work_struct *work)
+{
+ struct fw_event_work *fw_event =
+ container_of(work, struct fw_event_work, work.work);
+ MPT_ADAPTER *ioc = fw_event->ioc;
+
+ /* special rescan topology handling */
+ if (fw_event->event == -1) {
+ if (ioc->in_rescan) {
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: rescan ignored as it is in progress\n",
+ ioc->name, __func__));
+ return;
+ }
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after "
+ "reset\n", ioc->name, __func__));
+ ioc->in_rescan = 1;
+ mptsas_not_responding_devices(ioc);
+ mptsas_scan_sas_topology(ioc);
+ ioc->in_rescan = 0;
+ mptsas_free_fw_event(ioc, fw_event);
+ mptsas_fw_event_on(ioc);
+ return;
+ }
+
+	/* event handling is turned off during host reset */
+ if (ioc->fw_events_off) {
+ mptsas_free_fw_event(ioc, fw_event);
+ return;
+ }
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), "
+ "event = (0x%02x)\n", ioc->name, __func__, fw_event,
+ (fw_event->event & 0xFF)));
+
+ switch (fw_event->event) {
+ case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ mptsas_send_sas_event(fw_event);
+ break;
+ case MPI_EVENT_INTEGRATED_RAID:
+ mptsas_send_raid_event(fw_event);
+ break;
+ case MPI_EVENT_IR2:
+ mptsas_send_ir2_event(fw_event);
+ break;
+ case MPI_EVENT_PERSISTENT_TABLE_FULL:
+ mptbase_sas_persist_operation(ioc,
+ MPI_SAS_OP_CLEAR_NOT_PRESENT);
+ mptsas_free_fw_event(ioc, fw_event);
+ break;
+ case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
+ mptsas_broadcast_primative_work(fw_event);
+ break;
+ case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
+ mptsas_send_expander_event(fw_event);
+ break;
+ case MPI_EVENT_SAS_PHY_LINK_STATUS:
+ mptsas_send_link_status_event(fw_event);
+ break;
+ case MPI_EVENT_QUEUE_FULL:
+ mptsas_handle_queue_full_event(fw_event);
+ break;
+ }
+}
+
+
+
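+/*
+ * scsi_host_template callbacks. RAID volumes live on MPTSAS_RAID_CHANNEL,
+ * a virtual channel beyond the last physical port, and are special-cased
+ * in the slave/target hooks below.
+ */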
+static int
+mptsas_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *host = sdev->host;
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+ VirtDevice *vdevice = sdev->hostdata;
+
+ if (vdevice->vtarget->deleted) {
+ sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n");
+ vdevice->vtarget->deleted = 0;
+ }
+
+	/*
+	 * RAID volumes are placed beyond the last expected port;
+	 * skip reading the SAS port mode page in that case.
+	 */
+ if (sdev->channel == MPTSAS_RAID_CHANNEL) {
+ mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev));
+ goto out;
+ }
+
+ sas_read_port_mode_page(sdev);
+
+ mptsas_add_device_component_starget(ioc, scsi_target(sdev));
+
+ out:
+ return mptscsih_slave_configure(sdev);
+}
+
+static int
+mptsas_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *host = dev_to_shost(&starget->dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ VirtTarget *vtarget;
+ u8 id, channel;
+ struct sas_rphy *rphy;
+ struct mptsas_portinfo *p;
+ int i;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
+ if (!vtarget)
+ return -ENOMEM;
+
+ vtarget->starget = starget;
+ vtarget->ioc_id = ioc->id;
+ vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
+ id = starget->id;
+ channel = 0;
+
+	/*
+	 * RAID volumes are placed beyond the last expected port.
+	 */
+ if (starget->channel == MPTSAS_RAID_CHANNEL) {
+ if (!ioc->raid_data.pIocPg2) {
+ kfree(vtarget);
+ return -ENXIO;
+ }
+ for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
+ if (id == ioc->raid_data.pIocPg2->
+ RaidVolume[i].VolumeID) {
+ channel = ioc->raid_data.pIocPg2->
+ RaidVolume[i].VolumeBus;
+ }
+ }
+ vtarget->raidVolume = 1;
+ goto out;
+ }
+
+ rphy = dev_to_rphy(starget->dev.parent);
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_for_each_entry(p, &ioc->sas_topology, list) {
+ for (i = 0; i < p->num_phys; i++) {
+ if (p->phy_info[i].attached.sas_address !=
+ rphy->identify.sas_address)
+ continue;
+ id = p->phy_info[i].attached.id;
+ channel = p->phy_info[i].attached.channel;
+ mptsas_set_starget(&p->phy_info[i], starget);
+
+ /*
+ * Exposing hidden raid components
+ */
+ if (mptscsih_is_phys_disk(ioc, channel, id)) {
+ id = mptscsih_raid_id_to_num(ioc,
+ channel, id);
+ vtarget->tflags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ p->phy_info[i].attached.phys_disk_num = id;
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+ goto out;
+ }
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+
+ kfree(vtarget);
+ return -ENXIO;
+
+ out:
+ vtarget->id = id;
+ vtarget->channel = channel;
+ starget->hostdata = vtarget;
+ return 0;
+}
+
+static void
+mptsas_target_destroy(struct scsi_target *starget)
+{
+ struct Scsi_Host *host = dev_to_shost(&starget->dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ struct sas_rphy *rphy;
+ struct mptsas_portinfo *p;
+ int i;
+ MPT_ADAPTER *ioc = hd->ioc;
+ VirtTarget *vtarget;
+
+ if (!starget->hostdata)
+ return;
+
+ vtarget = starget->hostdata;
+
+ mptsas_del_device_component_by_os(ioc, starget->channel,
+ starget->id);
+
+
+ if (starget->channel == MPTSAS_RAID_CHANNEL)
+ goto out;
+
+ rphy = dev_to_rphy(starget->dev.parent);
+ list_for_each_entry(p, &ioc->sas_topology, list) {
+ for (i = 0; i < p->num_phys; i++) {
+ if (p->phy_info[i].attached.sas_address !=
+ rphy->identify.sas_address)
+ continue;
+
+ starget_printk(KERN_INFO, starget, MYIOC_s_FMT
+ "delete device: fw_channel %d, fw_id %d, phy %d, "
+ "sas_addr 0x%llx\n", ioc->name,
+ p->phy_info[i].attached.channel,
+ p->phy_info[i].attached.id,
+ p->phy_info[i].attached.phy_id, (unsigned long long)
+ p->phy_info[i].attached.sas_address);
+
+ mptsas_set_starget(&p->phy_info[i], NULL);
+ }
+ }
+
+ out:
+ vtarget->starget = NULL;
+ kfree(starget->hostdata);
+ starget->hostdata = NULL;
+}
+
+
+static int
+mptsas_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *host = sdev->host;
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ struct sas_rphy *rphy;
+ struct mptsas_portinfo *p;
+ VirtDevice *vdevice;
+ struct scsi_target *starget;
+ int i;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
+ if (!vdevice) {
+ printk(MYIOC_s_ERR_FMT "slave_alloc kzalloc(%zd) FAILED!\n",
+ ioc->name, sizeof(VirtDevice));
+ return -ENOMEM;
+ }
+ starget = scsi_target(sdev);
+ vdevice->vtarget = starget->hostdata;
+
+ if (sdev->channel == MPTSAS_RAID_CHANNEL)
+ goto out;
+
+ rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_for_each_entry(p, &ioc->sas_topology, list) {
+ for (i = 0; i < p->num_phys; i++) {
+ if (p->phy_info[i].attached.sas_address !=
+ rphy->identify.sas_address)
+ continue;
+ vdevice->lun = sdev->lun;
+ /*
+ * Exposing hidden raid components
+ */
+ if (mptscsih_is_phys_disk(ioc,
+ p->phy_info[i].attached.channel,
+ p->phy_info[i].attached.id))
+ sdev->no_uld_attach = 1;
+ mutex_unlock(&ioc->sas_topology_mutex);
+ goto out;
+ }
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+
+ kfree(vdevice);
+ return -ENXIO;
+
+ out:
+ vdevice->vtarget->num_luns++;
+ sdev->hostdata = vdevice;
+ return 0;
+}
+
+static int
+mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+ MPT_SCSI_HOST *hd;
+ MPT_ADAPTER *ioc;
+ VirtDevice *vdevice = SCpnt->device->hostdata;
+
+ if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ return 0;
+ }
+
+ hd = shost_priv(SCpnt->device->host);
+ ioc = hd->ioc;
+
+ if (ioc->sas_discovery_quiesce_io)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ if (ioc->debug_level & MPT_DEBUG_SCSI)
+ scsi_print_command(SCpnt);
+
+ return mptscsih_qcmd(SCpnt,done);
+}
+
+static DEF_SCSI_QCMD(mptsas_qcmd)
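+/*
+ * DEF_SCSI_QCMD() generates mptsas_qcmd(), which takes the host lock and
+ * then calls mptsas_qcmd_lck(), preserving the legacy locked queuecommand
+ * semantics referenced in the host template below.
+ */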
+
+/**
+ * mptsas_eh_timed_out - reset the scsi_cmnd timeout if the device
+ *	in question is currently in the device removal delay.
+ * @sc: scsi command that the midlayer is about to time out
+ *
+ **/
+static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc)
+{
+ MPT_SCSI_HOST *hd;
+ MPT_ADAPTER *ioc;
+ VirtDevice *vdevice;
+ enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
+
+ hd = shost_priv(sc->device->host);
+ if (hd == NULL) {
+ printk(KERN_ERR MYNAM ": %s: Can't locate host! (sc=%p)\n",
+ __func__, sc);
+ goto done;
+ }
+
+ ioc = hd->ioc;
+ if (ioc->bus_type != SAS) {
+ printk(KERN_ERR MYNAM ": %s: Wrong bus type (sc=%p)\n",
+ __func__, sc);
+ goto done;
+ }
+
+ vdevice = sc->device->hostdata;
+ if (vdevice && vdevice->vtarget && (vdevice->vtarget->inDMD
+ || vdevice->vtarget->deleted)) {
+ dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: target removed "
+ "or in device removal delay (sc=%p)\n",
+ ioc->name, __func__, sc));
+ rc = BLK_EH_RESET_TIMER;
+ goto done;
+ }
+
+done:
+ return rc;
+}
+
+
+static struct scsi_host_template mptsas_driver_template = {
+ .module = THIS_MODULE,
+ .proc_name = "mptsas",
+ .proc_info = mptscsih_proc_info,
+ .name = "MPT SAS Host",
+ .info = mptscsih_info,
+ .queuecommand = mptsas_qcmd,
+ .target_alloc = mptsas_target_alloc,
+ .slave_alloc = mptsas_slave_alloc,
+ .slave_configure = mptsas_slave_configure,
+ .target_destroy = mptsas_target_destroy,
+ .slave_destroy = mptscsih_slave_destroy,
+ .change_queue_depth = mptscsih_change_queue_depth,
+ .eh_abort_handler = mptscsih_abort,
+ .eh_device_reset_handler = mptscsih_dev_reset,
+ .eh_host_reset_handler = mptscsih_host_reset,
+ .bios_param = mptscsih_bios_param,
+ .can_queue = MPT_SAS_CAN_QUEUE,
+ .this_id = -1,
+ .sg_tablesize = MPT_SCSI_SG_DEPTH,
+ .max_sectors = 8192,
+ .cmd_per_lun = 7,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = mptscsih_host_attrs,
+};
+
+static int mptsas_get_linkerrors(struct sas_phy *phy)
+{
+ MPT_ADAPTER *ioc = phy_to_ioc(phy);
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasPhyPage1_t *buffer;
+ dma_addr_t dma_handle;
+ int error;
+
+ /* FIXME: only have link errors on local phys */
+ if (!scsi_is_sas_phy_local(phy))
+ return -EINVAL;
+
+ hdr.PageVersion = MPI_SASPHY1_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 1 /* page number 1*/;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = phy->identify.phy_identifier;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ return error;
+ if (!hdr.ExtPageLength)
+ return -ENXIO;
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer)
+ return -ENOMEM;
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ mptsas_print_phy_pg1(ioc, buffer);
+
+ phy->invalid_dword_count = le32_to_cpu(buffer->InvalidDwordCount);
+ phy->running_disparity_error_count =
+ le32_to_cpu(buffer->RunningDisparityErrorCount);
+ phy->loss_of_dword_sync_count =
+ le32_to_cpu(buffer->LossDwordSynchCount);
+ phy->phy_reset_problem_count =
+ le32_to_cpu(buffer->PhyResetProblemCount);
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ return error;
+}
+
+static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+ MPT_FRAME_HDR *reply)
+{
+ ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+ if (reply != NULL) {
+ ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->sas_mgmt.reply, reply,
+ min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
+ }
+
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->sas_mgmt.done);
+ return 1;
+ }
+ return 0;
+}
+
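+/*
+ * Phy reset via the SAS IO Unit Control request: hard vs. link reset is
+ * selected by the Operation field, completion is waited on for 10 seconds,
+ * and a timeout escalates to mpt_Soft_Hard_ResetHandler(). As the FIXME
+ * notes, the firmware only supports this for HBA-local phys.
+ */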
+static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ MPT_ADAPTER *ioc = phy_to_ioc(phy);
+ SasIoUnitControlRequest_t *req;
+ SasIoUnitControlReply_t *reply;
+ MPT_FRAME_HDR *mf;
+ MPIHeader_t *hdr;
+ unsigned long timeleft;
+ int error = -ERESTARTSYS;
+
+ /* FIXME: fusion doesn't allow non-local phy reset */
+ if (!scsi_is_sas_phy_local(phy))
+ return -EINVAL;
+
+ /* not implemented for expanders */
+ if (phy->identify.target_port_protocols & SAS_PROTOCOL_SMP)
+ return -ENXIO;
+
+ if (mutex_lock_interruptible(&ioc->sas_mgmt.mutex))
+ goto out;
+
+ mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
+ if (!mf) {
+ error = -ENOMEM;
+ goto out_unlock;
+ }
+
+ hdr = (MPIHeader_t *) mf;
+ req = (SasIoUnitControlRequest_t *)mf;
+ memset(req, 0, sizeof(SasIoUnitControlRequest_t));
+ req->Function = MPI_FUNCTION_SAS_IO_UNIT_CONTROL;
+ req->MsgContext = hdr->MsgContext;
+ req->Operation = hard_reset ?
+ MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
+ req->PhyNum = phy->identify.phy_identifier;
+
+ INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
+ mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
+
+ timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
+ 10 * HZ);
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ error = -ETIME;
+ mpt_free_msg_frame(ioc, mf);
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out_unlock;
+ if (!timeleft)
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ goto out_unlock;
+ }
+
+ /* a reply frame is expected */
+ if ((ioc->sas_mgmt.status &
+ MPT_MGMT_STATUS_RF_VALID) == 0) {
+ error = -ENXIO;
+ goto out_unlock;
+ }
+
+ /* process the completed Reply Message Frame */
+ reply = (SasIoUnitControlReply_t *)ioc->sas_mgmt.reply;
+ if (reply->IOCStatus != MPI_IOCSTATUS_SUCCESS) {
+ printk(MYIOC_s_INFO_FMT "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
+ ioc->name, __func__, reply->IOCStatus, reply->IOCLogInfo);
+ error = -ENXIO;
+ goto out_unlock;
+ }
+
+ error = 0;
+
+ out_unlock:
+ CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
+ mutex_unlock(&ioc->sas_mgmt.mutex);
+ out:
+ return error;
+}
+
+static int
+mptsas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
+{
+ MPT_ADAPTER *ioc = rphy_to_ioc(rphy);
+ int i, error;
+ struct mptsas_portinfo *p;
+ struct mptsas_enclosure enclosure_info;
+ u64 enclosure_handle;
+
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_for_each_entry(p, &ioc->sas_topology, list) {
+ for (i = 0; i < p->num_phys; i++) {
+ if (p->phy_info[i].attached.sas_address ==
+ rphy->identify.sas_address) {
+ enclosure_handle = p->phy_info[i].
+ attached.handle_enclosure;
+ goto found_info;
+ }
+ }
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+ return -ENXIO;
+
+ found_info:
+ mutex_unlock(&ioc->sas_topology_mutex);
+ memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
+ error = mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
+ (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
+ MPI_SAS_ENCLOS_PGAD_FORM_SHIFT), enclosure_handle);
+ if (!error)
+ *identifier = enclosure_info.enclosure_logical_id;
+ return error;
+}
+
+static int
+mptsas_get_bay_identifier(struct sas_rphy *rphy)
+{
+ MPT_ADAPTER *ioc = rphy_to_ioc(rphy);
+ struct mptsas_portinfo *p;
+ int i, rc;
+
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_for_each_entry(p, &ioc->sas_topology, list) {
+ for (i = 0; i < p->num_phys; i++) {
+ if (p->phy_info[i].attached.sas_address ==
+ rphy->identify.sas_address) {
+ rc = p->phy_info[i].attached.slot;
+ goto out;
+ }
+ }
+ }
+ rc = -ENXIO;
+ out:
+ mutex_unlock(&ioc->sas_topology_mutex);
+ return rc;
+}
+
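+/*
+ * SMP passthrough: the request and response bios are mapped for DMA and
+ * attached as two SGEs. The -4/+4 byte adjustments on the lengths
+ * presumably account for the 4-byte SMP CRC, which the firmware appends
+ * and strips itself.
+ */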
+static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+ struct request *req)
+{
+ MPT_ADAPTER *ioc = ((MPT_SCSI_HOST *) shost->hostdata)->ioc;
+ MPT_FRAME_HDR *mf;
+ SmpPassthroughRequest_t *smpreq;
+ struct request *rsp = req->next_rq;
+ int ret;
+ int flagsLength;
+ unsigned long timeleft;
+ char *psge;
+ dma_addr_t dma_addr_in = 0;
+ dma_addr_t dma_addr_out = 0;
+ u64 sas_address = 0;
+
+ if (!rsp) {
+ printk(MYIOC_s_ERR_FMT "%s: the smp response space is missing\n",
+ ioc->name, __func__);
+ return -EINVAL;
+ }
+
+ /* do we need to support multiple segments? */
+ if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) {
+ printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n",
+ ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req),
+ rsp->bio->bi_vcnt, blk_rq_bytes(rsp));
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
+ if (ret)
+ goto out;
+
+ mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
+ if (!mf) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ smpreq = (SmpPassthroughRequest_t *)mf;
+ memset(smpreq, 0, sizeof(*smpreq));
+
+ smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
+ smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
+
+ if (rphy)
+ sas_address = rphy->identify.sas_address;
+ else {
+ struct mptsas_portinfo *port_info;
+
+ mutex_lock(&ioc->sas_topology_mutex);
+ port_info = ioc->hba_port_info;
+ if (port_info && port_info->phy_info)
+ sas_address =
+ port_info->phy_info[0].phy->identify.sas_address;
+ mutex_unlock(&ioc->sas_topology_mutex);
+ }
+
+ *((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
+
+ psge = (char *)
+ (((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
+
+ /* request */
+ flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_END_OF_BUFFER |
+ MPI_SGE_FLAGS_DIRECTION)
+ << MPI_SGE_FLAGS_SHIFT;
+ flagsLength |= (blk_rq_bytes(req) - 4);
+
+ dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_out)
+ goto put_mf;
+ ioc->add_sge(psge, flagsLength, dma_addr_out);
+ psge += ioc->SGE_size;
+
+ /* response */
+ flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+ MPI_SGE_FLAGS_IOC_TO_HOST |
+ MPI_SGE_FLAGS_END_OF_BUFFER;
+
+ flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
+ flagsLength |= blk_rq_bytes(rsp) + 4;
+ dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio),
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_in)
+ goto unmap;
+ ioc->add_sge(psge, flagsLength, dma_addr_in);
+
+ INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
+ mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
+
+ timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ mpt_free_msg_frame(ioc, mf);
+ mf = NULL;
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto unmap;
+ if (!timeleft)
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ goto unmap;
+ }
+ mf = NULL;
+
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
+ SmpPassthroughReply_t *smprep;
+
+ smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
+ memcpy(req->sense, smprep, sizeof(*smprep));
+ req->sense_len = sizeof(*smprep);
+ req->resid_len = 0;
+ rsp->resid_len -= smprep->ResponseDataLength;
+ } else {
+ printk(MYIOC_s_ERR_FMT
+ "%s: smp passthru reply failed to be returned\n",
+ ioc->name, __func__);
+ ret = -ENXIO;
+ }
+unmap:
+ if (dma_addr_out)
+ pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req),
+ PCI_DMA_BIDIRECTIONAL);
+ if (dma_addr_in)
+ pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp),
+ PCI_DMA_BIDIRECTIONAL);
+put_mf:
+ if (mf)
+ mpt_free_msg_frame(ioc, mf);
+out_unlock:
+ CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
+ mutex_unlock(&ioc->sas_mgmt.mutex);
+out:
+ return ret;
+}
+
+static struct sas_function_template mptsas_transport_functions = {
+ .get_linkerrors = mptsas_get_linkerrors,
+ .get_enclosure_identifier = mptsas_get_enclosure_identifier,
+ .get_bay_identifier = mptsas_get_bay_identifier,
+ .phy_reset = mptsas_phy_reset,
+ .smp_handler = mptsas_smp_handler,
+};
+
+static struct scsi_transport_template *mptsas_transport_template;
+
+static int
+mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasIOUnitPage0_t *buffer;
+ dma_addr_t dma_handle;
+ int error, i;
+
+ hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = 0;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ port_info->num_phys = buffer->NumPhys;
+ port_info->phy_info = kcalloc(port_info->num_phys,
+ sizeof(struct mptsas_phyinfo), GFP_KERNEL);
+ if (!port_info->phy_info) {
+ error = -ENOMEM;
+ goto out_free_consistent;
+ }
+
+ ioc->nvdata_version_persistent =
+ le16_to_cpu(buffer->NvdataVersionPersistent);
+ ioc->nvdata_version_default =
+ le16_to_cpu(buffer->NvdataVersionDefault);
+
+ for (i = 0; i < port_info->num_phys; i++) {
+ mptsas_print_phy_data(ioc, &buffer->PhyData[i]);
+ port_info->phy_info[i].phy_id = i;
+ port_info->phy_info[i].port_id =
+ buffer->PhyData[i].Port;
+ port_info->phy_info[i].negotiated_link_rate =
+ buffer->PhyData[i].NegotiatedLinkRate;
+ port_info->phy_info[i].portinfo = port_info;
+ port_info->phy_info[i].handle =
+ le16_to_cpu(buffer->PhyData[i].ControllerDevHandle);
+ }
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
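+/*
+ * SAS IO Unit Page 1 supplies the I/O and report-device missing delays;
+ * the report delay is encoded in units of 16 seconds when the UNIT_16
+ * flag is set, which is what the decode below multiplies out.
+ */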
+static int
+mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasIOUnitPage1_t *buffer;
+ dma_addr_t dma_handle;
+ int error;
+ u8 device_missing_delay;
+
+ memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
+ memset(&cfg, 0, sizeof(CONFIGPARMS));
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
+ cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION;
+ cfg.cfghdr.ehdr->PageNumber = 1;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ ioc->io_missing_delay =
+ le16_to_cpu(buffer->IODeviceMissingDelay);
+ device_missing_delay = buffer->ReportDeviceMissingDelay;
+ ioc->device_missing_delay = (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_UNIT_16) ?
+ (device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16 :
+ device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static int
+mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasPhyPage0_t *buffer;
+ dma_addr_t dma_handle;
+ int error;
+
+ hdr.PageVersion = MPI_SASPHY0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_PHY;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.dir = 0; /* read */
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
+
+ /* Get Phy Pg 0 for each Phy. */
+ cfg.physAddr = -1;
+ cfg.pageAddr = form + form_specific;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out_free_consistent;
+
+ mptsas_print_phy_pg0(ioc, buffer);
+
+ phy_info->hw_link_rate = buffer->HwLinkRate;
+ phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
+ phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
+ phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
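+/**
+ * mptsas_sas_device_pg0 - read SAS Device Page 0 for one device
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @device_info: devinfo object to be filled from the config page
+ * @form: page address form (handle, bus/target id, get next handle)
+ * @form_specific: form specific page address value
+ *
+ * Fills @device_info with the device handles, slot, phy/port id,
+ * SAS address, device info bits and flags reported by the firmware.
+ *
+ * Returns 0 for success, -ENODEV if the page is invalid, other
+ * non-zero values for failure.
+ **/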
+static int
+mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasDevicePage0_t *buffer;
+ dma_addr_t dma_handle;
+ __le64 sas_address;
+ int error = 0;
+
+ hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.pageAddr = form + form_specific;
+ cfg.physAddr = -1;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
+
+ memset(device_info, 0, sizeof(struct mptsas_devinfo));
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
+ error = -ENODEV;
+ goto out_free_consistent;
+ }
+
+ if (error)
+ goto out_free_consistent;
+
+ mptsas_print_device_pg0(ioc, buffer);
+
+ memset(device_info, 0, sizeof(struct mptsas_devinfo));
+ device_info->handle = le16_to_cpu(buffer->DevHandle);
+ device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
+ device_info->handle_enclosure =
+ le16_to_cpu(buffer->EnclosureHandle);
+ device_info->slot = le16_to_cpu(buffer->Slot);
+ device_info->phy_id = buffer->PhyNum;
+ device_info->port_id = buffer->PhysicalPort;
+ device_info->id = buffer->TargetID;
+ device_info->phys_disk_num = ~0;
+ device_info->channel = buffer->Bus;
+ memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
+ device_info->sas_address = le64_to_cpu(sas_address);
+ device_info->device_info =
+ le32_to_cpu(buffer->DeviceInfo);
+ device_info->flags = le16_to_cpu(buffer->Flags);
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
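+/**
+ * mptsas_sas_expander_pg0 - read SAS Expander Page 0 for one expander
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @port_info: port_info object to be filled with per-phy data
+ * @form: page address form (handle or get next handle)
+ * @form_specific: form specific page address value
+ *
+ * Allocates @port_info->phy_info and records the expander device
+ * handle, SAS address and parent device handle for every phy.
+ *
+ * Returns 0 for success, -ENODEV if the expander is not found,
+ * other non-zero values for failure.
+ **/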
+static int
+mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasExpanderPage0_t *buffer;
+ dma_addr_t dma_handle;
+ int i, error;
+ __le64 sas_address;
+
+ memset(port_info, 0, sizeof(struct mptsas_portinfo));
+ hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 0;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = form + form_specific;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
+
+ memset(port_info, 0, sizeof(struct mptsas_portinfo));
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
+ error = -ENODEV;
+ goto out_free_consistent;
+ }
+
+ if (error)
+ goto out_free_consistent;
+
+ /* save config data */
+ port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
+ port_info->phy_info = kcalloc(port_info->num_phys,
+ sizeof(struct mptsas_phyinfo), GFP_KERNEL);
+ if (!port_info->phy_info) {
+ error = -ENOMEM;
+ goto out_free_consistent;
+ }
+
+ memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
+ for (i = 0; i < port_info->num_phys; i++) {
+ port_info->phy_info[i].portinfo = port_info;
+ port_info->phy_info[i].handle =
+ le16_to_cpu(buffer->DevHandle);
+ port_info->phy_info[i].identify.sas_address =
+ le64_to_cpu(sas_address);
+ port_info->phy_info[i].identify.handle_parent =
+ le16_to_cpu(buffer->ParentDevHandle);
+ }
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+static int
+mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
+ u32 form, u32 form_specific)
+{
+ ConfigExtendedPageHeader_t hdr;
+ CONFIGPARMS cfg;
+ SasExpanderPage1_t *buffer;
+ dma_addr_t dma_handle;
+ int error = 0;
+
+ hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION;
+ hdr.ExtPageLength = 0;
+ hdr.PageNumber = 1;
+ hdr.Reserved1 = 0;
+ hdr.Reserved2 = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+ hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+
+ cfg.cfghdr.ehdr = &hdr;
+ cfg.physAddr = -1;
+ cfg.pageAddr = form + form_specific;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.dir = 0; /* read */
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
+
+ error = mpt_config(ioc, &cfg);
+ if (error)
+ goto out;
+
+ if (!hdr.ExtPageLength) {
+ error = -ENXIO;
+ goto out;
+ }
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ &dma_handle);
+ if (!buffer) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ error = mpt_config(ioc, &cfg);
+
+ if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
+ error = -ENODEV;
+ goto out_free_consistent;
+ }
+
+ if (error)
+ goto out_free_consistent;
+
+
+ mptsas_print_expander_pg1(ioc, buffer);
+
+ /* save config data */
+ phy_info->phy_id = buffer->PhyIdentifier;
+ phy_info->port_id = buffer->PhysicalPort;
+ phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
+ phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
+ phy_info->hw_link_rate = buffer->HwLinkRate;
+ phy_info->identify.handle = le16_to_cpu(buffer->OwnerDevHandle);
+ phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
+
+ out_free_consistent:
+ pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+ buffer, dma_handle);
+ out:
+ return error;
+}
+
+struct rep_manu_request{
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+struct rep_manu_reply{
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format:1;
+ u8 reserved1:7;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
+
+/**
+ * mptsas_exp_repmanufacture_info - issue SMP REPORT MANUFACTURER INFORMATION
+ * @ioc: per adapter object
+ * @sas_address: expander sas address
+ * @edev: the sas_expander_device object
+ *
+ * Fills in the sas_expander_device object when SMP port is created.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
+ u64 sas_address, struct sas_expander_device *edev)
+{
+ MPT_FRAME_HDR *mf;
+ SmpPassthroughRequest_t *smpreq;
+ SmpPassthroughReply_t *smprep;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int ret;
+ int flagsLength;
+ unsigned long timeleft;
+ char *psge;
+ unsigned long flags;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma = 0;
+ u32 sz;
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ printk(MYIOC_s_INFO_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ return -EFAULT;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ ret = mutex_lock_interruptible(&ioc->sas_mgmt.mutex);
+ if (ret)
+ goto out;
+
+ mf = mpt_get_msg_frame(mptsasMgmtCtx, ioc);
+ if (!mf) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ smpreq = (SmpPassthroughRequest_t *)mf;
+ memset(smpreq, 0, sizeof(*smpreq));
+
+ sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
+
+ data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma);
+ if (!data_out) {
+ printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ ret = -ENOMEM;
+ goto put_mf;
+ }
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH;
+ smpreq->PhysicalPort = 0xFF;
+ *((u64 *)&smpreq->SASAddress) = cpu_to_le64(sas_address);
+ smpreq->RequestDataLength = sizeof(struct rep_manu_request);
+
+ psge = (char *)
+ (((int *) mf) + (offsetof(SmpPassthroughRequest_t, SGL) / 4));
+
+ flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+ MPI_SGE_FLAGS_HOST_TO_IOC |
+ MPI_SGE_FLAGS_END_OF_BUFFER;
+ flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
+ flagsLength |= sizeof(struct rep_manu_request);
+
+ ioc->add_sge(psge, flagsLength, data_out_dma);
+ psge += ioc->SGE_size;
+
+ flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+ MPI_SGE_FLAGS_IOC_TO_HOST |
+ MPI_SGE_FLAGS_END_OF_BUFFER;
+ flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
+ flagsLength |= sizeof(struct rep_manu_reply);
+ ioc->add_sge(psge, flagsLength, data_out_dma +
+ sizeof(struct rep_manu_request));
+
+ INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
+ mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
+
+ timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
+ if (!(ioc->sas_mgmt.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ mpt_free_msg_frame(ioc, mf);
+ mf = NULL;
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out_free;
+ if (!timeleft)
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ goto out_free;
+ }
+
+ mf = NULL;
+
+ if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
+ u8 *tmp;
+
+ smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
+ if (le16_to_cpu(smprep->ResponseDataLength) !=
+ sizeof(struct rep_manu_reply))
+ goto out_free;
+
+ manufacture_reply = data_out + sizeof(struct rep_manu_request);
+ strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strncpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strncpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format;
+ if (manufacture_reply->sas_format) {
+ strncpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+ } else {
+ printk(MYIOC_s_ERR_FMT
+ "%s: smp passthru reply failed to be returned\n",
+ ioc->name, __func__);
+ ret = -ENXIO;
+ }
+out_free:
+ if (data_out_dma)
+ pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma);
+put_mf:
+ if (mf)
+ mpt_free_msg_frame(ioc, mf);
+out_unlock:
+ CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
+ mutex_unlock(&ioc->sas_mgmt.mutex);
+out:
+ return ret;
+}
+
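+/**
+ * mptsas_parse_device_info - translate MPI device info into sas_identify
+ * @identify: sas_identify object to be filled
+ * @device_info: devinfo object read from SAS Device Page 0
+ *
+ * Maps the MPI DeviceInfo bits onto the SAS transport initiator and
+ * target port protocols and the attached device type.
+ **/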
+static void
+mptsas_parse_device_info(struct sas_identify *identify,
+ struct mptsas_devinfo *device_info)
+{
+ u16 protocols;
+
+ identify->sas_address = device_info->sas_address;
+ identify->phy_identifier = device_info->phy_id;
+
+ /*
+ * Fill in Phy Initiator Port Protocol.
+ * Bits 6:3, more than one bit can be set, fall through cases.
+ */
+ protocols = device_info->device_info & 0x78;
+ identify->initiator_port_protocols = 0;
+ if (protocols & MPI_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+ if (protocols & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SATA_HOST)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
+
+ /*
+ * Fill in Phy Target Port Protocol.
+ * Bits 10:7, more than one bit can be set, fall through cases.
+ */
+ protocols = device_info->device_info & 0x780;
+ identify->target_port_protocols = 0;
+ if (protocols & MPI_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+ if (protocols & MPI_SAS_DEVICE_INFO_STP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_STP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ if (protocols & MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+ identify->target_port_protocols |= SAS_PROTOCOL_SATA;
+
+ /*
+ * Fill in Attached device type.
+ */
+ switch (device_info->device_info &
+ MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
+ case MPI_SAS_DEVICE_INFO_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI_SAS_DEVICE_INFO_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI_SAS_DEVICE_INFO_EDGE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER:
+ identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ }
+}
+
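+/**
+ * mptsas_probe_one_phy - register one phy with the SAS transport layer
+ * @dev: parent device (HBA shost or expander rphy)
+ * @phy_info: phy to be registered
+ * @index: phy index used for sas_phy_alloc
+ * @local: set for HBA phys, clear for expander phys
+ *
+ * Allocates and adds the sas_phy if needed, fills in the negotiated,
+ * hardware and programmed link rates, and creates the sas_port and
+ * sas_rphy for the attached device, fetching expander manufacture
+ * info for expander devices. Also clears the inDMD flag of a device
+ * that had previously been marked missing.
+ **/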
+static int mptsas_probe_one_phy(struct device *dev,
+ struct mptsas_phyinfo *phy_info, int index, int local)
+{
+ MPT_ADAPTER *ioc;
+ struct sas_phy *phy;
+ struct sas_port *port;
+ int error = 0;
+ VirtTarget *vtarget;
+
+ if (!dev) {
+ error = -ENODEV;
+ goto out;
+ }
+
+ if (!phy_info->phy) {
+ phy = sas_phy_alloc(dev, index);
+ if (!phy) {
+ error = -ENOMEM;
+ goto out;
+ }
+ } else
+ phy = phy_info->phy;
+
+ mptsas_parse_device_info(&phy->identify, &phy_info->identify);
+
+ /*
+ * Set Negotiated link rate.
+ */
+ switch (phy_info->negotiated_link_rate) {
+ case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED:
+ phy->negotiated_linkrate = SAS_PHY_DISABLED;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION:
+ phy->negotiated_linkrate = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_1_5:
+ phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_3_0:
+ phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_6_0:
+ phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE:
+ case MPI_SAS_IOUNIT0_RATE_UNKNOWN:
+ default:
+ phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+
+ /*
+ * Set Max hardware link rate.
+ */
+ switch (phy_info->hw_link_rate & MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
+ case MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5:
+ phy->maximum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
+ phy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Set Max programmed link rate.
+ */
+ switch (phy_info->programmed_link_rate &
+ MPI_SAS_PHY0_PRATE_MAX_RATE_MASK) {
+ case MPI_SAS_PHY0_PRATE_MAX_RATE_1_5:
+ phy->maximum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MAX_RATE_3_0:
+ phy->maximum_linkrate = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Set Min hardware link rate.
+ */
+ switch (phy_info->hw_link_rate & MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK) {
+ case MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5:
+ phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
+ phy->minimum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Set Min programmed link rate.
+ */
+ switch (phy_info->programmed_link_rate &
+ MPI_SAS_PHY0_PRATE_MIN_RATE_MASK) {
+ case MPI_SAS_PHY0_PRATE_MIN_RATE_1_5:
+ phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI_SAS_PHY0_PRATE_MIN_RATE_3_0:
+ phy->minimum_linkrate = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ default:
+ break;
+ }
+
+ if (!phy_info->phy) {
+
+ error = sas_phy_add(phy);
+ if (error) {
+ sas_phy_free(phy);
+ goto out;
+ }
+ phy_info->phy = phy;
+ }
+
+ if (!phy_info->attached.handle ||
+ !phy_info->port_details)
+ goto out;
+
+ port = mptsas_get_port(phy_info);
+ ioc = phy_to_ioc(phy_info->phy);
+
+ if (phy_info->sas_port_add_phy) {
+
+ if (!port) {
+ port = sas_port_alloc_num(dev);
+ if (!port) {
+ error = -ENOMEM;
+ goto out;
+ }
+ error = sas_port_add(port);
+ if (error) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: exit at line=%d\n", ioc->name,
+ __func__, __LINE__));
+ goto out;
+ }
+ mptsas_set_port(ioc, phy_info, port);
+ devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev,
+ MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n",
+ ioc->name, port->port_identifier,
+ (unsigned long long)phy_info->
+ attached.sas_address));
+ }
+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "sas_port_add_phy: phy_id=%d\n",
+ ioc->name, phy_info->phy_id));
+ sas_port_add_phy(port, phy_info->phy);
+ phy_info->sas_port_add_phy = 0;
+ devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev,
+ MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name,
+ phy_info->phy_id, phy_info->phy));
+ }
+ if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
+
+ struct sas_rphy *rphy;
+ struct device *parent;
+ struct sas_identify identify;
+
+ parent = dev->parent->parent;
+ /*
+ * Let the hotplug_work thread handle processing
+ * the adding/removing of devices that occur
+ * after start of day.
+ */
+ if (mptsas_is_end_device(&phy_info->attached) &&
+ phy_info->attached.handle_parent) {
+ goto out;
+ }
+
+ mptsas_parse_device_info(&identify, &phy_info->attached);
+ if (scsi_is_host_device(parent)) {
+ struct mptsas_portinfo *port_info;
+ int i;
+
+ port_info = ioc->hba_port_info;
+
+ for (i = 0; i < port_info->num_phys; i++)
+ if (port_info->phy_info[i].identify.sas_address ==
+ identify.sas_address) {
+ sas_port_mark_backlink(port);
+ goto out;
+ }
+
+ } else if (scsi_is_sas_rphy(parent)) {
+ struct sas_rphy *parent_rphy = dev_to_rphy(parent);
+ if (identify.sas_address ==
+ parent_rphy->identify.sas_address) {
+ sas_port_mark_backlink(port);
+ goto out;
+ }
+ }
+
+ switch (identify.device_type) {
+ case SAS_END_DEVICE:
+ rphy = sas_end_device_alloc(port);
+ break;
+ case SAS_EDGE_EXPANDER_DEVICE:
+ case SAS_FANOUT_EXPANDER_DEVICE:
+ rphy = sas_expander_alloc(port, identify.device_type);
+ break;
+ default:
+ rphy = NULL;
+ break;
+ }
+ if (!rphy) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: exit at line=%d\n", ioc->name,
+ __func__, __LINE__));
+ goto out;
+ }
+
+ rphy->identify = identify;
+ error = sas_rphy_add(rphy);
+ if (error) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: exit at line=%d\n", ioc->name,
+ __func__, __LINE__));
+ sas_rphy_free(rphy);
+ goto out;
+ }
+ mptsas_set_rphy(ioc, phy_info, rphy);
+ if (identify.device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ identify.device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mptsas_exp_repmanufacture_info(ioc,
+ identify.sas_address,
+ rphy_to_expander_device(rphy));
+ }
+
+ /* If the device exists, verify it wasn't previously flagged
+ * as a missing device; if so, clear the flag. */
+ vtarget = mptsas_find_vtarget(ioc,
+ phy_info->attached.channel,
+ phy_info->attached.id);
+ if (vtarget && vtarget->inDMD) {
+ printk(KERN_INFO "Device returned, unsetting inDMD\n");
+ vtarget->inDMD = 0;
+ }
+
+ out:
+ return error;
+}
+
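+/**
+ * mptsas_probe_hba_phys - discover and register the HBA phys
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * Reads SAS IO Unit Pages 0/1, installs or refreshes ioc->hba_port_info,
+ * reads the phy and attached device pages for every phy, sets up wide
+ * ports and registers each phy with the SAS transport layer.
+ **/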
+static int
+mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
+{
+ struct mptsas_portinfo *port_info, *hba;
+ int error = -ENOMEM, i;
+
+ hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
+ if (!hba)
+ goto out;
+
+ error = mptsas_sas_io_unit_pg0(ioc, hba);
+ if (error)
+ goto out_free_port_info;
+
+ mptsas_sas_io_unit_pg1(ioc);
+ mutex_lock(&ioc->sas_topology_mutex);
+ port_info = ioc->hba_port_info;
+ if (!port_info) {
+ ioc->hba_port_info = port_info = hba;
+ ioc->hba_port_num_phy = port_info->num_phys;
+ list_add_tail(&port_info->list, &ioc->sas_topology);
+ } else {
+ for (i = 0; i < hba->num_phys; i++) {
+ port_info->phy_info[i].negotiated_link_rate =
+ hba->phy_info[i].negotiated_link_rate;
+ port_info->phy_info[i].handle =
+ hba->phy_info[i].handle;
+ port_info->phy_info[i].port_id =
+ hba->phy_info[i].port_id;
+ }
+ kfree(hba->phy_info);
+ kfree(hba);
+ hba = NULL;
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+#if defined(CPQ_CIM)
+ ioc->num_ports = port_info->num_phys;
+#endif
+ for (i = 0; i < port_info->num_phys; i++) {
+ mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
+ (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
+ MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
+ port_info->phy_info[i].identify.handle =
+ port_info->phy_info[i].handle;
+ mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].identify.handle);
+ if (!ioc->hba_port_sas_addr)
+ ioc->hba_port_sas_addr =
+ port_info->phy_info[i].identify.sas_address;
+ port_info->phy_info[i].identify.phy_id =
+ port_info->phy_info[i].phy_id = i;
+ if (port_info->phy_info[i].attached.handle)
+ mptsas_sas_device_pg0(ioc,
+ &port_info->phy_info[i].attached,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].attached.handle);
+ }
+
+ mptsas_setup_wide_ports(ioc, port_info);
+
+ for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
+ mptsas_probe_one_phy(&ioc->sh->shost_gendev,
+ &port_info->phy_info[i], ioc->sas_index, 1);
+
+ return 0;
+
+ out_free_port_info:
+ kfree(hba);
+ out:
+ return error;
+}
+
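+/**
+ * mptsas_expander_refresh - refresh an expander's phys and attached devices
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @port_info: expander port_info object
+ *
+ * Re-reads the expander phy and attached device config pages, locates
+ * the parent device, sets up wide ports and probes every expander phy.
+ **/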
+static void
+mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
+{
+ struct mptsas_portinfo *parent;
+ struct device *parent_dev;
+ struct sas_rphy *rphy;
+ int i;
+ u64 sas_address; /* expander sas address */
+ u32 handle;
+
+ handle = port_info->phy_info[0].handle;
+ sas_address = port_info->phy_info[0].identify.sas_address;
+ for (i = 0; i < port_info->num_phys; i++) {
+ mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
+ (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
+
+ mptsas_sas_device_pg0(ioc,
+ &port_info->phy_info[i].identify,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].identify.handle);
+ port_info->phy_info[i].identify.phy_id =
+ port_info->phy_info[i].phy_id;
+
+ if (port_info->phy_info[i].attached.handle) {
+ mptsas_sas_device_pg0(ioc,
+ &port_info->phy_info[i].attached,
+ (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ port_info->phy_info[i].attached.handle);
+ port_info->phy_info[i].attached.phy_id =
+ port_info->phy_info[i].phy_id;
+ }
+ }
+
+ mutex_lock(&ioc->sas_topology_mutex);
+ parent = mptsas_find_portinfo_by_handle(ioc,
+ port_info->phy_info[0].identify.handle_parent);
+ if (!parent) {
+ mutex_unlock(&ioc->sas_topology_mutex);
+ return;
+ }
+ for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
+ i++) {
+ if (parent->phy_info[i].attached.sas_address == sas_address) {
+ rphy = mptsas_get_rphy(&parent->phy_info[i]);
+ parent_dev = &rphy->dev;
+ }
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+
+ mptsas_setup_wide_ports(ioc, port_info);
+ for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
+ mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
+ ioc->sas_index, 0);
+}
+
+static void
+mptsas_expander_event_add(MPT_ADAPTER *ioc,
+ MpiEventDataSasExpanderStatusChange_t *expander_data)
+{
+ struct mptsas_portinfo *port_info;
+ int i;
+ __le64 sas_address;
+
+ port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
+ if (!port_info)
+ BUG();
+ port_info->num_phys = (expander_data->NumPhys) ?
+ expander_data->NumPhys : 1;
+ port_info->phy_info = kcalloc(port_info->num_phys,
+ sizeof(struct mptsas_phyinfo), GFP_KERNEL);
+ if (!port_info->phy_info)
+ BUG();
+ memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
+ for (i = 0; i < port_info->num_phys; i++) {
+ port_info->phy_info[i].portinfo = port_info;
+ port_info->phy_info[i].handle =
+ le16_to_cpu(expander_data->DevHandle);
+ port_info->phy_info[i].identify.sas_address =
+ le64_to_cpu(sas_address);
+ port_info->phy_info[i].identify.handle_parent =
+ le16_to_cpu(expander_data->ParentDevHandle);
+ }
+
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_add_tail(&port_info->list, &ioc->sas_topology);
+ mutex_unlock(&ioc->sas_topology_mutex);
+
+ printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
+ "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
+ (unsigned long long)sas_address);
+
+ mptsas_expander_refresh(ioc, port_info);
+}
+
+/**
+ * mptsas_delete_expander_siblings - remove siblings attached to expander
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @parent: the parent port_info object
+ * @expander: the expander port_info object
+ **/
+static void
+mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
+ *parent, struct mptsas_portinfo *expander)
+{
+ struct mptsas_phyinfo *phy_info;
+ struct mptsas_portinfo *port_info;
+ struct sas_rphy *rphy;
+ int i;
+
+ phy_info = expander->phy_info;
+ for (i = 0; i < expander->num_phys; i++, phy_info++) {
+ rphy = mptsas_get_rphy(phy_info);
+ if (!rphy)
+ continue;
+ if (rphy->identify.device_type == SAS_END_DEVICE)
+ mptsas_del_end_device(ioc, phy_info);
+ }
+
+ phy_info = expander->phy_info;
+ for (i = 0; i < expander->num_phys; i++, phy_info++) {
+ rphy = mptsas_get_rphy(phy_info);
+ if (!rphy)
+ continue;
+ if (rphy->identify.device_type ==
+ MPI_SAS_DEVICE_INFO_EDGE_EXPANDER ||
+ rphy->identify.device_type ==
+ MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
+ port_info = mptsas_find_portinfo_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (!port_info)
+ continue;
+ if (port_info == parent) /* backlink rphy */
+ continue;
+ /*
+ * Delete this expander even if its expander device page
+ * still exists, because the parent expander has already
+ * been deleted.
+ */
+ mptsas_expander_delete(ioc, port_info, 1);
+ }
+ }
+}
+
+
+/**
+ * mptsas_expander_delete - remove this expander
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @port_info: expander port_info struct
+ * @force: Flag to forcefully delete the expander
+ *
+ **/
+
+static void mptsas_expander_delete(MPT_ADAPTER *ioc,
+ struct mptsas_portinfo *port_info, u8 force)
+{
+
+ struct mptsas_portinfo *parent;
+ int i;
+ u64 expander_sas_address;
+ struct mptsas_phyinfo *phy_info;
+ struct mptsas_portinfo buffer;
+ struct mptsas_portinfo_details *port_details;
+ struct sas_port *port;
+
+ if (!port_info)
+ return;
+
+ /* see if expander is still there before deleting */
+ mptsas_sas_expander_pg0(ioc, &buffer,
+ (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
+ port_info->phy_info[0].identify.handle);
+
+ if (buffer.num_phys) {
+ kfree(buffer.phy_info);
+ if (!force)
+ return;
+ }
+
+
+ /*
+ * Obtain the port_info instance to the parent port
+ */
+ port_details = NULL;
+ expander_sas_address =
+ port_info->phy_info[0].identify.sas_address;
+ parent = mptsas_find_portinfo_by_handle(ioc,
+ port_info->phy_info[0].identify.handle_parent);
+ mptsas_delete_expander_siblings(ioc, parent, port_info);
+ if (!parent)
+ goto out;
+
+ /*
+ * Delete rphys in the parent that point
+ * to this expander.
+ */
+ phy_info = parent->phy_info;
+ port = NULL;
+ for (i = 0; i < parent->num_phys; i++, phy_info++) {
+ if (!phy_info->phy)
+ continue;
+ if (phy_info->attached.sas_address !=
+ expander_sas_address)
+ continue;
+ if (!port) {
+ port = mptsas_get_port(phy_info);
+ port_details = phy_info->port_details;
+ }
+ dev_printk(KERN_DEBUG, &phy_info->phy->dev,
+ MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
+ phy_info->phy_id, phy_info->phy);
+ sas_port_delete_phy(port, phy_info->phy);
+ }
+ if (port) {
+ dev_printk(KERN_DEBUG, &port->dev,
+ MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
+ ioc->name, port->port_identifier,
+ (unsigned long long)expander_sas_address);
+ sas_port_delete(port);
+ mptsas_port_delete(ioc, port_details);
+ }
+ out:
+
+ printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
+ "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
+ (unsigned long long)expander_sas_address);
+
+ /*
+ * free link
+ */
+ list_del(&port_info->list);
+ kfree(port_info->phy_info);
+ kfree(port_info);
+}
+
+
+/**
+ * mptsas_send_expander_event - handle expander status change events
+ * @fw_event: firmware event work item carrying the expander
+ * status change event data
+ *
+ * This function handles adding, removing, and refreshing
+ * device handles within the expander objects.
+ */
+static void
+mptsas_send_expander_event(struct fw_event_work *fw_event)
+{
+ MPT_ADAPTER *ioc;
+ MpiEventDataSasExpanderStatusChange_t *expander_data;
+ struct mptsas_portinfo *port_info;
+ __le64 sas_address;
+ int i;
+
+ ioc = fw_event->ioc;
+ expander_data = (MpiEventDataSasExpanderStatusChange_t *)
+ fw_event->event_data;
+ memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
+ sas_address = le64_to_cpu(sas_address);
+ port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
+
+ if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
+ if (port_info) {
+ for (i = 0; i < port_info->num_phys; i++) {
+ port_info->phy_info[i].portinfo = port_info;
+ port_info->phy_info[i].handle =
+ le16_to_cpu(expander_data->DevHandle);
+ port_info->phy_info[i].identify.sas_address =
+ le64_to_cpu(sas_address);
+ port_info->phy_info[i].identify.handle_parent =
+ le16_to_cpu(expander_data->ParentDevHandle);
+ }
+ mptsas_expander_refresh(ioc, port_info);
+ } else if (!port_info && expander_data->NumPhys)
+ mptsas_expander_event_add(ioc, expander_data);
+ } else if (expander_data->ReasonCode ==
+ MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
+ mptsas_expander_delete(ioc, port_info, 0);
+
+ mptsas_free_fw_event(ioc, fw_event);
+}
+
+
+/**
+ * mptsas_expander_add - discover and add an expander by firmware handle
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @handle: firmware device handle of the expander
+ *
+ */
+struct mptsas_portinfo *
+mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
+{
+ struct mptsas_portinfo buffer, *port_info;
+ int i;
+
+ if ((mptsas_sas_expander_pg0(ioc, &buffer,
+ (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
+ return NULL;
+
+ port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
+ if (!port_info) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: exit at line=%d\n", ioc->name,
+ __func__, __LINE__));
+ return NULL;
+ }
+ port_info->num_phys = buffer.num_phys;
+ port_info->phy_info = buffer.phy_info;
+ for (i = 0; i < port_info->num_phys; i++)
+ port_info->phy_info[i].portinfo = port_info;
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_add_tail(&port_info->list, &ioc->sas_topology);
+ mutex_unlock(&ioc->sas_topology_mutex);
+ printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
+ "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
+ (unsigned long long)buffer.phy_info[0].identify.sas_address);
+ mptsas_expander_refresh(ioc, port_info);
+ return port_info;
+}
+
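+/**
+ * mptsas_send_link_status_event - handle a SAS phy link status change event
+ * @fw_event: firmware event work item carrying the link status event data
+ *
+ * Updates the cached negotiated link rate and reprobes the HBA or
+ * expander when a link comes up; when a link goes down the phy is
+ * marked disabled, failed or unknown as reported by the firmware.
+ **/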
+static void
+mptsas_send_link_status_event(struct fw_event_work *fw_event)
+{
+ MPT_ADAPTER *ioc;
+ MpiEventDataSasPhyLinkStatus_t *link_data;
+ struct mptsas_portinfo *port_info;
+ struct mptsas_phyinfo *phy_info = NULL;
+ __le64 sas_address;
+ u8 phy_num;
+ u8 link_rate;
+
+ ioc = fw_event->ioc;
+ link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
+
+ memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
+ sas_address = le64_to_cpu(sas_address);
+ link_rate = link_data->LinkRates >> 4;
+ phy_num = link_data->PhyNum;
+
+ port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
+ if (port_info) {
+ phy_info = &port_info->phy_info[phy_num];
+ if (phy_info)
+ phy_info->negotiated_link_rate = link_rate;
+ }
+
+ if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
+ link_rate == MPI_SAS_IOUNIT0_RATE_3_0 ||
+ link_rate == MPI_SAS_IOUNIT0_RATE_6_0) {
+
+ if (!port_info) {
+ if (ioc->old_sas_discovery_protocal) {
+ port_info = mptsas_expander_add(ioc,
+ le16_to_cpu(link_data->DevHandle));
+ if (port_info)
+ goto out;
+ }
+ goto out;
+ }
+
+ if (port_info == ioc->hba_port_info)
+ mptsas_probe_hba_phys(ioc);
+ else
+ mptsas_expander_refresh(ioc, port_info);
+ } else if (phy_info && phy_info->phy) {
+ if (link_rate == MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
+ phy_info->phy->negotiated_linkrate =
+ SAS_PHY_DISABLED;
+ else if (link_rate ==
+ MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
+ phy_info->phy->negotiated_linkrate =
+ SAS_LINK_RATE_FAILED;
+ else {
+ phy_info->phy->negotiated_linkrate =
+ SAS_LINK_RATE_UNKNOWN;
+ if (ioc->device_missing_delay &&
+ mptsas_is_end_device(&phy_info->attached)) {
+ struct scsi_device *sdev;
+ VirtDevice *vdevice;
+ u8 channel, id;
+ id = phy_info->attached.id;
+ channel = phy_info->attached.channel;
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Link down for fw_id %d:fw_channel %d\n",
+ ioc->name, phy_info->attached.id,
+ phy_info->attached.channel));
+
+ shost_for_each_device(sdev, ioc->sh) {
+ vdevice = sdev->hostdata;
+ if ((vdevice == NULL) ||
+ (vdevice->vtarget == NULL))
+ continue;
+ if ((vdevice->vtarget->tflags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT ||
+ vdevice->vtarget->raidVolume))
+ continue;
+ if (vdevice->vtarget->id == id &&
+ vdevice->vtarget->channel ==
+ channel)
+ devtprintk(ioc,
+ printk(MYIOC_s_DEBUG_FMT
+ "SDEV OUTSTANDING CMDS"
+ "%d\n", ioc->name,
+ sdev->device_busy));
+ }
+
+ }
+ }
+ }
+ out:
+ mptsas_free_fw_event(ioc, fw_event);
+}
+
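+/**
+ * mptsas_not_responding_devices - remove devices the firmware no longer reports
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * Walks the cached device and volume list, deleting end devices and
+ * volumes that the firmware no longer reports, then deletes expanders
+ * that are no longer enumerated by the firmware.
+ **/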
+static void
+mptsas_not_responding_devices(MPT_ADAPTER *ioc)
+{
+ struct mptsas_portinfo buffer, *port_info;
+ struct mptsas_device_info *sas_info;
+ struct mptsas_devinfo sas_device;
+ u32 handle;
+ VirtTarget *vtarget = NULL;
+ struct mptsas_phyinfo *phy_info;
+ u8 found_expander;
+ int retval, retry_count;
+ unsigned long flags;
+
+ mpt_findImVolumes(ioc);
+
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: exiting due to a parallel reset \n", ioc->name,
+ __func__));
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ /* devices, logical volumes */
+ mutex_lock(&ioc->sas_device_info_mutex);
+ redo_device_scan:
+ list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
+ if (sas_info->is_cached)
+ continue;
+ if (!sas_info->is_logical_volume) {
+ sas_device.handle = 0;
+ retry_count = 0;
+retry_page:
+ retval = mptsas_sas_device_pg0(ioc, &sas_device,
+ (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
+ << MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ (sas_info->fw.channel << 8) +
+ sas_info->fw.id);
+
+ if (sas_device.handle)
+ continue;
+ if (retval == -EBUSY) {
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ dfailprintk(ioc,
+ printk(MYIOC_s_DEBUG_FMT
+ "%s: exiting due to reset\n",
+ ioc->name, __func__));
+ spin_unlock_irqrestore
+ (&ioc->taskmgmt_lock, flags);
+ mutex_unlock(&ioc->
+ sas_device_info_mutex);
+ return;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock,
+ flags);
+ }
+
+ if (retval && (retval != -ENODEV)) {
+ if (retry_count < 10) {
+ retry_count++;
+ goto retry_page;
+ } else {
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: Config page retry exceeded retry "
+ "count deleting device 0x%llx\n",
+ ioc->name, __func__,
+ sas_info->sas_address));
+ }
+ }
+
+ /* delete device */
+ vtarget = mptsas_find_vtarget(ioc,
+ sas_info->fw.channel, sas_info->fw.id);
+
+ if (vtarget)
+ vtarget->deleted = 1;
+
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ sas_info->sas_address);
+
+ if (phy_info) {
+ mptsas_del_end_device(ioc, phy_info);
+ goto redo_device_scan;
+ }
+ } else
+ mptsas_volume_delete(ioc, sas_info->fw.id);
+ }
+ mutex_unlock(&ioc->sas_device_info_mutex);
+
+ /* expanders */
+ mutex_lock(&ioc->sas_topology_mutex);
+ redo_expander_scan:
+ list_for_each_entry(port_info, &ioc->sas_topology, list) {
+
+ if (port_info->phy_info &&
+ (!(port_info->phy_info[0].identify.device_info &
+ MPI_SAS_DEVICE_INFO_SMP_TARGET)))
+ continue;
+ found_expander = 0;
+ handle = 0xFFFF;
+ while (!mptsas_sas_expander_pg0(ioc, &buffer,
+ (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
+ !found_expander) {
+
+ handle = buffer.phy_info[0].handle;
+ if (buffer.phy_info[0].identify.sas_address ==
+ port_info->phy_info[0].identify.sas_address) {
+ found_expander = 1;
+ }
+ kfree(buffer.phy_info);
+ }
+
+ if (!found_expander) {
+ mptsas_expander_delete(ioc, port_info, 0);
+ goto redo_expander_scan;
+ }
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+}
+
+/**
+ * mptsas_probe_expanders - probe and add all expanders reported by the IOC
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+static void
+mptsas_probe_expanders(MPT_ADAPTER *ioc)
+{
+ struct mptsas_portinfo buffer, *port_info;
+ u32 handle;
+ int i;
+
+ handle = 0xFFFF;
+ while (!mptsas_sas_expander_pg0(ioc, &buffer,
+ (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
+ MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
+
+ handle = buffer.phy_info[0].handle;
+ port_info = mptsas_find_portinfo_by_sas_address(ioc,
+ buffer.phy_info[0].identify.sas_address);
+
+ if (port_info) {
+ /* refreshing handles */
+ for (i = 0; i < buffer.num_phys; i++) {
+ port_info->phy_info[i].handle = handle;
+ port_info->phy_info[i].identify.handle_parent =
+ buffer.phy_info[0].identify.handle_parent;
+ }
+ mptsas_expander_refresh(ioc, port_info);
+ kfree(buffer.phy_info);
+ continue;
+ }
+
+ port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
+ if (!port_info) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: exit at line=%d\n", ioc->name,
+ __func__, __LINE__));
+ return;
+ }
+ port_info->num_phys = buffer.num_phys;
+ port_info->phy_info = buffer.phy_info;
+ for (i = 0; i < port_info->num_phys; i++)
+ port_info->phy_info[i].portinfo = port_info;
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_add_tail(&port_info->list, &ioc->sas_topology);
+ mutex_unlock(&ioc->sas_topology_mutex);
+ printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
+ "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
+ (unsigned long long)buffer.phy_info[0].identify.sas_address);
+ mptsas_expander_refresh(ioc, port_info);
+ }
+}
+
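+/**
+ * mptsas_probe_devices - add all mapped end devices
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ * Iterates the firmware device handles, skipping non-target and
+ * unmapped devices, and adds every end device that does not yet
+ * have an rphy.
+ **/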
+static void
+mptsas_probe_devices(MPT_ADAPTER *ioc)
+{
+ u16 handle;
+ struct mptsas_devinfo sas_device;
+ struct mptsas_phyinfo *phy_info;
+
+ handle = 0xFFFF;
+ while (!(mptsas_sas_device_pg0(ioc, &sas_device,
+ MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+
+ handle = sas_device.handle;
+
+ if ((sas_device.device_info &
+ (MPI_SAS_DEVICE_INFO_SSP_TARGET |
+ MPI_SAS_DEVICE_INFO_STP_TARGET |
+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
+ continue;
+
+ /* If there is no FW B_T mapping for this device then continue */
+ if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
+ || !(sas_device.flags &
+ MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
+ continue;
+
+ phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
+ if (!phy_info)
+ continue;
+
+ if (mptsas_get_rphy(phy_info))
+ continue;
+
+ mptsas_add_end_device(ioc, phy_info);
+ }
+}
+
+/**
+ * mptsas_scan_sas_topology - probe phys, expanders, devices and RAID volumes
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+static void
+mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
+{
+ struct scsi_device *sdev;
+ int i;
+
+ mptsas_probe_hba_phys(ioc);
+ mptsas_probe_expanders(ioc);
+ mptsas_probe_devices(ioc);
+
+ /*
+ Reporting RAID volumes.
+ */
+ if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
+ !ioc->raid_data.pIocPg2->NumActiveVolumes)
+ return;
+ for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
+ sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
+ ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
+ if (sdev) {
+ scsi_device_put(sdev);
+ continue;
+ }
+ printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
+ "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
+ ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
+ scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
+ ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
+ }
+}
+
+
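+/**
+ * mptsas_handle_queue_full_event - adjust queue depth after a queue full event
+ * @fw_event: firmware event work item carrying the queue full event data
+ *
+ * Maps the firmware bus/target to the OS device (resolving hidden RAID
+ * components to their volume) and uses scsi_track_queue_full() to
+ * reduce the device queue depth.
+ **/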
+static void
+mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
+{
+ MPT_ADAPTER *ioc;
+ EventDataQueueFull_t *qfull_data;
+ struct mptsas_device_info *sas_info;
+ struct scsi_device *sdev;
+ int depth;
+ int id = -1;
+ int channel = -1;
+ int fw_id, fw_channel;
+ u16 current_depth;
+
+
+ ioc = fw_event->ioc;
+ qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
+ fw_id = qfull_data->TargetID;
+ fw_channel = qfull_data->Bus;
+ current_depth = le16_to_cpu(qfull_data->CurrentDepth);
+
+ /* if hidden raid component, look for the volume id */
+ mutex_lock(&ioc->sas_device_info_mutex);
+ if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
+ list_for_each_entry(sas_info, &ioc->sas_device_info_list,
+ list) {
+ if (sas_info->is_cached ||
+ sas_info->is_logical_volume)
+ continue;
+ if (sas_info->is_hidden_raid_component &&
+ (sas_info->fw.channel == fw_channel &&
+ sas_info->fw.id == fw_id)) {
+ id = sas_info->volume_id;
+ channel = MPTSAS_RAID_CHANNEL;
+ goto out;
+ }
+ }
+ } else {
+ list_for_each_entry(sas_info, &ioc->sas_device_info_list,
+ list) {
+ if (sas_info->is_cached ||
+ sas_info->is_hidden_raid_component ||
+ sas_info->is_logical_volume)
+ continue;
+ if (sas_info->fw.channel == fw_channel &&
+ sas_info->fw.id == fw_id) {
+ id = sas_info->os.id;
+ channel = sas_info->os.channel;
+ goto out;
+ }
+ }
+
+ }
+
+ out:
+ mutex_unlock(&ioc->sas_device_info_mutex);
+
+ if (id != -1) {
+ shost_for_each_device(sdev, ioc->sh) {
+ if (sdev->id == id && sdev->channel == channel) {
+ if (current_depth > sdev->queue_depth) {
+ sdev_printk(KERN_INFO, sdev,
+ "strange observation, the queue "
+ "depth is (%d) meanwhile fw queue "
+ "depth (%d)\n", sdev->queue_depth,
+ current_depth);
+ continue;
+ }
+ depth = scsi_track_queue_full(sdev,
+ current_depth - 1);
+ if (depth > 0)
+ sdev_printk(KERN_INFO, sdev,
+ "Queue depth reduced to (%d)\n",
+ depth);
+ else if (depth < 0)
+ sdev_printk(KERN_INFO, sdev,
+ "Tagged Command Queueing is being "
+ "disabled\n");
+ else if (depth == 0)
+ sdev_printk(KERN_INFO, sdev,
+ "Queue depth not changed yet\n");
+ }
+ }
+ }
+
+ mptsas_free_fw_event(ioc, fw_event);
+}
+
+
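+/**
+ * mptsas_find_phyinfo_by_sas_address - look up a phy by attached SAS address
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sas_address: SAS address of the attached end device
+ *
+ * Returns the phy whose attached end device matches @sas_address,
+ * or NULL when no match is found.
+ **/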
+static struct mptsas_phyinfo *
+mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
+{
+ struct mptsas_portinfo *port_info;
+ struct mptsas_phyinfo *phy_info = NULL;
+ int i;
+
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_for_each_entry(port_info, &ioc->sas_topology, list) {
+ for (i = 0; i < port_info->num_phys; i++) {
+ if (!mptsas_is_end_device(
+ &port_info->phy_info[i].attached))
+ continue;
+ if (port_info->phy_info[i].attached.sas_address
+ != sas_address)
+ continue;
+ phy_info = &port_info->phy_info[i];
+ break;
+ }
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+ return phy_info;
+}
+
+/**
+ * mptsas_find_phyinfo_by_phys_disk_num - look up phy info for a RAID physical disk
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @phys_disk_num: physical disk number assigned by the IOC
+ * @channel: firmware channel (bus) of the physical disk
+ * @id: firmware target id of the physical disk
+ *
+ **/
+static struct mptsas_phyinfo *
+mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
+ u8 channel, u8 id)
+{
+ struct mptsas_phyinfo *phy_info = NULL;
+ struct mptsas_portinfo *port_info;
+ RaidPhysDiskPage1_t *phys_disk = NULL;
+ int num_paths;
+ u64 sas_address = 0;
+ int i;
+
+ phy_info = NULL;
+ if (!ioc->raid_data.pIocPg3)
+ return NULL;
+ /* dual port support */
+ num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
+ if (!num_paths)
+ goto out;
+ phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+ (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+ if (!phys_disk)
+ goto out;
+ mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
+ for (i = 0; i < num_paths; i++) {
+ if ((phys_disk->Path[i].Flags & 1) != 0)
+ /* entry no longer valid */
+ continue;
+ if ((id == phys_disk->Path[i].PhysDiskID) &&
+ (channel == phys_disk->Path[i].PhysDiskBus)) {
+ memcpy(&sas_address, &phys_disk->Path[i].WWID,
+ sizeof(u64));
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ sas_address);
+ goto out;
+ }
+ }
+
+ out:
+ kfree(phys_disk);
+ if (phy_info)
+ return phy_info;
+
+ /*
+ * Extra code to handle RAID0 case, where the sas_address is not updated
+ * in phys_disk_page_1 when hotswapped
+ */
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_for_each_entry(port_info, &ioc->sas_topology, list) {
+ for (i = 0; i < port_info->num_phys && !phy_info; i++) {
+ if (!mptsas_is_end_device(
+ &port_info->phy_info[i].attached))
+ continue;
+ if (port_info->phy_info[i].attached.phys_disk_num == ~0)
+ continue;
+ if ((port_info->phy_info[i].attached.phys_disk_num ==
+ phys_disk_num) &&
+ (port_info->phy_info[i].attached.id == id) &&
+ (port_info->phy_info[i].attached.channel ==
+ channel))
+ phy_info = &port_info->phy_info[i];
+ }
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+ return phy_info;
+}
+
+static void
+mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
+{
+ int rc;
+
+ sdev->no_uld_attach = data ? 1 : 0;
+ rc = scsi_device_reprobe(sdev);
+}
+
+static void
+mptsas_reprobe_target(struct scsi_target *starget, int uld_attach)
+{
+ starget_for_each_device(starget, uld_attach ? (void *)1 : NULL,
+ mptsas_reprobe_lun);
+}
+
+static void
+mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+ CONFIGPARMS cfg;
+ ConfigPageHeader_t hdr;
+ dma_addr_t dma_handle;
+ pRaidVolumePage0_t buffer = NULL;
+ RaidPhysDiskPage0_t phys_disk;
+ int i;
+ struct mptsas_phyinfo *phy_info;
+ struct mptsas_devinfo sas_device;
+
+ memset(&cfg, 0 , sizeof(CONFIGPARMS));
+ memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+ hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
+ cfg.pageAddr = (channel << 8) + id;
+ cfg.cfghdr.hdr = &hdr;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ if (!hdr.PageLength)
+ goto out;
+
+ buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+ &dma_handle);
+
+ if (!buffer)
+ goto out;
+
+ cfg.physAddr = dma_handle;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+ if (mpt_config(ioc, &cfg) != 0)
+ goto out;
+
+ if (!(buffer->VolumeStatus.Flags &
+ MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE))
+ goto out;
+
+ if (!buffer->NumPhysDisks)
+ goto out;
+
+ for (i = 0; i < buffer->NumPhysDisks; i++) {
+
+ if (mpt_raid_phys_disk_pg0(ioc,
+ buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
+ continue;
+
+ if (mptsas_sas_device_pg0(ioc, &sas_device,
+ (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ (phys_disk.PhysDiskBus << 8) +
+ phys_disk.PhysDiskID))
+ continue;
+
+ /* If there is no FW B_T mapping for this device then continue */
+ if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
+ || !(sas_device.flags &
+ MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
+ continue;
+
+
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ sas_device.sas_address);
+ mptsas_add_end_device(ioc, phy_info);
+ }
+
+ out:
+ if (buffer)
+ pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+ dma_handle);
+}
+/*
+ * Work queue thread to handle SAS hotplug events
+ */
+static void
+mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
+ struct mptsas_hotplug_event *hot_plug_info)
+{
+ struct mptsas_phyinfo *phy_info;
+ struct scsi_target * starget;
+ struct mptsas_devinfo sas_device;
+ VirtTarget *vtarget;
+ int i;
+ struct mptsas_portinfo *port_info;
+
+ switch (hot_plug_info->event_type) {
+
+ case MPTSAS_ADD_PHYSDISK:
+
+ if (!ioc->raid_data.pIocPg2)
+ break;
+
+ for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
+ if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
+ hot_plug_info->id) {
+ printk(MYIOC_s_WARN_FMT "firmware bug: unable "
+ "to add hidden disk - target_id matchs "
+ "volume_id\n", ioc->name);
+ mptsas_free_fw_event(ioc, fw_event);
+ return;
+ }
+ }
+ mpt_findImVolumes(ioc);
+
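+ /* fall through to MPTSAS_ADD_DEVICE handling */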
+ case MPTSAS_ADD_DEVICE:
+ memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
+ mptsas_sas_device_pg0(ioc, &sas_device,
+ (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ (hot_plug_info->channel << 8) +
+ hot_plug_info->id);
+
+ /* If there is no FW B_T mapping for this device then break */
+ if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
+ || !(sas_device.flags &
+ MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
+ break;
+
+ if (!sas_device.handle)
+ return;
+
+ phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
+ /* Only For SATA Device ADD */
+ if (!phy_info && (sas_device.device_info &
+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)) {
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s %d SATA HOT PLUG: "
+ "parent handle of device %x\n", ioc->name,
+ __func__, __LINE__, sas_device.handle_parent));
+ port_info = mptsas_find_portinfo_by_handle(ioc,
+ sas_device.handle_parent);
+
+ if (port_info == ioc->hba_port_info)
+ mptsas_probe_hba_phys(ioc);
+ else if (port_info)
+ mptsas_expander_refresh(ioc, port_info);
+ else {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s %d port info is NULL\n",
+ ioc->name, __func__, __LINE__));
+ break;
+ }
+ phy_info = mptsas_refreshing_device_handles
+ (ioc, &sas_device);
+ }
+
+ if (!phy_info) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s %d phy info is NULL\n",
+ ioc->name, __func__, __LINE__));
+ break;
+ }
+
+ if (mptsas_get_rphy(phy_info))
+ break;
+
+ mptsas_add_end_device(ioc, phy_info);
+ break;
+
+ case MPTSAS_DEL_DEVICE:
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ hot_plug_info->sas_address);
+ mptsas_del_end_device(ioc, phy_info);
+ break;
+
+ case MPTSAS_DEL_PHYSDISK:
+
+ mpt_findImVolumes(ioc);
+
+ phy_info = mptsas_find_phyinfo_by_phys_disk_num(
+ ioc, hot_plug_info->phys_disk_num,
+ hot_plug_info->channel,
+ hot_plug_info->id);
+ mptsas_del_end_device(ioc, phy_info);
+ break;
+
+ case MPTSAS_ADD_PHYSDISK_REPROBE:
+
+ if (mptsas_sas_device_pg0(ioc, &sas_device,
+ (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ (hot_plug_info->channel << 8) + hot_plug_info->id)) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
+ break;
+ }
+
+ /* If there is no FW B_T mapping for this device then break */
+ if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
+ || !(sas_device.flags &
+ MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
+ break;
+
+ phy_info = mptsas_find_phyinfo_by_sas_address(
+ ioc, sas_device.sas_address);
+
+ if (!phy_info) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
+ break;
+ }
+
+ starget = mptsas_get_starget(phy_info);
+ if (!starget) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
+ break;
+ }
+
+ vtarget = starget->hostdata;
+ if (!vtarget) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
+ break;
+ }
+
+ mpt_findImVolumes(ioc);
+
+ starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hidding: "
+ "fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
+ ioc->name, hot_plug_info->channel, hot_plug_info->id,
+ hot_plug_info->phys_disk_num, (unsigned long long)
+ sas_device.sas_address);
+
+ vtarget->id = hot_plug_info->phys_disk_num;
+ vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
+ phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
+ mptsas_reprobe_target(starget, 1);
+ break;
+
+ case MPTSAS_DEL_PHYSDISK_REPROBE:
+
+ if (mptsas_sas_device_pg0(ioc, &sas_device,
+ (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+ MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+ (hot_plug_info->channel << 8) + hot_plug_info->id)) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n",
+ ioc->name, __func__,
+ hot_plug_info->id, __LINE__));
+ break;
+ }
+
+ /* If there is no FW B_T mapping for this device then break */
+ if (!(sas_device.flags & MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)
+ || !(sas_device.flags &
+ MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED))
+ break;
+
+ phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+ sas_device.sas_address);
+ if (!phy_info) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
+ break;
+ }
+
+ starget = mptsas_get_starget(phy_info);
+ if (!starget) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
+ break;
+ }
+
+ vtarget = starget->hostdata;
+ if (!vtarget) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
+ break;
+ }
+
+ if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "%s: fw_id=%d exit at line=%d\n", ioc->name,
+ __func__, hot_plug_info->id, __LINE__));
+ break;
+ }
+
+ mpt_findImVolumes(ioc);
+
+ starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
+ " fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
+ ioc->name, hot_plug_info->channel, hot_plug_info->id,
+ hot_plug_info->phys_disk_num, (unsigned long long)
+ sas_device.sas_address);
+
+ vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+ vtarget->id = hot_plug_info->id;
+ phy_info->attached.phys_disk_num = ~0;
+ mptsas_reprobe_target(starget, 0);
+ mptsas_add_device_component_by_fw(ioc,
+ hot_plug_info->channel, hot_plug_info->id);
+ break;
+
+ case MPTSAS_ADD_RAID:
+
+ mpt_findImVolumes(ioc);
+ printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
+ "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
+ hot_plug_info->id);
+ scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
+ hot_plug_info->id, 0);
+ break;
+
+ case MPTSAS_DEL_RAID:
+
+ mpt_findImVolumes(ioc);
+ printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
+ "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
+ hot_plug_info->id);
+ scsi_remove_device(hot_plug_info->sdev);
+ scsi_device_put(hot_plug_info->sdev);
+ break;
+
+ case MPTSAS_ADD_INACTIVE_VOLUME:
+
+ mpt_findImVolumes(ioc);
+ mptsas_adding_inactive_raid_components(ioc,
+ hot_plug_info->channel, hot_plug_info->id);
+ break;
+
+ default:
+ break;
+ }
+
+ mptsas_free_fw_event(ioc, fw_event);
+}
+
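+/**
+ * mptsas_send_sas_event - handle a SAS device status change event
+ * @fw_event: firmware event work item carrying the device status event data
+ *
+ * Ignores non SSP/STP/SATA targets, clears not-present persistent table
+ * entries when requested, and converts added/not-responding events into
+ * MPTSAS_ADD_DEVICE/MPTSAS_DEL_DEVICE hotplug work.
+ **/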
+static void
+mptsas_send_sas_event(struct fw_event_work *fw_event)
+{
+ MPT_ADAPTER *ioc;
+ struct mptsas_hotplug_event hot_plug_info;
+ EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
+ u32 device_info;
+ u64 sas_address;
+
+ ioc = fw_event->ioc;
+ sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
+ fw_event->event_data;
+ device_info = le32_to_cpu(sas_event_data->DeviceInfo);
+
+ if ((device_info &
+ (MPI_SAS_DEVICE_INFO_SSP_TARGET |
+ MPI_SAS_DEVICE_INFO_STP_TARGET |
+ MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
+ mptsas_free_fw_event(ioc, fw_event);
+ return;
+ }
+
+ if (sas_event_data->ReasonCode ==
+ MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
+ mptbase_sas_persist_operation(ioc,
+ MPI_SAS_OP_CLEAR_NOT_PRESENT);
+ mptsas_free_fw_event(ioc, fw_event);
+ return;
+ }
+
+ switch (sas_event_data->ReasonCode) {
+ case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
+ case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
+ memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+ hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
+ hot_plug_info.channel = sas_event_data->Bus;
+ hot_plug_info.id = sas_event_data->TargetID;
+ hot_plug_info.phy_id = sas_event_data->PhyNum;
+ memcpy(&sas_address, &sas_event_data->SASAddress,
+ sizeof(u64));
+ hot_plug_info.sas_address = le64_to_cpu(sas_address);
+ hot_plug_info.device_info = device_info;
+ if (sas_event_data->ReasonCode &
+ MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
+ hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
+ else
+ hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
+ mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
+ break;
+
+ case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
+ mptbase_sas_persist_operation(ioc,
+ MPI_SAS_OP_CLEAR_NOT_PRESENT);
+ mptsas_free_fw_event(ioc, fw_event);
+ break;
+
+ case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
+ /* TODO */
+ case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
+ /* TODO */
+ default:
+ mptsas_free_fw_event(ioc, fw_event);
+ break;
+ }
+}
+
+static void
+mptsas_send_raid_event(struct fw_event_work *fw_event)
+{
+ MPT_ADAPTER *ioc;
+ EVENT_DATA_RAID *raid_event_data;
+ struct mptsas_hotplug_event hot_plug_info;
+ int status;
+ int state;
+ struct scsi_device *sdev = NULL;
+ VirtDevice *vdevice = NULL;
+ RaidPhysDiskPage0_t phys_disk;
+
+ ioc = fw_event->ioc;
+ raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
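+	/* As used below, SettingsStatus carries the status flags (e.g.
+	 * MPI_RAIDVOL0_STATUS_FLAG_ENABLED) in its low byte and the new
+	 * volume/physical-disk state in bits 8-15.
+	 */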
+ status = le32_to_cpu(raid_event_data->SettingsStatus);
+ state = (status >> 8) & 0xff;
+
+ memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+ hot_plug_info.id = raid_event_data->VolumeID;
+ hot_plug_info.channel = raid_event_data->VolumeBus;
+ hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
+
+ if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
+ raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
+ raid_event_data->ReasonCode ==
+ MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
+ sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
+ hot_plug_info.id, 0);
+ hot_plug_info.sdev = sdev;
+ if (sdev)
+ vdevice = sdev->hostdata;
+ }
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
+ "ReasonCode=%02x\n", ioc->name, __func__,
+ raid_event_data->ReasonCode));
+
+ switch (raid_event_data->ReasonCode) {
+ case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
+ hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
+ hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
+ break;
+ case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
+ switch (state) {
+ case MPI_PD_STATE_ONLINE:
+ case MPI_PD_STATE_NOT_COMPATIBLE:
+ mpt_raid_phys_disk_pg0(ioc,
+ raid_event_data->PhysDiskNum, &phys_disk);
+ hot_plug_info.id = phys_disk.PhysDiskID;
+ hot_plug_info.channel = phys_disk.PhysDiskBus;
+ hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
+ break;
+ case MPI_PD_STATE_FAILED:
+ case MPI_PD_STATE_MISSING:
+ case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
+ case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
+ case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
+ hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
+ break;
+ default:
+ break;
+ }
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_DELETED:
+ if (!sdev)
+ break;
+ vdevice->vtarget->deleted = 1; /* block IO */
+ hot_plug_info.event_type = MPTSAS_DEL_RAID;
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_CREATED:
+ if (sdev) {
+ scsi_device_put(sdev);
+ break;
+ }
+ hot_plug_info.event_type = MPTSAS_ADD_RAID;
+ break;
+ case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
+ if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
+ if (!sdev)
+ break;
+ vdevice->vtarget->deleted = 1; /* block IO */
+ hot_plug_info.event_type = MPTSAS_DEL_RAID;
+ break;
+ }
+ switch (state) {
+ case MPI_RAIDVOL0_STATUS_STATE_FAILED:
+ case MPI_RAIDVOL0_STATUS_STATE_MISSING:
+ if (!sdev)
+ break;
+ vdevice->vtarget->deleted = 1; /* block IO */
+ hot_plug_info.event_type = MPTSAS_DEL_RAID;
+ break;
+ case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
+ case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
+ if (sdev) {
+ scsi_device_put(sdev);
+ break;
+ }
+ hot_plug_info.event_type = MPTSAS_ADD_RAID;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
+ mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
+ else
+ mptsas_free_fw_event(ioc, fw_event);
+}
+
+/**
+ * mptsas_issue_tm - send mptsas internal tm request
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @type: Task Management type
+ * @channel: channel number for task management
+ * @id: Logical Target ID for reset (if appropriate)
+ * @lun: Logical unit for reset (if appropriate)
+ * @task_context: Context for the task to be aborted
+ * @timeout: timeout for task management control
+ * @issue_reset: set to 1 on return if the caller should issue a host reset
+ *
+ * Returns 0 on success and -1 on failure.
+ */
+static int
+mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
+ int task_context, ulong timeout, u8 *issue_reset)
+{
+ MPT_FRAME_HDR *mf;
+ SCSITaskMgmt_t *pScsiTm;
+ int retval;
+ unsigned long timeleft;
+
+ *issue_reset = 0;
+ mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
+ if (mf == NULL) {
+ retval = -1; /* return failure */
+ dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
+ "msg frames!!\n", ioc->name));
+ goto out;
+ }
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
+ "task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
+ "fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
+ type, timeout, channel, id, (unsigned long long)lun,
+ task_context));
+
+ pScsiTm = (SCSITaskMgmt_t *) mf;
+ memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
+ pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
+ pScsiTm->TaskType = type;
+ pScsiTm->MsgFlags = 0;
+ pScsiTm->TargetID = id;
+ pScsiTm->Bus = channel;
+ pScsiTm->ChainOffset = 0;
+ pScsiTm->Reserved = 0;
+ pScsiTm->Reserved1 = 0;
+ pScsiTm->TaskMsgContext = task_context;
+ int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
+
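+	/* Task management frames are sent on the high-priority queue; the
+	 * callback registered on mptsasDeviceResetCtx is expected to complete
+	 * taskmgmt_cmds.done, which is waited on below with a timeout.
+	 */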
+ INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+ retval = 0;
+ mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
+
+ /* Now wait for the command to complete */
+ timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
+ timeout*HZ);
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ retval = -1; /* return failure */
+ dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
+ mpt_free_msg_frame(ioc, mf);
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out;
+ *issue_reset = 1;
+ goto out;
+ }
+
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ retval = -1; /* return failure */
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt request: failed with no reply\n", ioc->name));
+ goto out;
+ }
+
+ out:
+ CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ return retval;
+}
+
+/**
+ * mptsas_broadcast_primative_work - Handle broadcast primitives
+ * @fw_event: work queue payload containing info describing the event
+ *
+ * This is handled in workqueue context.
+ */
+static void
+mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
+{
+ MPT_ADAPTER *ioc = fw_event->ioc;
+ MPT_FRAME_HDR *mf;
+ VirtDevice *vdevice;
+ int ii;
+ struct scsi_cmnd *sc;
+ SCSITaskMgmtReply_t *pScsiTmReply;
+ u8 issue_reset;
+ int task_context;
+ u8 channel, id;
+ int lun;
+ u32 termination_count;
+ u32 query_count;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s - enter\n", ioc->name, __func__));
+
+ mutex_lock(&ioc->taskmgmt_cmds.mutex);
+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ mptsas_requeue_fw_event(ioc, fw_event, 1000);
+ return;
+ }
+
+ issue_reset = 0;
+ termination_count = 0;
+ query_count = 0;
+ mpt_findImVolumes(ioc);
+ pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
+
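+	/* For every outstanding SCSI command, issue QUERY_TASK on its target;
+	 * unless the firmware confirms the task is still queued, abort the
+	 * whole task set for that device and accumulate the termination count.
+	 */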
+ for (ii = 0; ii < ioc->req_depth; ii++) {
+ if (ioc->fw_events_off)
+ goto out;
+ sc = mptscsih_get_scsi_lookup(ioc, ii);
+ if (!sc)
+ continue;
+ mf = MPT_INDEX_2_MFPTR(ioc, ii);
+ if (!mf)
+ continue;
+ task_context = mf->u.frame.hwhdr.msgctxu.MsgContext;
+ vdevice = sc->device->hostdata;
+ if (!vdevice || !vdevice->vtarget)
+ continue;
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
+ continue; /* skip hidden raid components */
+ if (vdevice->vtarget->raidVolume)
+ continue; /* skip hidden raid components */
+ channel = vdevice->vtarget->channel;
+ id = vdevice->vtarget->id;
+ lun = vdevice->lun;
+ if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
+ channel, id, (u64)lun, task_context, 30, &issue_reset))
+ goto out;
+ query_count++;
+ termination_count +=
+ le32_to_cpu(pScsiTmReply->TerminationCount);
+ if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) &&
+ (pScsiTmReply->ResponseCode ==
+ MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
+ pScsiTmReply->ResponseCode ==
+ MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
+ continue;
+ if (mptsas_issue_tm(ioc,
+ MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
+ channel, id, (u64)lun, 0, 30, &issue_reset))
+ goto out;
+ termination_count +=
+ le32_to_cpu(pScsiTmReply->TerminationCount);
+ }
+
+ out:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s - exit, query_count = %d termination_count = %d\n",
+ ioc->name, __func__, query_count, termination_count));
+
+ ioc->broadcast_aen_busy = 0;
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+
+ if (issue_reset) {
+ printk(MYIOC_s_WARN_FMT
+ "Issuing Reset from %s!! doorbell=0x%08x\n",
+ ioc->name, __func__, mpt_GetIocState(ioc, 0));
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ }
+ mptsas_free_fw_event(ioc, fw_event);
+}
+
+/*
+ * mptsas_send_ir2_event - handle exposing hidden disk when
+ * an inactive raid volume is added
+ *
+ * @fw_event: work queue payload containing the IR2 event data
+ *
+ */
+static void
+mptsas_send_ir2_event(struct fw_event_work *fw_event)
+{
+ MPT_ADAPTER *ioc;
+ struct mptsas_hotplug_event hot_plug_info;
+ MPI_EVENT_DATA_IR2 *ir2_data;
+ u8 reasonCode;
+ RaidPhysDiskPage0_t phys_disk;
+
+ ioc = fw_event->ioc;
+ ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data;
+ reasonCode = ir2_data->ReasonCode;
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
+ "ReasonCode=%02x\n", ioc->name, __func__, reasonCode));
+
+ memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+ hot_plug_info.id = ir2_data->TargetID;
+ hot_plug_info.channel = ir2_data->Bus;
+ switch (reasonCode) {
+ case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
+ hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME;
+ break;
+ case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
+ hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
+ hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
+ break;
+ case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
+ hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
+ mpt_raid_phys_disk_pg0(ioc,
+ ir2_data->PhysDiskNum, &phys_disk);
+ hot_plug_info.id = phys_disk.PhysDiskID;
+ hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
+ break;
+ default:
+ mptsas_free_fw_event(ioc, fw_event);
+ return;
+ }
+ mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
+}
+
+static int
+mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
+{
+ u32 event = le32_to_cpu(reply->Event);
+ int sz, event_data_sz;
+ struct fw_event_work *fw_event;
+ unsigned long delay;
+
+ if (ioc->bus_type != SAS)
+ return 0;
+
+ /* events turned off due to host reset or driver unloading */
+ if (ioc->fw_events_off)
+ return 0;
+
+ delay = msecs_to_jiffies(1);
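+	/* Called in interrupt context: filter the event here, then copy the
+	 * event data into a fw_event_work and queue it (optionally delayed)
+	 * for the firmware-event workqueue.
+	 */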
+ switch (event) {
+ case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
+ {
+ EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data =
+ (EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data;
+ if (broadcast_event_data->Primitive !=
+ MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
+ return 0;
+ if (ioc->broadcast_aen_busy)
+ return 0;
+ ioc->broadcast_aen_busy = 1;
+ break;
+ }
+ case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ {
+ EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
+ (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
+ u16 ioc_stat;
+ ioc_stat = le16_to_cpu(reply->IOCStatus);
+
+ if (sas_event_data->ReasonCode ==
+ MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
+ mptsas_target_reset_queue(ioc, sas_event_data);
+ return 0;
+ }
+ if (sas_event_data->ReasonCode ==
+ MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+ ioc->device_missing_delay &&
+ (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)) {
+ VirtTarget *vtarget = NULL;
+ u8 id, channel;
+
+ id = sas_event_data->TargetID;
+ channel = sas_event_data->Bus;
+
+ vtarget = mptsas_find_vtarget(ioc, channel, id);
+ if (vtarget) {
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "LogInfo (0x%x) available for "
+						"INTERNAL_DEVICE_RESET "
+ "fw_id %d fw_channel %d\n", ioc->name,
+ le32_to_cpu(reply->IOCLogInfo),
+ id, channel));
+ if (vtarget->raidVolume) {
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Skipping Raid Volume for inDMD\n",
+ ioc->name));
+ } else {
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Setting device flag inDMD\n",
+ ioc->name));
+ vtarget->inDMD = 1;
+ }
+
+ }
+
+ }
+
+ break;
+ }
+ case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
+ {
+ MpiEventDataSasExpanderStatusChange_t *expander_data =
+ (MpiEventDataSasExpanderStatusChange_t *)reply->Data;
+
+ if (ioc->old_sas_discovery_protocal)
+ return 0;
+
+ if (expander_data->ReasonCode ==
+ MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING &&
+ ioc->device_missing_delay)
+ delay = HZ * ioc->device_missing_delay;
+ break;
+ }
+ case MPI_EVENT_SAS_DISCOVERY:
+ {
+ u32 discovery_status;
+ EventDataSasDiscovery_t *discovery_data =
+ (EventDataSasDiscovery_t *)reply->Data;
+
+ discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus);
+ ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0;
+ if (ioc->old_sas_discovery_protocal && !discovery_status)
+ mptsas_queue_rescan(ioc);
+ return 0;
+ }
+ case MPI_EVENT_INTEGRATED_RAID:
+ case MPI_EVENT_PERSISTENT_TABLE_FULL:
+ case MPI_EVENT_IR2:
+ case MPI_EVENT_SAS_PHY_LINK_STATUS:
+ case MPI_EVENT_QUEUE_FULL:
+ break;
+ default:
+ return 0;
+ }
+
+ event_data_sz = ((reply->MsgLength * 4) -
+ offsetof(EventNotificationReply_t, Data));
+ sz = offsetof(struct fw_event_work, event_data) + event_data_sz;
+ fw_event = kzalloc(sz, GFP_ATOMIC);
+ if (!fw_event) {
+ printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
+ __func__, __LINE__);
+ return 0;
+ }
+ memcpy(fw_event->event_data, reply->Data, event_data_sz);
+ fw_event->event = event;
+ fw_event->ioc = ioc;
+ mptsas_add_fw_event(ioc, fw_event, delay);
+ return 0;
+}
+
+/* Delete a volume when no longer listed in ioc pg2
+ */
+static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id)
+{
+ struct scsi_device *sdev;
+ int i;
+
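+	/* If the volume ID is still listed in IOC Page 2 the device is kept;
+	 * otherwise it is removed from the SCSI mid-layer.  Either way the
+	 * reference taken by scsi_device_lookup() is dropped.
+	 */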
+ sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0);
+ if (!sdev)
+ return;
+ if (!ioc->raid_data.pIocPg2)
+ goto out;
+ if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
+ goto out;
+ for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
+ if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id)
+ goto release_sdev;
+ out:
+ printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
+ "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id);
+ scsi_remove_device(sdev);
+ release_sdev:
+ scsi_device_put(sdev);
+}
+
+static int
+mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *sh;
+ MPT_SCSI_HOST *hd;
+ MPT_ADAPTER *ioc;
+ unsigned long flags;
+ int ii;
+ int numSGE = 0;
+ int scale;
+ int ioc_cap;
+ int error=0;
+ int r;
+
+ r = mpt_attach(pdev,id);
+ if (r)
+ return r;
+
+ ioc = pci_get_drvdata(pdev);
+ mptsas_fw_event_off(ioc);
+ ioc->DoneCtx = mptsasDoneCtx;
+ ioc->TaskCtx = mptsasTaskCtx;
+ ioc->InternalCtx = mptsasInternalCtx;
+ ioc->schedule_target_reset = &mptsas_schedule_target_reset;
+	/* Sanity check: the MPT adapter must be operational before we
+	 * register a SCSI host for it.
+	 */
+ if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
+ printk(MYIOC_s_WARN_FMT
+ "Skipping because it's not operational!\n",
+ ioc->name);
+ error = -ENODEV;
+ goto out_mptsas_probe;
+ }
+
+ if (!ioc->active) {
+ printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
+ ioc->name);
+ error = -ENODEV;
+ goto out_mptsas_probe;
+ }
+
+ /* Sanity check - ensure at least 1 port is INITIATOR capable
+ */
+ ioc_cap = 0;
+ for (ii = 0; ii < ioc->facts.NumberOfPorts; ii++) {
+ if (ioc->pfacts[ii].ProtocolFlags &
+ MPI_PORTFACTS_PROTOCOL_INITIATOR)
+ ioc_cap++;
+ }
+
+ if (!ioc_cap) {
+ printk(MYIOC_s_WARN_FMT
+ "Skipping ioc=%p because SCSI Initiator mode "
+ "is NOT enabled!\n", ioc->name, ioc);
+ return 0;
+ }
+
+ sh = scsi_host_alloc(&mptsas_driver_template, sizeof(MPT_SCSI_HOST));
+ if (!sh) {
+ printk(MYIOC_s_WARN_FMT
+ "Unable to register controller with SCSI subsystem\n",
+ ioc->name);
+ error = -1;
+ goto out_mptsas_probe;
+ }
+
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+
+ /* Attach the SCSI Host to the IOC structure
+ */
+ ioc->sh = sh;
+
+ sh->io_port = 0;
+ sh->n_io_port = 0;
+ sh->irq = 0;
+
+ /* set 16 byte cdb's */
+ sh->max_cmd_len = 16;
+ sh->can_queue = min_t(int, ioc->req_depth - 10, sh->can_queue);
+ sh->max_id = -1;
+ sh->max_lun = max_lun;
+ sh->transportt = mptsas_transport_template;
+
+ /* Required entry.
+ */
+ sh->unique_id = ioc->id;
+
+ INIT_LIST_HEAD(&ioc->sas_topology);
+ mutex_init(&ioc->sas_topology_mutex);
+ mutex_init(&ioc->sas_discovery_mutex);
+ mutex_init(&ioc->sas_mgmt.mutex);
+ init_completion(&ioc->sas_mgmt.done);
+
+ /* Verify that we won't exceed the maximum
+ * number of chain buffers
+ * We can optimize: ZZ = req_sz/sizeof(SGE)
+ * For 32bit SGE's:
+ * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
+ * + (req_sz - 64)/sizeof(SGE)
+ * A slightly different algorithm is required for
+ * 64bit SGEs.
+ */
+ scale = ioc->req_sz/ioc->SGE_size;
+ if (ioc->sg_addr_size == sizeof(u64)) {
+ numSGE = (scale - 1) *
+ (ioc->facts.MaxChainDepth-1) + scale +
+ (ioc->req_sz - 60) / ioc->SGE_size;
+ } else {
+ numSGE = 1 + (scale - 1) *
+ (ioc->facts.MaxChainDepth-1) + scale +
+ (ioc->req_sz - 64) / ioc->SGE_size;
+ }
+
+ if (numSGE < sh->sg_tablesize) {
+ /* Reset this value */
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Resetting sg_tablesize to %d from %d\n",
+ ioc->name, numSGE, sh->sg_tablesize));
+ sh->sg_tablesize = numSGE;
+ }
+
+ hd = shost_priv(sh);
+ hd->ioc = ioc;
+
+ /* SCSI needs scsi_cmnd lookup table!
+ * (with size equal to req_depth*PtrSz!)
+ */
+ ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
+ if (!ioc->ScsiLookup) {
+ error = -ENOMEM;
+ spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+ goto out_mptsas_probe;
+ }
+ spin_lock_init(&ioc->scsi_lookup_lock);
+
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
+ ioc->name, ioc->ScsiLookup));
+
+ ioc->sas_data.ptClear = mpt_pt_clear;
+
+ hd->last_queue_full = 0;
+ INIT_LIST_HEAD(&hd->target_reset_list);
+ INIT_LIST_HEAD(&ioc->sas_device_info_list);
+ mutex_init(&ioc->sas_device_info_mutex);
+
+ spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+ if (ioc->sas_data.ptClear==1) {
+ mptbase_sas_persist_operation(
+ ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
+ }
+
+ error = scsi_add_host(sh, &ioc->pcidev->dev);
+ if (error) {
+ dprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "scsi_add_host failed\n", ioc->name));
+ goto out_mptsas_probe;
+ }
+
+ /* older firmware doesn't support expander events */
+ if ((ioc->facts.HeaderVersion >> 8) < 0xE)
+ ioc->old_sas_discovery_protocal = 1;
+ mptsas_scan_sas_topology(ioc);
+ mptsas_fw_event_on(ioc);
+ return 0;
+
+ out_mptsas_probe:
+
+ mptscsih_remove(pdev);
+ return error;
+}
+
+void
+mptsas_shutdown(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+
+ mptsas_fw_event_off(ioc);
+ mptsas_cleanup_fw_event_q(ioc);
+}
+
+static void __devexit mptsas_remove(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct mptsas_portinfo *p, *n;
+ int i;
+
+ if (!ioc->sh) {
+ printk(MYIOC_s_INFO_FMT "IOC is in Target mode\n", ioc->name);
+ mpt_detach(pdev);
+ return;
+ }
+
+ mptsas_shutdown(pdev);
+
+ mptsas_del_device_components(ioc);
+
+ ioc->sas_discovery_ignore_events = 1;
+ sas_remove_host(ioc->sh);
+
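+	/* Tear down the cached SAS topology: delete every transport-layer
+	 * port and free the per-port phy_info arrays.
+	 */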
+ mutex_lock(&ioc->sas_topology_mutex);
+ list_for_each_entry_safe(p, n, &ioc->sas_topology, list) {
+ list_del(&p->list);
+ for (i = 0 ; i < p->num_phys ; i++)
+ mptsas_port_delete(ioc, p->phy_info[i].port_details);
+
+ kfree(p->phy_info);
+ kfree(p);
+ }
+ mutex_unlock(&ioc->sas_topology_mutex);
+ ioc->hba_port_info = NULL;
+ mptscsih_remove(pdev);
+}
+
+static struct pci_device_id mptsas_pci_table[] = {
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1064,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1064E,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068E,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1078,
+ PCI_ANY_ID, PCI_ANY_ID },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
+
+
+static struct pci_driver mptsas_driver = {
+ .name = "mptsas",
+ .id_table = mptsas_pci_table,
+ .probe = mptsas_probe,
+ .remove = __devexit_p(mptsas_remove),
+ .shutdown = mptsas_shutdown,
+#ifdef CONFIG_PM
+ .suspend = mptscsih_suspend,
+ .resume = mptscsih_resume,
+#endif
+};
+
+static int __init
+mptsas_init(void)
+{
+ int error;
+
+ show_mptmod_ver(my_NAME, my_VERSION);
+
+ mptsas_transport_template =
+ sas_attach_transport(&mptsas_transport_functions);
+ if (!mptsas_transport_template)
+ return -ENODEV;
+ mptsas_transport_template->eh_timed_out = mptsas_eh_timed_out;
+
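+	/* Register separate callback contexts with the MPT base driver for
+	 * I/O completion, task management, internal (scan) commands, SAS
+	 * management and the internal device-reset TM path.
+	 */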
+ mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER,
+ "mptscsih_io_done");
+ mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER,
+ "mptscsih_taskmgmt_complete");
+ mptsasInternalCtx =
+ mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER,
+ "mptscsih_scandv_complete");
+ mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER,
+ "mptsas_mgmt_done");
+ mptsasDeviceResetCtx =
+ mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER,
+ "mptsas_taskmgmt_complete");
+
+ mpt_event_register(mptsasDoneCtx, mptsas_event_process);
+ mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
+
+ error = pci_register_driver(&mptsas_driver);
+ if (error)
+ sas_release_transport(mptsas_transport_template);
+
+ return error;
+}
+
+static void __exit
+mptsas_exit(void)
+{
+ pci_unregister_driver(&mptsas_driver);
+ sas_release_transport(mptsas_transport_template);
+
+ mpt_reset_deregister(mptsasDoneCtx);
+ mpt_event_deregister(mptsasDoneCtx);
+
+ mpt_deregister(mptsasMgmtCtx);
+ mpt_deregister(mptsasInternalCtx);
+ mpt_deregister(mptsasTaskCtx);
+ mpt_deregister(mptsasDoneCtx);
+ mpt_deregister(mptsasDeviceResetCtx);
+}
+
+module_init(mptsas_init);
+module_exit(mptsas_exit);
diff --git a/drivers/message/fusion/mptsas.h b/drivers/message/fusion/mptsas.h
new file mode 100644
index 00000000..57e86ab7
--- /dev/null
+++ b/drivers/message/fusion/mptsas.h
@@ -0,0 +1,192 @@
+/*
+ * linux/drivers/message/fusion/mptsas.h
+ *          High performance SAS device driver.
+ *          For use with LSI PCI chip/adapter(s)
+ *          running LSI MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef MPTSAS_H_INCLUDED
+#define MPTSAS_H_INCLUDED
+/*{-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+struct mptsas_target_reset_event {
+ struct list_head list;
+ EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data;
+ u8 target_reset_issued;
+ unsigned long time_count;
+};
+
+enum mptsas_hotplug_action {
+ MPTSAS_ADD_DEVICE,
+ MPTSAS_DEL_DEVICE,
+ MPTSAS_ADD_RAID,
+ MPTSAS_DEL_RAID,
+ MPTSAS_ADD_PHYSDISK,
+ MPTSAS_ADD_PHYSDISK_REPROBE,
+ MPTSAS_DEL_PHYSDISK,
+ MPTSAS_DEL_PHYSDISK_REPROBE,
+ MPTSAS_ADD_INACTIVE_VOLUME,
+ MPTSAS_IGNORE_EVENT,
+};
+
+struct mptsas_mapping{
+ u8 id;
+ u8 channel;
+};
+
+struct mptsas_device_info {
+ struct list_head list;
+ struct mptsas_mapping os; /* operating system mapping*/
+ struct mptsas_mapping fw; /* firmware mapping */
+ u64 sas_address;
+ u32 device_info; /* specific bits for devices */
+ u16 slot; /* enclosure slot id */
+ u64 enclosure_logical_id; /*enclosure address */
+ u8 is_logical_volume; /* is this logical volume */
+ /* this belongs to volume */
+ u8 is_hidden_raid_component;
+ /* this valid when is_hidden_raid_component set */
+ u8 volume_id;
+ /* cached data for a removed device */
+ u8 is_cached;
+};
+
+struct mptsas_hotplug_event {
+ MPT_ADAPTER *ioc;
+ enum mptsas_hotplug_action event_type;
+ u64 sas_address;
+ u8 channel;
+ u8 id;
+ u32 device_info;
+ u16 handle;
+ u8 phy_id;
+ u8 phys_disk_num; /* hrc - unique index*/
+ struct scsi_device *sdev;
+};
+
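+/* One firmware event queued for deferred handling; event_data is a
+ * variable-length copy of the reply's event data, allocated together
+ * with the structure (see mptsas_event_process()).
+ */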
+struct fw_event_work {
+ struct list_head list;
+ struct delayed_work work;
+ MPT_ADAPTER *ioc;
+ u32 event;
+ u8 retries;
+ u8 __attribute__((aligned(4))) event_data[1];
+};
+
+struct mptsas_discovery_event {
+ struct work_struct work;
+ MPT_ADAPTER *ioc;
+};
+
+/*
+ * SAS topology structures
+ *
+ * The MPT Fusion firmware interface spreads information about the
+ * SAS topology over many manufacture pages, thus we need some data
+ * structure to collect it and process it for the SAS transport class.
+ */
+
+struct mptsas_devinfo {
+ u16 handle; /* unique id to address this device */
+ u16 handle_parent; /* unique id to address parent device */
+ u16 handle_enclosure; /* enclosure identifier of the enclosure */
+ u16 slot; /* physical slot in enclosure */
+ u8 phy_id; /* phy number of parent device */
+ u8 port_id; /* sas physical port this device
+ is assoc'd with */
+ u8 id; /* logical target id of this device */
+ u32 phys_disk_num; /* phys disk id, for csmi-ioctls */
+ u8 channel; /* logical bus number of this device */
+	u64	sas_address;    /* WWN of this device; for SATA it is
+				   assigned by the HBA or expander */
+ u32 device_info; /* bitfield detailed info about this device */
+ u16 flags; /* sas device pg0 flags */
+};
+
+/*
+ * Specific details on ports, wide/narrow
+ */
+struct mptsas_portinfo_details{
+	u16	num_phys;	/* number of phys belonging to this port */
+ u64 phy_bitmask; /* TODO, extend support for 255 phys */
+ struct sas_rphy *rphy; /* transport layer rphy object */
+ struct sas_port *port; /* transport layer port object */
+ struct scsi_target *starget;
+ struct mptsas_portinfo *port_info;
+};
+
+struct mptsas_phyinfo {
+ u16 handle; /* unique id to address this */
+ u8 phy_id; /* phy index */
+ u8 port_id; /* firmware port identifier */
+ u8 negotiated_link_rate; /* nego'd link rate for this phy */
+ u8 hw_link_rate; /* hardware max/min phys link rate */
+ u8 programmed_link_rate; /* programmed max/min phy link rate */
+ u8 sas_port_add_phy; /* flag to request sas_port_add_phy*/
+ struct mptsas_devinfo identify; /* point to phy device info */
+ struct mptsas_devinfo attached; /* point to attached device info */
+ struct sas_phy *phy; /* transport layer phy object */
+ struct mptsas_portinfo *portinfo;
+ struct mptsas_portinfo_details * port_details;
+};
+
+struct mptsas_portinfo {
+ struct list_head list;
+ u16 num_phys; /* number of phys */
+ struct mptsas_phyinfo *phy_info;
+};
+
+struct mptsas_enclosure {
+ u64 enclosure_logical_id; /* The WWN for the enclosure */
+ u16 enclosure_handle; /* unique id to address this */
+ u16 flags; /* details enclosure management */
+ u16 num_slot; /* num slots */
+ u16 start_slot; /* first slot */
+ u8 start_id; /* starting logical target id */
+ u8 start_channel; /* starting logical channel id */
+ u8 sep_id; /* SEP device logical target id */
+ u8 sep_channel; /* SEP channel logical channel id */
+};
+
+/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#endif
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
new file mode 100644
index 00000000..a1d4ee66
--- /dev/null
+++ b/drivers/message/fusion/mptscsih.c
@@ -0,0 +1,3357 @@
+/*
+ * linux/drivers/message/fusion/mptscsih.c
+ * For use with LSI PCI chip/adapter(s)
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h> /* for mdelay */
+#include <linux/interrupt.h> /* needed for in_interrupt() proto */
+#include <linux/reboot.h> /* notifier code */
+#include <linux/workqueue.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+
+#include "mptbase.h"
+#include "mptscsih.h"
+#include "lsi/mpi_log_sas.h"
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#define my_NAME "Fusion MPT SCSI Host driver"
+#define my_VERSION MPT_LINUX_VERSION_COMMON
+#define MYNAM "mptscsih"
+
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(my_VERSION);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Other private/forward protos...
+ */
+struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
+static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i);
+static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd);
+static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd);
+int mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
+static void mptscsih_report_queue_full(struct scsi_cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq);
+int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
+
+static int mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
+ SCSIIORequest_t *pReq, int req_idx);
+static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
+static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
+
+int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id,
+ int lun, int ctx2abort, ulong timeout);
+
+int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
+int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
+
+void
+mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
+static int mptscsih_get_completion_code(MPT_ADAPTER *ioc,
+ MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
+int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
+static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
+static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
+
+static int
+mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
+ SCSITaskMgmtReply_t *pScsiTmReply);
+void mptscsih_remove(struct pci_dev *);
+void mptscsih_shutdown(struct pci_dev *);
+#ifdef CONFIG_PM
+int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
+int mptscsih_resume(struct pci_dev *pdev);
+#endif
+
+#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptscsih_getFreeChainBuffer - Function to get a free chain
+ * from the MPT_SCSI_HOST FreeChainQ.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ *	@retIndex: Index of the free chain buffer (output)
+ *
+ *	Returns SUCCESS or FAILED.
+ */
+static inline int
+mptscsih_getFreeChainBuffer(MPT_ADAPTER *ioc, int *retIndex)
+{
+ MPT_FRAME_HDR *chainBuf;
+ unsigned long flags;
+ int rc;
+ int chain_idx;
+
+ dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "getFreeChainBuffer called\n",
+ ioc->name));
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+ if (!list_empty(&ioc->FreeChainQ)) {
+ int offset;
+
+ chainBuf = list_entry(ioc->FreeChainQ.next, MPT_FRAME_HDR,
+ u.frame.linkage.list);
+ list_del(&chainBuf->u.frame.linkage.list);
+ offset = (u8 *)chainBuf - (u8 *)ioc->ChainBuffer;
+ chain_idx = offset / ioc->req_sz;
+ rc = SUCCESS;
+ dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "getFreeChainBuffer chainBuf=%p ChainBuffer=%p offset=%d chain_idx=%d\n",
+ ioc->name, chainBuf, ioc->ChainBuffer, offset, chain_idx));
+ } else {
+ rc = FAILED;
+ chain_idx = MPT_HOST_NO_CHAIN;
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "getFreeChainBuffer failed\n",
+ ioc->name));
+ }
+ spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+ *retIndex = chain_idx;
+ return rc;
+} /* mptscsih_getFreeChainBuffer() */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptscsih_AddSGE - Add a SGE (plus chain buffers) to the
+ * SCSIIORequest_t Message Frame.
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @SCpnt: Pointer to scsi_cmnd structure
+ *	@pReq: Pointer to SCSIIORequest_t structure
+ *	@req_idx: Index of the SCSI IO request frame
+ *
+ *	Returns SUCCESS or FAILED.
+ */
+static int
+mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
+ SCSIIORequest_t *pReq, int req_idx)
+{
+ char *psge;
+ char *chainSge;
+ struct scatterlist *sg;
+ int frm_sz;
+ int sges_left, sg_done;
+ int chain_idx = MPT_HOST_NO_CHAIN;
+ int sgeOffset;
+ int numSgeSlots, numSgeThisFrame;
+ u32 sgflags, sgdir, thisxfer = 0;
+ int chain_dma_off = 0;
+ int newIndex;
+ int ii;
+ dma_addr_t v2;
+ u32 RequestNB;
+
+ sgdir = le32_to_cpu(pReq->Control) & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
+ if (sgdir == MPI_SCSIIO_CONTROL_WRITE) {
+ sgdir = MPT_TRANSFER_HOST_TO_IOC;
+ } else {
+ sgdir = MPT_TRANSFER_IOC_TO_HOST;
+ }
+
+ psge = (char *) &pReq->SGL;
+ frm_sz = ioc->req_sz;
+
+ /* Map the data portion, if any.
+ * sges_left = 0 if no data transfer.
+ */
+ sges_left = scsi_dma_map(SCpnt);
+ if (sges_left < 0)
+ return FAILED;
+
+ /* Handle the SG case.
+ */
+ sg = scsi_sglist(SCpnt);
+ sg_done = 0;
+ sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION);
+ chainSge = NULL;
+
+ /* Prior to entering this loop - the following must be set
+ * current MF: sgeOffset (bytes)
+ * chainSge (Null if original MF is not a chain buffer)
+ * sg_done (num SGE done for this MF)
+ */
+
+nextSGEset:
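+	/* Fill the current frame (the original message frame or a chain
+	 * buffer) with as many simple SGEs as fit; if scatterlist entries
+	 * remain, the last slot becomes a chain element pointing at the
+	 * next chain buffer and control jumps back here.
+	 */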
+ numSgeSlots = ((frm_sz - sgeOffset) / ioc->SGE_size);
+ numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots;
+
+ sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | sgdir;
+
+ /* Get first (num - 1) SG elements
+ * Skip any SG entries with a length of 0
+ * NOTE: at finish, sg and psge pointed to NEXT data/location positions
+ */
+ for (ii=0; ii < (numSgeThisFrame-1); ii++) {
+ thisxfer = sg_dma_len(sg);
+ if (thisxfer == 0) {
+ /* Get next SG element from the OS */
+ sg = sg_next(sg);
+ sg_done++;
+ continue;
+ }
+
+ v2 = sg_dma_address(sg);
+ ioc->add_sge(psge, sgflags | thisxfer, v2);
+
+ /* Get next SG element from the OS */
+ sg = sg_next(sg);
+ psge += ioc->SGE_size;
+ sgeOffset += ioc->SGE_size;
+ sg_done++;
+ }
+
+ if (numSgeThisFrame == sges_left) {
+ /* Add last element, end of buffer and end of list flags.
+ */
+ sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT |
+ MPT_SGE_FLAGS_END_OF_BUFFER |
+ MPT_SGE_FLAGS_END_OF_LIST;
+
+ /* Add last SGE and set termination flags.
+ * Note: Last SGE may have a length of 0 - which should be ok.
+ */
+ thisxfer = sg_dma_len(sg);
+
+ v2 = sg_dma_address(sg);
+ ioc->add_sge(psge, sgflags | thisxfer, v2);
+ sgeOffset += ioc->SGE_size;
+ sg_done++;
+
+ if (chainSge) {
+ /* The current buffer is a chain buffer,
+ * but there is not another one.
+ * Update the chain element
+ * Offset and Length fields.
+ */
+ ioc->add_chain((char *)chainSge, 0, sgeOffset,
+ ioc->ChainBufferDMA + chain_dma_off);
+ } else {
+ /* The current buffer is the original MF
+ * and there is no Chain buffer.
+ */
+ pReq->ChainOffset = 0;
+ RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03;
+ dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Single Buffer RequestNB=%x, sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
+ ioc->RequestNB[req_idx] = RequestNB;
+ }
+ } else {
+ /* At least one chain buffer is needed.
+ * Complete the first MF
+ * - last SGE element, set the LastElement bit
+ * - set ChainOffset (words) for orig MF
+ * (OR finish previous MF chain buffer)
+ * - update MFStructPtr ChainIndex
+ * - Populate chain element
+ * Also
+ * Loop until done.
+ */
+
+ dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "SG: Chain Required! sg done %d\n",
+ ioc->name, sg_done));
+
+ /* Set LAST_ELEMENT flag for last non-chain element
+ * in the buffer. Since psge points at the NEXT
+ * SGE element, go back one SGE element, update the flags
+ * and reset the pointer. (Note: sgflags & thisxfer are already
+ * set properly).
+ */
+ if (sg_done) {
+ u32 *ptmp = (u32 *) (psge - ioc->SGE_size);
+ sgflags = le32_to_cpu(*ptmp);
+ sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT;
+ *ptmp = cpu_to_le32(sgflags);
+ }
+
+ if (chainSge) {
+ /* The current buffer is a chain buffer.
+ * chainSge points to the previous Chain Element.
+ * Update its chain element Offset and Length (must
+ * include chain element size) fields.
+ * Old chain element is now complete.
+ */
+ u8 nextChain = (u8) (sgeOffset >> 2);
+ sgeOffset += ioc->SGE_size;
+ ioc->add_chain((char *)chainSge, nextChain, sgeOffset,
+ ioc->ChainBufferDMA + chain_dma_off);
+ } else {
+ /* The original MF buffer requires a chain buffer -
+ * set the offset.
+ * Last element in this MF is a chain element.
+ */
+ pReq->ChainOffset = (u8) (sgeOffset >> 2);
+ RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor) + 1) & 0x03;
+ dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Chain Buffer Needed, RequestNB=%x sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
+ ioc->RequestNB[req_idx] = RequestNB;
+ }
+
+ sges_left -= sg_done;
+
+
+ /* NOTE: psge points to the beginning of the chain element
+ * in current buffer. Get a chain buffer.
+ */
+ if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED) {
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "getFreeChainBuffer FAILED SCSI cmd=%02x (%p)\n",
+ ioc->name, pReq->CDB[0], SCpnt));
+ return FAILED;
+ }
+
+ /* Update the tracking arrays.
+ * If chainSge == NULL, update ReqToChain, else ChainToChain
+ */
+ if (chainSge) {
+ ioc->ChainToChain[chain_idx] = newIndex;
+ } else {
+ ioc->ReqToChain[req_idx] = newIndex;
+ }
+ chain_idx = newIndex;
+ chain_dma_off = ioc->req_sz * chain_idx;
+
+ /* Populate the chainSGE for the current buffer.
+ * - Set chain buffer pointer to psge and fill
+ * out the Address and Flags fields.
+ */
+ chainSge = (char *) psge;
+ dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT " Current buff @ %p (index 0x%x)",
+ ioc->name, psge, req_idx));
+
+ /* Start the SGE for the next buffer
+ */
+ psge = (char *) (ioc->ChainBuffer + chain_dma_off);
+ sgeOffset = 0;
+ sg_done = 0;
+
+ dsgprintk(ioc, printk(MYIOC_s_DEBUG_FMT " Chain buff @ %p (index 0x%x)\n",
+ ioc->name, psge, chain_idx));
+
+ /* Start the SGE for the next buffer
+ */
+
+ goto nextSGEset;
+ }
+
+ return SUCCESS;
+} /* mptscsih_AddSGE() */
+
+static void
+mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
+ U32 SlotStatus)
+{
+ MPT_FRAME_HDR *mf;
+ SEPRequest_t *SEPMsg;
+
+ if (ioc->bus_type != SAS)
+ return;
+
+ /* Not supported for hidden raid components
+ */
+ if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
+ return;
+
+ if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
+ ioc->name,__func__));
+ return;
+ }
+
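+	/* Build a SCSI Enclosure Processor WRITE_STATUS request so the
+	 * enclosure updates the slot status (e.g. fault LED) for this
+	 * target; the reply comes back through the normal DoneCtx path.
+	 */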
+ SEPMsg = (SEPRequest_t *)mf;
+ SEPMsg->Function = MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ SEPMsg->Bus = vtarget->channel;
+ SEPMsg->TargetID = vtarget->id;
+ SEPMsg->Action = MPI_SEP_REQ_ACTION_WRITE_STATUS;
+ SEPMsg->SlotStatus = SlotStatus;
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Sending SEP cmd=%x channel=%d id=%d\n",
+ ioc->name, SlotStatus, SEPMsg->Bus, SEPMsg->TargetID));
+ mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
+}
+
+#ifdef CONFIG_FUSION_LOGGING
+/**
+ * mptscsih_info_scsiio - debug print info on reply frame
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sc: original scsi cmnd pointer
+ * @pScsiReply: Pointer to MPT reply frame
+ *
+ * MPT_DEBUG_REPLY needs to be enabled to obtain this info
+ *
+ * Refer to lsi/mpi.h.
+ **/
+static void
+mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pScsiReply)
+{
+ char *desc = NULL;
+ char *desc1 = NULL;
+ u16 ioc_status;
+ u8 skey, asc, ascq;
+
+ ioc_status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+
+ switch (ioc_status) {
+
+ case MPI_IOCSTATUS_SUCCESS:
+ desc = "success";
+ break;
+ case MPI_IOCSTATUS_SCSI_INVALID_BUS:
+ desc = "invalid bus";
+ break;
+ case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
+ desc = "invalid target_id";
+ break;
+ case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ desc = "device not there";
+ break;
+ case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
+ desc = "data overrun";
+ break;
+ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ desc = "data underrun";
+ break;
+ case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ desc = "I/O data error";
+ break;
+ case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ desc = "protocol error";
+ break;
+ case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
+ desc = "task terminated";
+ break;
+ case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ desc = "residual mismatch";
+ break;
+ case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ desc = "task management failed";
+ break;
+ case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
+ desc = "IOC terminated";
+ break;
+ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
+ desc = "ext terminated";
+ break;
+ default:
+ desc = "";
+ break;
+ }
+
+ switch (pScsiReply->SCSIStatus)
+ {
+
+ case MPI_SCSI_STATUS_SUCCESS:
+ desc1 = "success";
+ break;
+ case MPI_SCSI_STATUS_CHECK_CONDITION:
+ desc1 = "check condition";
+ break;
+ case MPI_SCSI_STATUS_CONDITION_MET:
+ desc1 = "condition met";
+ break;
+ case MPI_SCSI_STATUS_BUSY:
+ desc1 = "busy";
+ break;
+ case MPI_SCSI_STATUS_INTERMEDIATE:
+ desc1 = "intermediate";
+ break;
+ case MPI_SCSI_STATUS_INTERMEDIATE_CONDMET:
+ desc1 = "intermediate condmet";
+ break;
+ case MPI_SCSI_STATUS_RESERVATION_CONFLICT:
+ desc1 = "reservation conflict";
+ break;
+ case MPI_SCSI_STATUS_COMMAND_TERMINATED:
+ desc1 = "command terminated";
+ break;
+ case MPI_SCSI_STATUS_TASK_SET_FULL:
+ desc1 = "task set full";
+ break;
+ case MPI_SCSI_STATUS_ACA_ACTIVE:
+ desc1 = "aca active";
+ break;
+ case MPI_SCSI_STATUS_FCPEXT_DEVICE_LOGGED_OUT:
+ desc1 = "fcpext device logged out";
+ break;
+ case MPI_SCSI_STATUS_FCPEXT_NO_LINK:
+ desc1 = "fcpext no link";
+ break;
+ case MPI_SCSI_STATUS_FCPEXT_UNASSIGNED:
+ desc1 = "fcpext unassigned";
+ break;
+ default:
+ desc1 = "";
+ break;
+ }
+
+ scsi_print_command(sc);
+ printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %d\n",
+ ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun);
+ printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, "
+ "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow,
+ scsi_get_resid(sc));
+ printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, "
+ "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag),
+ le32_to_cpu(pScsiReply->TransferCount), sc->result);
+
+ printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), "
+ "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n",
+ ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus,
+ pScsiReply->SCSIState);
+
+ if (pScsiReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
+ skey = sc->sense_buffer[2] & 0x0F;
+ asc = sc->sense_buffer[12];
+ ascq = sc->sense_buffer[13];
+
+ printk(MYIOC_s_DEBUG_FMT "\t[sense_key,asc,ascq]: "
+ "[0x%02x,0x%02x,0x%02x]\n", ioc->name, skey, asc, ascq);
+ }
+
+ /*
+ * Look for + dump FCP ResponseInfo[]!
+ */
+ if (pScsiReply->SCSIState & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
+ pScsiReply->ResponseInfo)
+ printk(MYIOC_s_DEBUG_FMT "response_info = %08xh\n",
+ ioc->name, le32_to_cpu(pScsiReply->ResponseInfo));
+}
+#endif
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptscsih_io_done - Main SCSI IO callback routine registered to
+ * Fusion MPT (base) driver
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @mf: Pointer to original MPT request frame
+ * @r: Pointer to MPT reply frame (NULL if TurboReply)
+ *
+ * This routine is called from mpt.c::mpt_interrupt() at the completion
+ * of any SCSI IO request.
+ * This routine is registered with the Fusion MPT (base) driver at driver
+ * load/init time via the mpt_register() API call.
+ *
+ * Returns 1 indicating alloc'd request frame ptr should be freed.
+ */
+int
+mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+{
+ struct scsi_cmnd *sc;
+ MPT_SCSI_HOST *hd;
+ SCSIIORequest_t *pScsiReq;
+ SCSIIOReply_t *pScsiReply;
+ u16 req_idx, req_idx_MR;
+ VirtDevice *vdevice;
+ VirtTarget *vtarget;
+
+ hd = shost_priv(ioc->sh);
+ req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+ req_idx_MR = (mr != NULL) ?
+ le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx;
+
+	/* Special case: an already freed message frame is received from
+	 * the firmware.  This can happen while the IOC is resetting.
+	 * Return immediately and do not touch the frame.
+	 */
+ if ((req_idx != req_idx_MR) ||
+ (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf))
+ return 0;
+
+ sc = mptscsih_getclear_scsi_lookup(ioc, req_idx);
+ if (sc == NULL) {
+ MPIHeader_t *hdr = (MPIHeader_t *)mf;
+
+ /* Remark: writeSDP1 will use the ScsiDoneCtx
+ * If a SCSI I/O cmd, device disabled by OS and
+ * completion done. Cannot touch sc struct. Just free mem.
+ */
+ if (hdr->Function == MPI_FUNCTION_SCSI_IO_REQUEST)
+ printk(MYIOC_s_ERR_FMT "NULL ScsiCmd ptr!\n",
+ ioc->name);
+
+ mptscsih_freeChainBuffers(ioc, req_idx);
+ return 1;
+ }
+
+ if ((unsigned char *)mf != sc->host_scribble) {
+ mptscsih_freeChainBuffers(ioc, req_idx);
+ return 1;
+ }
+
+ if (ioc->bus_type == SAS) {
+ VirtDevice *vdevice = sc->device->hostdata;
+
+ if (!vdevice || !vdevice->vtarget ||
+ vdevice->vtarget->deleted) {
+ sc->result = DID_NO_CONNECT << 16;
+ goto out;
+ }
+ }
+
+ sc->host_scribble = NULL;
+ sc->result = DID_OK << 16; /* Set default reply as OK */
+ pScsiReq = (SCSIIORequest_t *) mf;
+ pScsiReply = (SCSIIOReply_t *) mr;
+
+ if((ioc->facts.MsgVersion >= MPI_VERSION_01_05) && pScsiReply){
+ dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d,task-tag=%d)\n",
+ ioc->name, mf, mr, sc, req_idx, pScsiReply->TaskTag));
+ }else{
+ dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d)\n",
+ ioc->name, mf, mr, sc, req_idx));
+ }
+
+ if (pScsiReply == NULL) {
+		/* Turbo (context) reply: no reply frame is returned for a
+		 * successfully completed I/O, so the default DID_OK result
+		 * set above is kept.
+		 */
+		;
+ } else {
+ u32 xfer_cnt;
+ u16 status;
+ u8 scsi_state, scsi_status;
+ u32 log_info;
+
+ status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+
+ scsi_state = pScsiReply->SCSIState;
+ scsi_status = pScsiReply->SCSIStatus;
+ xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
+ scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
+ log_info = le32_to_cpu(pScsiReply->IOCLogInfo);
+
+ /*
+ * if we get a data underrun indication, yet no data was
+ * transferred and the SCSI status indicates that the
+ * command was never started, change the data underrun
+ * to success
+ */
+ if (status == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
+ (scsi_status == MPI_SCSI_STATUS_BUSY ||
+ scsi_status == MPI_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)) {
+ status = MPI_IOCSTATUS_SUCCESS;
+ }
+
+ if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)
+ mptscsih_copy_sense_data(sc, hd, mf, pScsiReply);
+
+ /*
+ * Look for + dump FCP ResponseInfo[]!
+ */
+ if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID &&
+ pScsiReply->ResponseInfo) {
+ printk(MYIOC_s_NOTE_FMT "[%d:%d:%d:%d] "
+ "FCP_ResponseInfo=%08xh\n", ioc->name,
+ sc->device->host->host_no, sc->device->channel,
+ sc->device->id, sc->device->lun,
+ le32_to_cpu(pScsiReply->ResponseInfo));
+ }
+
+ switch(status) {
+ case MPI_IOCSTATUS_BUSY: /* 0x0002 */
+ case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */
+ /* CHECKME!
+ * Maybe: DRIVER_BUSY | SUGGEST_RETRY | DID_SOFT_ERROR (retry)
+ * But not: DID_BUS_BUSY lest one risk
+ * killing interrupt handler:-(
+ */
+ sc->result = SAM_STAT_BUSY;
+ break;
+
+ case MPI_IOCSTATUS_SCSI_INVALID_BUS: /* 0x0041 */
+ case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: /* 0x0042 */
+ sc->result = DID_BAD_TARGET << 16;
+ break;
+
+ case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
+ /* Spoof to SCSI Selection Timeout! */
+ if (ioc->bus_type != FC)
+ sc->result = DID_NO_CONNECT << 16;
+ /* else fibre, just stall until rescan event */
+ else
+ sc->result = DID_REQUEUE << 16;
+
+ if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
+ hd->sel_timeout[pScsiReq->TargetID]++;
+
+ vdevice = sc->device->hostdata;
+ if (!vdevice)
+ break;
+ vtarget = vdevice->vtarget;
+ if (vtarget->tflags & MPT_TARGET_FLAGS_LED_ON) {
+ mptscsih_issue_sep_command(ioc, vtarget,
+ MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED);
+ vtarget->tflags &= ~MPT_TARGET_FLAGS_LED_ON;
+ }
+ break;
+
+ case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
+ if ( ioc->bus_type == SAS ) {
+ u16 ioc_status =
+ le16_to_cpu(pScsiReply->IOCStatus);
+ if ((ioc_status &
+ MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ &&
+ ((log_info & SAS_LOGINFO_MASK) ==
+ SAS_LOGINFO_NEXUS_LOSS)) {
+ VirtDevice *vdevice =
+ sc->device->hostdata;
+
+ /* flag the device as being in
+ * device removal delay so we can
+ * notify the midlayer to hold off
+ * on timeout eh */
+				if (vdevice && vdevice->vtarget &&
+				    vdevice->vtarget->raidVolume)
+					printk(KERN_INFO
+					    "Skipping Raid Volume for inDMD\n");
+				else if (vdevice && vdevice->vtarget)
+					vdevice->vtarget->inDMD = 1;
+
+				sc->result =
+					(DID_TRANSPORT_DISRUPTED << 16);
+ break;
+ }
+ } else if (ioc->bus_type == FC) {
+ /*
+ * The FC IOC may kill a request for variety of
+ * reasons, some of which may be recovered by a
+ * retry, some which are unlikely to be
+ * recovered. Return DID_ERROR instead of
+ * DID_RESET to permit retry of the command,
+ * just not an infinite number of them
+ */
+ sc->result = DID_ERROR << 16;
+ break;
+ }
+
+ /*
+ * Allow non-SAS & non-NEXUS_LOSS to drop into below code
+ */
+
+ case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
+ /* Linux handles an unsolicited DID_RESET better
+ * than an unsolicited DID_ABORT.
+ */
+			sc->result = DID_RESET << 16;
+			break;
+
+ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
+ if (ioc->bus_type == FC)
+ sc->result = DID_ERROR << 16;
+ else
+ sc->result = DID_RESET << 16;
+ break;
+
+ case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
+ scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
+ if((xfer_cnt==0)||(sc->underflow > xfer_cnt))
+ sc->result=DID_SOFT_ERROR << 16;
+ else /* Sufficient data transfer occurred */
+ sc->result = (DID_OK << 16) | scsi_status;
+ dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "RESIDUAL_MISMATCH: result=%x on channel=%d id=%d\n",
+ ioc->name, sc->result, sc->device->channel, sc->device->id));
+ break;
+
+ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
+ /*
+ * Do upfront check for valid SenseData and give it
+ * precedence!
+ */
+ sc->result = (DID_OK << 16) | scsi_status;
+ if (!(scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
+
+ /*
+ * For an Errata on LSI53C1030
+ * When the length of request data
+ * and transfer data are different
+ * with result of command (READ or VERIFY),
+ * DID_SOFT_ERROR is set.
+ */
+ if (ioc->bus_type == SPI) {
+ if (pScsiReq->CDB[0] == READ_6 ||
+ pScsiReq->CDB[0] == READ_10 ||
+ pScsiReq->CDB[0] == READ_12 ||
+ pScsiReq->CDB[0] == READ_16 ||
+ pScsiReq->CDB[0] == VERIFY ||
+ pScsiReq->CDB[0] == VERIFY_16) {
+ if (scsi_bufflen(sc) !=
+ xfer_cnt) {
+ sc->result =
+ DID_SOFT_ERROR << 16;
+						printk(KERN_WARNING "Errata "
+						"on LSI53C1030 occurred. "
+						"sc->req_bufflen=0x%02x, "
+						"xfer_cnt=0x%02x\n",
+ scsi_bufflen(sc),
+ xfer_cnt);
+ }
+ }
+ }
+
+ if (xfer_cnt < sc->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ sc->result = SAM_STAT_BUSY;
+ else
+ sc->result = DID_SOFT_ERROR << 16;
+ }
+ if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) {
+ /* What to do?
+ */
+ sc->result = DID_SOFT_ERROR << 16;
+ }
+ else if (scsi_state & MPI_SCSI_STATE_TERMINATED) {
+ /* Not real sure here either... */
+ sc->result = DID_RESET << 16;
+ }
+ }
+
+
+ dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ " sc->underflow={report ERR if < %02xh bytes xfer'd}\n",
+ ioc->name, sc->underflow));
+ dreplyprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ " ActBytesXferd=%02xh\n", ioc->name, xfer_cnt));
+
+ /* Report Queue Full
+ */
+ if (scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)
+ mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
+
+ break;
+
+ case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
+ scsi_set_resid(sc, 0);
+ case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
+ case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
+ sc->result = (DID_OK << 16) | scsi_status;
+ if (scsi_state == 0) {
+ ;
+ } else if (scsi_state &
+ MPI_SCSI_STATE_AUTOSENSE_VALID) {
+
+ /*
+				 * Workaround for potential trouble on the
+				 * LSI53C1030 (date: 2007.xx).
+				 * Check whether the requested data length
+				 * equals the transferred length plus the
+				 * residual; MEDIUM_ERROR is reported for
+				 * incorrect data.
+ */
+ if ((ioc->bus_type == SPI) &&
+ (sc->sense_buffer[2] & 0x20)) {
+ u32 difftransfer;
+ difftransfer =
+ sc->sense_buffer[3] << 24 |
+ sc->sense_buffer[4] << 16 |
+ sc->sense_buffer[5] << 8 |
+ sc->sense_buffer[6];
+ if (((sc->sense_buffer[3] & 0x80) ==
+ 0x80) && (scsi_bufflen(sc)
+ != xfer_cnt)) {
+ sc->sense_buffer[2] =
+ MEDIUM_ERROR;
+ sc->sense_buffer[12] = 0xff;
+ sc->sense_buffer[13] = 0xff;
+					printk(KERN_WARNING "Errata "
+					    "on LSI53C1030 occurred. "
+					    "sc->req_bufflen=0x%02x, "
+					    "xfer_cnt=0x%02x\n",
+					    scsi_bufflen(sc),
+					    xfer_cnt);
+ }
+ if (((sc->sense_buffer[3] & 0x80)
+ != 0x80) &&
+ (scsi_bufflen(sc) !=
+ xfer_cnt + difftransfer)) {
+ sc->sense_buffer[2] =
+ MEDIUM_ERROR;
+ sc->sense_buffer[12] = 0xff;
+ sc->sense_buffer[13] = 0xff;
+ printk(KERN_WARNING
+					    "Errata on LSI53C1030 occurred. "
+					    "sc->req_bufflen=0x%02x, "
+					    "xfer_cnt=0x%02x, "
+					    "difftransfer=0x%02x\n",
+ scsi_bufflen(sc),
+ xfer_cnt,
+ difftransfer);
+ }
+ }
+
+ /*
+ * If running against circa 200003dd 909 MPT f/w,
+ * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL
+ * (QUEUE_FULL) returned from device! --> get 0x0000?128
+ * and with SenseBytes set to 0.
+ */
+ if (pScsiReply->SCSIStatus == MPI_SCSI_STATUS_TASK_SET_FULL)
+ mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
+
+ }
+ else if (scsi_state &
+ (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)
+ ) {
+ /*
+ * What to do?
+ */
+ sc->result = DID_SOFT_ERROR << 16;
+ }
+ else if (scsi_state & MPI_SCSI_STATE_TERMINATED) {
+ /* Not real sure here either... */
+ sc->result = DID_RESET << 16;
+ }
+ else if (scsi_state & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) {
+ /* Device Inq. data indicates that it supports
+ * QTags, but rejects QTag messages.
+ * This command completed OK.
+ *
+ * Not real sure here either so do nothing... */
+ }
+
+ if (sc->result == MPI_SCSI_STATUS_TASK_SET_FULL)
+ mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
+
+ /* Add handling of:
+ * Reservation Conflict, Busy,
+ * Command Terminated, CHECK
+ */
+ break;
+
+ case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
+ sc->result = DID_SOFT_ERROR << 16;
+ break;
+
+ case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
+ case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */
+ case MPI_IOCSTATUS_INTERNAL_ERROR: /* 0x0004 */
+ case MPI_IOCSTATUS_RESERVED: /* 0x0005 */
+ case MPI_IOCSTATUS_INVALID_FIELD: /* 0x0007 */
+ case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */
+ case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
+ case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: /* 0x004A */
+ default:
+ /*
+ * What to do?
+ */
+ sc->result = DID_SOFT_ERROR << 16;
+ break;
+
+ } /* switch(status) */
+
+#ifdef CONFIG_FUSION_LOGGING
+ if (sc->result && (ioc->debug_level & MPT_DEBUG_REPLY))
+ mptscsih_info_scsiio(ioc, sc, pScsiReply);
+#endif
+
+ } /* end of address reply case */
+out:
+ /* Unmap the DMA buffers, if any. */
+ scsi_dma_unmap(sc);
+
+ sc->scsi_done(sc); /* Issue the command callback */
+
+ /* Free Chain buffers */
+ mptscsih_freeChainBuffers(ioc, req_idx);
+ return 1;
+}
+
+/*
+ *	mptscsih_flush_running_cmds - Complete every outstanding command
+ *		tracked for this host back to the OS.
+ *		Called only when recovering from a FW reload.
+ * @hd: Pointer to a SCSI HOST structure
+ *
+ * Returns: None.
+ *
+ * Must be called while new I/Os are being queued.
+ */
+static void
+mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
+{
+ MPT_ADAPTER *ioc = hd->ioc;
+ struct scsi_cmnd *sc;
+ SCSIIORequest_t *mf = NULL;
+ int ii;
+ int channel, id;
+
+ for (ii= 0; ii < ioc->req_depth; ii++) {
+ sc = mptscsih_getclear_scsi_lookup(ioc, ii);
+ if (!sc)
+ continue;
+ mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(ioc, ii);
+ if (!mf)
+ continue;
+ channel = mf->Bus;
+ id = mf->TargetID;
+ mptscsih_freeChainBuffers(ioc, ii);
+ mpt_free_msg_frame(ioc, (MPT_FRAME_HDR *)mf);
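+		/* only complete commands still bound to this message frame */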
+ if ((unsigned char *)mf != sc->host_scribble)
+ continue;
+ scsi_dma_unmap(sc);
+ sc->result = DID_RESET << 16;
+ sc->host_scribble = NULL;
+ dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
+ "completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, "
+ "idx=%x\n", ioc->name, channel, id, sc, mf, ii));
+ sc->scsi_done(sc);
+ }
+}
+
+/*
+ * mptscsih_search_running_cmds - Delete any commands associated
+ *		with the specified target and LUN. Called only when a
+ *		LUN is disabled by the mid-layer.
+ * Do NOT access the referenced scsi_cmnd structure or
+ * members. Will cause either a paging or NULL ptr error.
+ * (BUT, BUT, BUT, the code does reference it! - mdr)
+ * @hd: Pointer to a SCSI HOST structure
+ * @vdevice: per device private data
+ *
+ * Returns: None.
+ *
+ * Called from slave_destroy.
+ */
+static void
+mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
+{
+ SCSIIORequest_t *mf = NULL;
+ int ii;
+ struct scsi_cmnd *sc;
+ struct scsi_lun lun;
+ MPT_ADAPTER *ioc = hd->ioc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (ii = 0; ii < ioc->req_depth; ii++) {
+ if ((sc = ioc->ScsiLookup[ii]) != NULL) {
+
+ mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(ioc, ii);
+ if (mf == NULL)
+ continue;
+ /* If the device is a hidden raid component, then its
+ * expected that the mf->function will be RAID_SCSI_IO
+ */
+ if (vdevice->vtarget->tflags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT && mf->Function !=
+ MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)
+ continue;
+
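+			/* match the channel, id, and LUN of the device being removed */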
+ int_to_scsilun(vdevice->lun, &lun);
+ if ((mf->Bus != vdevice->vtarget->channel) ||
+ (mf->TargetID != vdevice->vtarget->id) ||
+ memcmp(lun.scsi_lun, mf->LUN, 8))
+ continue;
+
+ if ((unsigned char *)mf != sc->host_scribble)
+ continue;
+ ioc->ScsiLookup[ii] = NULL;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ mptscsih_freeChainBuffers(ioc, ii);
+ mpt_free_msg_frame(ioc, (MPT_FRAME_HDR *)mf);
+ scsi_dma_unmap(sc);
+ sc->host_scribble = NULL;
+ sc->result = DID_NO_CONNECT << 16;
+ dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device,
+ MYIOC_s_FMT "completing cmds: fw_channel %d, "
+ "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name,
+ vdevice->vtarget->channel, vdevice->vtarget->id,
+ sc, mf, ii));
+ sc->scsi_done(sc);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptscsih_report_queue_full - Report QUEUE_FULL status returned
+ * from a SCSI target device.
+ * @sc: Pointer to scsi_cmnd structure
+ * @pScsiReply: Pointer to SCSIIOReply_t
+ * @pScsiReq: Pointer to original SCSI request
+ *
+ * This routine periodically reports QUEUE_FULL status returned from a
+ * SCSI target device. It reports this to the console via kernel
+ * printk() API call, not more than once every 10 seconds.
+ */
+static void
+mptscsih_report_queue_full(struct scsi_cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq)
+{
+ long time = jiffies;
+ MPT_SCSI_HOST *hd;
+ MPT_ADAPTER *ioc;
+
+ if (sc->device == NULL)
+ return;
+ if (sc->device->host == NULL)
+ return;
+ if ((hd = shost_priv(sc->device->host)) == NULL)
+ return;
+ ioc = hd->ioc;
+ if (time - hd->last_queue_full > 10 * HZ) {
+ dprintk(ioc, printk(MYIOC_s_WARN_FMT "Device (%d:%d:%d) reported QUEUE_FULL!\n",
+ ioc->name, 0, sc->device->id, sc->device->lun));
+ hd->last_queue_full = time;
+ }
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_remove - Remove SCSI devices
+ * @pdev: Pointer to pci_dev structure
+ *
+ *
+ */
+void
+mptscsih_remove(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct Scsi_Host *host = ioc->sh;
+ MPT_SCSI_HOST *hd;
+ int sz1;
+
+ scsi_remove_host(host);
+
+	if ((hd = shost_priv(host)) == NULL)
+ return;
+
+ mptscsih_shutdown(pdev);
+
+	sz1 = 0;
+
+ if (ioc->ScsiLookup != NULL) {
+ sz1 = ioc->req_depth * sizeof(void *);
+ kfree(ioc->ScsiLookup);
+ ioc->ScsiLookup = NULL;
+ }
+
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Free'd ScsiLookup (%d) memory\n",
+ ioc->name, sz1));
+
+ kfree(hd->info_kbuf);
+
+ /* NULL the Scsi_Host pointer
+ */
+ ioc->sh = NULL;
+
+ scsi_host_put(host);
+
+ mpt_detach(pdev);
+
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptscsih_shutdown - reboot notifier
+ *
+ */
+void
+mptscsih_shutdown(struct pci_dev *pdev)
+{
+}
+
+#ifdef CONFIG_PM
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptscsih_suspend - Fusion MPT scsi driver suspend routine.
+ *
+ *
+ */
+int
+mptscsih_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+
+ scsi_block_requests(ioc->sh);
+ flush_scheduled_work();
+ mptscsih_shutdown(pdev);
+	return mpt_suspend(pdev, state);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptscsih_resume - Fusion MPT scsi driver resume routine.
+ *
+ *
+ */
+int
+mptscsih_resume(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ int rc;
+
+ rc = mpt_resume(pdev);
+ scsi_unblock_requests(ioc->sh);
+ return rc;
+}
+
+#endif
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_info - Return information about MPT adapter
+ * @SChost: Pointer to Scsi_Host structure
+ *
+ * (linux scsi_host_template.info routine)
+ *
+ * Returns pointer to buffer where information was written.
+ */
+const char *
+mptscsih_info(struct Scsi_Host *SChost)
+{
+ MPT_SCSI_HOST *h;
+ int size = 0;
+
+ h = shost_priv(SChost);
+
+ if (h) {
+ if (h->info_kbuf == NULL)
+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
+ return h->info_kbuf;
+ h->info_kbuf[0] = '\0';
+
+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
+ h->info_kbuf[size-1] = '\0';
+ }
+
+	return h ? h->info_kbuf : NULL;
+}
+
+struct info_str {
+ char *buffer;
+ int length;
+ int offset;
+ int pos;
+};
+
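+/*
+ * Copy data into the caller's buffer, honouring the offset/length
+ * window requested by the /proc read.
+ */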
+static void
+mptscsih_copy_mem_info(struct info_str *info, char *data, int len)
+{
+ if (info->pos + len > info->length)
+ len = info->length - info->pos;
+
+ if (info->pos + len < info->offset) {
+ info->pos += len;
+ return;
+ }
+
+ if (info->pos < info->offset) {
+ data += (info->offset - info->pos);
+ len -= (info->offset - info->pos);
+ }
+
+ if (len > 0) {
+ memcpy(info->buffer + info->pos, data, len);
+ info->pos += len;
+ }
+}
+
+static int
+mptscsih_copy_info(struct info_str *info, char *fmt, ...)
+{
+ va_list args;
+ char buf[81];
+ int len;
+
+ va_start(args, fmt);
+	len = vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
+ mptscsih_copy_mem_info(info, buf, len);
+ return len;
+}
+
+static int
+mptscsih_host_info(MPT_ADAPTER *ioc, char *pbuf, off_t offset, int len)
+{
+ struct info_str info;
+
+ info.buffer = pbuf;
+ info.length = len;
+ info.offset = offset;
+ info.pos = 0;
+
+ mptscsih_copy_info(&info, "%s: %s, ", ioc->name, ioc->prod_name);
+ mptscsih_copy_info(&info, "%s%08xh, ", MPT_FW_REV_MAGIC_ID_STRING, ioc->facts.FWVersion.Word);
+ mptscsih_copy_info(&info, "Ports=%d, ", ioc->facts.NumberOfPorts);
+ mptscsih_copy_info(&info, "MaxQ=%d\n", ioc->req_depth);
+
+ return ((info.pos > info.offset) ? info.pos - info.offset : 0);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_proc_info - Return information about MPT adapter
+ * @host: scsi host struct
+ * @buffer: if write, user data; if read, buffer for user
+ * @start: returns the buffer address
+ * @offset: if write, 0; if read, the current offset into the buffer from
+ * the previous read.
+ * @length: if write, return length;
+ * @func: write = 1; read = 0
+ *
+ *	(linux scsi_host_template.proc_info routine)
+ */
+int
+mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
+ int length, int func)
+{
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+ int size = 0;
+
+ if (func) {
+ /*
+ * write is not supported
+ */
+ } else {
+ if (start)
+ *start = buffer;
+
+ size = mptscsih_host_info(ioc, buffer, offset, length);
+ }
+
+ return size;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
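+/* ADD_INDEX_LOG is a no-op placeholder for request-index debug logging. */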
+#define ADD_INDEX_LOG(req_ent) do { } while(0)
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine.
+ * @SCpnt: Pointer to scsi_cmnd structure
+ * @done: Pointer SCSI mid-layer IO completion function
+ *
+ * (linux scsi_host_template.queuecommand routine)
+ * This is the primary SCSI IO start routine. Create a MPI SCSIIORequest
+ * from a linux scsi_cmnd request and send it to the IOC.
+ *
+ *	Returns 0 on success, or SCSI_MLQUEUE_HOST_BUSY if the command cannot be accepted.
+ */
+int
+mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+ MPT_SCSI_HOST *hd;
+ MPT_FRAME_HDR *mf;
+ SCSIIORequest_t *pScsiReq;
+ VirtDevice *vdevice = SCpnt->device->hostdata;
+ u32 datalen;
+ u32 scsictl;
+ u32 scsidir;
+ u32 cmd_len;
+ int my_idx;
+ int ii;
+ MPT_ADAPTER *ioc;
+
+ hd = shost_priv(SCpnt->device->host);
+ ioc = hd->ioc;
+ SCpnt->scsi_done = done;
+
+ dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
+ ioc->name, SCpnt, done));
+
+ if (ioc->taskmgmt_quiesce_io)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /*
+ * Put together a MPT SCSI request...
+ */
+ if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
+ dprintk(ioc, printk(MYIOC_s_WARN_FMT "QueueCmd, no msg frames!!\n",
+ ioc->name));
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ pScsiReq = (SCSIIORequest_t *) mf;
+
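+	/* The request index keys this frame into the ScsiLookup table
+	 * and the per-request sense buffer.
+	 */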
+ my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+
+ ADD_INDEX_LOG(my_idx);
+
+ /* TUR's being issued with scsictl=0x02000000 (DATA_IN)!
+ * Seems we may receive a buffer (datalen>0) even when there
+ * will be no data transfer! GRRRRR...
+ */
+ if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
+ datalen = scsi_bufflen(SCpnt);
+ scsidir = MPI_SCSIIO_CONTROL_READ; /* DATA IN (host<--ioc<--dev) */
+ } else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
+ datalen = scsi_bufflen(SCpnt);
+ scsidir = MPI_SCSIIO_CONTROL_WRITE; /* DATA OUT (host-->ioc-->dev) */
+ } else {
+ datalen = 0;
+ scsidir = MPI_SCSIIO_CONTROL_NODATATRANSFER;
+ }
+
+ /* Default to untagged. Once a target structure has been allocated,
+ * use the Inquiry data to determine if device supports tagged.
+ */
+ if (vdevice
+ && (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_Q_YES)
+ && (SCpnt->device->tagged_supported)) {
+ scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
+ if (SCpnt->request && SCpnt->request->ioprio) {
+ if (((SCpnt->request->ioprio & 0x7) == 1) ||
+ !(SCpnt->request->ioprio & 0x7))
+ scsictl |= MPI_SCSIIO_CONTROL_HEADOFQ;
+ }
+ } else
+ scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
+
+
+ /* Use the above information to set up the message frame
+ */
+ pScsiReq->TargetID = (u8) vdevice->vtarget->id;
+ pScsiReq->Bus = vdevice->vtarget->channel;
+ pScsiReq->ChainOffset = 0;
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
+ pScsiReq->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
+ else
+ pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
+ pScsiReq->CDBLength = SCpnt->cmd_len;
+ pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
+ pScsiReq->Reserved = 0;
+ pScsiReq->MsgFlags = mpt_msg_flags(ioc);
+ int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN);
+ pScsiReq->Control = cpu_to_le32(scsictl);
+
+ /*
+ * Write SCSI CDB into the message
+ */
+ cmd_len = SCpnt->cmd_len;
+ for (ii=0; ii < cmd_len; ii++)
+ pScsiReq->CDB[ii] = SCpnt->cmnd[ii];
+
+ for (ii=cmd_len; ii < 16; ii++)
+ pScsiReq->CDB[ii] = 0;
+
+ /* DataLength */
+ pScsiReq->DataLength = cpu_to_le32(datalen);
+
+ /* SenseBuffer low address */
+ pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
+ + (my_idx * MPT_SENSE_BUFFER_ALLOC));
+
+ /* Now add the SG list
+ * Always have a SGE even if null length.
+ */
+ if (datalen == 0) {
+ /* Add a NULL SGE */
+ ioc->add_sge((char *)&pScsiReq->SGL,
+ MPT_SGE_FLAGS_SSIMPLE_READ | 0,
+ (dma_addr_t) -1);
+ } else {
+ /* Add a 32 or 64 bit SGE */
+ if (mptscsih_AddSGE(ioc, SCpnt, pScsiReq, my_idx) != SUCCESS)
+ goto fail;
+ }
+
+ SCpnt->host_scribble = (unsigned char *)mf;
+ mptscsih_set_scsi_lookup(ioc, my_idx, SCpnt);
+
+ mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
+ dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n",
+ ioc->name, SCpnt, mf, my_idx));
+ DBG_DUMP_REQUEST_FRAME(ioc, (u32 *)mf);
+ return 0;
+
+ fail:
+ mptscsih_freeChainBuffers(ioc, my_idx);
+ mpt_free_msg_frame(ioc, mf);
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptscsih_freeChainBuffers - Function to free chain buffers associated
+ * with a SCSI IO request
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@req_idx: Index of the SCSI IO request frame.
+ *
+ *	Called when SG chain buffer allocation fails and from mptscsih callbacks.
+ * No return.
+ */
+static void
+mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
+{
+ MPT_FRAME_HDR *chain;
+ unsigned long flags;
+ int chain_idx;
+ int next;
+
+ /* Get the first chain index and reset
+ * tracker state.
+ */
+ chain_idx = ioc->ReqToChain[req_idx];
+ ioc->ReqToChain[req_idx] = MPT_HOST_NO_CHAIN;
+
+ while (chain_idx != MPT_HOST_NO_CHAIN) {
+
+ /* Save the next chain buffer index */
+ next = ioc->ChainToChain[chain_idx];
+
+ /* Free this chain buffer and reset
+ * tracker
+ */
+ ioc->ChainToChain[chain_idx] = MPT_HOST_NO_CHAIN;
+
+ chain = (MPT_FRAME_HDR *) (ioc->ChainBuffer
+ + (chain_idx * ioc->req_sz));
+
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+ list_add_tail(&chain->u.frame.linkage.list, &ioc->FreeChainQ);
+ spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+ dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FreeChainBuffers (index %d)\n",
+ ioc->name, chain_idx));
+
+ /* handle next */
+ chain_idx = next;
+ }
+ return;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Reset Handling
+ */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_IssueTaskMgmt - Generic send Task Management function.
+ * @hd: Pointer to MPT_SCSI_HOST structure
+ * @type: Task Management type
+ * @channel: channel number for task management
+ * @id: Logical Target ID for reset (if appropriate)
+ * @lun: Logical Unit for reset (if appropriate)
+ * @ctx2abort: Context for the task to be aborted (if appropriate)
+ * @timeout: timeout for task management control
+ *
+ * Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
+ * or a non-interrupt thread. In the former, must not call schedule().
+ *
+ *	Not all fields are meaningful for all task types.
+ *
+ * Returns 0 for SUCCESS, or FAILED.
+ *
+ **/
+int
+mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
+ int ctx2abort, ulong timeout)
+{
+ MPT_FRAME_HDR *mf;
+ SCSITaskMgmt_t *pScsiTm;
+ int ii;
+ int retval;
+ MPT_ADAPTER *ioc = hd->ioc;
+ unsigned long timeleft;
+ u8 issue_hard_reset;
+ u32 ioc_raw_state;
+ unsigned long time_count;
+
+ issue_hard_reset = 0;
+ ioc_raw_state = mpt_GetIocState(ioc, 0);
+
+ if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) {
+ printk(MYIOC_s_WARN_FMT
+ "TaskMgmt type=%x: IOC Not operational (0x%x)!\n",
+ ioc->name, type, ioc_raw_state);
+ printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
+ ioc->name, __func__);
+ if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0)
+ printk(MYIOC_s_WARN_FMT "TaskMgmt HardReset "
+ "FAILED!!\n", ioc->name);
+ return 0;
+ }
+
+ if (ioc_raw_state & MPI_DOORBELL_ACTIVE) {
+ printk(MYIOC_s_WARN_FMT
+ "TaskMgmt type=%x: ioc_state: "
+ "DOORBELL_ACTIVE (0x%x)!\n",
+ ioc->name, type, ioc_raw_state);
+ return FAILED;
+ }
+
+ mutex_lock(&ioc->taskmgmt_cmds.mutex);
+ if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+ mf = NULL;
+ retval = FAILED;
+ goto out;
+ }
+
+ /* Return Fail to calling function if no message frames available.
+ */
+ if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "TaskMgmt no msg frames!!\n", ioc->name));
+ retval = FAILED;
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ goto out;
+ }
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
+ ioc->name, mf));
+
+ /* Format the Request
+ */
+ pScsiTm = (SCSITaskMgmt_t *) mf;
+ pScsiTm->TargetID = id;
+ pScsiTm->Bus = channel;
+ pScsiTm->ChainOffset = 0;
+ pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
+
+ pScsiTm->Reserved = 0;
+ pScsiTm->TaskType = type;
+ pScsiTm->Reserved1 = 0;
+ pScsiTm->MsgFlags = (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS)
+ ? MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION : 0;
+
+ int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
+
+ for (ii=0; ii < 7; ii++)
+ pScsiTm->Reserved2[ii] = 0;
+
+ pScsiTm->TaskMsgContext = ctx2abort;
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt: ctx2abort (0x%08x) "
+ "task_type = 0x%02X, timeout = %ld\n", ioc->name, ctx2abort,
+ type, timeout));
+
+ DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm);
+
+ INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ time_count = jiffies;
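+	/* Prefer the high-priority request queue when the firmware
+	 * supports it; otherwise fall back to a doorbell handshake.
+	 */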
+ if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
+ (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
+ mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
+ else {
+ retval = mpt_send_handshake_request(ioc->TaskCtx, ioc,
+ sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP);
+ if (retval) {
+ dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "TaskMgmt handshake FAILED!(mf=%p, rc=%d) \n",
+ ioc->name, mf, retval));
+ mpt_free_msg_frame(ioc, mf);
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ goto out;
+ }
+ }
+
+ timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
+ timeout*HZ);
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ retval = FAILED;
+ dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "TaskMgmt TIMED OUT!(mf=%p)\n", ioc->name, mf));
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out;
+ issue_hard_reset = 1;
+ goto out;
+ }
+
+ retval = mptscsih_taskmgmt_reply(ioc, type,
+ (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply);
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt completed (%d seconds)\n",
+ ioc->name, jiffies_to_msecs(jiffies - time_count)/1000));
+
+ out:
+
+ CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+ if (issue_hard_reset) {
+ printk(MYIOC_s_WARN_FMT
+ "Issuing Reset from %s!! doorbell=0x%08x\n",
+ ioc->name, __func__, mpt_GetIocState(ioc, 0));
+ retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+ }
+
+ retval = (retval == 0) ? 0 : FAILED;
+ mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+ return retval;
+}
+EXPORT_SYMBOL(mptscsih_IssueTaskMgmt);
+
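+/* Task management timeout, in seconds, chosen per bus type. */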
+static int
+mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
+{
+ switch (ioc->bus_type) {
+ case FC:
+ return 40;
+ case SAS:
+ return 30;
+ case SPI:
+ default:
+ return 10;
+ }
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_abort - Abort linux scsi_cmnd routine, new_eh variant
+ * @SCpnt: Pointer to scsi_cmnd structure, IO to be aborted
+ *
+ * (linux scsi_host_template.eh_abort_handler routine)
+ *
+ * Returns SUCCESS or FAILED.
+ **/
+int
+mptscsih_abort(struct scsi_cmnd * SCpnt)
+{
+ MPT_SCSI_HOST *hd;
+ MPT_FRAME_HDR *mf;
+ u32 ctx2abort;
+ int scpnt_idx;
+ int retval;
+ VirtDevice *vdevice;
+ MPT_ADAPTER *ioc;
+
+ /* If we can't locate our host adapter structure, return FAILED status.
+ */
+ if ((hd = shost_priv(SCpnt->device->host)) == NULL) {
+ SCpnt->result = DID_RESET << 16;
+ SCpnt->scsi_done(SCpnt);
+ printk(KERN_ERR MYNAM ": task abort: "
+ "can't locate host! (sc=%p)\n", SCpnt);
+ return FAILED;
+ }
+
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting task abort! (sc=%p)\n",
+ ioc->name, SCpnt);
+ scsi_print_command(SCpnt);
+
+ vdevice = SCpnt->device->hostdata;
+ if (!vdevice || !vdevice->vtarget) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "task abort: device has been deleted (sc=%p)\n",
+ ioc->name, SCpnt));
+ SCpnt->result = DID_NO_CONNECT << 16;
+ SCpnt->scsi_done(SCpnt);
+ retval = SUCCESS;
+ goto out;
+ }
+
+ /* Task aborts are not supported for hidden raid components.
+ */
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "task abort: hidden raid component (sc=%p)\n",
+ ioc->name, SCpnt));
+ SCpnt->result = DID_RESET << 16;
+ retval = FAILED;
+ goto out;
+ }
+
+ /* Task aborts are not supported for volumes.
+ */
+ if (vdevice->vtarget->raidVolume) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "task abort: raid volume (sc=%p)\n",
+ ioc->name, SCpnt));
+ SCpnt->result = DID_RESET << 16;
+ retval = FAILED;
+ goto out;
+ }
+
+ /* Find this command
+ */
+ if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(ioc, SCpnt)) < 0) {
+ /* Cmd not found in ScsiLookup.
+ * Do OS callback.
+ */
+ SCpnt->result = DID_RESET << 16;
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
+ "Command not in the active list! (sc=%p)\n", ioc->name,
+ SCpnt));
+ retval = SUCCESS;
+ goto out;
+ }
+
+ if (ioc->timeouts < -1)
+ ioc->timeouts++;
+
+ if (mpt_fwfault_debug)
+ mpt_halt_firmware(ioc);
+
+ /* Most important! Set TaskMsgContext to SCpnt's MsgContext!
+ * (the IO to be ABORT'd)
+ *
+ * NOTE: Since we do not byteswap MsgContext, we do not
+ * swap it here either. It is an opaque cookie to
+ * the controller, so it does not matter. -DaveM
+ */
+ mf = MPT_INDEX_2_MFPTR(ioc, scpnt_idx);
+ ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext;
+ retval = mptscsih_IssueTaskMgmt(hd,
+ MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ vdevice->vtarget->channel,
+ vdevice->vtarget->id, vdevice->lun,
+ ctx2abort, mptscsih_get_tm_timeout(ioc));
+
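+	/* If the command is still in ScsiLookup, the abort did not
+	 * complete it; report failure to the mid-layer.
+	 */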
+ if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx) {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "task abort: command still in active list! (sc=%p)\n",
+ ioc->name, SCpnt));
+ retval = FAILED;
+ } else {
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "task abort: command cleared from active list! (sc=%p)\n",
+ ioc->name, SCpnt));
+ retval = SUCCESS;
+ }
+
+ out:
+ printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p)\n",
+ ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
+ SCpnt);
+
+ return retval;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_dev_reset - Perform a SCSI TARGET_RESET! new_eh variant
+ * @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
+ *
+ * (linux scsi_host_template.eh_dev_reset_handler routine)
+ *
+ * Returns SUCCESS or FAILED.
+ **/
+int
+mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
+{
+ MPT_SCSI_HOST *hd;
+ int retval;
+ VirtDevice *vdevice;
+ MPT_ADAPTER *ioc;
+
+ /* If we can't locate our host adapter structure, return FAILED status.
+ */
+ if ((hd = shost_priv(SCpnt->device->host)) == NULL){
+ printk(KERN_ERR MYNAM ": target reset: "
+ "Can't locate host! (sc=%p)\n", SCpnt);
+ return FAILED;
+ }
+
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting target reset! (sc=%p)\n",
+ ioc->name, SCpnt);
+ scsi_print_command(SCpnt);
+
+ vdevice = SCpnt->device->hostdata;
+ if (!vdevice || !vdevice->vtarget) {
+ retval = 0;
+ goto out;
+ }
+
+ /* Target reset to hidden raid component is not supported
+ */
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ retval = FAILED;
+ goto out;
+ }
+
+ retval = mptscsih_IssueTaskMgmt(hd,
+ MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+ vdevice->vtarget->channel,
+ vdevice->vtarget->id, 0, 0,
+ mptscsih_get_tm_timeout(ioc));
+
+ out:
+ printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
+ ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+
+ if (retval == 0)
+ return SUCCESS;
+ else
+ return FAILED;
+}
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_bus_reset - Perform a SCSI BUS_RESET! new_eh variant
+ * @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
+ *
+ * (linux scsi_host_template.eh_bus_reset_handler routine)
+ *
+ * Returns SUCCESS or FAILED.
+ **/
+int
+mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
+{
+ MPT_SCSI_HOST *hd;
+ int retval;
+ VirtDevice *vdevice;
+ MPT_ADAPTER *ioc;
+
+ /* If we can't locate our host adapter structure, return FAILED status.
+ */
+ if ((hd = shost_priv(SCpnt->device->host)) == NULL){
+ printk(KERN_ERR MYNAM ": bus reset: "
+ "Can't locate host! (sc=%p)\n", SCpnt);
+ return FAILED;
+ }
+
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting bus reset! (sc=%p)\n",
+ ioc->name, SCpnt);
+ scsi_print_command(SCpnt);
+
+ if (ioc->timeouts < -1)
+ ioc->timeouts++;
+
+ vdevice = SCpnt->device->hostdata;
+ if (!vdevice || !vdevice->vtarget)
+ return SUCCESS;
+ retval = mptscsih_IssueTaskMgmt(hd,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ vdevice->vtarget->channel, 0, 0, 0,
+ mptscsih_get_tm_timeout(ioc));
+
+ printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n",
+ ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+
+ if (retval == 0)
+ return SUCCESS;
+ else
+ return FAILED;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_host_reset - Perform a SCSI host adapter RESET (new_eh variant)
+ * @SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
+ *
+ * (linux scsi_host_template.eh_host_reset_handler routine)
+ *
+ * Returns SUCCESS or FAILED.
+ */
+int
+mptscsih_host_reset(struct scsi_cmnd *SCpnt)
+{
+ MPT_SCSI_HOST * hd;
+ int status = SUCCESS;
+ MPT_ADAPTER *ioc;
+ int retval;
+
+ /* If we can't locate the host to reset, then we failed. */
+ if ((hd = shost_priv(SCpnt->device->host)) == NULL){
+ printk(KERN_ERR MYNAM ": host reset: "
+ "Can't locate host! (sc=%p)\n", SCpnt);
+ return FAILED;
+ }
+
+ /* make sure we have no outstanding commands at this stage */
+ mptscsih_flush_running_cmds(hd);
+
+ ioc = hd->ioc;
+ printk(MYIOC_s_INFO_FMT "attempting host reset! (sc=%p)\n",
+ ioc->name, SCpnt);
+
+ /* If our attempts to reset the host failed, then return a failed
+ * status. The host will be taken off line by the SCSI mid-layer.
+ */
+ retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ if (retval < 0)
+ status = FAILED;
+ else
+ status = SUCCESS;
+
+ printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
+ ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+
+ return status;
+}
+
+static int
+mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
+ SCSITaskMgmtReply_t *pScsiTmReply)
+{
+ u16 iocstatus;
+ u32 termination_count;
+ int retval;
+
+ if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+ retval = FAILED;
+ goto out;
+ }
+
+ DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
+
+ iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+ termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
+
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n"
+ "\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n"
+ "\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus,
+ pScsiTmReply->TargetID, type, le16_to_cpu(pScsiTmReply->IOCStatus),
+ le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode,
+ termination_count));
+
+ if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
+ pScsiTmReply->ResponseCode)
+ mptscsih_taskmgmt_response_code(ioc,
+ pScsiTmReply->ResponseCode);
+
+ if (iocstatus == MPI_IOCSTATUS_SUCCESS) {
+ retval = 0;
+ goto out;
+ }
+
+ retval = FAILED;
+ if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+ if (termination_count == 1)
+ retval = 0;
+ goto out;
+ }
+
+ if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
+ iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
+ retval = 0;
+
+ out:
+ return retval;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+void
+mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
+{
+ char *desc;
+
+ switch (response_code) {
+ case MPI_SCSITASKMGMT_RSP_TM_COMPLETE:
+ desc = "The task completed.";
+ break;
+ case MPI_SCSITASKMGMT_RSP_INVALID_FRAME:
+ desc = "The IOC received an invalid frame status.";
+ break;
+ case MPI_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+ desc = "The task type is not supported.";
+ break;
+ case MPI_SCSITASKMGMT_RSP_TM_FAILED:
+ desc = "The requested task failed.";
+ break;
+ case MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+ desc = "The task completed successfully.";
+ break;
+ case MPI_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+ desc = "The LUN request is invalid.";
+ break;
+ case MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+		desc = "The task is in the IOC queue and has not been sent to the target.";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n",
+ ioc->name, response_code, desc);
+}
+EXPORT_SYMBOL(mptscsih_taskmgmt_response_code);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_taskmgmt_complete - Registered with Fusion MPT base driver
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @mf: Pointer to SCSI task mgmt request frame
+ * @mr: Pointer to SCSI task mgmt reply frame
+ *
+ * This routine is called from mptbase.c::mpt_interrupt() at the completion
+ * of any SCSI task management request.
+ * This routine is registered with the MPT (base) driver at driver
+ * load/init time via the mpt_register() API call.
+ *
+ * Returns 1 indicating alloc'd request frame ptr should be freed.
+ **/
+int
+mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
+ MPT_FRAME_HDR *mr)
+{
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr));
+
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+
+ if (!mr)
+ goto out;
+
+ ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->taskmgmt_cmds.reply, mr,
+ min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
+ out:
+ if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ mpt_clear_taskmgmt_in_progress_flag(ioc);
+ ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->taskmgmt_cmds.done);
+ if (ioc->bus_type == SAS)
+ ioc->schedule_target_reset(ioc);
+ return 1;
+ }
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	This is anyone's guess, quite frankly.
+ */
+int
+mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ ulong dummy;
+
+ heads = 64;
+ sectors = 32;
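+	/* Default translation: 64 heads x 32 sectors gives 2048 sectors per cylinder. */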
+
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders,dummy);
+
+ /*
+ * Handle extended translation size for logical drives
+	 * > 1 GB
+ */
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders,dummy);
+ }
+
+ /* return result */
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return 0;
+}
+
+/* Search IOC page 3 to determine if this is hidden physical disk
+ *
+ */
+int
+mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+ struct inactive_raid_component_info *component_info;
+ int i, j;
+ RaidPhysDiskPage1_t *phys_disk;
+ int rc = 0;
+ int num_paths;
+
+ if (!ioc->raid_data.pIocPg3)
+ goto out;
+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+ if ((id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID) &&
+ (channel == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskBus)) {
+ rc = 1;
+ goto out;
+ }
+ }
+
+ if (ioc->bus_type != SAS)
+ goto out;
+
+ /*
+ * Check if dual path
+ */
+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+ num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
+ ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
+ if (num_paths < 2)
+ continue;
+ phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+ (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+ if (!phys_disk)
+ continue;
+ if ((mpt_raid_phys_disk_pg1(ioc,
+ ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
+ phys_disk))) {
+ kfree(phys_disk);
+ continue;
+ }
+ for (j = 0; j < num_paths; j++) {
+ if ((phys_disk->Path[j].Flags &
+ MPI_RAID_PHYSDISK1_FLAG_INVALID))
+ continue;
+ if ((phys_disk->Path[j].Flags &
+ MPI_RAID_PHYSDISK1_FLAG_BROKEN))
+ continue;
+ if ((id == phys_disk->Path[j].PhysDiskID) &&
+ (channel == phys_disk->Path[j].PhysDiskBus)) {
+ rc = 1;
+ kfree(phys_disk);
+ goto out;
+ }
+ }
+ kfree(phys_disk);
+ }
+
+
+ /*
+ * Check inactive list for matching phys disks
+ */
+ if (list_empty(&ioc->raid_data.inactive_list))
+ goto out;
+
+ mutex_lock(&ioc->raid_data.inactive_list_mutex);
+ list_for_each_entry(component_info, &ioc->raid_data.inactive_list,
+ list) {
+ if ((component_info->d.PhysDiskID == id) &&
+ (component_info->d.PhysDiskBus == channel))
+ rc = 1;
+ }
+ mutex_unlock(&ioc->raid_data.inactive_list_mutex);
+
+ out:
+ return rc;
+}
+EXPORT_SYMBOL(mptscsih_is_phys_disk);
+
+u8
+mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+ struct inactive_raid_component_info *component_info;
+ int i, j;
+ RaidPhysDiskPage1_t *phys_disk;
+ int rc = -ENXIO;
+ int num_paths;
+
+ if (!ioc->raid_data.pIocPg3)
+ goto out;
+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+ if ((id == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskID) &&
+ (channel == ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskBus)) {
+ rc = ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum;
+ goto out;
+ }
+ }
+
+ if (ioc->bus_type != SAS)
+ goto out;
+
+ /*
+ * Check if dual path
+ */
+ for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+ num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
+ ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
+ if (num_paths < 2)
+ continue;
+ phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+ (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+ if (!phys_disk)
+ continue;
+ if ((mpt_raid_phys_disk_pg1(ioc,
+ ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
+ phys_disk))) {
+ kfree(phys_disk);
+ continue;
+ }
+ for (j = 0; j < num_paths; j++) {
+ if ((phys_disk->Path[j].Flags &
+ MPI_RAID_PHYSDISK1_FLAG_INVALID))
+ continue;
+ if ((phys_disk->Path[j].Flags &
+ MPI_RAID_PHYSDISK1_FLAG_BROKEN))
+ continue;
+ if ((id == phys_disk->Path[j].PhysDiskID) &&
+ (channel == phys_disk->Path[j].PhysDiskBus)) {
+ rc = phys_disk->PhysDiskNum;
+ kfree(phys_disk);
+ goto out;
+ }
+ }
+ kfree(phys_disk);
+ }
+
+ /*
+ * Check inactive list for matching phys disks
+ */
+ if (list_empty(&ioc->raid_data.inactive_list))
+ goto out;
+
+ mutex_lock(&ioc->raid_data.inactive_list_mutex);
+ list_for_each_entry(component_info, &ioc->raid_data.inactive_list,
+ list) {
+ if ((component_info->d.PhysDiskID == id) &&
+ (component_info->d.PhysDiskBus == channel))
+ rc = component_info->d.PhysDiskNum;
+ }
+ mutex_unlock(&ioc->raid_data.inactive_list_mutex);
+
+ out:
+ return rc;
+}
+EXPORT_SYMBOL(mptscsih_raid_id_to_num);
+
+/*
+ * OS entry point to allow for host driver to free allocated memory
+ * Called if no device present or device being unloaded
+ */
+void
+mptscsih_slave_destroy(struct scsi_device *sdev)
+{
+ struct Scsi_Host *host = sdev->host;
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ VirtTarget *vtarget;
+ VirtDevice *vdevice;
+ struct scsi_target *starget;
+
+ starget = scsi_target(sdev);
+ vtarget = starget->hostdata;
+ vdevice = sdev->hostdata;
+ if (!vdevice)
+ return;
+
+ mptscsih_search_running_cmds(hd, vdevice);
+ vtarget->num_luns--;
+ mptscsih_synchronize_cache(hd, vdevice);
+ kfree(vdevice);
+ sdev->hostdata = NULL;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_change_queue_depth - Set a device's queue depth
+ *	@sdev: per scsi_device pointer
+ *	@qdepth: requested queue depth
+ *	@reason: calling context
+ *
+ *	Supports the 'change_queue_depth' API.
+ */
+int
+mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+{
+ MPT_SCSI_HOST *hd = shost_priv(sdev->host);
+ VirtTarget *vtarget;
+ struct scsi_target *starget;
+ int max_depth;
+ int tagged;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ starget = scsi_target(sdev);
+ vtarget = starget->hostdata;
+
+ if (reason != SCSI_QDEPTH_DEFAULT)
+ return -EOPNOTSUPP;
+
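+	/* SPI: untagged devices are limited to a depth of one; Ultra160 or
+	 * faster disks get the high default, everything else the low one.
+	 */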
+ if (ioc->bus_type == SPI) {
+ if (!(vtarget->tflags & MPT_TARGET_FLAGS_Q_YES))
+ max_depth = 1;
+ else if (sdev->type == TYPE_DISK &&
+ vtarget->minSyncFactor <= MPT_ULTRA160)
+ max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
+ else
+ max_depth = MPT_SCSI_CMD_PER_DEV_LOW;
+ } else
+ max_depth = ioc->sh->can_queue;
+
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+ if (qdepth == 1)
+ tagged = 0;
+ else
+ tagged = MSG_SIMPLE_TAG;
+
+ scsi_adjust_queue_depth(sdev, tagged, qdepth);
+ return sdev->queue_depth;
+}
+
+/*
+ * OS entry point to adjust the queue_depths on a per-device basis.
+ *	Called once per device during the bus scan. Use it to force the
+ *	queue_depth member to 1 if a device does not support Q tags.
+ *	Returns non-zero on failure.
+ */
+int
+mptscsih_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *sh = sdev->host;
+ VirtTarget *vtarget;
+ VirtDevice *vdevice;
+ struct scsi_target *starget;
+ MPT_SCSI_HOST *hd = shost_priv(sh);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ starget = scsi_target(sdev);
+ vtarget = starget->hostdata;
+ vdevice = sdev->hostdata;
+
+ dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "device @ %p, channel=%d, id=%d, lun=%d\n",
+ ioc->name, sdev, sdev->channel, sdev->id, sdev->lun));
+ if (ioc->bus_type == SPI)
+ dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "sdtr %d wdtr %d ppr %d inq length=%d\n",
+ ioc->name, sdev->sdtr, sdev->wdtr,
+ sdev->ppr, sdev->inquiry_len));
+
+ vdevice->configured_lun = 1;
+
+ dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Queue depth=%d, tflags=%x\n",
+ ioc->name, sdev->queue_depth, vtarget->tflags));
+
+ if (ioc->bus_type == SPI)
+ dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "negoFlags=%x, maxOffset=%x, SyncFactor=%x\n",
+ ioc->name, vtarget->negoFlags, vtarget->maxOffset,
+ vtarget->minSyncFactor));
+
+ mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH,
+ SCSI_QDEPTH_DEFAULT);
+ dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "tagged %d, simple %d, ordered %d\n",
+ ioc->name,sdev->tagged_supported, sdev->simple_tags,
+ sdev->ordered_tags));
+
+ blk_queue_dma_alignment (sdev->request_queue, 512 - 1);
+
+ return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Private routines...
+ */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Utility function to copy sense data from the IOC sense buffer pool
+ * into the scsi_cmnd, and to log SMART sense events.
+ */
+static void
+mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply)
+{
+ VirtDevice *vdevice;
+ SCSIIORequest_t *pReq;
+ u32 sense_count = le32_to_cpu(pScsiReply->SenseCount);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ /* Get target structure
+ */
+ pReq = (SCSIIORequest_t *) mf;
+ vdevice = sc->device->hostdata;
+
+ if (sense_count) {
+ u8 *sense_data;
+ int req_index;
+
+ /* Copy the sense received into the scsi command block. */
+ req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+ sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
+ memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc));
+
+ /* Log SMART data (asc = 0x5D, non-IM case only) if required.
+ */
+ if ((ioc->events) && (ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) {
+ if ((sense_data[12] == 0x5D) && (vdevice->vtarget->raidVolume == 0)) {
+ int idx;
+
+ idx = ioc->eventContext % MPTCTL_EVENT_LOG_SIZE;
+ ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE;
+ ioc->events[idx].eventContext = ioc->eventContext;
+
+ ioc->events[idx].data[0] = (pReq->LUN[1] << 24) |
+ (MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA << 16) |
+ (sc->device->channel << 8) | sc->device->id;
+
+ ioc->events[idx].data[1] = (sense_data[13] << 8) | sense_data[12];
+
+ ioc->eventContext++;
+ if (ioc->pcidev->vendor ==
+ PCI_VENDOR_ID_IBM) {
+ mptscsih_issue_sep_command(ioc,
+ vdevice->vtarget, MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ vdevice->vtarget->tflags |=
+ MPT_TARGET_FLAGS_LED_ON;
+ }
+ }
+ }
+ } else {
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Hmmm... SenseData len=0! (?)\n",
+ ioc->name));
+ }
+}
+
+/**
+ * mptscsih_get_scsi_lookup - retrieves scmd entry
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @i: index into the array
+ *
+ * Returns the scsi_cmd pointer
+ */
+struct scsi_cmnd *
+mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
+{
+ unsigned long flags;
+ struct scsi_cmnd *scmd;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ scmd = ioc->ScsiLookup[i];
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ return scmd;
+}
+EXPORT_SYMBOL(mptscsih_get_scsi_lookup);
+
+/**
+ * mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @i: index into the array
+ *
+ * Returns the scsi_cmd pointer
+ *
+ **/
+static struct scsi_cmnd *
+mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
+{
+ unsigned long flags;
+ struct scsi_cmnd *scmd;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ scmd = ioc->ScsiLookup[i];
+ ioc->ScsiLookup[i] = NULL;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ return scmd;
+}
+
+/**
+ * mptscsih_set_scsi_lookup - write a scmd entry into the ScsiLookup[] array list
+ *
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @i: index into the array
+ * @scmd: scsi_cmnd pointer
+ *
+ **/
+static void
+mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ ioc->ScsiLookup[i] = scmd;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+}
+
+/**
+ * SCPNT_TO_LOOKUP_IDX - searches for a given scmd in the ScsiLookup[] array list
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sc: scsi_cmnd pointer
+ */
+static int
+SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *sc)
+{
+ unsigned long flags;
+ int i, index=-1;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (i = 0; i < ioc->req_depth; i++) {
+ if (ioc->ScsiLookup[i] == sc) {
+ index = i;
+ goto out;
+ }
+ }
+
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return index;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+int
+mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+ MPT_SCSI_HOST *hd;
+
+ if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL)
+ return 0;
+
+ hd = shost_priv(ioc->sh);
+ switch (reset_phase) {
+ case MPT_IOC_SETUP_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+ break;
+ case MPT_IOC_PRE_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
+ mptscsih_flush_running_cmds(hd);
+ break;
+ case MPT_IOC_POST_RESET:
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+ if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) {
+ ioc->internal_cmds.status |=
+ MPT_MGMT_STATUS_DID_IOCRESET;
+ complete(&ioc->internal_cmds.done);
+ }
+ break;
+ default:
+ break;
+ }
+ return 1; /* currently means nothing really */
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+int
+mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
+{
+ u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
+
+ devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "MPT event (=%02Xh) routed to SCSI host driver!\n",
+ ioc->name, event));
+
+ if ((event == MPI_EVENT_IOC_BUS_RESET ||
+ event == MPI_EVENT_EXT_BUS_RESET) &&
+ (ioc->bus_type == SPI) && (ioc->soft_resets < -1))
+ ioc->soft_resets++;
+
+ return 1; /* currently means nothing really */
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Bus Scan and Domain Validation functionality ...
+ */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptscsih_scandv_complete - Scan and DV callback routine registered
+ *	to Fusion MPT (base) driver.
+ *
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @mf: Pointer to original MPT request frame
+ * @mr: Pointer to MPT reply frame (NULL if TurboReply)
+ *
+ * This routine is called from mpt.c::mpt_interrupt() at the completion
+ * of any SCSI IO request.
+ * This routine is registered with the Fusion MPT (base) driver at driver
+ * load/init time via the mpt_register() API call.
+ *
+ * Returns 1 indicating alloc'd request frame ptr should be freed.
+ *
+ * Remark: Sets a completion code and (possibly) saves sense data
+ * in the IOC member localReply structure.
+ * Used ONLY for DV and other internal commands.
+ */
+int
+mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+ MPT_FRAME_HDR *reply)
+{
+ SCSIIORequest_t *pReq;
+ SCSIIOReply_t *pReply;
+ u8 cmd;
+ u16 req_idx;
+ u8 *sense_data;
+ int sz;
+
+ ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+ ioc->internal_cmds.completion_code = MPT_SCANDV_GOOD;
+ if (!reply)
+ goto out;
+
+ pReply = (SCSIIOReply_t *) reply;
+ pReq = (SCSIIORequest_t *) req;
+ ioc->internal_cmds.completion_code =
+ mptscsih_get_completion_code(ioc, req, reply);
+ ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+ memcpy(ioc->internal_cmds.reply, reply,
+ min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength));
+ cmd = reply->u.hdr.Function;
+ if (((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+ (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) &&
+ (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
+ req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
+ sense_data = ((u8 *)ioc->sense_buf_pool +
+ (req_idx * MPT_SENSE_BUFFER_ALLOC));
+ sz = min_t(int, pReq->SenseBufferLength,
+ MPT_SENSE_BUFFER_ALLOC);
+ memcpy(ioc->internal_cmds.sense, sense_data, sz);
+ }
+ out:
+ if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING))
+ return 0;
+ ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+ complete(&ioc->internal_cmds.done);
+ return 1;
+}
+
+
+/**
+ * mptscsih_get_completion_code - get completion code from MPT request
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @req: Pointer to original MPT request frame
+ * @reply: Pointer to MPT reply frame (NULL if TurboReply)
+ *
+ **/
+static int
+mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+ MPT_FRAME_HDR *reply)
+{
+ SCSIIOReply_t *pReply;
+ MpiRaidActionReply_t *pr;
+ u8 scsi_status;
+ u16 status;
+ int completion_code;
+
+ pReply = (SCSIIOReply_t *)reply;
+ status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+ scsi_status = pReply->SCSIStatus;
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh,"
+ "IOCLogInfo=%08xh\n", ioc->name, status, pReply->SCSIState,
+ scsi_status, le32_to_cpu(pReply->IOCLogInfo)));
+
+ switch (status) {
+
+ case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
+ completion_code = MPT_SCANDV_SELECTION_TIMEOUT;
+ break;
+
+ case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
+ case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
+ case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
+ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
+ completion_code = MPT_SCANDV_DID_RESET;
+ break;
+
+ case MPI_IOCSTATUS_BUSY:
+ case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ completion_code = MPT_SCANDV_BUSY;
+ break;
+
+ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
+ case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
+ case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
+ if (pReply->Function == MPI_FUNCTION_CONFIG) {
+ completion_code = MPT_SCANDV_GOOD;
+ } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
+ pr = (MpiRaidActionReply_t *)reply;
+ if (le16_to_cpu(pr->ActionStatus) ==
+ MPI_RAID_ACTION_ASTATUS_SUCCESS)
+ completion_code = MPT_SCANDV_GOOD;
+ else
+ completion_code = MPT_SCANDV_SOME_ERROR;
+ } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)
+ completion_code = MPT_SCANDV_SENSE;
+ else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
+ if (req->u.scsireq.CDB[0] == INQUIRY)
+ completion_code = MPT_SCANDV_ISSUE_SENSE;
+ else
+ completion_code = MPT_SCANDV_DID_RESET;
+ } else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
+ completion_code = MPT_SCANDV_DID_RESET;
+ else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
+ completion_code = MPT_SCANDV_DID_RESET;
+ else if (scsi_status == MPI_SCSI_STATUS_BUSY)
+ completion_code = MPT_SCANDV_BUSY;
+ else
+ completion_code = MPT_SCANDV_GOOD;
+ break;
+
+ case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
+ if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
+ completion_code = MPT_SCANDV_DID_RESET;
+ else
+ completion_code = MPT_SCANDV_SOME_ERROR;
+ break;
+ default:
+ completion_code = MPT_SCANDV_SOME_ERROR;
+ break;
+
+ } /* switch(status) */
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ " completionCode set to %08xh\n", ioc->name, completion_code));
+ return completion_code;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_do_cmd - Do internal command.
+ * @hd: MPT_SCSI_HOST pointer
+ * @io: INTERNAL_CMD pointer.
+ *
+ * Issue the specified internally generated command and do command
+ * specific cleanup. For bus scan / DV only.
+ * NOTES: If command is Inquiry and status is good,
+ * initialize a target structure, save the data
+ *
+ * Remark: Single threaded access only.
+ *
+ * Return:
+ * < 0 if an illegal command or no resources
+ *
+ * 0 if good
+ *
+ * > 0 if command complete but some type of completion error.
+ */
+static int
+mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
+{
+ MPT_FRAME_HDR *mf;
+ SCSIIORequest_t *pScsiReq;
+ int my_idx, ii, dir;
+ int timeout;
+ char cmdLen;
+ char CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+ u8 cmd = io->cmd;
+ MPT_ADAPTER *ioc = hd->ioc;
+ int ret = 0;
+ unsigned long timeleft;
+ unsigned long flags;
+
+ /* don't send internal command during diag reset */
+ spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+ if (ioc->ioc_reset_in_progress) {
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: busy with host reset\n", ioc->name, __func__));
+ return MPT_SCANDV_BUSY;
+ }
+ spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+ mutex_lock(&ioc->internal_cmds.mutex);
+
+ /* Set command specific information
+ */
+ switch (cmd) {
+ case INQUIRY:
+ cmdLen = 6;
+ dir = MPI_SCSIIO_CONTROL_READ;
+ CDB[0] = cmd;
+ CDB[4] = io->size;
+ timeout = 10;
+ break;
+
+ case TEST_UNIT_READY:
+ cmdLen = 6;
+ dir = MPI_SCSIIO_CONTROL_READ;
+ timeout = 10;
+ break;
+
+ case START_STOP:
+ cmdLen = 6;
+ dir = MPI_SCSIIO_CONTROL_READ;
+ CDB[0] = cmd;
+ CDB[4] = 1; /*Spin up the disk */
+ timeout = 15;
+ break;
+
+ case REQUEST_SENSE:
+ cmdLen = 6;
+ CDB[0] = cmd;
+ CDB[4] = io->size;
+ dir = MPI_SCSIIO_CONTROL_READ;
+ timeout = 10;
+ break;
+
+ case READ_BUFFER:
+ cmdLen = 10;
+ dir = MPI_SCSIIO_CONTROL_READ;
+ CDB[0] = cmd;
+ if (io->flags & MPT_ICFLAG_ECHO) {
+ CDB[1] = 0x0A;
+ } else {
+ CDB[1] = 0x02;
+ }
+
+ if (io->flags & MPT_ICFLAG_BUF_CAP) {
+ CDB[1] |= 0x01;
+ }
+ CDB[6] = (io->size >> 16) & 0xFF;
+ CDB[7] = (io->size >> 8) & 0xFF;
+ CDB[8] = io->size & 0xFF;
+ timeout = 10;
+ break;
+
+ case WRITE_BUFFER:
+ cmdLen = 10;
+ dir = MPI_SCSIIO_CONTROL_WRITE;
+ CDB[0] = cmd;
+ if (io->flags & MPT_ICFLAG_ECHO) {
+ CDB[1] = 0x0A;
+ } else {
+ CDB[1] = 0x02;
+ }
+ CDB[6] = (io->size >> 16) & 0xFF;
+ CDB[7] = (io->size >> 8) & 0xFF;
+ CDB[8] = io->size & 0xFF;
+ timeout = 10;
+ break;
+
+ case RESERVE:
+ cmdLen = 6;
+ dir = MPI_SCSIIO_CONTROL_READ;
+ CDB[0] = cmd;
+ timeout = 10;
+ break;
+
+ case RELEASE:
+ cmdLen = 6;
+ dir = MPI_SCSIIO_CONTROL_READ;
+ CDB[0] = cmd;
+ timeout = 10;
+ break;
+
+ case SYNCHRONIZE_CACHE:
+ cmdLen = 10;
+ dir = MPI_SCSIIO_CONTROL_READ;
+ CDB[0] = cmd;
+// CDB[1] = 0x02; /* set immediate bit */
+ timeout = 10;
+ break;
+
+ default:
+ /* Error Case */
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /* Get and Populate a free Frame
+ * MsgContext set in mpt_get_msg_frame call
+ */
+ if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: No msg frames!\n",
+ ioc->name, __func__));
+ ret = MPT_SCANDV_BUSY;
+ goto out;
+ }
+
+ pScsiReq = (SCSIIORequest_t *) mf;
+
+ /* Get the request index */
+ my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+ ADD_INDEX_LOG(my_idx); /* for debug */
+
+ if (io->flags & MPT_ICFLAG_PHYS_DISK) {
+ pScsiReq->TargetID = io->physDiskNum;
+ pScsiReq->Bus = 0;
+ pScsiReq->ChainOffset = 0;
+ pScsiReq->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
+ } else {
+ pScsiReq->TargetID = io->id;
+ pScsiReq->Bus = io->channel;
+ pScsiReq->ChainOffset = 0;
+ pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
+ }
+
+ pScsiReq->CDBLength = cmdLen;
+ pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
+
+ pScsiReq->Reserved = 0;
+
+ pScsiReq->MsgFlags = mpt_msg_flags(ioc);
+	/* MsgContext set in mpt_get_msg_frame call */
+
+ int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN);
+
+ if (io->flags & MPT_ICFLAG_TAGGED_CMD)
+ pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_SIMPLEQ);
+ else
+ pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
+
+ if (cmd == REQUEST_SENSE) {
+ pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: Untagged! 0x%02x\n", ioc->name, __func__, cmd));
+ }
+
+ for (ii = 0; ii < 16; ii++)
+ pScsiReq->CDB[ii] = CDB[ii];
+
+ pScsiReq->DataLength = cpu_to_le32(io->size);
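+	/* Sense data for this request lands in its fixed slot of the
+	 * adapter-wide sense buffer pool, indexed by the request index.
+	 */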
+ pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
+ + (my_idx * MPT_SENSE_BUFFER_ALLOC));
+
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%d\n",
+ ioc->name, __func__, cmd, io->channel, io->id, io->lun));
+
+ if (dir == MPI_SCSIIO_CONTROL_READ)
+ ioc->add_sge((char *) &pScsiReq->SGL,
+ MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma);
+ else
+ ioc->add_sge((char *) &pScsiReq->SGL,
+ MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma);
+
+ INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
+ mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
+ timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
+ timeout*HZ);
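+	/* Anything other than a clean completion: if an IOC reset raced
+	 * this command the frame is simply freed; on a genuine timeout the
+	 * IOC is reset before the frame is released.
+	 */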
+ if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = MPT_SCANDV_DID_RESET;
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "%s: TIMED OUT for cmd=0x%02x\n", ioc->name, __func__,
+ cmd));
+ if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+ mpt_free_msg_frame(ioc, mf);
+ goto out;
+ }
+ if (!timeleft) {
+ printk(MYIOC_s_WARN_FMT
+ "Issuing Reset from %s!! doorbell=0x%08xh"
+ " cmd=0x%02x\n",
+ ioc->name, __func__, mpt_GetIocState(ioc, 0),
+ cmd);
+ mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+ }
+ goto out;
+ }
+
+ ret = ioc->internal_cmds.completion_code;
+ devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: success, rc=0x%02x\n",
+ ioc->name, __func__, ret));
+
+ out:
+ CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+ mutex_unlock(&ioc->internal_cmds.mutex);
+ return ret;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
+ * @hd: Pointer to a SCSI HOST structure
+ * @vdevice: virtual target device
+ *
+ * Uses the ISR, but with special processing.
+ * MUST be single-threaded.
+ *
+ */
+static void
+mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
+{
+ INTERNAL_CMD iocmd;
+
+ /* Ignore hidden raid components, this is handled when the command
+ * is sent to the volume
+ */
+ if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
+ return;
+
+ if (vdevice->vtarget->type != TYPE_DISK || vdevice->vtarget->deleted ||
+ !vdevice->configured_lun)
+ return;
+
+ /* Following parameters will not change
+ * in this routine.
+ */
+ iocmd.cmd = SYNCHRONIZE_CACHE;
+ iocmd.flags = 0;
+ iocmd.physDiskNum = -1;
+ iocmd.data = NULL;
+ iocmd.data_dma = -1;
+ iocmd.size = 0;
+ iocmd.rsvd = iocmd.rsvd2 = 0;
+ iocmd.channel = vdevice->vtarget->channel;
+ iocmd.id = vdevice->vtarget->id;
+ iocmd.lun = vdevice->lun;
+
+ mptscsih_do_cmd(hd, &iocmd);
+}
+
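+/*
+ * Host sysfs attributes: read-only adapter identification and configuration
+ * values plus a writable debug_level, exported through mptscsih_host_attrs[].
+ */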
+static ssize_t
+mptscsih_version_fw_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF);
+}
+static DEVICE_ATTR(version_fw, S_IRUGO, mptscsih_version_fw_show, NULL);
+
+static ssize_t
+mptscsih_version_bios_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ (ioc->biosVersion & 0xFF000000) >> 24,
+ (ioc->biosVersion & 0x00FF0000) >> 16,
+ (ioc->biosVersion & 0x0000FF00) >> 8,
+ ioc->biosVersion & 0x000000FF);
+}
+static DEVICE_ATTR(version_bios, S_IRUGO, mptscsih_version_bios_show, NULL);
+
+static ssize_t
+mptscsih_version_mpi_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%03x\n", ioc->facts.MsgVersion);
+}
+static DEVICE_ATTR(version_mpi, S_IRUGO, mptscsih_version_mpi_show, NULL);
+
+static ssize_t
+mptscsih_version_product_show(struct device *dev,
+ struct device_attribute *attr,
+char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ioc->prod_name);
+}
+static DEVICE_ATTR(version_product, S_IRUGO,
+ mptscsih_version_product_show, NULL);
+
+static ssize_t
+mptscsih_version_nvdata_persistent_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02xh\n",
+ ioc->nvdata_version_persistent);
+}
+static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
+ mptscsih_version_nvdata_persistent_show, NULL);
+
+static ssize_t
+mptscsih_version_nvdata_default_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02xh\n",ioc->nvdata_version_default);
+}
+static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
+ mptscsih_version_nvdata_default_show, NULL);
+
+static ssize_t
+mptscsih_board_name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_name);
+}
+static DEVICE_ATTR(board_name, S_IRUGO, mptscsih_board_name_show, NULL);
+
+static ssize_t
+mptscsih_board_assembly_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_assembly);
+}
+static DEVICE_ATTR(board_assembly, S_IRUGO,
+ mptscsih_board_assembly_show, NULL);
+
+static ssize_t
+mptscsih_board_tracer_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ioc->board_tracer);
+}
+static DEVICE_ATTR(board_tracer, S_IRUGO,
+ mptscsih_board_tracer_show, NULL);
+
+static ssize_t
+mptscsih_io_delay_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
+}
+static DEVICE_ATTR(io_delay, S_IRUGO,
+ mptscsih_io_delay_show, NULL);
+
+static ssize_t
+mptscsih_device_delay_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
+}
+static DEVICE_ATTR(device_delay, S_IRUGO,
+ mptscsih_device_delay_show, NULL);
+
+static ssize_t
+mptscsih_debug_level_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->debug_level);
+}
+static ssize_t
+mptscsih_debug_level_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *host = class_to_shost(dev);
+ MPT_SCSI_HOST *hd = shost_priv(host);
+ MPT_ADAPTER *ioc = hd->ioc;
+ int val = 0;
+
+ if (sscanf(buf, "%x", &val) != 1)
+ return -EINVAL;
+
+ ioc->debug_level = val;
+ printk(MYIOC_s_INFO_FMT "debug_level=%08xh\n",
+ ioc->name, ioc->debug_level);
+ return strlen(buf);
+}
+static DEVICE_ATTR(debug_level, S_IRUGO | S_IWUSR,
+ mptscsih_debug_level_show, mptscsih_debug_level_store);
+
+struct device_attribute *mptscsih_host_attrs[] = {
+ &dev_attr_version_fw,
+ &dev_attr_version_bios,
+ &dev_attr_version_mpi,
+ &dev_attr_version_product,
+ &dev_attr_version_nvdata_persistent,
+ &dev_attr_version_nvdata_default,
+ &dev_attr_board_name,
+ &dev_attr_board_assembly,
+ &dev_attr_board_tracer,
+ &dev_attr_io_delay,
+ &dev_attr_device_delay,
+ &dev_attr_debug_level,
+ NULL,
+};
+
+EXPORT_SYMBOL(mptscsih_host_attrs);
+
+EXPORT_SYMBOL(mptscsih_remove);
+EXPORT_SYMBOL(mptscsih_shutdown);
+#ifdef CONFIG_PM
+EXPORT_SYMBOL(mptscsih_suspend);
+EXPORT_SYMBOL(mptscsih_resume);
+#endif
+EXPORT_SYMBOL(mptscsih_proc_info);
+EXPORT_SYMBOL(mptscsih_info);
+EXPORT_SYMBOL(mptscsih_qcmd);
+EXPORT_SYMBOL(mptscsih_slave_destroy);
+EXPORT_SYMBOL(mptscsih_slave_configure);
+EXPORT_SYMBOL(mptscsih_abort);
+EXPORT_SYMBOL(mptscsih_dev_reset);
+EXPORT_SYMBOL(mptscsih_bus_reset);
+EXPORT_SYMBOL(mptscsih_host_reset);
+EXPORT_SYMBOL(mptscsih_bios_param);
+EXPORT_SYMBOL(mptscsih_io_done);
+EXPORT_SYMBOL(mptscsih_taskmgmt_complete);
+EXPORT_SYMBOL(mptscsih_scandv_complete);
+EXPORT_SYMBOL(mptscsih_event_process);
+EXPORT_SYMBOL(mptscsih_ioc_reset);
+EXPORT_SYMBOL(mptscsih_change_queue_depth);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
new file mode 100644
index 00000000..45a5ff3e
--- /dev/null
+++ b/drivers/message/fusion/mptscsih.h
@@ -0,0 +1,137 @@
+/*
+ * linux/drivers/message/fusion/mptscsih.h
+ * High performance SCSI / Fibre Channel SCSI Host device driver.
+ * For use with PCI chip/adapter(s):
+ * LSIFC9xx/LSI409xx Fibre Channel
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#ifndef SCSIHOST_H_INCLUDED
+#define SCSIHOST_H_INCLUDED
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * SCSI Public stuff...
+ */
+
+#define MPT_SCANDV_GOOD (0x00000000) /* must be 0 */
+#define MPT_SCANDV_DID_RESET (0x00000001)
+#define MPT_SCANDV_SENSE (0x00000002)
+#define MPT_SCANDV_SOME_ERROR (0x00000004)
+#define MPT_SCANDV_SELECTION_TIMEOUT (0x00000008)
+#define MPT_SCANDV_ISSUE_SENSE (0x00000010)
+#define MPT_SCANDV_FALLBACK (0x00000020)
+#define MPT_SCANDV_BUSY (0x00000040)
+
+#define MPT_SCANDV_MAX_RETRIES (10)
+
+#define MPT_ICFLAG_BUF_CAP 0x01 /* ReadBuffer Read Capacity format */
+#define MPT_ICFLAG_ECHO 0x02 /* ReadBuffer Echo buffer format */
+#define MPT_ICFLAG_EBOS 0x04 /* ReadBuffer Echo buffer has EBOS */
+#define MPT_ICFLAG_PHYS_DISK 0x08 /* Any SCSI IO but do Phys Disk Format */
+#define MPT_ICFLAG_TAGGED_CMD 0x10 /* Do tagged IO */
+#define MPT_ICFLAG_DID_RESET 0x20 /* Bus Reset occurred with this command */
+#define MPT_ICFLAG_RESERVED 0x40 /* Reserved has been issued */
+
+#define MPT_SCSI_CMD_PER_DEV_HIGH 64
+#define MPT_SCSI_CMD_PER_DEV_LOW 32
+
+#define MPT_SCSI_CMD_PER_LUN 7
+
+#define MPT_SCSI_MAX_SECTORS 8192
+
+/* SCSI driver setup structure. Settings can be overridden
+ * by command line options.
+ */
+#define MPTSCSIH_DOMAIN_VALIDATION 1
+#define MPTSCSIH_MAX_WIDTH 1
+#define MPTSCSIH_MIN_SYNC 0x08
+#define MPTSCSIH_SAF_TE 0
+#define MPTSCSIH_PT_CLEAR 0
+
+
+typedef struct _internal_cmd {
+ char *data; /* data pointer */
+ dma_addr_t data_dma; /* data dma address */
+ int size; /* transfer size */
+ u8 cmd; /* SCSI Op Code */
+ u8 channel; /* bus number */
+ u8 id; /* SCSI ID (virtual) */
+ int lun;
+ u8 flags; /* Bit Field - See above */
+	u8		physDiskNum;	/* Phys disk number, or -1 if unused */
+ u8 rsvd2;
+ u8 rsvd;
+} INTERNAL_CMD;
+
+extern void mptscsih_remove(struct pci_dev *);
+extern void mptscsih_shutdown(struct pci_dev *);
+#ifdef CONFIG_PM
+extern int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
+extern int mptscsih_resume(struct pci_dev *pdev);
+#endif
+extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
+extern const char * mptscsih_info(struct Scsi_Host *SChost);
+extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
+extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
+ u8 id, int lun, int ctx2abort, ulong timeout);
+extern void mptscsih_slave_destroy(struct scsi_device *device);
+extern int mptscsih_slave_configure(struct scsi_device *device);
+extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
+extern int mptscsih_dev_reset(struct scsi_cmnd * SCpnt);
+extern int mptscsih_bus_reset(struct scsi_cmnd * SCpnt);
+extern int mptscsih_host_reset(struct scsi_cmnd *SCpnt);
+extern int mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev, sector_t capacity, int geom[]);
+extern int mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
+extern int mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
+extern int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
+extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
+extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
+extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth,
+ int reason);
+extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
+extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
+extern struct device_attribute *mptscsih_host_attrs[];
+extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
+extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
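+
+#endif	/* SCSIHOST_H_INCLUDED */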
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
new file mode 100644
index 00000000..8f61ba6a
--- /dev/null
+++ b/drivers/message/fusion/mptspi.c
@@ -0,0 +1,1615 @@
+/*
+ * linux/drivers/message/fusion/mptspi.c
+ * For use with LSI PCI chip/adapter(s)
+ * running LSI Fusion MPT (Message Passing Technology) firmware.
+ *
+ * Copyright (c) 1999-2008 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h> /* for mdelay */
+#include <linux/interrupt.h> /* needed for in_interrupt() proto */
+#include <linux/reboot.h> /* notifier code */
+#include <linux/workqueue.h>
+#include <linux/raid_class.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_spi.h>
+#include <scsi/scsi_dbg.h>
+
+#include "mptbase.h"
+#include "mptscsih.h"
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#define my_NAME "Fusion MPT SPI Host driver"
+#define my_VERSION MPT_LINUX_VERSION_COMMON
+#define MYNAM "mptspi"
+
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(my_VERSION);
+
+/* Command line args */
+static int mpt_saf_te = MPTSCSIH_SAF_TE;
+module_param(mpt_saf_te, int, 0);
+MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)");
+
+static void mptspi_write_offset(struct scsi_target *, int);
+static void mptspi_write_width(struct scsi_target *, int);
+static int mptspi_write_spi_device_pg1(struct scsi_target *,
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 *);
+
+static struct scsi_transport_template *mptspi_transport_template = NULL;
+
+static u8 mptspiDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptspiTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptspiInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
+
+/**
+ * mptspi_setTargetNegoParms - Update the target negotiation parameters
+ * @hd: Pointer to a SCSI Host Structure
+ * @target: per target private data
+ * @sdev: SCSI device
+ *
+ *	Update the target negotiation parameters based on the Inquiry
+ * data, adapter capabilities, and NVRAM settings.
+ **/
+static void
+mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target,
+ struct scsi_device *sdev)
+{
+ MPT_ADAPTER *ioc = hd->ioc;
+ SpiCfgData *pspi_data = &ioc->spi_data;
+ int id = (int) target->id;
+ int nvram;
+ u8 width = MPT_NARROW;
+ u8 factor = MPT_ASYNC;
+ u8 offset = 0;
+ u8 nfactor;
+ u8 noQas = 1;
+
+ target->negoFlags = pspi_data->noQas;
+
+ if (sdev->scsi_level < SCSI_2) {
+ width = 0;
+ factor = MPT_ULTRA2;
+ offset = pspi_data->maxSyncOffset;
+ target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
+ } else {
+ if (scsi_device_wide(sdev))
+ width = 1;
+
+ if (scsi_device_sync(sdev)) {
+ factor = pspi_data->minSyncFactor;
+ if (!scsi_device_dt(sdev))
+ factor = MPT_ULTRA2;
+ else {
+ if (!scsi_device_ius(sdev) &&
+ !scsi_device_qas(sdev))
+ factor = MPT_ULTRA160;
+ else {
+ factor = MPT_ULTRA320;
+ if (scsi_device_qas(sdev)) {
+ ddvprintk(ioc,
+ printk(MYIOC_s_DEBUG_FMT "Enabling QAS due to "
+ "byte56=%02x on id=%d!\n", ioc->name,
+ scsi_device_qas(sdev), id));
+ noQas = 0;
+ }
+ if (sdev->type == TYPE_TAPE &&
+ scsi_device_ius(sdev))
+ target->negoFlags |= MPT_TAPE_NEGO_IDP;
+ }
+ }
+ offset = pspi_data->maxSyncOffset;
+
+			/* Never disable QAS for a RAID volume. For non-RAID
+			 * targets, QAS is left enabled only when the device
+			 * reports support for it (inquiry byte 56: bit 1 =
+			 * QAS support, non-RAID only; bit 0 = IU support).
+			 */
+ if (target->raidVolume == 1)
+ noQas = 0;
+ } else {
+ factor = MPT_ASYNC;
+ offset = 0;
+ }
+ }
+
+ if (!sdev->tagged_supported)
+ target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
+
+ /* Update tflags based on NVRAM settings. (SCSI only)
+ */
+ if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
+ nvram = pspi_data->nvram[id];
+ nfactor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8;
+
+ if (width)
+ width = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
+
+ if (offset > 0) {
+ /* Ensure factor is set to the
+ * maximum of: adapter, nvram, inquiry
+ */
+ if (nfactor) {
+ if (nfactor < pspi_data->minSyncFactor )
+ nfactor = pspi_data->minSyncFactor;
+
+ factor = max(factor, nfactor);
+ if (factor == MPT_ASYNC)
+ offset = 0;
+ } else {
+ offset = 0;
+ factor = MPT_ASYNC;
+ }
+ } else {
+ factor = MPT_ASYNC;
+ }
+ }
+
+ /* Make sure data is consistent
+ */
+ if ((!width) && (factor < MPT_ULTRA2))
+ factor = MPT_ULTRA2;
+
+ /* Save the data to the target structure.
+ */
+ target->minSyncFactor = factor;
+ target->maxOffset = offset;
+ target->maxWidth = width;
+
+ spi_min_period(scsi_target(sdev)) = factor;
+ spi_max_offset(scsi_target(sdev)) = offset;
+ spi_max_width(scsi_target(sdev)) = width;
+
+ target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO;
+
+ /* Disable unused features.
+ */
+ if (!width)
+ target->negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
+
+ if (!offset)
+ target->negoFlags |= MPT_TARGET_NO_NEGO_SYNC;
+
+ if ( factor > MPT_ULTRA320 )
+ noQas = 0;
+
+ if (noQas && (pspi_data->noQas == 0)) {
+ pspi_data->noQas |= MPT_TARGET_NO_NEGO_QAS;
+ target->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
+
+ /* Disable QAS in a mixed configuration case
+ */
+
+ ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Disabling QAS due to noQas=%02x on id=%d!\n", ioc->name, noQas, id));
+ }
+}
+
+/**
+ * mptspi_writeIOCPage4 - write IOC Page 4
+ * @hd: Pointer to a SCSI Host Structure
+ * @channel: channel number
+ * @id: write IOC Page4 for this ID & Bus
+ *
+ *	Return: 0 on success, or -EAGAIN if unable to obtain a message frame.
+ *
+ * Remark: We do not wait for a return, write pages sequentially.
+ **/
+static int
+mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
+{
+ MPT_ADAPTER *ioc = hd->ioc;
+ Config_t *pReq;
+ IOCPage4_t *IOCPage4Ptr;
+ MPT_FRAME_HDR *mf;
+ dma_addr_t dataDma;
+ u16 req_idx;
+ u32 frameOffset;
+ u32 flagsLength;
+ int ii;
+
+ /* Get a MF for this command.
+ */
+ if ((mf = mpt_get_msg_frame(ioc->DoneCtx, ioc)) == NULL) {
+ dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
+ "writeIOCPage4 : no msg frames!\n",ioc->name));
+ return -EAGAIN;
+ }
+
+ /* Set the request and the data pointers.
+ * Place data at end of MF.
+ */
+ pReq = (Config_t *)mf;
+
+ req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+ frameOffset = ioc->req_sz - sizeof(IOCPage4_t);
+
+ /* Complete the request frame (same for all requests).
+ */
+ pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ pReq->Reserved = 0;
+ pReq->ChainOffset = 0;
+ pReq->Function = MPI_FUNCTION_CONFIG;
+ pReq->ExtPageLength = 0;
+ pReq->ExtPageType = 0;
+ pReq->MsgFlags = 0;
+ for (ii=0; ii < 8; ii++) {
+ pReq->Reserved2[ii] = 0;
+ }
+
+ IOCPage4Ptr = ioc->spi_data.pIocPg4;
+ dataDma = ioc->spi_data.IocPg4_dma;
+ ii = IOCPage4Ptr->ActiveSEP++;
+ IOCPage4Ptr->SEP[ii].SEPTargetID = id;
+ IOCPage4Ptr->SEP[ii].SEPBus = channel;
+ pReq->Header = IOCPage4Ptr->Header;
+ pReq->PageAddress = cpu_to_le32(id | (channel << 8 ));
+
+ /* Add a SGE to the config request.
+ */
+ flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
+ (IOCPage4Ptr->Header.PageLength + ii) * 4;
+
+ ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
+
+ ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
+ ioc->name, IOCPage4Ptr->MaxSEP, IOCPage4Ptr->ActiveSEP, id, channel));
+
+ mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
+
+ return 0;
+}
+
+/**
+ *	mptspi_initTarget - Initialize the per-target data for this device.
+ * @hd: Pointer to MPT_SCSI_HOST structure
+ * @vtarget: per target private data
+ * @sdev: SCSI device
+ *
+ * NOTE: It's only SAFE to call this routine if data points to
+ * sane & valid STANDARD INQUIRY data!
+ *
+ *	Record the device type, flag SAF-TE processors, and set up the
+ *	negotiation parameters for this target.
+ *
+ **/
+static void
+mptspi_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget,
+ struct scsi_device *sdev)
+{
+
+ /* Is LUN supported? If so, upper 2 bits will be 0
+ * in first byte of inquiry data.
+ */
+ if (sdev->inq_periph_qual != 0)
+ return;
+
+ if (vtarget == NULL)
+ return;
+
+ vtarget->type = sdev->type;
+
+ if ((sdev->type == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) {
+ /* Treat all Processors as SAF-TE if
+ * command line option is set */
+ vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
+ mptspi_writeIOCPage4(hd, vtarget->channel, vtarget->id);
+ }else if ((sdev->type == TYPE_PROCESSOR) &&
+ !(vtarget->tflags & MPT_TARGET_FLAGS_SAF_TE_ISSUED )) {
+ if (sdev->inquiry_len > 49 ) {
+ if (sdev->inquiry[44] == 'S' &&
+ sdev->inquiry[45] == 'A' &&
+ sdev->inquiry[46] == 'F' &&
+ sdev->inquiry[47] == '-' &&
+ sdev->inquiry[48] == 'T' &&
+ sdev->inquiry[49] == 'E' ) {
+ vtarget->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
+ mptspi_writeIOCPage4(hd, vtarget->channel, vtarget->id);
+ }
+ }
+ }
+ mptspi_setTargetNegoParms(hd, vtarget, sdev);
+}
+
+/**
+ *	mptspi_is_raid - Determine whether the target belongs to a RAID volume
+ * @hd: Pointer to a SCSI HOST structure
+ * @id: target device id
+ *
+ * Return:
+ * non-zero = true
+ * zero = false
+ *
+ */
+static int
+mptspi_is_raid(struct _MPT_SCSI_HOST *hd, u32 id)
+{
+ int i, rc = 0;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ if (!ioc->raid_data.pIocPg2)
+ goto out;
+
+ if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
+ goto out;
+ for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
+ if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id) {
+ rc = 1;
+ goto out;
+ }
+ }
+
+ out:
+ return rc;
+}
+
+static int mptspi_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct _MPT_SCSI_HOST *hd = shost_priv(shost);
+ VirtTarget *vtarget;
+ MPT_ADAPTER *ioc;
+
+ if (hd == NULL)
+ return -ENODEV;
+
+ ioc = hd->ioc;
+ vtarget = kzalloc(sizeof(VirtTarget), GFP_KERNEL);
+ if (!vtarget)
+ return -ENOMEM;
+
+ vtarget->ioc_id = ioc->id;
+ vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
+ vtarget->id = (u8)starget->id;
+ vtarget->channel = (u8)starget->channel;
+ vtarget->starget = starget;
+ starget->hostdata = vtarget;
+
+ if (starget->channel == 1) {
+ if (mptscsih_is_phys_disk(ioc, 0, starget->id) == 0)
+ return 0;
+ vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
+ /* The real channel for this device is zero */
+ vtarget->channel = 0;
+ /* The actual physdisknum (for RAID passthrough) */
+ vtarget->id = mptscsih_raid_id_to_num(ioc, 0,
+ starget->id);
+ }
+
+ if (starget->channel == 0 &&
+ mptspi_is_raid(hd, starget->id)) {
+ vtarget->raidVolume = 1;
+ ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "RAID Volume @ channel=%d id=%d\n", ioc->name, starget->channel,
+ starget->id));
+ }
+
+ if (ioc->spi_data.nvram &&
+ ioc->spi_data.nvram[starget->id] != MPT_HOST_NVRAM_INVALID) {
+ u32 nvram = ioc->spi_data.nvram[starget->id];
+ spi_min_period(starget) = (nvram & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
+ spi_max_width(starget) = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
+ } else {
+ spi_min_period(starget) = ioc->spi_data.minSyncFactor;
+ spi_max_width(starget) = ioc->spi_data.maxBusWidth;
+ }
+ spi_max_offset(starget) = ioc->spi_data.maxSyncOffset;
+
+ spi_offset(starget) = 0;
+ spi_period(starget) = 0xFF;
+ mptspi_write_width(starget, 0);
+
+ return 0;
+}
+
+static void
+mptspi_target_destroy(struct scsi_target *starget)
+{
+	kfree(starget->hostdata);
+	starget->hostdata = NULL;
+}
+
+/**
+ * mptspi_print_write_nego - print the negotiation parameters being written
+ * @hd: Pointer to a SCSI HOST structure
+ * @starget: SCSI target
+ * @ii: negotiation parameters
+ *
+ */
+static void
+mptspi_print_write_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii)
+{
+ ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d Requested = 0x%08x"
+ " ( %s factor = 0x%02x @ offset = 0x%02x %s%s%s%s%s%s%s%s)\n",
+ hd->ioc->name, starget->id, ii,
+ ii & MPI_SCSIDEVPAGE0_NP_WIDE ? "Wide ": "",
+ ((ii >> 8) & 0xFF), ((ii >> 16) & 0xFF),
+ ii & MPI_SCSIDEVPAGE0_NP_IU ? "IU ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_DT ? "DT ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_QAS ? "QAS ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_HOLD_MCS ? "HOLDMCS ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_WR_FLOW ? "WRFLOW ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_RD_STRM ? "RDSTRM ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_RTI ? "RTI ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? "PCOMP ": ""));
+}
+
+/**
+ * mptspi_print_read_nego - print the negotiation parameters read back
+ * @hd: Pointer to a SCSI HOST structure
+ * @starget: SCSI target
+ * @ii: negotiation parameters
+ *
+ */
+static void
+mptspi_print_read_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii)
+{
+ ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d Read = 0x%08x"
+ " ( %s factor = 0x%02x @ offset = 0x%02x %s%s%s%s%s%s%s%s)\n",
+ hd->ioc->name, starget->id, ii,
+ ii & MPI_SCSIDEVPAGE0_NP_WIDE ? "Wide ": "",
+ ((ii >> 8) & 0xFF), ((ii >> 16) & 0xFF),
+ ii & MPI_SCSIDEVPAGE0_NP_IU ? "IU ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_DT ? "DT ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_QAS ? "QAS ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_HOLD_MCS ? "HOLDMCS ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_WR_FLOW ? "WRFLOW ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_RD_STRM ? "RDSTRM ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_RTI ? "RTI ": "",
+ ii & MPI_SCSIDEVPAGE0_NP_PCOMP_EN ? "PCOMP ": ""));
+}
+
+static int mptspi_read_spi_device_pg0(struct scsi_target *starget,
+ struct _CONFIG_PAGE_SCSI_DEVICE_0 *pass_pg0)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct _MPT_SCSI_HOST *hd = shost_priv(shost);
+ struct _MPT_ADAPTER *ioc = hd->ioc;
+ struct _CONFIG_PAGE_SCSI_DEVICE_0 *spi_dev_pg0;
+ dma_addr_t spi_dev_pg0_dma;
+ int size;
+ struct _x_config_parms cfg;
+ struct _CONFIG_PAGE_HEADER hdr;
+ int err = -EBUSY;
+
+ /* No SPI parameters for RAID devices */
+ if (starget->channel == 0 &&
+ mptspi_is_raid(hd, starget->id))
+ return -1;
+
+ size = ioc->spi_data.sdp0length * 4;
+ /*
+ if (ioc->spi_data.sdp0length & 1)
+ size += size + 4;
+ size += 2048;
+ */
+
+ spi_dev_pg0 = dma_alloc_coherent(&ioc->pcidev->dev, size, &spi_dev_pg0_dma, GFP_KERNEL);
+ if (spi_dev_pg0 == NULL) {
+ starget_printk(KERN_ERR, starget, MYIOC_s_FMT
+ "dma_alloc_coherent for parameters failed\n", ioc->name);
+ return -EINVAL;
+ }
+
+ memset(&hdr, 0, sizeof(hdr));
+
+ hdr.PageVersion = ioc->spi_data.sdp0version;
+ hdr.PageLength = ioc->spi_data.sdp0length;
+ hdr.PageNumber = 0;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+
+ memset(&cfg, 0, sizeof(cfg));
+
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = spi_dev_pg0_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ cfg.dir = 0;
+ cfg.pageAddr = starget->id;
+ cfg.timeout = 60;
+
+ if (mpt_config(ioc, &cfg)) {
+ starget_printk(KERN_ERR, starget, MYIOC_s_FMT "mpt_config failed\n", ioc->name);
+ goto out_free;
+ }
+ err = 0;
+ memcpy(pass_pg0, spi_dev_pg0, size);
+
+ mptspi_print_read_nego(hd, starget, le32_to_cpu(spi_dev_pg0->NegotiatedParameters));
+
+ out_free:
+ dma_free_coherent(&ioc->pcidev->dev, size, spi_dev_pg0, spi_dev_pg0_dma);
+ return err;
+}
+
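+/*
+ * mptspi_getRP - pack the SPI transport class settings for @starget into
+ * the 32-bit RequestedParameters word used in SCSI Device Page 1.
+ */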
+static u32 mptspi_getRP(struct scsi_target *starget)
+{
+ u32 nego = 0;
+
+ nego |= spi_iu(starget) ? MPI_SCSIDEVPAGE1_RP_IU : 0;
+ nego |= spi_dt(starget) ? MPI_SCSIDEVPAGE1_RP_DT : 0;
+ nego |= spi_qas(starget) ? MPI_SCSIDEVPAGE1_RP_QAS : 0;
+ nego |= spi_hold_mcs(starget) ? MPI_SCSIDEVPAGE1_RP_HOLD_MCS : 0;
+ nego |= spi_wr_flow(starget) ? MPI_SCSIDEVPAGE1_RP_WR_FLOW : 0;
+ nego |= spi_rd_strm(starget) ? MPI_SCSIDEVPAGE1_RP_RD_STRM : 0;
+ nego |= spi_rti(starget) ? MPI_SCSIDEVPAGE1_RP_RTI : 0;
+ nego |= spi_pcomp_en(starget) ? MPI_SCSIDEVPAGE1_RP_PCOMP_EN : 0;
+
+ nego |= (spi_period(starget) << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD) & MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
+ nego |= (spi_offset(starget) << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET) & MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
+ nego |= spi_width(starget) ? MPI_SCSIDEVPAGE1_RP_WIDE : 0;
+
+ return nego;
+}
+
+static void mptspi_read_parameters(struct scsi_target *starget)
+{
+ int nego;
+ struct _CONFIG_PAGE_SCSI_DEVICE_0 spi_dev_pg0;
+
+ mptspi_read_spi_device_pg0(starget, &spi_dev_pg0);
+
+ nego = le32_to_cpu(spi_dev_pg0.NegotiatedParameters);
+
+ spi_iu(starget) = (nego & MPI_SCSIDEVPAGE0_NP_IU) ? 1 : 0;
+ spi_dt(starget) = (nego & MPI_SCSIDEVPAGE0_NP_DT) ? 1 : 0;
+ spi_qas(starget) = (nego & MPI_SCSIDEVPAGE0_NP_QAS) ? 1 : 0;
+ spi_wr_flow(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WR_FLOW) ? 1 : 0;
+ spi_rd_strm(starget) = (nego & MPI_SCSIDEVPAGE0_NP_RD_STRM) ? 1 : 0;
+ spi_rti(starget) = (nego & MPI_SCSIDEVPAGE0_NP_RTI) ? 1 : 0;
+ spi_pcomp_en(starget) = (nego & MPI_SCSIDEVPAGE0_NP_PCOMP_EN) ? 1 : 0;
+ spi_hold_mcs(starget) = (nego & MPI_SCSIDEVPAGE0_NP_HOLD_MCS) ? 1 : 0;
+ spi_period(starget) = (nego & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
+ spi_offset(starget) = (nego & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
+ spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
+}
+
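+/*
+ * mptscsih_quiesce_raid - issue a RAID action to quiesce (@quiesce != 0) or
+ * re-enable (@quiesce == 0) physical I/O to a volume, so that domain
+ * validation can run safely against its hidden components.
+ */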
+int
+mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
+{
+ MPT_ADAPTER *ioc = hd->ioc;
+ MpiRaidActionRequest_t *pReq;
+ MPT_FRAME_HDR *mf;
+ int ret;
+ unsigned long timeleft;
+
+ mutex_lock(&ioc->internal_cmds.mutex);
+
+ /* Get and Populate a free Frame
+ */
+ if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
+ dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
+ "%s: no msg frames!\n", ioc->name, __func__));
+ ret = -EAGAIN;
+ goto out;
+ }
+ pReq = (MpiRaidActionRequest_t *)mf;
+ if (quiesce)
+ pReq->Action = MPI_RAID_ACTION_QUIESCE_PHYS_IO;
+ else
+ pReq->Action = MPI_RAID_ACTION_ENABLE_PHYS_IO;
+ pReq->Reserved1 = 0;
+ pReq->ChainOffset = 0;
+ pReq->Function = MPI_FUNCTION_RAID_ACTION;
+ pReq->VolumeID = id;
+ pReq->VolumeBus = channel;
+ pReq->PhysDiskNum = 0;
+ pReq->MsgFlags = 0;
+ pReq->Reserved2 = 0;
+ pReq->ActionDataWord = 0; /* Reserved for this action */
+
+ ioc->add_sge((char *)&pReq->ActionDataSGE,
+ MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
+
+ ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
+ ioc->name, pReq->Action, channel, id));
+
+ INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
+ mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
+ timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
+ if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+ ret = -ETIME;
+ dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
+ ioc->name, __func__));
+ if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+ goto out;
+ if (!timeleft) {
+ printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+ ioc->name, __func__);
+ mpt_HardResetHandler(ioc, CAN_SLEEP);
+ mpt_free_msg_frame(ioc, mf);
+ }
+ goto out;
+ }
+
+ ret = ioc->internal_cmds.completion_code;
+
+ out:
+ CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+ mutex_unlock(&ioc->internal_cmds.mutex);
+ return ret;
+}
+
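+/*
+ * mptspi_dv_device - run SPI domain validation on @sdev; hidden RAID
+ * components (channel 1) have the volume's physical I/O quiesced for the
+ * duration of the DV pass.
+ */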
+static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
+ struct scsi_device *sdev)
+{
+ VirtTarget *vtarget = scsi_target(sdev)->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ /* no DV on RAID devices */
+ if (sdev->channel == 0 &&
+ mptspi_is_raid(hd, sdev->id))
+ return;
+
+ /* If this is a piece of a RAID, then quiesce first */
+ if (sdev->channel == 1 &&
+ mptscsih_quiesce_raid(hd, 1, vtarget->channel, vtarget->id) < 0) {
+ starget_printk(KERN_ERR, scsi_target(sdev), MYIOC_s_FMT
+ "Integrated RAID quiesce failed\n", ioc->name);
+ return;
+ }
+
+ hd->spi_pending |= (1 << sdev->id);
+ spi_dv_device(sdev);
+ hd->spi_pending &= ~(1 << sdev->id);
+
+ if (sdev->channel == 1 &&
+ mptscsih_quiesce_raid(hd, 0, vtarget->channel, vtarget->id) < 0)
+ starget_printk(KERN_ERR, scsi_target(sdev), MYIOC_s_FMT
+ "Integrated RAID resume failed\n", ioc->name);
+
+ mptspi_read_parameters(sdev->sdev_target);
+ spi_display_xfer_agreement(sdev->sdev_target);
+ mptspi_read_parameters(sdev->sdev_target);
+}
+
+static int mptspi_slave_alloc(struct scsi_device *sdev)
+{
+ MPT_SCSI_HOST *hd = shost_priv(sdev->host);
+ VirtTarget *vtarget;
+ VirtDevice *vdevice;
+ struct scsi_target *starget;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ if (sdev->channel == 1 &&
+ mptscsih_is_phys_disk(ioc, 0, sdev->id) == 0)
+ return -ENXIO;
+
+ vdevice = kzalloc(sizeof(VirtDevice), GFP_KERNEL);
+ if (!vdevice) {
+		printk(MYIOC_s_ERR_FMT "slave_alloc kzalloc(%zd) FAILED!\n",
+ ioc->name, sizeof(VirtDevice));
+ return -ENOMEM;
+ }
+
+ vdevice->lun = sdev->lun;
+ sdev->hostdata = vdevice;
+
+ starget = scsi_target(sdev);
+ vtarget = starget->hostdata;
+ vdevice->vtarget = vtarget;
+ vtarget->num_luns++;
+
+ if (sdev->channel == 1)
+ sdev->no_uld_attach = 1;
+
+ return 0;
+}
+
+static int mptspi_slave_configure(struct scsi_device *sdev)
+{
+ struct _MPT_SCSI_HOST *hd = shost_priv(sdev->host);
+ VirtTarget *vtarget = scsi_target(sdev)->hostdata;
+ int ret;
+
+ mptspi_initTarget(hd, vtarget, sdev);
+
+ ret = mptscsih_slave_configure(sdev);
+
+ if (ret)
+ return ret;
+
+ ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d min_period=0x%02x"
+ " max_offset=0x%02x max_width=%d\n", hd->ioc->name,
+ sdev->id, spi_min_period(scsi_target(sdev)),
+ spi_max_offset(scsi_target(sdev)),
+ spi_max_width(scsi_target(sdev))));
+
+ if ((sdev->channel == 1 ||
+ !(mptspi_is_raid(hd, sdev->id))) &&
+ !spi_initial_dv(sdev->sdev_target))
+ mptspi_dv_device(hd, sdev);
+
+ return 0;
+}
+
+static int
+mptspi_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+ struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host);
+ VirtDevice *vdevice = SCpnt->device->hostdata;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ if (!vdevice || !vdevice->vtarget) {
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ return 0;
+ }
+
+ if (SCpnt->device->channel == 1 &&
+ mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) {
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ return 0;
+ }
+
+ if (spi_dv_pending(scsi_target(SCpnt->device)))
+ ddvprintk(ioc, scsi_print_command(SCpnt));
+
+ return mptscsih_qcmd(SCpnt,done);
+}
+
+static DEF_SCSI_QCMD(mptspi_qcmd)
+
+static void mptspi_slave_destroy(struct scsi_device *sdev)
+{
+ struct scsi_target *starget = scsi_target(sdev);
+ VirtTarget *vtarget = starget->hostdata;
+ VirtDevice *vdevice = sdev->hostdata;
+
+ /* Will this be the last lun on a non-raid device? */
+ if (vtarget->num_luns == 1 && vdevice->configured_lun) {
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
+
+ /* Async Narrow */
+ pg1.RequestedParameters = 0;
+ pg1.Reserved = 0;
+ pg1.Configuration = 0;
+
+ mptspi_write_spi_device_pg1(starget, &pg1);
+ }
+
+ mptscsih_slave_destroy(sdev);
+}
+
+static struct scsi_host_template mptspi_driver_template = {
+ .module = THIS_MODULE,
+ .proc_name = "mptspi",
+ .proc_info = mptscsih_proc_info,
+ .name = "MPT SPI Host",
+ .info = mptscsih_info,
+ .queuecommand = mptspi_qcmd,
+ .target_alloc = mptspi_target_alloc,
+ .slave_alloc = mptspi_slave_alloc,
+ .slave_configure = mptspi_slave_configure,
+ .target_destroy = mptspi_target_destroy,
+ .slave_destroy = mptspi_slave_destroy,
+ .change_queue_depth = mptscsih_change_queue_depth,
+ .eh_abort_handler = mptscsih_abort,
+ .eh_device_reset_handler = mptscsih_dev_reset,
+ .eh_bus_reset_handler = mptscsih_bus_reset,
+ .eh_host_reset_handler = mptscsih_host_reset,
+ .bios_param = mptscsih_bios_param,
+ .can_queue = MPT_SCSI_CAN_QUEUE,
+ .this_id = -1,
+ .sg_tablesize = MPT_SCSI_SG_DEPTH,
+ .max_sectors = 8192,
+ .cmd_per_lun = 7,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = mptscsih_host_attrs,
+};
+
+static int mptspi_write_spi_device_pg1(struct scsi_target *starget,
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 *pass_pg1)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct _MPT_SCSI_HOST *hd = shost_priv(shost);
+ struct _MPT_ADAPTER *ioc = hd->ioc;
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 *pg1;
+ dma_addr_t pg1_dma;
+ int size;
+ struct _x_config_parms cfg;
+ struct _CONFIG_PAGE_HEADER hdr;
+ int err = -EBUSY;
+ u32 nego_parms;
+ u32 period;
+ struct scsi_device *sdev;
+ int i;
+
+ /* don't allow updating nego parameters on RAID devices */
+ if (starget->channel == 0 &&
+ mptspi_is_raid(hd, starget->id))
+ return -1;
+
+ size = ioc->spi_data.sdp1length * 4;
+
+ pg1 = dma_alloc_coherent(&ioc->pcidev->dev, size, &pg1_dma, GFP_KERNEL);
+ if (pg1 == NULL) {
+ starget_printk(KERN_ERR, starget, MYIOC_s_FMT
+ "dma_alloc_coherent for parameters failed\n", ioc->name);
+ return -EINVAL;
+ }
+
+ memset(&hdr, 0, sizeof(hdr));
+
+ hdr.PageVersion = ioc->spi_data.sdp1version;
+ hdr.PageLength = ioc->spi_data.sdp1length;
+ hdr.PageNumber = 1;
+ hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+
+ memset(&cfg, 0, sizeof(cfg));
+
+ cfg.cfghdr.hdr = &hdr;
+ cfg.physAddr = pg1_dma;
+ cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ cfg.dir = 1;
+ cfg.pageAddr = starget->id;
+
+ memcpy(pg1, pass_pg1, size);
+
+ pg1->Header.PageVersion = hdr.PageVersion;
+ pg1->Header.PageLength = hdr.PageLength;
+ pg1->Header.PageNumber = hdr.PageNumber;
+ pg1->Header.PageType = hdr.PageType;
+
+ nego_parms = le32_to_cpu(pg1->RequestedParameters);
+ period = (nego_parms & MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK) >>
+ MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
+ if (period == 8) {
+ /* Turn on inline data padding for TAPE when running U320 */
+ for (i = 0 ; i < 16; i++) {
+ sdev = scsi_device_lookup_by_target(starget, i);
+ if (sdev && sdev->type == TYPE_TAPE) {
+ sdev_printk(KERN_DEBUG, sdev, MYIOC_s_FMT
+ "IDP:ON\n", ioc->name);
+ nego_parms |= MPI_SCSIDEVPAGE1_RP_IDP;
+ pg1->RequestedParameters =
+ cpu_to_le32(nego_parms);
+ break;
+ }
+ }
+ }
+
+ mptspi_print_write_nego(hd, starget, le32_to_cpu(pg1->RequestedParameters));
+
+ if (mpt_config(ioc, &cfg)) {
+ starget_printk(KERN_ERR, starget, MYIOC_s_FMT
+ "mpt_config failed\n", ioc->name);
+ goto out_free;
+ }
+ err = 0;
+
+ out_free:
+ dma_free_coherent(&ioc->pcidev->dev, size, pg1, pg1_dma);
+ return err;
+}
+
+static void mptspi_write_offset(struct scsi_target *starget, int offset)
+{
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
+ u32 nego;
+
+ if (offset < 0)
+ offset = 0;
+
+ if (offset > 255)
+ offset = 255;
+
+ if (spi_offset(starget) == -1)
+ mptspi_read_parameters(starget);
+
+ spi_offset(starget) = offset;
+
+ nego = mptspi_getRP(starget);
+
+ pg1.RequestedParameters = cpu_to_le32(nego);
+ pg1.Reserved = 0;
+ pg1.Configuration = 0;
+
+ mptspi_write_spi_device_pg1(starget, &pg1);
+}
+
+static void mptspi_write_period(struct scsi_target *starget, int period)
+{
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
+ u32 nego;
+
+ if (period < 8)
+ period = 8;
+
+ if (period > 255)
+ period = 255;
+
+ if (spi_period(starget) == -1)
+ mptspi_read_parameters(starget);
+
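+	/* The fastest factors imply extra protocol options: 8 (Ultra320)
+	 * needs both IU and DT, 9 (Ultra160) needs DT.
+	 */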
+ if (period == 8) {
+ spi_iu(starget) = 1;
+ spi_dt(starget) = 1;
+ } else if (period == 9) {
+ spi_dt(starget) = 1;
+ }
+
+ spi_period(starget) = period;
+
+ nego = mptspi_getRP(starget);
+
+ pg1.RequestedParameters = cpu_to_le32(nego);
+ pg1.Reserved = 0;
+ pg1.Configuration = 0;
+
+ mptspi_write_spi_device_pg1(starget, &pg1);
+}
+
+static void mptspi_write_dt(struct scsi_target *starget, int dt)
+{
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
+ u32 nego;
+
+ if (spi_period(starget) == -1)
+ mptspi_read_parameters(starget);
+
+ if (!dt && spi_period(starget) < 10)
+ spi_period(starget) = 10;
+
+ spi_dt(starget) = dt;
+
+ nego = mptspi_getRP(starget);
+
+
+ pg1.RequestedParameters = cpu_to_le32(nego);
+ pg1.Reserved = 0;
+ pg1.Configuration = 0;
+
+ mptspi_write_spi_device_pg1(starget, &pg1);
+}
+
+static void mptspi_write_iu(struct scsi_target *starget, int iu)
+{
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
+ u32 nego;
+
+ if (spi_period(starget) == -1)
+ mptspi_read_parameters(starget);
+
+ if (!iu && spi_period(starget) < 9)
+ spi_period(starget) = 9;
+
+ spi_iu(starget) = iu;
+
+ nego = mptspi_getRP(starget);
+
+ pg1.RequestedParameters = cpu_to_le32(nego);
+ pg1.Reserved = 0;
+ pg1.Configuration = 0;
+
+ mptspi_write_spi_device_pg1(starget, &pg1);
+}
+
+#define MPTSPI_SIMPLE_TRANSPORT_PARM(parm) \
+static void mptspi_write_##parm(struct scsi_target *starget, int parm)\
+{ \
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1; \
+ u32 nego; \
+ \
+ spi_##parm(starget) = parm; \
+ \
+ nego = mptspi_getRP(starget); \
+ \
+ pg1.RequestedParameters = cpu_to_le32(nego); \
+ pg1.Reserved = 0; \
+ pg1.Configuration = 0; \
+ \
+ mptspi_write_spi_device_pg1(starget, &pg1); \
+}
+
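+/*
+ * Each instantiation below generates a mptspi_write_<parm>() setter, e.g.
+ * MPTSPI_SIMPLE_TRANSPORT_PARM(rd_strm) produces mptspi_write_rd_strm(),
+ * which records the new value via spi_rd_strm() and pushes the refreshed
+ * RequestedParameters word out through SCSI Device Page 1.
+ */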
+MPTSPI_SIMPLE_TRANSPORT_PARM(rd_strm)
+MPTSPI_SIMPLE_TRANSPORT_PARM(wr_flow)
+MPTSPI_SIMPLE_TRANSPORT_PARM(rti)
+MPTSPI_SIMPLE_TRANSPORT_PARM(hold_mcs)
+MPTSPI_SIMPLE_TRANSPORT_PARM(pcomp_en)
+
+static void mptspi_write_qas(struct scsi_target *starget, int qas)
+{
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct _MPT_SCSI_HOST *hd = shost_priv(shost);
+ VirtTarget *vtarget = starget->hostdata;
+ u32 nego;
+
+ if ((vtarget->negoFlags & MPT_TARGET_NO_NEGO_QAS) ||
+ hd->ioc->spi_data.noQas)
+ spi_qas(starget) = 0;
+ else
+ spi_qas(starget) = qas;
+
+ nego = mptspi_getRP(starget);
+
+ pg1.RequestedParameters = cpu_to_le32(nego);
+ pg1.Reserved = 0;
+ pg1.Configuration = 0;
+
+ mptspi_write_spi_device_pg1(starget, &pg1);
+}
+
+static void mptspi_write_width(struct scsi_target *starget, int width)
+{
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
+ u32 nego;
+
+ if (!width) {
+ spi_dt(starget) = 0;
+ if (spi_period(starget) < 10)
+ spi_period(starget) = 10;
+ }
+
+ spi_width(starget) = width;
+
+ nego = mptspi_getRP(starget);
+
+ pg1.RequestedParameters = cpu_to_le32(nego);
+ pg1.Reserved = 0;
+ pg1.Configuration = 0;
+
+ mptspi_write_spi_device_pg1(starget, &pg1);
+}
+
+struct work_queue_wrapper {
+ struct work_struct work;
+ struct _MPT_SCSI_HOST *hd;
+ int disk;
+};
+
+static void mpt_work_wrapper(struct work_struct *work)
+{
+ struct work_queue_wrapper *wqw =
+ container_of(work, struct work_queue_wrapper, work);
+ struct _MPT_SCSI_HOST *hd = wqw->hd;
+ MPT_ADAPTER *ioc = hd->ioc;
+ struct Scsi_Host *shost = ioc->sh;
+ struct scsi_device *sdev;
+ int disk = wqw->disk;
+ struct _CONFIG_PAGE_IOC_3 *pg3;
+
+ kfree(wqw);
+
+ mpt_findImVolumes(ioc);
+ pg3 = ioc->raid_data.pIocPg3;
+ if (!pg3)
+ return;
+
+ shost_for_each_device(sdev,shost) {
+ struct scsi_target *starget = scsi_target(sdev);
+ VirtTarget *vtarget = starget->hostdata;
+
+ /* only want to search RAID components */
+ if (sdev->channel != 1)
+ continue;
+
+ /* The id is the raid PhysDiskNum, even if
+ * starget->id is the actual target address */
+ if(vtarget->id != disk)
+ continue;
+
+ starget_printk(KERN_INFO, vtarget->starget, MYIOC_s_FMT
+ "Integrated RAID requests DV of new device\n", ioc->name);
+ mptspi_dv_device(hd, sdev);
+ }
+ shost_printk(KERN_INFO, shost, MYIOC_s_FMT
+ "Integrated RAID detects new device %d\n", ioc->name, disk);
+ scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, 1);
+}
+
+
+static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk)
+{
+ struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC);
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ if (!wqw) {
+ shost_printk(KERN_ERR, ioc->sh, MYIOC_s_FMT
+ "Failed to act on RAID event for physical disk %d\n",
+ ioc->name, disk);
+ return;
+ }
+ INIT_WORK(&wqw->work, mpt_work_wrapper);
+ wqw->hd = hd;
+ wqw->disk = disk;
+
+ schedule_work(&wqw->work);
+}
+
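+/*
+ * mptspi_event_process - SPI-specific event hook: when integrated RAID
+ * reports that a new physical disk needs domain validation, schedule it
+ * before handing the event to the generic mptscsih handler.
+ */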
+static int
+mptspi_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
+{
+ u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
+ struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+
+ if (ioc->bus_type != SPI)
+ return 0;
+
+ if (hd && event == MPI_EVENT_INTEGRATED_RAID) {
+ int reason
+ = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
+
+ if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
+ int disk = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24;
+ mpt_dv_raid(hd, disk);
+ }
+ }
+ return mptscsih_event_process(ioc, pEvReply);
+}
+
+static int
+mptspi_deny_binding(struct scsi_target *starget)
+{
+ struct _MPT_SCSI_HOST *hd =
+ (struct _MPT_SCSI_HOST *)dev_to_shost(starget->dev.parent)->hostdata;
+ return ((mptspi_is_raid(hd, starget->id)) &&
+ starget->channel == 0) ? 1 : 0;
+}
+
+static struct spi_function_template mptspi_transport_functions = {
+ .get_offset = mptspi_read_parameters,
+ .set_offset = mptspi_write_offset,
+ .show_offset = 1,
+ .get_period = mptspi_read_parameters,
+ .set_period = mptspi_write_period,
+ .show_period = 1,
+ .get_width = mptspi_read_parameters,
+ .set_width = mptspi_write_width,
+ .show_width = 1,
+ .get_iu = mptspi_read_parameters,
+ .set_iu = mptspi_write_iu,
+ .show_iu = 1,
+ .get_dt = mptspi_read_parameters,
+ .set_dt = mptspi_write_dt,
+ .show_dt = 1,
+ .get_qas = mptspi_read_parameters,
+ .set_qas = mptspi_write_qas,
+ .show_qas = 1,
+ .get_wr_flow = mptspi_read_parameters,
+ .set_wr_flow = mptspi_write_wr_flow,
+ .show_wr_flow = 1,
+ .get_rd_strm = mptspi_read_parameters,
+ .set_rd_strm = mptspi_write_rd_strm,
+ .show_rd_strm = 1,
+ .get_rti = mptspi_read_parameters,
+ .set_rti = mptspi_write_rti,
+ .show_rti = 1,
+ .get_pcomp_en = mptspi_read_parameters,
+ .set_pcomp_en = mptspi_write_pcomp_en,
+ .show_pcomp_en = 1,
+ .get_hold_mcs = mptspi_read_parameters,
+ .set_hold_mcs = mptspi_write_hold_mcs,
+ .show_hold_mcs = 1,
+ .deny_binding = mptspi_deny_binding,
+};
+
+/****************************************************************************
+ * Supported hardware
+ */
+
+static struct pci_device_id mptspi_pci_table[] = {
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_53C1030,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_ATTO, MPI_MANUFACTPAGE_DEVID_53C1030,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_53C1035,
+ PCI_ANY_ID, PCI_ANY_ID },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, mptspi_pci_table);
+
+
+/*
+ * renegotiate for a given target
+ */
+static void
+mptspi_dv_renegotiate_work(struct work_struct *work)
+{
+ struct work_queue_wrapper *wqw =
+ container_of(work, struct work_queue_wrapper, work);
+ struct _MPT_SCSI_HOST *hd = wqw->hd;
+ struct scsi_device *sdev;
+ struct scsi_target *starget;
+ struct _CONFIG_PAGE_SCSI_DEVICE_1 pg1;
+ u32 nego;
+ MPT_ADAPTER *ioc = hd->ioc;
+
+ kfree(wqw);
+
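+	/* If any targets still have DV outstanding, just re-write the
+	 * currently negotiated parameters for the others; otherwise re-run
+	 * domain validation on every device.
+	 */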
+ if (hd->spi_pending) {
+ shost_for_each_device(sdev, ioc->sh) {
+ if (hd->spi_pending & (1 << sdev->id))
+ continue;
+ starget = scsi_target(sdev);
+ nego = mptspi_getRP(starget);
+ pg1.RequestedParameters = cpu_to_le32(nego);
+ pg1.Reserved = 0;
+ pg1.Configuration = 0;
+ mptspi_write_spi_device_pg1(starget, &pg1);
+ }
+ } else {
+ shost_for_each_device(sdev, ioc->sh)
+ mptspi_dv_device(hd, sdev);
+ }
+}
+
+static void
+mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd)
+{
+ struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC);
+
+ if (!wqw)
+ return;
+
+ INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work);
+ wqw->hd = hd;
+
+ schedule_work(&wqw->work);
+}
+
+/*
+ * spi module reset handler
+ */
+static int
+mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+ int rc;
+
+ rc = mptscsih_ioc_reset(ioc, reset_phase);
+ if ((ioc->bus_type != SPI) || (!rc))
+ return rc;
+
+	/* Only try to renegotiate if we're properly set up; if we got an
+	 * IOC fault on bringup, ioc->sh will be NULL. */
+ if (reset_phase == MPT_IOC_POST_RESET &&
+ ioc->sh) {
+ struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+
+ mptspi_dv_renegotiate(hd);
+ }
+
+ return rc;
+}
+
+#ifdef CONFIG_PM
+/*
+ * spi module resume handler
+ */
+static int
+mptspi_resume(struct pci_dev *pdev)
+{
+ MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+ struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+ int rc;
+
+ rc = mptscsih_resume(pdev);
+ mptspi_dv_renegotiate(hd);
+
+ return rc;
+}
+#endif
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * mptspi_probe - Installs scsi devices per bus.
+ * @pdev: Pointer to pci_dev structure
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct Scsi_Host *sh;
+ MPT_SCSI_HOST *hd;
+ MPT_ADAPTER *ioc;
+ unsigned long flags;
+ int ii;
+ int numSGE = 0;
+ int scale;
+ int ioc_cap;
+ int error=0;
+ int r;
+
+ if ((r = mpt_attach(pdev,id)) != 0)
+ return r;
+
+ ioc = pci_get_drvdata(pdev);
+ ioc->DoneCtx = mptspiDoneCtx;
+ ioc->TaskCtx = mptspiTaskCtx;
+ ioc->InternalCtx = mptspiInternalCtx;
+
+ /* Added sanity check on readiness of the MPT adapter.
+ */
+ if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
+ printk(MYIOC_s_WARN_FMT
+ "Skipping because it's not operational!\n",
+ ioc->name);
+ error = -ENODEV;
+ goto out_mptspi_probe;
+ }
+
+ if (!ioc->active) {
+ printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
+ ioc->name);
+ error = -ENODEV;
+ goto out_mptspi_probe;
+ }
+
+ /* Sanity check - ensure at least 1 port is INITIATOR capable
+ */
+ ioc_cap = 0;
+ for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
+ if (ioc->pfacts[ii].ProtocolFlags &
+ MPI_PORTFACTS_PROTOCOL_INITIATOR)
+ ioc_cap ++;
+ }
+
+ if (!ioc_cap) {
+ printk(MYIOC_s_WARN_FMT
+ "Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
+ ioc->name, ioc);
+ return 0;
+ }
+
+ sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST));
+
+ if (!sh) {
+ printk(MYIOC_s_WARN_FMT
+ "Unable to register controller with SCSI subsystem\n",
+ ioc->name);
+ error = -1;
+ goto out_mptspi_probe;
+ }
+
+ spin_lock_irqsave(&ioc->FreeQlock, flags);
+
+ /* Attach the SCSI Host to the IOC structure
+ */
+ ioc->sh = sh;
+
+ sh->io_port = 0;
+ sh->n_io_port = 0;
+ sh->irq = 0;
+
+ /* set 16 byte cdb's */
+ sh->max_cmd_len = 16;
+
+ /* Yikes! This is important!
+ * Otherwise, by default, linux
+ * only scans target IDs 0-7!
+ * pfactsN->MaxDevices unreliable
+ * (not supported in early
+ * versions of the FW).
+ * max_id = 1 + actual max id,
+ * max_lun = 1 + actual last lun,
+ * see hosts.h :o(
+ */
+ sh->max_id = ioc->devices_per_bus;
+
+ sh->max_lun = MPT_LAST_LUN + 1;
+ /*
+ * If RAID Firmware Detected, setup virtual channel
+ */
+ if (ioc->ir_firmware)
+ sh->max_channel = 1;
+ else
+ sh->max_channel = 0;
+ sh->this_id = ioc->pfacts[0].PortSCSIID;
+
+ /* Required entry.
+ */
+ sh->unique_id = ioc->id;
+
+ /* Verify that we won't exceed the maximum
+ * number of chain buffers
+ * We can optimize: ZZ = req_sz/sizeof(SGE)
+ * For 32bit SGE's:
+ * numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
+ * + (req_sz - 64)/sizeof(SGE)
+ * A slightly different algorithm is required for
+ * 64bit SGEs.
+ */
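+	/* Worked example with hypothetical values: req_sz = 128, 64-bit
+	 * SGEs of 12 bytes (scale = 128/12 = 10) and MaxChainDepth = 16
+	 * give, via the 64-bit branch below,
+	 *   numSGE = (10-1)*(16-1) + 10 + (128-60)/12 = 135 + 10 + 5 = 150.
+	 */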
+ scale = ioc->req_sz/ioc->SGE_size;
+ if (ioc->sg_addr_size == sizeof(u64)) {
+ numSGE = (scale - 1) *
+ (ioc->facts.MaxChainDepth-1) + scale +
+ (ioc->req_sz - 60) / ioc->SGE_size;
+ } else {
+ numSGE = 1 + (scale - 1) *
+ (ioc->facts.MaxChainDepth-1) + scale +
+ (ioc->req_sz - 64) / ioc->SGE_size;
+ }
+
+ if (numSGE < sh->sg_tablesize) {
+ /* Reset this value */
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "Resetting sg_tablesize to %d from %d\n",
+ ioc->name, numSGE, sh->sg_tablesize));
+ sh->sg_tablesize = numSGE;
+ }
+
+ spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+ hd = shost_priv(sh);
+ hd->ioc = ioc;
+
+ /* SCSI needs scsi_cmnd lookup table!
+ * (with size equal to req_depth*PtrSz!)
+ */
+ ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
+ if (!ioc->ScsiLookup) {
+ error = -ENOMEM;
+ goto out_mptspi_probe;
+ }
+ spin_lock_init(&ioc->scsi_lookup_lock);
+
+ dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
+ ioc->name, ioc->ScsiLookup));
+
+ ioc->spi_data.Saf_Te = mpt_saf_te;
+ ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+ "saf_te %x\n",
+ ioc->name,
+ mpt_saf_te));
+ ioc->spi_data.noQas = 0;
+
+ hd->last_queue_full = 0;
+ hd->spi_pending = 0;
+
+ /* Some versions of the firmware don't support page 0; without
+ * that we can't get the parameters */
+ if (ioc->spi_data.sdp0length != 0)
+ sh->transportt = mptspi_transport_template;
+
+ error = scsi_add_host (sh, &ioc->pcidev->dev);
+ if(error) {
+ dprintk(ioc, printk(MYIOC_s_ERR_FMT
+ "scsi_add_host failed\n", ioc->name));
+ goto out_mptspi_probe;
+ }
+
+ /*
+ * issue internal bus reset
+ */
+ if (ioc->spi_data.bus_reset)
+ mptscsih_IssueTaskMgmt(hd,
+ MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+ 0, 0, 0, 0, 5);
+
+ scsi_scan_host(sh);
+ return 0;
+
+out_mptspi_probe:
+
+ mptscsih_remove(pdev);
+ return error;
+}
+
+static struct pci_driver mptspi_driver = {
+ .name = "mptspi",
+ .id_table = mptspi_pci_table,
+ .probe = mptspi_probe,
+ .remove = __devexit_p(mptscsih_remove),
+ .shutdown = mptscsih_shutdown,
+#ifdef CONFIG_PM
+ .suspend = mptscsih_suspend,
+ .resume = mptspi_resume,
+#endif
+};
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptspi_init - Register MPT adapter(s) as SCSI host(s) with SCSI mid-layer.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int __init
+mptspi_init(void)
+{
+ int error;
+
+ show_mptmod_ver(my_NAME, my_VERSION);
+
+ mptspi_transport_template = spi_attach_transport(&mptspi_transport_functions);
+ if (!mptspi_transport_template)
+ return -ENODEV;
+
+ mptspiDoneCtx = mpt_register(mptscsih_io_done, MPTSPI_DRIVER,
+ "mptscsih_io_done");
+ mptspiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSPI_DRIVER,
+ "mptscsih_taskmgmt_complete");
+ mptspiInternalCtx = mpt_register(mptscsih_scandv_complete,
+ MPTSPI_DRIVER, "mptscsih_scandv_complete");
+
+ mpt_event_register(mptspiDoneCtx, mptspi_event_process);
+ mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset);
+
+ error = pci_register_driver(&mptspi_driver);
+ if (error)
+ spi_release_transport(mptspi_transport_template);
+
+ return error;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ * mptspi_exit - Unregisters MPT adapter(s)
+ */
+static void __exit
+mptspi_exit(void)
+{
+ pci_unregister_driver(&mptspi_driver);
+
+ mpt_reset_deregister(mptspiDoneCtx);
+ mpt_event_deregister(mptspiDoneCtx);
+
+ mpt_deregister(mptspiInternalCtx);
+ mpt_deregister(mptspiTaskCtx);
+ mpt_deregister(mptspiDoneCtx);
+ spi_release_transport(mptspi_transport_template);
+}
+
+module_init(mptspi_init);
+module_exit(mptspi_exit);
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
new file mode 100644
index 00000000..5afa0e39
--- /dev/null
+++ b/drivers/message/i2o/Kconfig
@@ -0,0 +1,121 @@
+
+menuconfig I2O
+ tristate "I2O device support"
+ depends on PCI
+ ---help---
+ The Intelligent Input/Output (I2O) architecture allows hardware
+ drivers to be split into two parts: an operating system specific
+	  module called the OSM and a hardware-specific module called the
+	  HDM. The OSM can talk to a whole range of HDMs, and ideally the
+	  HDMs are not OS dependent. This allows for the same HDM driver to
+ be used under different operating systems if the relevant OSM is in
+ place. In order for this to work, you need to have an I2O interface
+ adapter card in your computer. This card contains a special I/O
+ processor (IOP), thus allowing high speeds since the CPU does not
+ have to deal with I/O.
+
+ If you say Y here, you will get a choice of interface adapter
+ drivers and OSM's with the following questions.
+
+ To compile this support as a module, choose M here: the
+ modules will be called i2o_core.
+
+ If unsure, say N.
+
+if I2O
+
+config I2O_LCT_NOTIFY_ON_CHANGES
+ bool "Enable LCT notification"
+ default y
+ ---help---
+	  Only say N here if you have an I2O controller from SUN. The SUN
+	  firmware doesn't support LCT notification on changes. If this option
+	  is enabled on such a controller, the driver will hang in an endless
+ loop. On all other controllers say Y.
+
+ If unsure, say Y.
+
+config I2O_EXT_ADAPTEC
+ bool "Enable Adaptec extensions"
+ default y
+ ---help---
+ Say Y for support of raidutils for Adaptec I2O controllers. You also
+ have to say Y to "I2O Configuration support", "I2O SCSI OSM" below
+ and to "SCSI generic support" under "SCSI device configuration".
+
+config I2O_EXT_ADAPTEC_DMA64
+ bool "Enable 64-bit DMA"
+ depends on I2O_EXT_ADAPTEC && ( 64BIT || HIGHMEM64G )
+ default y
+ ---help---
+ Say Y for support of 64-bit DMA transfer mode on Adaptec I2O
+ controllers.
+ Note: You need at least firmware version 3709.
+
+config I2O_CONFIG
+ tristate "I2O Configuration support"
+ depends on VIRT_TO_BUS
+ ---help---
+ Say Y for support of the configuration interface for the I2O adapters.
+ If you have a RAID controller from Adaptec and you want to use the
+ raidutils to manage your RAID array, you have to say Y here.
+
+ To compile this support as a module, choose M here: the
+ module will be called i2o_config.
+
+ Note: If you want to use the new API you have to download the
+ i2o_config patch from http://i2o.shadowconnect.com/
+
+config I2O_CONFIG_OLD_IOCTL
+ bool "Enable ioctls (OBSOLETE)"
+ depends on I2O_CONFIG
+ default y
+ ---help---
+ Enables old ioctls.
+
+config I2O_BUS
+ tristate "I2O Bus Adapter OSM"
+ ---help---
+ Include support for the I2O Bus Adapter OSM. The Bus Adapter OSM
+ provides access to the busses on the I2O controller. The main purpose
+ is to rescan the bus to find new devices.
+
+ To compile this support as a module, choose M here: the
+ module will be called i2o_bus.
+
+config I2O_BLOCK
+ tristate "I2O Block OSM"
+ depends on BLOCK
+ ---help---
+ Include support for the I2O Block OSM. The Block OSM presents disk
+ and other structured block devices to the operating system. If you
+	  are using a RAID controller, you can access the array only through
+	  the Block OSM driver, but the individual disks can still be accessed
+	  through the SCSI OSM driver, for example to monitor them.
+
+ To compile this support as a module, choose M here: the
+ module will be called i2o_block.
+
+config I2O_SCSI
+ tristate "I2O SCSI OSM"
+ depends on SCSI
+ ---help---
+ Allows direct SCSI access to SCSI devices on a SCSI or FibreChannel
+ I2O controller. You can use both the SCSI and Block OSM together if
+ you wish. To access a RAID array, you must use the Block OSM driver.
+	  You can still use the SCSI OSM driver to monitor the individual disks.
+
+ To compile this support as a module, choose M here: the
+ module will be called i2o_scsi.
+
+config I2O_PROC
+ tristate "I2O /proc support"
+ ---help---
+ If you say Y here and to "/proc file system support", you will be
+ able to read I2O related information from the virtual directory
+ /proc/i2o.
+
+ To compile this support as a module, choose M here: the
+ module will be called i2o_proc.
+
+endif # I2O
diff --git a/drivers/message/i2o/Makefile b/drivers/message/i2o/Makefile
new file mode 100644
index 00000000..b0982dac
--- /dev/null
+++ b/drivers/message/i2o/Makefile
@@ -0,0 +1,16 @@
+#
+# Makefile for the kernel I2O OSM.
+#
+# Note : at this point, these files are compiled on all systems.
+# In the future, some of these should be built conditionally.
+#
+
+i2o_core-y += iop.o driver.o device.o debug.o pci.o exec-osm.o memory.o
+i2o_bus-y += bus-osm.o
+i2o_config-y += config-osm.o
+obj-$(CONFIG_I2O) += i2o_core.o
+obj-$(CONFIG_I2O_CONFIG)+= i2o_config.o
+obj-$(CONFIG_I2O_BUS) += i2o_bus.o
+obj-$(CONFIG_I2O_BLOCK) += i2o_block.o
+obj-$(CONFIG_I2O_SCSI) += i2o_scsi.o
+obj-$(CONFIG_I2O_PROC) += i2o_proc.o
diff --git a/drivers/message/i2o/README b/drivers/message/i2o/README
new file mode 100644
index 00000000..f072a8eb
--- /dev/null
+++ b/drivers/message/i2o/README
@@ -0,0 +1,98 @@
+
+ Linux I2O Support (c) Copyright 1999 Red Hat Software
+ and others.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License
+ as published by the Free Software Foundation; either version
+ 2 of the License, or (at your option) any later version.
+
+AUTHORS (so far)
+
+Alan Cox, Building Number Three Ltd.
+ Core code, SCSI and Block OSMs
+
+Steve Ralston, LSI Logic Corp.
+ Debugging SCSI and Block OSM
+
+Deepak Saxena, Intel Corp.
+ Various core/block extensions
+ /proc interface, bug fixes
+ Ioctl interfaces for control
+ Debugging LAN OSM
+
+Philip Rumpf
+ Fixed assorted dumb SMP locking bugs
+
+Juha Sievanen, University of Helsinki Finland
+ LAN OSM code
+ /proc interface to LAN class
+ Bug fixes
+ Core code extensions
+
+Auvo Häkkinen, University of Helsinki Finland
+ LAN OSM code
+ /Proc interface to LAN class
+ Bug fixes
+ Core code extensions
+
+Taneli Vähäkangas, University of Helsinki Finland
+ Fixes to i2o_config
+
+CREDITS
+
+ This work was made possible by
+
+Red Hat Software
+ Funding for the Building #3 part of the project
+
+Symbios Logic (Now LSI)
+ Host adapters, hints, known to work platforms when I hit
+ compatibility problems
+
+BoxHill Corporation
+ Loan of initial FibreChannel disk array used for development work.
+
+European Commission
+ Funding the work done by the University of Helsinki
+
+SysKonnect
+ Loan of FDDI and Gigabit Ethernet cards
+
+ASUSTeK
+ Loan of I2O motherboard
+
+STATUS:
+
+o The core setup works within limits.
+o The scsi layer seems to almost work.
+ I'm still chasing down the hang bug.
+o The block OSM is mostly functional
+o LAN OSM works with FDDI and Ethernet cards.
+
+TO DO:
+
+General:
+o Provide hidden address space if asked
+o Long term message flow control
+o PCI IOP's without interrupts are not supported yet
+o Push FAIL handling into the core
+o DDM control interfaces for module load etc
+o	Add I2O 2.0 support (Deferred to 2.5 kernel)
+
+Block:
+o Multiple major numbers
+o Read ahead and cache handling stuff. Talk to Ingo and people
+o Power management
+o Finish Media changers
+
+SCSI:
+o Find the right way to associate drives/luns/busses
+
+Lan:
+o Performance tuning
+o Test Fibre Channel code
+
+Tape:
+o Anyone seen anything implementing this ?
+ (D.S: Will attempt to do so if spare cycles permit)
diff --git a/drivers/message/i2o/README.ioctl b/drivers/message/i2o/README.ioctl
new file mode 100644
index 00000000..5fb195af
--- /dev/null
+++ b/drivers/message/i2o/README.ioctl
@@ -0,0 +1,394 @@
+
+Linux I2O User Space Interface
+rev 0.3 - 04/20/99
+
+=============================================================================
+Originally written by Deepak Saxena (deepak@plexity.net)
+Currently maintained by Deepak Saxena (deepak@plexity.net)
+=============================================================================
+
+I. Introduction
+
+The Linux I2O subsystem provides a set of ioctl() commands that can be
+utilized by user space applications to communicate with IOPs and devices
+on individual IOPs. This document defines the specific ioctl() commands
+that are available to the user and provides examples of their uses.
+
+This document assumes the reader is familiar with or has access to the
+I2O specification as no I2O message parameters are outlined. For information
+on the specification, see http://www.i2osig.org
+
+This document and the I2O user space interface are currently maintained
+by Deepak Saxena. Please send all comments, errata, and bug fixes to
+deepak@csociety.purdue.edu
+
+II. IOP Access
+
+Access to the I2O subsystem is provided through the device file named
+/dev/i2o/ctl. This file is a character file with major number 10 and minor
+number 166. It can be created through the following command:
+
+ mknod /dev/i2o/ctl c 10 166
+
+III. Determining the IOP Count
+
+ SYNOPSIS
+
+   ioctl(fd, I2OGETIOPS, u8 *count);
+
+ u8 count[MAX_I2O_CONTROLLERS];
+
+ DESCRIPTION
+
+ This function returns the system's active IOP table. count should
+ point to a buffer containing MAX_I2O_CONTROLLERS entries. Upon
+ returning, each entry will contain a non-zero value if the given
+   IOP unit is active, and zero if it is inactive or non-existent.
+
+ RETURN VALUE.
+
+ Returns 0 if no errors occur, and -1 otherwise. If an error occurs,
+ errno is set appropriately:
+
+ EFAULT Invalid user space pointer was passed
+
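+ EXAMPLE
+
+   A minimal user-space sketch of the call above. It assumes the ioctl
+   constants and MAX_I2O_CONTROLLERS come from <linux/i2o-dev.h>; adjust
+   the include and the error handling to your environment.
+
+   #include <stdio.h>
+   #include <fcntl.h>
+   #include <unistd.h>
+   #include <sys/ioctl.h>
+   #include <linux/i2o-dev.h>
+
+   int main(void)
+   {
+       unsigned char count[MAX_I2O_CONTROLLERS];
+       int fd, i;
+
+       fd = open("/dev/i2o/ctl", O_RDONLY);
+       if (fd < 0) {
+           perror("open /dev/i2o/ctl");
+           return 1;
+       }
+       if (ioctl(fd, I2OGETIOPS, count) < 0) {
+           perror("I2OGETIOPS");
+           close(fd);
+           return 1;
+       }
+       for (i = 0; i < MAX_I2O_CONTROLLERS; i++)
+           if (count[i])
+               printf("IOP %d is active\n", i);
+       close(fd);
+       return 0;
+   }
+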
+IV. Getting Hardware Resource Table
+
+ SYNOPSIS
+
+   ioctl(fd, I2OHRTGET, struct i2o_cmd_hrtlct *hrt);
+
+ struct i2o_cmd_hrtlct
+ {
+ u32 iop; /* IOP unit number */
+ void *resbuf; /* Buffer for result */
+ u32 *reslen; /* Buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+ This function returns the Hardware Resource Table of the IOP specified
+ by hrt->iop in the buffer pointed to by hrt->resbuf. The actual size of
+ the data is written into *(hrt->reslen).
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+ is returned and errno is set appropriately:
+
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(hrt->reslen)
+
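+ EXAMPLE
+
+   A sketch of the ENOBUFS handling described above, using the
+   struct i2o_cmd_hrtlct layout shown in the SYNOPSIS. fd is an open
+   descriptor for /dev/i2o/ctl; <stdlib.h>, <errno.h> and <sys/ioctl.h>
+   are assumed to be included. I2OLCTGET (next section) is used in
+   exactly the same way.
+
+   static void *fetch_hrt(int fd, unsigned int iop, unsigned int *len)
+   {
+       struct i2o_cmd_hrtlct hrt;
+       unsigned int reslen = 4096;          /* initial guess */
+       void *buf = malloc(reslen), *nbuf;
+
+       if (!buf)
+           return NULL;
+
+       hrt.iop = iop;
+       hrt.resbuf = buf;
+       hrt.reslen = &reslen;
+
+       if (ioctl(fd, I2OHRTGET, &hrt) == 0)
+           goto done;
+       if (errno != ENOBUFS)
+           goto fail;
+
+       /* reslen now holds the required size; grow the buffer and retry */
+       nbuf = realloc(buf, reslen);
+       if (!nbuf)
+           goto fail;
+       buf = nbuf;
+       hrt.resbuf = buf;
+       if (ioctl(fd, I2OHRTGET, &hrt) < 0)
+           goto fail;
+
+   done:
+       *len = reslen;
+       return buf;
+   fail:
+       free(buf);
+       return NULL;
+   }
+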
+V. Getting Logical Configuration Table
+
+ SYNOPSIS
+
+   ioctl(fd, I2OLCTGET, struct i2o_cmd_hrtlct *lct);
+
+ struct i2o_cmd_hrtlct
+ {
+ u32 iop; /* IOP unit number */
+ void *resbuf; /* Buffer for result */
+ u32 *reslen; /* Buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+ This function returns the Logical Configuration Table of the IOP specified
+ by lct->iop in the buffer pointed to by lct->resbuf. The actual size of
+ the data is written into *(lct->reslen).
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+ is returned and errno is set appropriately:
+
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(lct->reslen)
+
+VI. Setting Parameters
+
+ SYNOPSIS
+
+   ioctl(fd, I2OPARMSET, struct i2o_cmd_psetget *ops);
+
+ struct i2o_cmd_psetget
+ {
+ u32 iop; /* IOP unit number */
+ u32 tid; /* Target device TID */
+ void *opbuf; /* Operation List buffer */
+ u32 oplen; /* Operation List buffer length in bytes */
+ void *resbuf; /* Result List buffer */
+ u32 *reslen; /* Result List buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+ This function posts a UtilParamsSet message to the device identified
+ by ops->iop and ops->tid. The operation list for the message is
+ sent through the ops->opbuf buffer, and the result list is written
+ into the buffer pointed to by ops->resbuf. The number of bytes
+ written is placed into *(ops->reslen).
+
+ RETURNS
+
+ The return value is the size in bytes of the data written into
+ ops->resbuf if no errors occur. If an error occurs, -1 is returned
+   and errno is set appropriately:
+
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(ops->reslen)
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+
+ A return value of 0 does not mean that the value was actually
+ changed properly on the IOP. The user should check the result
+ list to determine the specific status of the transaction.
+
+VII. Getting Parameters
+
+ SYNOPSIS
+
+   ioctl(fd, I2OPARMGET, struct i2o_cmd_psetget *ops);
+
+   struct i2o_cmd_psetget
+ {
+ u32 iop; /* IOP unit number */
+ u32 tid; /* Target device TID */
+ void *opbuf; /* Operation List buffer */
+ u32 oplen; /* Operation List buffer length in bytes */
+ void *resbuf; /* Result List buffer */
+ u32 *reslen; /* Result List buffer length in bytes */
+ };
+
+ DESCRIPTION
+
+ This function posts a UtilParamsGet message to the device identified
+ by ops->iop and ops->tid. The operation list for the message is
+ sent through the ops->opbuf buffer, and the result list is written
+ into the buffer pointed to by ops->resbuf. The actual size of data
+ written is placed into *(ops->reslen).
+
+ RETURNS
+
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ ENOBUFS Buffer not large enough. If this occurs, the required
+ buffer length is written into *(ops->reslen)
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+
+ A return value of 0 does not mean that the value was actually
+ properly retrieved. The user should check the result list
+ to determine the specific status of the transaction.
+
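+ EXAMPLE
+
+   A sketch of a scalar UtilParamsGet from user space, using the
+   struct i2o_cmd_psetget layout shown in section VI. fd is an open
+   descriptor for /dev/i2o/ctl and <sys/ioctl.h> is assumed to be
+   included. The operation list follows the I2O spec layout (operation
+   count, pad, then one operation block); the FIELD_GET operation code
+   and the group/field numbers are placeholders - take the real values
+   from the I2O specification of the class you are querying. The result
+   buffer starts with the 8-byte result list header (ResultCount,
+   ErrorInfoSize, BlockStatus, BlockSize).
+
+   #define OP_FIELD_GET 0x0001   /* assumed UtilParamsGet FIELD_GET code */
+
+   static int get_scalar_field(int fd, unsigned int iop, unsigned int tid,
+                               unsigned short group, unsigned short field,
+                               void *res, unsigned int *reslen)
+   {
+       /* { op count, pad, operation, group, field count, field index } */
+       unsigned short ops[6] = { 1, 0, OP_FIELD_GET, group, 1, field };
+       struct i2o_cmd_psetget cmd;
+
+       cmd.iop    = iop;
+       cmd.tid    = tid;
+       cmd.opbuf  = ops;
+       cmd.oplen  = sizeof(ops);
+       cmd.resbuf = res;
+       cmd.reslen = reslen;
+
+       return ioctl(fd, I2OPARMGET, &cmd);
+   }
+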
+VIII. Downloading Software
+
+ SYNOPSIS
+
+ ioctl(fd, I2OSWDL, struct i2o_sw_xfer *sw);
+
+ struct i2o_sw_xfer
+ {
+ u32 iop; /* IOP unit number */
+ u8 flags; /* DownloadFlags field */
+ u8 sw_type; /* Software type */
+ u32 sw_id; /* Software ID */
+ void *buf; /* Pointer to software buffer */
+ u32 *swlen; /* Length of software buffer */
+ u32 *maxfrag; /* Number of fragments */
+ u32 *curfrag; /* Current fragment number */
+ };
+
+ DESCRIPTION
+
+ This function downloads a software fragment pointed by sw->buf
+ to the iop identified by sw->iop. The DownloadFlags, SwID, SwType
+ and SwSize fields of the ExecSwDownload message are filled in with
+ the values of sw->flags, sw->sw_id, sw->sw_type and *(sw->swlen).
+
+ The fragments _must_ be sent in order and be 8K in size. The last
+ fragment _may_ be shorter, however. The kernel will compute its
+ size based on information in the sw->swlen field.
+
+ Please note that SW transfers can take a long time.
+
+ RETURNS
+
+   This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+
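+ EXAMPLE
+
+   A sketch of the fragmenting rules above: the image is sent in order
+   in 8K pieces and only the last piece may be short. Field names
+   follow struct i2o_sw_xfer; fd is an open /dev/i2o/ctl descriptor and
+   the flags, type, ID, image pointer and length come from the caller.
+
+   #define SW_FRAG_SIZE 8192
+
+   static int download_image(int fd, unsigned int iop, unsigned char dl_flags,
+                             unsigned char sw_type, unsigned int sw_id,
+                             unsigned char *image, unsigned int image_len)
+   {
+       unsigned int swlen = image_len;              /* total image size */
+       unsigned int maxfrag = (image_len + SW_FRAG_SIZE - 1) / SW_FRAG_SIZE;
+       unsigned int curfrag;
+       struct i2o_sw_xfer sw;
+
+       sw.iop     = iop;
+       sw.flags   = dl_flags;
+       sw.sw_type = sw_type;
+       sw.sw_id   = sw_id;
+       sw.swlen   = &swlen;
+       sw.maxfrag = &maxfrag;
+       sw.curfrag = &curfrag;
+
+       for (curfrag = 0; curfrag < maxfrag; curfrag++) {
+           sw.buf = image + curfrag * SW_FRAG_SIZE;
+           if (ioctl(fd, I2OSWDL, &sw) < 0)
+               return -1;                           /* errno tells why */
+       }
+       return 0;
+   }
+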
+IX. Uploading Software
+
+ SYNOPSIS
+
+ ioctl(fd, I2OSWUL, struct i2o_sw_xfer *sw);
+
+ struct i2o_sw_xfer
+ {
+ u32 iop; /* IOP unit number */
+ u8 flags; /* UploadFlags */
+ u8 sw_type; /* Software type */
+ u32 sw_id; /* Software ID */
+ void *buf; /* Pointer to software buffer */
+ u32 *swlen; /* Length of software buffer */
+ u32 *maxfrag; /* Number of fragments */
+ u32 *curfrag; /* Current fragment number */
+ };
+
+ DESCRIPTION
+
+ This function uploads a software fragment from the IOP identified
+ by sw->iop, sw->sw_type, sw->sw_id and optionally sw->swlen fields.
+ The UploadFlags, SwID, SwType and SwSize fields of the ExecSwUpload
+ message are filled in with the values of sw->flags, sw->sw_id,
+ sw->sw_type and *(sw->swlen).
+
+ The fragments _must_ be requested in order and be 8K in size. The
+ user is responsible for allocating memory pointed by sw->buf. The
+ last fragment _may_ be shorter.
+
+ Please note that SW transfers can take a long time.
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+
+X. Removing Software
+
+ SYNOPSIS
+
+ ioctl(fd, I2OSWDEL, struct i2o_sw_xfer *sw);
+
+ struct i2o_sw_xfer
+ {
+ u32 iop; /* IOP unit number */
+ u8 flags; /* RemoveFlags */
+ u8 sw_type; /* Software type */
+ u32 sw_id; /* Software ID */
+ void *buf; /* Unused */
+ u32 *swlen; /* Length of the software data */
+ u32 *maxfrag; /* Unused */
+ u32 *curfrag; /* Unused */
+ };
+
+ DESCRIPTION
+
+ This function removes software from the IOP identified by sw->iop.
+ The RemoveFlags, SwID, SwType and SwSize fields of the ExecSwRemove message
+ are filled in with the values of sw->flags, sw->sw_id, sw->sw_type and
+   *(sw->swlen). Give zero in *(sw->swlen) if the value is unknown. The IOP uses
+   the *(sw->swlen) value to verify correct identification of the module to remove.
+ The actual size of the module is written into *(sw->swlen).
+
+ RETURNS
+
+ This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+
+XI. Validating Configuration
+
+ SYNOPSIS
+
+ ioctl(fd, I2OVALIDATE, int *iop);
+ u32 iop;
+
+ DESCRIPTION
+
+ This function posts an ExecConfigValidate message to the controller
+ identified by iop. This message indicates that the current
+ configuration is accepted. The iop changes the status of suspect drivers
+ to valid and may delete old drivers from its store.
+
+ RETURNS
+
+   This function returns 0 if no errors occur. If an error occurs, -1 is
+   returned and errno is set appropriately:
+
+ ETIMEDOUT Timeout waiting for reply message
+ ENXIO Invalid IOP number
+
+XII. Configuration Dialog
+
+ SYNOPSIS
+
+ ioctl(fd, I2OHTML, struct i2o_html *htquery);
+ struct i2o_html
+ {
+ u32 iop; /* IOP unit number */
+ u32 tid; /* Target device ID */
+ u32 page; /* HTML page */
+ void *resbuf; /* Buffer for reply HTML page */
+ u32 *reslen; /* Length in bytes of reply buffer */
+ void *qbuf; /* Pointer to HTTP query string */
+ u32 qlen; /* Length in bytes of query string buffer */
+ };
+
+ DESCRIPTION
+
+ This function posts an UtilConfigDialog message to the device identified
+ by htquery->iop and htquery->tid. The requested HTML page number is
+ provided by the htquery->page field, and the resultant data is stored
+ in the buffer pointed to by htquery->resbuf. If there is an HTTP query
+ string that is to be sent to the device, it should be sent in the buffer
+ pointed to by htquery->qbuf. If there is no query string, this field
+ should be set to NULL. The actual size of the reply received is written
+ into *(htquery->reslen).
+
+ RETURNS
+
+   This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+ EFAULT Invalid user space pointer was passed
+ ENXIO Invalid IOP number
+ ENOBUFS Buffer not large enough. If this occurs, the required
+              buffer length is written into *(htquery->reslen)
+ ETIMEDOUT Timeout waiting for reply message
+ ENOMEM Kernel memory allocation error
+
+XIII. Events
+
+   This is still being worked out. The current idea is to use
+   the select() interface to allow user apps to periodically poll
+ the /dev/i2o/ctl device for events. When select() notifies the user
+ that an event is available, the user would call read() to retrieve
+ a list of all the events that are pending for the specific device.
+
+=============================================================================
+Revision History
+=============================================================================
+
+Rev 0.1 - 04/01/99
+- Initial revision
+
+Rev 0.2 - 04/06/99
+- Changed return values to match UNIX ioctl() standard. Only return values
+ are 0 and -1. All errors are reported through errno.
+- Added summary of proposed possible event interfaces
+
+Rev 0.3 - 04/20/99
+- Changed all ioctls() to use pointers to user data instead of actual data
+- Updated error values to match the code
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
new file mode 100644
index 00000000..c463dc2e
--- /dev/null
+++ b/drivers/message/i2o/bus-osm.c
@@ -0,0 +1,176 @@
+/*
+ * Bus Adapter OSM
+ *
+ * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Fixes/additions:
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>
+ * initial version.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+
+#define OSM_NAME "bus-osm"
+#define OSM_VERSION "1.317"
+#define OSM_DESCRIPTION "I2O Bus Adapter OSM"
+
+static struct i2o_driver i2o_bus_driver;
+
+/* Bus OSM class handling definition */
+static struct i2o_class_id i2o_bus_class_id[] = {
+ {I2O_CLASS_BUS_ADAPTER},
+ {I2O_CLASS_END}
+};
+
+/**
+ * i2o_bus_scan - Scan the bus for new devices
+ * @dev: I2O device of the bus, which should be scanned
+ *
+ * Scans the bus dev for new / removed devices. After the scan a new LCT
+ * will be fetched automatically.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_bus_scan(struct i2o_device *dev)
+{
+ struct i2o_message *msg;
+
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return -ETIMEDOUT;
+
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.
+ tid);
+
+ return i2o_msg_post_wait(dev->iop, msg, 60);
+};
+
+/**
+ * i2o_bus_store_scan - Scan the I2O Bus Adapter
+ * @d: device which should be scanned
+ * @attr: device_attribute
+ * @buf: output buffer
+ * @count: buffer size
+ *
+ * Returns count.
+ */
+static ssize_t i2o_bus_store_scan(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(d);
+ int rc;
+
+ if ((rc = i2o_bus_scan(i2o_dev)))
+ osm_warn("bus scan failed %d\n", rc);
+
+ return count;
+}
+
+/* Bus Adapter OSM device attributes */
+static DEVICE_ATTR(scan, S_IWUSR, NULL, i2o_bus_store_scan);
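+
+/*
+ * With this attribute registered, a bus rescan can be triggered from
+ * user space by writing to the sysfs "scan" file of the bus-adapter
+ * device, e.g. (devices are named "<unit>:<tid>", so the exact path
+ * depends on the controller unit and the adapter's TID):
+ *
+ *	echo 1 > /sys/bus/i2o/devices/0:003/scan
+ */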
+
+/**
+ * i2o_bus_probe - verify if dev is an I2O Bus Adapter device and install it
+ * @dev: device to verify if it is an I2O Bus Adapter device
+ *
+ * Because we want all Bus Adapters always return 0.
+ * Except when we fail. Then we are sad.
+ *
+ * Returns 0, except when we fail to excel.
+ */
+static int i2o_bus_probe(struct device *dev)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(get_device(dev));
+ int rc;
+
+ rc = device_create_file(dev, &dev_attr_scan);
+ if (rc)
+ goto err_out;
+
+ osm_info("device added (TID: %03x)\n", i2o_dev->lct_data.tid);
+
+ return 0;
+
+err_out:
+ put_device(dev);
+ return rc;
+};
+
+/**
+ * i2o_bus_remove - remove the I2O Bus Adapter device from the system again
+ * @dev: I2O Bus Adapter device which should be removed
+ *
+ * Always returns 0.
+ */
+static int i2o_bus_remove(struct device *dev)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(dev);
+
+ device_remove_file(dev, &dev_attr_scan);
+
+ put_device(dev);
+
+ osm_info("device removed (TID: %03x)\n", i2o_dev->lct_data.tid);
+
+ return 0;
+};
+
+/* Bus Adapter OSM driver struct */
+static struct i2o_driver i2o_bus_driver = {
+ .name = OSM_NAME,
+ .classes = i2o_bus_class_id,
+ .driver = {
+ .probe = i2o_bus_probe,
+ .remove = i2o_bus_remove,
+ },
+};
+
+/**
+ * i2o_bus_init - Bus Adapter OSM initialization function
+ *
+ * Only register the Bus Adapter OSM in the I2O core.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_bus_init(void)
+{
+ int rc;
+
+ printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
+
+ /* Register Bus Adapter OSM into I2O core */
+ rc = i2o_driver_register(&i2o_bus_driver);
+ if (rc) {
+ osm_err("Could not register Bus Adapter OSM\n");
+ return rc;
+ }
+
+ return 0;
+};
+
+/**
+ * i2o_bus_exit - Bus Adapter OSM exit function
+ *
+ * Unregisters Bus Adapter OSM from I2O core.
+ */
+static void __exit i2o_bus_exit(void)
+{
+ i2o_driver_unregister(&i2o_bus_driver);
+};
+
+MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(OSM_DESCRIPTION);
+MODULE_VERSION(OSM_VERSION);
+
+module_init(i2o_bus_init);
+module_exit(i2o_bus_exit);
diff --git a/drivers/message/i2o/config-osm.c b/drivers/message/i2o/config-osm.c
new file mode 100644
index 00000000..3bba7aa8
--- /dev/null
+++ b/drivers/message/i2o/config-osm.c
@@ -0,0 +1,90 @@
+/*
+ * Configuration OSM
+ *
+ * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Fixes/additions:
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>
+ * initial version.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
+
+#include <asm/uaccess.h>
+
+#define OSM_NAME "config-osm"
+#define OSM_VERSION "1.323"
+#define OSM_DESCRIPTION "I2O Configuration OSM"
+
+/* access mode user rw */
+#define S_IWRSR (S_IRUSR | S_IWUSR)
+
+static struct i2o_driver i2o_config_driver;
+
+/* Config OSM driver struct */
+static struct i2o_driver i2o_config_driver = {
+ .name = OSM_NAME,
+};
+
+#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
+#include "i2o_config.c"
+#endif
+
+/**
+ * i2o_config_init - Configuration OSM initialization function
+ *
+ * Registers Configuration OSM in the I2O core and if old ioctl's are
+ * compiled in initialize them.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_config_init(void)
+{
+ printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
+
+ if (i2o_driver_register(&i2o_config_driver)) {
+ osm_err("handler register failed.\n");
+ return -EBUSY;
+ }
+#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
+ if (i2o_config_old_init()) {
+ osm_err("old config handler initialization failed\n");
+ i2o_driver_unregister(&i2o_config_driver);
+ return -EBUSY;
+ }
+#endif
+
+ return 0;
+}
+
+/**
+ * i2o_config_exit - Configuration OSM exit function
+ *
+ * If old ioctl's are compiled in exit remove them and unregisters
+ * Configuration OSM from I2O core.
+ */
+static void i2o_config_exit(void)
+{
+#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
+ i2o_config_old_exit();
+#endif
+
+ i2o_driver_unregister(&i2o_config_driver);
+}
+
+MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(OSM_DESCRIPTION);
+MODULE_VERSION(OSM_VERSION);
+
+module_init(i2o_config_init);
+module_exit(i2o_config_exit);
diff --git a/drivers/message/i2o/core.h b/drivers/message/i2o/core.h
new file mode 100644
index 00000000..cbe384fb
--- /dev/null
+++ b/drivers/message/i2o/core.h
@@ -0,0 +1,69 @@
+/*
+ * I2O core internal declarations
+ *
+ * Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Fixes/additions:
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>
+ * initial version.
+ */
+
+/* Exec-OSM */
+extern struct i2o_driver i2o_exec_driver;
+extern int i2o_exec_lct_get(struct i2o_controller *);
+
+extern int __init i2o_exec_init(void);
+extern void i2o_exec_exit(void);
+
+/* driver */
+extern struct bus_type i2o_bus_type;
+
+extern int i2o_driver_dispatch(struct i2o_controller *, u32);
+
+extern int __init i2o_driver_init(void);
+extern void i2o_driver_exit(void);
+
+/* PCI */
+extern int __init i2o_pci_init(void);
+extern void __exit i2o_pci_exit(void);
+
+/* device */
+extern struct device_attribute i2o_device_attrs[];
+
+extern void i2o_device_remove(struct i2o_device *);
+extern int i2o_device_parse_lct(struct i2o_controller *);
+
+int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
+ int oplen, void *reslist, int reslen);
+
+/* IOP */
+extern struct i2o_controller *i2o_iop_alloc(void);
+
+/**
+ * i2o_iop_free - Free the i2o_controller struct
+ * @c: I2O controller to free
+ */
+static inline void i2o_iop_free(struct i2o_controller *c)
+{
+ i2o_pool_free(&c->in_msg);
+ kfree(c);
+}
+
+extern int i2o_iop_add(struct i2o_controller *);
+extern void i2o_iop_remove(struct i2o_controller *);
+
+/* control registers relative to c->base */
+#define I2O_IRQ_STATUS 0x30
+#define I2O_IRQ_MASK 0x34
+#define I2O_IN_PORT 0x40
+#define I2O_OUT_PORT 0x44
+
+/* Motorola/Freescale specific register offset */
+#define I2O_MOTOROLA_PORT_OFFSET 0x10400
+
+#define I2O_IRQ_OUTBOUND_POST 0x00000008
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
new file mode 100644
index 00000000..ce62d8bf
--- /dev/null
+++ b/drivers/message/i2o/debug.c
@@ -0,0 +1,472 @@
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/i2o.h>
+
+static void i2o_report_util_cmd(u8 cmd);
+static void i2o_report_exec_cmd(u8 cmd);
+static void i2o_report_fail_status(u8 req_status, u32 * msg);
+static void i2o_report_common_status(u8 req_status);
+static void i2o_report_common_dsc(u16 detailed_status);
+
+/*
+ * Used for error reporting/debugging purposes.
+ * Report Cmd name, Request status, Detailed Status.
+ */
+void i2o_report_status(const char *severity, const char *str,
+ struct i2o_message *m)
+{
+ u32 *msg = (u32 *) m;
+ u8 cmd = (msg[1] >> 24) & 0xFF;
+ u8 req_status = (msg[4] >> 24) & 0xFF;
+ u16 detailed_status = msg[4] & 0xFFFF;
+
+ if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
+ return; // No status in this reply
+
+ printk("%s%s: ", severity, str);
+
+ if (cmd < 0x1F) // Utility cmd
+ i2o_report_util_cmd(cmd);
+
+ else if (cmd >= 0xA0 && cmd <= 0xEF) // Executive cmd
+ i2o_report_exec_cmd(cmd);
+ else
+ printk("Cmd = %0#2x, ", cmd); // Other cmds
+
+ if (msg[0] & MSG_FAIL) {
+ i2o_report_fail_status(req_status, msg);
+ return;
+ }
+
+ i2o_report_common_status(req_status);
+
+ if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
+ i2o_report_common_dsc(detailed_status);
+ else
+ printk(" / DetailedStatus = %0#4x.\n",
+ detailed_status);
+}
+
+/* Used to dump a message to syslog during debugging */
+void i2o_dump_message(struct i2o_message *m)
+{
+#ifdef DEBUG
+ u32 *msg = (u32 *) m;
+ int i;
+ printk(KERN_INFO "Dumping I2O message size %d @ %p\n",
+ msg[0] >> 16 & 0xffff, msg);
+ for (i = 0; i < ((msg[0] >> 16) & 0xffff); i++)
+ printk(KERN_INFO " msg[%d] = %0#10x\n", i, msg[i]);
+#endif
+}
+
+/*
+ * Used for error reporting/debugging purposes.
+ * Following fail status are common to all classes.
+ * The preserved message must be handled in the reply handler.
+ */
+static void i2o_report_fail_status(u8 req_status, u32 * msg)
+{
+ static char *FAIL_STATUS[] = {
+ "0x80", /* not used */
+ "SERVICE_SUSPENDED", /* 0x81 */
+ "SERVICE_TERMINATED", /* 0x82 */
+ "CONGESTION",
+ "FAILURE",
+ "STATE_ERROR",
+ "TIME_OUT",
+ "ROUTING_FAILURE",
+ "INVALID_VERSION",
+ "INVALID_OFFSET",
+ "INVALID_MSG_FLAGS",
+ "FRAME_TOO_SMALL",
+ "FRAME_TOO_LARGE",
+ "INVALID_TARGET_ID",
+ "INVALID_INITIATOR_ID",
+ "INVALID_INITIATOR_CONTEX", /* 0x8F */
+ "UNKNOWN_FAILURE" /* 0xFF */
+ };
+
+ if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
+ printk("TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
+ req_status);
+ else
+ printk("TRANSPORT_%s.\n",
+ FAIL_STATUS[req_status & 0x0F]);
+
+ /* Dump some details */
+
+ printk(KERN_ERR " InitiatorId = %d, TargetId = %d\n",
+ (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF);
+ printk(KERN_ERR " LowestVersion = 0x%02X, HighestVersion = 0x%02X\n",
+ (msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
+ printk(KERN_ERR " FailingHostUnit = 0x%04X, FailingIOP = 0x%03X\n",
+ msg[5] >> 16, msg[5] & 0xFFF);
+
+ printk(KERN_ERR " Severity: 0x%02X\n", (msg[4] >> 16) & 0xFF);
+ if (msg[4] & (1 << 16))
+ printk(KERN_DEBUG "(FormatError), "
+ "this msg can never be delivered/processed.\n");
+ if (msg[4] & (1 << 17))
+ printk(KERN_DEBUG "(PathError), "
+ "this msg can no longer be delivered/processed.\n");
+ if (msg[4] & (1 << 18))
+ printk(KERN_DEBUG "(PathState), "
+ "the system state does not allow delivery.\n");
+ if (msg[4] & (1 << 19))
+ printk(KERN_DEBUG
+ "(Congestion), resources temporarily not available;"
+ "do not retry immediately.\n");
+}
+
+/*
+ * Used for error reporting/debugging purposes.
+ * Following reply status are common to all classes.
+ */
+static void i2o_report_common_status(u8 req_status)
+{
+ static char *REPLY_STATUS[] = {
+ "SUCCESS",
+ "ABORT_DIRTY",
+ "ABORT_NO_DATA_TRANSFER",
+ "ABORT_PARTIAL_TRANSFER",
+ "ERROR_DIRTY",
+ "ERROR_NO_DATA_TRANSFER",
+ "ERROR_PARTIAL_TRANSFER",
+ "PROCESS_ABORT_DIRTY",
+ "PROCESS_ABORT_NO_DATA_TRANSFER",
+ "PROCESS_ABORT_PARTIAL_TRANSFER",
+ "TRANSACTION_ERROR",
+ "PROGRESS_REPORT"
+ };
+
+ if (req_status >= ARRAY_SIZE(REPLY_STATUS))
+ printk("RequestStatus = %0#2x", req_status);
+ else
+ printk("%s", REPLY_STATUS[req_status]);
+}
+
+/*
+ * Used for error reporting/debugging purposes.
+ * Following detailed status are valid for executive class,
+ * utility class, DDM class and for transaction error replies.
+ */
+static void i2o_report_common_dsc(u16 detailed_status)
+{
+ static char *COMMON_DSC[] = {
+ "SUCCESS",
+ "0x01", // not used
+ "BAD_KEY",
+ "TCL_ERROR",
+ "REPLY_BUFFER_FULL",
+ "NO_SUCH_PAGE",
+ "INSUFFICIENT_RESOURCE_SOFT",
+ "INSUFFICIENT_RESOURCE_HARD",
+ "0x08", // not used
+ "CHAIN_BUFFER_TOO_LARGE",
+ "UNSUPPORTED_FUNCTION",
+ "DEVICE_LOCKED",
+ "DEVICE_RESET",
+ "INAPPROPRIATE_FUNCTION",
+ "INVALID_INITIATOR_ADDRESS",
+ "INVALID_MESSAGE_FLAGS",
+ "INVALID_OFFSET",
+ "INVALID_PARAMETER",
+ "INVALID_REQUEST",
+ "INVALID_TARGET_ADDRESS",
+ "MESSAGE_TOO_LARGE",
+ "MESSAGE_TOO_SMALL",
+ "MISSING_PARAMETER",
+ "TIMEOUT",
+ "UNKNOWN_ERROR",
+ "UNKNOWN_FUNCTION",
+ "UNSUPPORTED_VERSION",
+ "DEVICE_BUSY",
+ "DEVICE_NOT_AVAILABLE"
+ };
+
+ if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
+ printk(" / DetailedStatus = %0#4x.\n",
+ detailed_status);
+ else
+ printk(" / %s.\n", COMMON_DSC[detailed_status]);
+}
+
+/*
+ * Used for error reporting/debugging purposes
+ */
+static void i2o_report_util_cmd(u8 cmd)
+{
+ switch (cmd) {
+ case I2O_CMD_UTIL_NOP:
+ printk("UTIL_NOP, ");
+ break;
+ case I2O_CMD_UTIL_ABORT:
+ printk("UTIL_ABORT, ");
+ break;
+ case I2O_CMD_UTIL_CLAIM:
+ printk("UTIL_CLAIM, ");
+ break;
+ case I2O_CMD_UTIL_RELEASE:
+ printk("UTIL_CLAIM_RELEASE, ");
+ break;
+ case I2O_CMD_UTIL_CONFIG_DIALOG:
+ printk("UTIL_CONFIG_DIALOG, ");
+ break;
+ case I2O_CMD_UTIL_DEVICE_RESERVE:
+ printk("UTIL_DEVICE_RESERVE, ");
+ break;
+ case I2O_CMD_UTIL_DEVICE_RELEASE:
+ printk("UTIL_DEVICE_RELEASE, ");
+ break;
+ case I2O_CMD_UTIL_EVT_ACK:
+ printk("UTIL_EVENT_ACKNOWLEDGE, ");
+ break;
+ case I2O_CMD_UTIL_EVT_REGISTER:
+ printk("UTIL_EVENT_REGISTER, ");
+ break;
+ case I2O_CMD_UTIL_LOCK:
+ printk("UTIL_LOCK, ");
+ break;
+ case I2O_CMD_UTIL_LOCK_RELEASE:
+ printk("UTIL_LOCK_RELEASE, ");
+ break;
+ case I2O_CMD_UTIL_PARAMS_GET:
+ printk("UTIL_PARAMS_GET, ");
+ break;
+ case I2O_CMD_UTIL_PARAMS_SET:
+ printk("UTIL_PARAMS_SET, ");
+ break;
+ case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
+ printk("UTIL_REPLY_FAULT_NOTIFY, ");
+ break;
+ default:
+ printk("Cmd = %0#2x, ", cmd);
+ }
+}
+
+/*
+ * Used for error reporting/debugging purposes
+ */
+static void i2o_report_exec_cmd(u8 cmd)
+{
+ switch (cmd) {
+ case I2O_CMD_ADAPTER_ASSIGN:
+ printk("EXEC_ADAPTER_ASSIGN, ");
+ break;
+ case I2O_CMD_ADAPTER_READ:
+ printk("EXEC_ADAPTER_READ, ");
+ break;
+ case I2O_CMD_ADAPTER_RELEASE:
+ printk("EXEC_ADAPTER_RELEASE, ");
+ break;
+ case I2O_CMD_BIOS_INFO_SET:
+ printk("EXEC_BIOS_INFO_SET, ");
+ break;
+ case I2O_CMD_BOOT_DEVICE_SET:
+ printk("EXEC_BOOT_DEVICE_SET, ");
+ break;
+ case I2O_CMD_CONFIG_VALIDATE:
+ printk("EXEC_CONFIG_VALIDATE, ");
+ break;
+ case I2O_CMD_CONN_SETUP:
+ printk("EXEC_CONN_SETUP, ");
+ break;
+ case I2O_CMD_DDM_DESTROY:
+ printk("EXEC_DDM_DESTROY, ");
+ break;
+ case I2O_CMD_DDM_ENABLE:
+ printk("EXEC_DDM_ENABLE, ");
+ break;
+ case I2O_CMD_DDM_QUIESCE:
+ printk("EXEC_DDM_QUIESCE, ");
+ break;
+ case I2O_CMD_DDM_RESET:
+ printk("EXEC_DDM_RESET, ");
+ break;
+ case I2O_CMD_DDM_SUSPEND:
+ printk("EXEC_DDM_SUSPEND, ");
+ break;
+ case I2O_CMD_DEVICE_ASSIGN:
+ printk("EXEC_DEVICE_ASSIGN, ");
+ break;
+ case I2O_CMD_DEVICE_RELEASE:
+ printk("EXEC_DEVICE_RELEASE, ");
+ break;
+ case I2O_CMD_HRT_GET:
+ printk("EXEC_HRT_GET, ");
+ break;
+ case I2O_CMD_ADAPTER_CLEAR:
+ printk("EXEC_IOP_CLEAR, ");
+ break;
+ case I2O_CMD_ADAPTER_CONNECT:
+ printk("EXEC_IOP_CONNECT, ");
+ break;
+ case I2O_CMD_ADAPTER_RESET:
+ printk("EXEC_IOP_RESET, ");
+ break;
+ case I2O_CMD_LCT_NOTIFY:
+ printk("EXEC_LCT_NOTIFY, ");
+ break;
+ case I2O_CMD_OUTBOUND_INIT:
+ printk("EXEC_OUTBOUND_INIT, ");
+ break;
+ case I2O_CMD_PATH_ENABLE:
+ printk("EXEC_PATH_ENABLE, ");
+ break;
+ case I2O_CMD_PATH_QUIESCE:
+ printk("EXEC_PATH_QUIESCE, ");
+ break;
+ case I2O_CMD_PATH_RESET:
+ printk("EXEC_PATH_RESET, ");
+ break;
+ case I2O_CMD_STATIC_MF_CREATE:
+ printk("EXEC_STATIC_MF_CREATE, ");
+ break;
+ case I2O_CMD_STATIC_MF_RELEASE:
+ printk("EXEC_STATIC_MF_RELEASE, ");
+ break;
+ case I2O_CMD_STATUS_GET:
+ printk("EXEC_STATUS_GET, ");
+ break;
+ case I2O_CMD_SW_DOWNLOAD:
+ printk("EXEC_SW_DOWNLOAD, ");
+ break;
+ case I2O_CMD_SW_UPLOAD:
+ printk("EXEC_SW_UPLOAD, ");
+ break;
+ case I2O_CMD_SW_REMOVE:
+ printk("EXEC_SW_REMOVE, ");
+ break;
+ case I2O_CMD_SYS_ENABLE:
+ printk("EXEC_SYS_ENABLE, ");
+ break;
+ case I2O_CMD_SYS_MODIFY:
+ printk("EXEC_SYS_MODIFY, ");
+ break;
+ case I2O_CMD_SYS_QUIESCE:
+ printk("EXEC_SYS_QUIESCE, ");
+ break;
+ case I2O_CMD_SYS_TAB_SET:
+ printk("EXEC_SYS_TAB_SET, ");
+ break;
+ default:
+ printk("Cmd = %#02x, ", cmd);
+ }
+}
+
+void i2o_debug_state(struct i2o_controller *c)
+{
+ printk(KERN_INFO "%s: State = ", c->name);
+ switch (((i2o_status_block *) c->status_block.virt)->iop_state) {
+ case 0x01:
+ printk("INIT\n");
+ break;
+ case 0x02:
+ printk("RESET\n");
+ break;
+ case 0x04:
+ printk("HOLD\n");
+ break;
+ case 0x05:
+ printk("READY\n");
+ break;
+ case 0x08:
+ printk("OPERATIONAL\n");
+ break;
+ case 0x10:
+ printk("FAILED\n");
+ break;
+ case 0x11:
+ printk("FAULTED\n");
+ break;
+ default:
+ printk("%x (unknown !!)\n",
+ ((i2o_status_block *) c->status_block.virt)->iop_state);
+ }
+};
+
+void i2o_dump_hrt(struct i2o_controller *c)
+{
+ u32 *rows = (u32 *) c->hrt.virt;
+ u8 *p = (u8 *) c->hrt.virt;
+ u8 *d;
+ int count;
+ int length;
+ int i;
+ int state;
+
+ if (p[3] != 0) {
+ printk(KERN_ERR
+ "%s: HRT table for controller is too new a version.\n",
+ c->name);
+ return;
+ }
+
+ count = p[0] | (p[1] << 8);
+ length = p[2];
+
+ printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n",
+ c->name, count, length << 2);
+
+ rows += 2;
+
+ for (i = 0; i < count; i++) {
+ printk(KERN_INFO "Adapter %08X: ", rows[0]);
+ p = (u8 *) (rows + 1);
+ d = (u8 *) (rows + 2);
+ state = p[1] << 8 | p[0];
+
+ printk("TID %04X:[", state & 0xFFF);
+ state >>= 12;
+ if (state & (1 << 0))
+ printk("H"); /* Hidden */
+ if (state & (1 << 2)) {
+ printk("P"); /* Present */
+ if (state & (1 << 1))
+ printk("C"); /* Controlled */
+ }
+ if (state > 9)
+ printk("*"); /* Hard */
+
+ printk("]:");
+
+ switch (p[3] & 0xFFFF) {
+ case 0:
+ /* Adapter private bus - easy */
+ printk("Local bus %d: I/O at 0x%04X Mem 0x%08X", p[2],
+ d[1] << 8 | d[0], *(u32 *) (d + 4));
+ break;
+ case 1:
+ /* ISA bus */
+ printk("ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X", p[2],
+ d[2], d[1] << 8 | d[0], *(u32 *) (d + 4));
+ break;
+
+ case 2: /* EISA bus */
+ printk("EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
+ p[2], d[3], d[1] << 8 | d[0], *(u32 *) (d + 4));
+ break;
+
+ case 3: /* MCA bus */
+ printk("MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X", p[2],
+ d[3], d[1] << 8 | d[0], *(u32 *) (d + 4));
+ break;
+
+ case 4: /* PCI bus */
+ printk("PCI %d: Bus %d Device %d Function %d", p[2],
+ d[2], d[1], d[0]);
+ break;
+
+ case 0x80: /* Other */
+ default:
+ printk("Unsupported bus type.");
+ break;
+ }
+ printk("\n");
+ rows += length;
+ }
+}
+
+EXPORT_SYMBOL(i2o_dump_message);
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
new file mode 100644
index 00000000..4547db99
--- /dev/null
+++ b/drivers/message/i2o/device.c
@@ -0,0 +1,584 @@
+/*
+ * Functions to handle I2O devices
+ *
+ * Copyright (C) 2004 Markus Lidel <Markus.Lidel@shadowconnect.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Fixes/additions:
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>
+ * initial version.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include "core.h"
+
+/**
+ * i2o_device_issue_claim - claim or release a device
+ * @dev: I2O device to claim or release
+ * @cmd: claim or release command
+ * @type: type of claim
+ *
+ * Issue I2O UTIL_CLAIM or UTIL_RELEASE messages. The message to be sent
+ * is selected by cmd. dev is the I2O device which should be claimed or
+ * released and type is the claim type (see the I2O spec).
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
+ u32 type)
+{
+ struct i2o_message *msg;
+
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid);
+ msg->body[0] = cpu_to_le32(type);
+
+ return i2o_msg_post_wait(dev->iop, msg, 60);
+}
+
+/**
+ * i2o_device_claim - claim a device for use by an OSM
+ * @dev: I2O device to claim
+ *
+ * Do the leg work to assign a device to a given OSM. If the claim succeeds,
+ * the owner is the primary. If the attempt fails a negative errno code
+ * is returned. On success zero is returned.
+ */
+int i2o_device_claim(struct i2o_device *dev)
+{
+ int rc = 0;
+
+ mutex_lock(&dev->lock);
+
+ rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_CLAIM, I2O_CLAIM_PRIMARY);
+ if (!rc)
+ pr_debug("i2o: claim of device %d succeeded\n",
+ dev->lct_data.tid);
+ else
+ pr_debug("i2o: claim of device %d failed %d\n",
+ dev->lct_data.tid, rc);
+
+ mutex_unlock(&dev->lock);
+
+ return rc;
+}
+
+/**
+ * i2o_device_claim_release - release a device that the OSM is using
+ * @dev: device to release
+ *
+ * Drop a claim by an OSM on a given I2O device.
+ *
+ * AC - some devices seem to want to refuse an unclaim until they have
+ * finished internal processing. It makes sense since you don't want a
+ * new device to go reconfiguring the entire system until you are done.
+ * Thus we are prepared to wait briefly.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int i2o_device_claim_release(struct i2o_device *dev)
+{
+ int tries;
+ int rc = 0;
+
+ mutex_lock(&dev->lock);
+
+ /*
+ * If the controller takes a nonblocking approach to
+ * releases we have to sleep/poll for a few times.
+ */
+ for (tries = 0; tries < 10; tries++) {
+ rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_RELEASE,
+ I2O_CLAIM_PRIMARY);
+ if (!rc)
+ break;
+
+ ssleep(1);
+ }
+
+ if (!rc)
+ pr_debug("i2o: claim release of device %d succeeded\n",
+ dev->lct_data.tid);
+ else
+ pr_debug("i2o: claim release of device %d failed %d\n",
+ dev->lct_data.tid, rc);
+
+ mutex_unlock(&dev->lock);
+
+ return rc;
+}
+
+/**
+ * i2o_device_release - release the memory for a I2O device
+ * @dev: I2O device which should be released
+ *
+ * Release the allocated memory. This function is called automatically
+ * when the refcount of the device reaches 0.
+ */
+static void i2o_device_release(struct device *dev)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(dev);
+
+ pr_debug("i2o: device %s released\n", dev_name(dev));
+
+ kfree(i2o_dev);
+}
+
+/**
+ * i2o_device_show_class_id - Displays class id of I2O device
+ * @dev: device of which the class id should be displayed
+ * @attr: pointer to device attribute
+ * @buf: buffer into which the class id should be printed
+ *
+ * Returns the number of bytes which are printed into the buffer.
+ */
+static ssize_t i2o_device_show_class_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(dev);
+
+ sprintf(buf, "0x%03x\n", i2o_dev->lct_data.class_id);
+ return strlen(buf) + 1;
+}
+
+/**
+ * i2o_device_show_tid - Displays TID of I2O device
+ * @dev: device of which the TID should be displayed
+ * @attr: pointer to device attribute
+ * @buf: buffer into which the TID should be printed
+ *
+ * Returns the number of bytes which are printed into the buffer.
+ */
+static ssize_t i2o_device_show_tid(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(dev);
+
+ sprintf(buf, "0x%03x\n", i2o_dev->lct_data.tid);
+ return strlen(buf) + 1;
+}
+
+/* I2O device attributes */
+struct device_attribute i2o_device_attrs[] = {
+ __ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL),
+ __ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL),
+ __ATTR_NULL
+};
+
+/**
+ * i2o_device_alloc - Allocate an I2O device and initialize it
+ *
+ * Allocate the memory for an I2O device and initialize locks and lists
+ *
+ * Returns the allocated I2O device or a negative error code if the device
+ * could not be allocated.
+ */
+static struct i2o_device *i2o_device_alloc(void)
+{
+ struct i2o_device *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&dev->list);
+ mutex_init(&dev->lock);
+
+ dev->device.bus = &i2o_bus_type;
+ dev->device.release = &i2o_device_release;
+
+ return dev;
+}
+
+/**
+ * i2o_device_add - allocate a new I2O device and add it to the IOP
+ * @c: I2O controller that the device is on
+ * @entry: LCT entry of the I2O device
+ *
+ * Allocate a new I2O device and initialize it with the LCT entry. The
+ * device is appended to the device list of the controller.
+ *
+ * Returns zero on success, or a -ve errno.
+ */
+static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry)
+{
+ struct i2o_device *i2o_dev, *tmp;
+ int rc;
+
+ i2o_dev = i2o_device_alloc();
+ if (IS_ERR(i2o_dev)) {
+ printk(KERN_ERR "i2o: unable to allocate i2o device\n");
+ return PTR_ERR(i2o_dev);
+ }
+
+ i2o_dev->lct_data = *entry;
+
+ dev_set_name(&i2o_dev->device, "%d:%03x", c->unit,
+ i2o_dev->lct_data.tid);
+
+ i2o_dev->iop = c;
+ i2o_dev->device.parent = &c->device;
+
+ rc = device_register(&i2o_dev->device);
+ if (rc)
+ goto err;
+
+ list_add_tail(&i2o_dev->list, &c->devices);
+
+ /* create user entries for this device */
+ tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
+ if (tmp && (tmp != i2o_dev)) {
+ rc = sysfs_create_link(&i2o_dev->device.kobj,
+ &tmp->device.kobj, "user");
+ if (rc)
+ goto unreg_dev;
+ }
+
+ /* create user entries referring to this device */
+ list_for_each_entry(tmp, &c->devices, list)
+ if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
+ && (tmp != i2o_dev)) {
+ rc = sysfs_create_link(&tmp->device.kobj,
+ &i2o_dev->device.kobj, "user");
+ if (rc)
+ goto rmlink1;
+ }
+
+ /* create parent entries for this device */
+ tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
+ if (tmp && (tmp != i2o_dev)) {
+ rc = sysfs_create_link(&i2o_dev->device.kobj,
+ &tmp->device.kobj, "parent");
+ if (rc)
+ goto rmlink1;
+ }
+
+ /* create parent entries referring to this device */
+ list_for_each_entry(tmp, &c->devices, list)
+ if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
+ && (tmp != i2o_dev)) {
+ rc = sysfs_create_link(&tmp->device.kobj,
+ &i2o_dev->device.kobj, "parent");
+ if (rc)
+ goto rmlink2;
+ }
+
+ i2o_driver_notify_device_add_all(i2o_dev);
+
+ pr_debug("i2o: device %s added\n", dev_name(&i2o_dev->device));
+
+ return 0;
+
+rmlink2:
+	/* If link creation failed halfway through, walk the whole list to
+	 * clean up.  Removing a link that was never created is harmless,
+	 * because sysfs_remove_link will take care of it.
+ */
+ list_for_each_entry(tmp, &c->devices, list) {
+ if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
+ sysfs_remove_link(&tmp->device.kobj, "parent");
+ }
+ sysfs_remove_link(&i2o_dev->device.kobj, "parent");
+rmlink1:
+ list_for_each_entry(tmp, &c->devices, list)
+ if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
+ sysfs_remove_link(&tmp->device.kobj, "user");
+ sysfs_remove_link(&i2o_dev->device.kobj, "user");
+unreg_dev:
+ list_del(&i2o_dev->list);
+ device_unregister(&i2o_dev->device);
+err:
+ kfree(i2o_dev);
+ return rc;
+}
+
+/**
+ * i2o_device_remove - remove an I2O device from the I2O core
+ * @i2o_dev: I2O device which should be released
+ *
+ * Is used on I2O controller removal or LCT modification, when the device
+ * is removed from the system. Note that the device could still hang
+ * around until the refcount reaches 0.
+ */
+void i2o_device_remove(struct i2o_device *i2o_dev)
+{
+ struct i2o_device *tmp;
+ struct i2o_controller *c = i2o_dev->iop;
+
+ i2o_driver_notify_device_remove_all(i2o_dev);
+
+ sysfs_remove_link(&i2o_dev->device.kobj, "parent");
+ sysfs_remove_link(&i2o_dev->device.kobj, "user");
+
+ list_for_each_entry(tmp, &c->devices, list) {
+ if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
+ sysfs_remove_link(&tmp->device.kobj, "parent");
+ if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
+ sysfs_remove_link(&tmp->device.kobj, "user");
+ }
+ list_del(&i2o_dev->list);
+
+ device_unregister(&i2o_dev->device);
+}
+
+/**
+ * i2o_device_parse_lct - Parse a previously fetched LCT and create devices
+ * @c: I2O controller from which the LCT should be parsed.
+ *
+ * The Logical Configuration Table tells us what we can talk to on the
+ * board. For every entry we create an I2O device, which is registered in
+ * the I2O core.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int i2o_device_parse_lct(struct i2o_controller *c)
+{
+ struct i2o_device *dev, *tmp;
+ i2o_lct *lct;
+ u32 *dlct = c->dlct.virt;
+ int max = 0, i = 0;
+ u16 table_size;
+ u32 buf;
+
+ mutex_lock(&c->lct_lock);
+
+ kfree(c->lct);
+
+ buf = le32_to_cpu(*dlct++);
+ table_size = buf & 0xffff;
+
+ lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL);
+ if (!lct) {
+ mutex_unlock(&c->lct_lock);
+ return -ENOMEM;
+ }
+
+ lct->lct_ver = buf >> 28;
+ lct->boot_tid = buf >> 16 & 0xfff;
+ lct->table_size = table_size;
+ lct->change_ind = le32_to_cpu(*dlct++);
+ lct->iop_flags = le32_to_cpu(*dlct++);
+
+ table_size -= 3;
+
+ pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max,
+ lct->table_size);
+
+ while (table_size > 0) {
+ i2o_lct_entry *entry = &lct->lct_entry[max];
+ int found = 0;
+
+ buf = le32_to_cpu(*dlct++);
+ entry->entry_size = buf & 0xffff;
+ entry->tid = buf >> 16 & 0xfff;
+
+ entry->change_ind = le32_to_cpu(*dlct++);
+ entry->device_flags = le32_to_cpu(*dlct++);
+
+ buf = le32_to_cpu(*dlct++);
+ entry->class_id = buf & 0xfff;
+ entry->version = buf >> 12 & 0xf;
+ entry->vendor_id = buf >> 16;
+
+ entry->sub_class = le32_to_cpu(*dlct++);
+
+ buf = le32_to_cpu(*dlct++);
+ entry->user_tid = buf & 0xfff;
+ entry->parent_tid = buf >> 12 & 0xfff;
+ entry->bios_info = buf >> 24;
+
+ memcpy(&entry->identity_tag, dlct, 8);
+ dlct += 2;
+
+ entry->event_capabilities = le32_to_cpu(*dlct++);
+
+ /* add new devices, which are new in the LCT */
+ list_for_each_entry_safe(dev, tmp, &c->devices, list) {
+ if (entry->tid == dev->lct_data.tid) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ i2o_device_add(c, entry);
+
+ table_size -= 9;
+ max++;
+ }
+
+ /* remove devices, which are not in the LCT anymore */
+ list_for_each_entry_safe(dev, tmp, &c->devices, list) {
+ int found = 0;
+
+ for (i = 0; i < max; i++) {
+ if (lct->lct_entry[i].tid == dev->lct_data.tid) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ i2o_device_remove(dev);
+ }
+
+ mutex_unlock(&c->lct_lock);
+
+ return 0;
+}
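+
+/*
+ * Illustrative note (not part of the original driver): each LCT entry is
+ * parsed above from raw little-endian dwords. As a sketch of the packing,
+ * a first entry dword of 0x01230009 (a made-up value, not real data) would
+ * decode as
+ *
+ *	entry_size = 0x01230009 & 0xffff       = 9 dwords
+ *	tid        = 0x01230009 >> 16 & 0xfff  = 0x123
+ *
+ * which matches the fixed 9-dword stride (table_size -= 9) used by the
+ * parsing loop above.
+ */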
+
+/*
+ * Run time support routines
+ */
+
+/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
+ *
+ * This function can be used for all UtilParamsGet/Set operations.
+ * The OperationList is given in the oplist buffer,
+ * and the results are returned in the reslist buffer.
+ * Note that the minimum-sized reslist is 8 bytes and contains
+ * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
+ */
+int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
+ int oplen, void *reslist, int reslen)
+{
+ struct i2o_message *msg;
+ int i = 0;
+ int rc;
+ struct i2o_dma res;
+ struct i2o_controller *c = i2o_dev->iop;
+ struct device *dev = &c->pdev->dev;
+
+ res.virt = NULL;
+
+ if (i2o_dma_alloc(dev, &res, reslen))
+ return -ENOMEM;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg)) {
+ i2o_dma_free(dev, &res);
+ return PTR_ERR(msg);
+ }
+
+ i = 0;
+ msg->u.head[1] =
+ cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid);
+ msg->body[i++] = cpu_to_le32(0x00000000);
+ msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */
+ memcpy(&msg->body[i], oplist, oplen);
+ i += (oplen / 4 + (oplen % 4 ? 1 : 0));
+ msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */
+ msg->body[i++] = cpu_to_le32(res.phys);
+
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
+ SGL_OFFSET_5);
+
+ rc = i2o_msg_post_wait_mem(c, msg, 10, &res);
+
+ /* This only looks like a memory leak - don't "fix" it. */
+ if (rc == -ETIMEDOUT)
+ return rc;
+
+ memcpy(reslist, res.virt, res.len);
+ i2o_dma_free(dev, &res);
+
+ return rc;
+}
+
+/*
+ * Query one field group value or a whole scalar group.
+ */
+int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
+ void *buf, int buflen)
+{
+ u32 opblk[] = { cpu_to_le32(0x00000001),
+ cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET),
+ cpu_to_le32((s16) field << 16 | 0x00000001)
+ };
+ u8 *resblk; /* 8 bytes for header */
+ int rc;
+
+ resblk = kmalloc(buflen + 8, GFP_KERNEL);
+ if (!resblk)
+ return -ENOMEM;
+
+ rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
+ sizeof(opblk), resblk, buflen + 8);
+
+ memcpy(buf, resblk + 8, buflen); /* cut off header */
+
+ kfree(resblk);
+
+ return rc;
+}
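+
+/*
+ * Illustrative usage sketch (not part of the original driver): reading a
+ * single scalar field with i2o_parm_field_get(), here the 16-bit Vendor ID
+ * from group 0x0000, field 0, as the Exec-OSM sysfs attributes do:
+ *
+ *	u16 vendor_id;
+ *
+ *	if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &vendor_id, 2))
+ *		pr_info("vendor id: 0x%04x\n", le16_to_cpu(vendor_id));
+ *
+ * The result is copied past the 8-byte result header, so the buffer
+ * receives the raw (little-endian) field value.
+ */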
+
+/*
+ * if oper == I2O_PARAMS_TABLE_GET, get from all rows
+ *	if fieldcount == -1 return all fields
+ *		ibuf and ibuflen are unused (use NULL, 0)
+ *	else return specific fields
+ *		ibuf contains fieldindexes
+ *
+ * if oper == I2O_PARAMS_LIST_GET, get from specific rows
+ *	if fieldcount == -1 return all fields
+ *		ibuf contains rowcount, keyvalues
+ *	else return specific fields
+ *		fieldcount is # of fieldindexes
+ *		ibuf contains fieldindexes, rowcount, keyvalues
+ *
+ * The i2o_parm_issue() function can also be called directly.
+ */
+int i2o_parm_table_get(struct i2o_device *dev, int oper, int group,
+ int fieldcount, void *ibuf, int ibuflen, void *resblk,
+ int reslen)
+{
+ u16 *opblk;
+ int size;
+
+ size = 10 + ibuflen;
+ if (size % 4)
+ size += 4 - size % 4;
+
+ opblk = kmalloc(size, GFP_KERNEL);
+ if (opblk == NULL) {
+ printk(KERN_ERR "i2o: no memory for query buffer.\n");
+ return -ENOMEM;
+ }
+
+ opblk[0] = 1; /* operation count */
+ opblk[1] = 0; /* pad */
+ opblk[2] = oper;
+ opblk[3] = group;
+ opblk[4] = fieldcount;
+ memcpy(opblk + 5, ibuf, ibuflen); /* other params */
+
+ size = i2o_parm_issue(dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
+ size, resblk, reslen);
+
+ kfree(opblk);
+ if (size > reslen)
+ return reslen;
+
+ return size;
+}
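+
+/*
+ * Illustrative usage sketch (not part of the original driver): fetching all
+ * fields of all rows of a table group. The group number 0x0100 below is a
+ * placeholder, not a group taken from the spec:
+ *
+ *	u8 res[1024];
+ *	int len;
+ *
+ *	len = i2o_parm_table_get(i2o_dev, I2O_PARAMS_TABLE_GET, 0x0100,
+ *				 -1, NULL, 0, res, sizeof(res));
+ *	if (len < 0)
+ *		return len;	// query failed
+ *
+ * With fieldcount == -1 the ibuf/ibuflen arguments are unused.
+ */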
+
+EXPORT_SYMBOL(i2o_device_claim);
+EXPORT_SYMBOL(i2o_device_claim_release);
+EXPORT_SYMBOL(i2o_parm_field_get);
+EXPORT_SYMBOL(i2o_parm_table_get);
+EXPORT_SYMBOL(i2o_parm_issue);
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
new file mode 100644
index 00000000..8a5b2d8f
--- /dev/null
+++ b/drivers/message/i2o/driver.c
@@ -0,0 +1,378 @@
+/*
+ * Functions to handle I2O drivers (OSMs) and I2O bus type for sysfs
+ *
+ * Copyright (C) 2004 Markus Lidel <Markus.Lidel@shadowconnect.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Fixes/additions:
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>
+ * initial version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+#include <linux/i2o.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include "core.h"
+
+#define OSM_NAME "i2o"
+
+/* max_drivers - Maximum I2O drivers (OSMs) which could be registered */
+static unsigned int i2o_max_drivers = I2O_MAX_DRIVERS;
+module_param_named(max_drivers, i2o_max_drivers, uint, 0);
+MODULE_PARM_DESC(max_drivers, "maximum number of OSM's to support");
+
+/* I2O drivers lock and array */
+static spinlock_t i2o_drivers_lock;
+static struct i2o_driver **i2o_drivers;
+
+/**
+ * i2o_bus_match - Tell if I2O device class id matches the class ids of the I2O driver (OSM)
+ * @dev: device which should be verified
+ * @drv: the driver to match against
+ *
+ * Used by the bus to check if the driver wants to handle the device.
+ *
+ * Returns 1 if the class ids of the driver match the class id of the
+ * device, otherwise 0.
+ */
+static int i2o_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(dev);
+ struct i2o_driver *i2o_drv = to_i2o_driver(drv);
+ struct i2o_class_id *ids = i2o_drv->classes;
+
+ if (ids)
+ while (ids->class_id != I2O_CLASS_END) {
+ if (ids->class_id == i2o_dev->lct_data.class_id)
+ return 1;
+ ids++;
+ }
+ return 0;
+};
+
+/* I2O bus type */
+struct bus_type i2o_bus_type = {
+ .name = "i2o",
+ .match = i2o_bus_match,
+ .dev_attrs = i2o_device_attrs
+};
+
+/**
+ * i2o_driver_register - Register an I2O driver (OSM) in the I2O core
+ * @drv: I2O driver which should be registered
+ *
+ * Registers the OSM drv in the I2O core and creates an event queue if
+ * necessary.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int i2o_driver_register(struct i2o_driver *drv)
+{
+ struct i2o_controller *c;
+ int i;
+ int rc = 0;
+ unsigned long flags;
+
+ osm_debug("Register driver %s\n", drv->name);
+
+ if (drv->event) {
+ drv->event_queue = alloc_workqueue(drv->name,
+ WQ_MEM_RECLAIM, 1);
+ if (!drv->event_queue) {
+ osm_err("Could not initialize event queue for driver "
+ "%s\n", drv->name);
+ return -EFAULT;
+ }
+ osm_debug("Event queue initialized for driver %s\n", drv->name);
+ } else
+ drv->event_queue = NULL;
+
+ drv->driver.name = drv->name;
+ drv->driver.bus = &i2o_bus_type;
+
+ spin_lock_irqsave(&i2o_drivers_lock, flags);
+
+ for (i = 0; i2o_drivers[i]; i++)
+ if (i >= i2o_max_drivers) {
+ osm_err("too many drivers registered, increase "
+ "max_drivers\n");
+ spin_unlock_irqrestore(&i2o_drivers_lock, flags);
+ return -EFAULT;
+ }
+
+ drv->context = i;
+ i2o_drivers[i] = drv;
+
+ spin_unlock_irqrestore(&i2o_drivers_lock, flags);
+
+ osm_debug("driver %s gets context id %d\n", drv->name, drv->context);
+
+ list_for_each_entry(c, &i2o_controllers, list) {
+ struct i2o_device *i2o_dev;
+
+ i2o_driver_notify_controller_add(drv, c);
+ list_for_each_entry(i2o_dev, &c->devices, list)
+ i2o_driver_notify_device_add(drv, i2o_dev);
+ }
+
+ rc = driver_register(&drv->driver);
+ if (rc) {
+ if (drv->event) {
+ destroy_workqueue(drv->event_queue);
+ drv->event_queue = NULL;
+ }
+ }
+
+ return rc;
+};
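+
+/*
+ * Illustrative sketch (not part of the original driver): a minimal OSM
+ * registration, modelled on the Exec and Block OSMs in this directory.
+ * All names prefixed "foo_" are placeholders:
+ *
+ *	static struct i2o_class_id foo_class_id[] = {
+ *		{I2O_CLASS_RANDOM_BLOCK_STORAGE},
+ *		{I2O_CLASS_END}
+ *	};
+ *
+ *	static struct i2o_driver foo_driver = {
+ *		.name    = "foo-osm",
+ *		.reply   = foo_reply,
+ *		.classes = foo_class_id,
+ *		.driver  = {
+ *			.probe  = foo_probe,
+ *			.remove = foo_remove,
+ *		},
+ *	};
+ *
+ *	static int __init foo_init(void)
+ *	{
+ *		return i2o_driver_register(&foo_driver);
+ *	}
+ *
+ * i2o_driver_unregister(&foo_driver) undoes the registration on module
+ * exit.
+ */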
+
+/**
+ * i2o_driver_unregister - Unregister an I2O driver (OSM) from the I2O core
+ * @drv: I2O driver which should be unregistered
+ *
+ * Unregisters the OSM drv from the I2O core and cleans up the event queue
+ * if necessary.
+ */
+void i2o_driver_unregister(struct i2o_driver *drv)
+{
+ struct i2o_controller *c;
+ unsigned long flags;
+
+ osm_debug("unregister driver %s\n", drv->name);
+
+ driver_unregister(&drv->driver);
+
+ list_for_each_entry(c, &i2o_controllers, list) {
+ struct i2o_device *i2o_dev;
+
+ list_for_each_entry(i2o_dev, &c->devices, list)
+ i2o_driver_notify_device_remove(drv, i2o_dev);
+
+ i2o_driver_notify_controller_remove(drv, c);
+ }
+
+ spin_lock_irqsave(&i2o_drivers_lock, flags);
+ i2o_drivers[drv->context] = NULL;
+ spin_unlock_irqrestore(&i2o_drivers_lock, flags);
+
+ if (drv->event_queue) {
+ destroy_workqueue(drv->event_queue);
+ drv->event_queue = NULL;
+ osm_debug("event queue removed for %s\n", drv->name);
+ }
+};
+
+/**
+ * i2o_driver_dispatch - dispatch an I2O reply message
+ * @c: I2O controller of the message
+ * @m: I2O message number
+ *
+ * The reply is delivered to the driver from which the original message
+ * originated. This function is only called from interrupt context.
+ *
+ * Returns 0 on success if the message should not be flushed. Returns > 0
+ * on success if the message should be flushed afterwards. Returns a
+ * negative error code on failure (the message will be flushed too).
+ */
+int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
+{
+ struct i2o_driver *drv;
+ struct i2o_message *msg = i2o_msg_out_to_virt(c, m);
+ u32 context = le32_to_cpu(msg->u.s.icntxt);
+ unsigned long flags;
+
+ if (unlikely(context >= i2o_max_drivers)) {
+ osm_warn("%s: Spurious reply to unknown driver %d\n", c->name,
+ context);
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&i2o_drivers_lock, flags);
+ drv = i2o_drivers[context];
+ spin_unlock_irqrestore(&i2o_drivers_lock, flags);
+
+ if (unlikely(!drv)) {
+ osm_warn("%s: Spurious reply to unknown driver %d\n", c->name,
+ context);
+ return -EIO;
+ }
+
+ if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_UTIL_EVT_REGISTER) {
+ struct i2o_device *dev, *tmp;
+ struct i2o_event *evt;
+ u16 size;
+ u16 tid = le32_to_cpu(msg->u.head[1]) & 0xfff;
+
+ osm_debug("event received from device %d\n", tid);
+
+ if (!drv->event)
+ return -EIO;
+
+ /* cut off the header from the message size (in 32-bit words) */
+ size = (le32_to_cpu(msg->u.head[0]) >> 16) - 5;
+
+ evt = kzalloc(size * 4 + sizeof(*evt), GFP_ATOMIC);
+ if (!evt)
+ return -ENOMEM;
+
+ evt->size = size;
+ evt->tcntxt = le32_to_cpu(msg->u.s.tcntxt);
+ evt->event_indicator = le32_to_cpu(msg->body[0]);
+ memcpy(&evt->data, &msg->body[1], size * 4);
+
+ list_for_each_entry_safe(dev, tmp, &c->devices, list)
+ if (dev->lct_data.tid == tid) {
+ evt->i2o_dev = dev;
+ break;
+ }
+
+ INIT_WORK(&evt->work, drv->event);
+ queue_work(drv->event_queue, &evt->work);
+ return 1;
+ }
+
+ if (unlikely(!drv->reply)) {
+ osm_debug("%s: Reply to driver %s, but no reply function"
+ " defined!\n", c->name, drv->name);
+ return -EIO;
+ }
+
+ return drv->reply(c, m, msg);
+}
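+
+/*
+ * Illustrative sketch (not part of the original driver): the shape of an
+ * OSM reply callback as dispatched above. "foo_reply" is a placeholder:
+ *
+ *	static int foo_reply(struct i2o_controller *c, u32 m,
+ *			     struct i2o_message *msg)
+ *	{
+ *		u32 tcntxt = le32_to_cpu(msg->u.s.tcntxt);
+ *
+ *		// look up the request behind tcntxt and complete it ...
+ *
+ *		return 1;	// > 0: flush the reply message frame
+ *	}
+ *
+ * Returning 0 keeps the message frame (the OSM must flush it itself later),
+ * a negative value reports an error and the frame is flushed as well.
+ */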
+
+/**
+ * i2o_driver_notify_controller_add_all - Send notify of added controller
+ * @c: newly added controller
+ *
+ * Send notifications to all registered drivers that a new controller was
+ * added.
+ */
+void i2o_driver_notify_controller_add_all(struct i2o_controller *c)
+{
+ int i;
+ struct i2o_driver *drv;
+
+ for (i = 0; i < i2o_max_drivers; i++) {
+ drv = i2o_drivers[i];
+
+ if (drv)
+ i2o_driver_notify_controller_add(drv, c);
+ }
+}
+
+/**
+ * i2o_driver_notify_controller_remove_all - Send notify of removed controller
+ * @c: controller that is being removed
+ *
+ * Send notifications to all registered drivers that a controller was
+ * removed.
+ */
+void i2o_driver_notify_controller_remove_all(struct i2o_controller *c)
+{
+ int i;
+ struct i2o_driver *drv;
+
+ for (i = 0; i < i2o_max_drivers; i++) {
+ drv = i2o_drivers[i];
+
+ if (drv)
+ i2o_driver_notify_controller_remove(drv, c);
+ }
+}
+
+/**
+ * i2o_driver_notify_device_add_all - Send notify of added device
+ * @i2o_dev: newly added I2O device
+ *
+ * Send notifications to all registered drivers that a device was added.
+ */
+void i2o_driver_notify_device_add_all(struct i2o_device *i2o_dev)
+{
+ int i;
+ struct i2o_driver *drv;
+
+ for (i = 0; i < i2o_max_drivers; i++) {
+ drv = i2o_drivers[i];
+
+ if (drv)
+ i2o_driver_notify_device_add(drv, i2o_dev);
+ }
+}
+
+/**
+ * i2o_driver_notify_device_remove_all - Send notify of removed device
+ * @i2o_dev: device that is being removed
+ *
+ * Send notifications to all registered drivers that a device was removed.
+ */
+void i2o_driver_notify_device_remove_all(struct i2o_device *i2o_dev)
+{
+ int i;
+ struct i2o_driver *drv;
+
+ for (i = 0; i < i2o_max_drivers; i++) {
+ drv = i2o_drivers[i];
+
+ if (drv)
+ i2o_driver_notify_device_remove(drv, i2o_dev);
+ }
+}
+
+/**
+ * i2o_driver_init - initialize I2O drivers (OSMs)
+ *
+ * Registers the I2O bus and allocates memory for the array of OSMs.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int __init i2o_driver_init(void)
+{
+ int rc = 0;
+
+ spin_lock_init(&i2o_drivers_lock);
+
+ if ((i2o_max_drivers < 2) || (i2o_max_drivers > 64)) {
+ osm_warn("max_drivers set to %d, but must be >=2 and <= 64\n",
+ i2o_max_drivers);
+ i2o_max_drivers = I2O_MAX_DRIVERS;
+ }
+ osm_info("max drivers = %d\n", i2o_max_drivers);
+
+ i2o_drivers =
+ kcalloc(i2o_max_drivers, sizeof(*i2o_drivers), GFP_KERNEL);
+ if (!i2o_drivers)
+ return -ENOMEM;
+
+ rc = bus_register(&i2o_bus_type);
+
+ if (rc < 0)
+ kfree(i2o_drivers);
+
+ return rc;
+};
+
+/**
+ * i2o_driver_exit - clean up I2O drivers (OSMs)
+ *
+ * Unregisters the I2O bus and frees driver array.
+ */
+void i2o_driver_exit(void)
+{
+ bus_unregister(&i2o_bus_type);
+ kfree(i2o_drivers);
+};
+
+EXPORT_SYMBOL(i2o_driver_register);
+EXPORT_SYMBOL(i2o_driver_unregister);
+EXPORT_SYMBOL(i2o_driver_notify_controller_add_all);
+EXPORT_SYMBOL(i2o_driver_notify_controller_remove_all);
+EXPORT_SYMBOL(i2o_driver_notify_device_add_all);
+EXPORT_SYMBOL(i2o_driver_notify_device_remove_all);
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
new file mode 100644
index 00000000..a3970e56
--- /dev/null
+++ b/drivers/message/i2o/exec-osm.c
@@ -0,0 +1,612 @@
+/*
+ * Executive OSM
+ *
+ * Copyright (C) 1999-2002 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * A lot of the I2O message side code from this is taken from the Red
+ * Creek RCPCI45 adapter driver by Red Creek Communications
+ *
+ * Fixes/additions:
+ * Philipp Rumpf
+ * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
+ * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
+ * Deepak Saxena <deepak@plexity.net>
+ * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>:
+ * Ported to Linux 2.5.
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ * Minor fixes for 2.6.
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ * Support for sysfs included.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */
+#include <asm/param.h> /* HZ */
+#include "core.h"
+
+#define OSM_NAME "exec-osm"
+
+struct i2o_driver i2o_exec_driver;
+
+/* global wait list for POST WAIT */
+static LIST_HEAD(i2o_exec_wait_list);
+
+/* Wait struct needed for POST WAIT */
+struct i2o_exec_wait {
+ wait_queue_head_t *wq; /* Pointer to Wait queue */
+ struct i2o_dma dma; /* DMA buffers to free on failure */
+ u32 tcntxt; /* transaction context from reply */
+ int complete; /* 1 if reply received otherwise 0 */
+ u32 m; /* message id */
+ struct i2o_message *msg; /* pointer to the reply message */
+ struct list_head list; /* node in global wait list */
+ spinlock_t lock; /* lock before modifying */
+};
+
+/* Work struct needed to handle LCT NOTIFY replies */
+struct i2o_exec_lct_notify_work {
+ struct work_struct work; /* work struct */
+ struct i2o_controller *c; /* controller on which the LCT NOTIFY
+ was received */
+};
+
+/* Exec OSM class handling definition */
+static struct i2o_class_id i2o_exec_class_id[] = {
+ {I2O_CLASS_EXECUTIVE},
+ {I2O_CLASS_END}
+};
+
+/**
+ * i2o_exec_wait_alloc - Allocate an i2o_exec_wait struct and initialize it
+ *
+ * Allocate the i2o_exec_wait struct and initialize the wait.
+ *
+ * Returns an i2o_exec_wait pointer on success or NULL on failure.
+ */
+static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
+{
+ struct i2o_exec_wait *wait;
+
+ wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait)
+ return NULL;
+
+ INIT_LIST_HEAD(&wait->list);
+ spin_lock_init(&wait->lock);
+
+ return wait;
+};
+
+/**
+ * i2o_exec_wait_free - Free an i2o_exec_wait struct
+ * @wait: I2O wait data which should be cleaned up
+ */
+static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
+{
+ kfree(wait);
+};
+
+/**
+ * i2o_msg_post_wait_mem - Post and wait a message with DMA buffers
+ * @c: controller
+ * @msg: message to post
+ * @timeout: time in seconds to wait
+ * @dma: i2o_dma struct of the DMA buffer to free on failure
+ *
+ * This API allows an OSM to post a message and then be told whether or
+ * not the system received a successful reply. If the message times out
+ * then the value '-ETIMEDOUT' is returned. This is a special case. In
+ * this situation the message may (should) complete at an indefinite time
+ * in the future. When it completes it will use the memory buffer
+ * attached to the request. If -ETIMEDOUT is returned then the memory
+ * buffer must not be freed. Instead the event completion will free it
+ * for you. In all other cases the buffers are your problem.
+ *
+ * Returns 0 on success, negative error code on timeout or positive error
+ * code from reply.
+ */
+int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
+ unsigned long timeout, struct i2o_dma *dma)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ struct i2o_exec_wait *wait;
+ static u32 tcntxt = 0x80000000;
+ unsigned long flags;
+ int rc = 0;
+
+ wait = i2o_exec_wait_alloc();
+ if (!wait) {
+ i2o_msg_nop(c, msg);
+ return -ENOMEM;
+ }
+
+ if (tcntxt == 0xffffffff)
+ tcntxt = 0x80000000;
+
+ if (dma)
+ wait->dma = *dma;
+
+ /*
+ * Fill in the message initiator context and transaction context.
+ * We will only use transaction contexts >= 0x80000000 for POST WAIT,
+ * so we can find a POST WAIT reply more easily in the reply handler.
+ */
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ wait->tcntxt = tcntxt++;
+ msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
+
+ wait->wq = &wq;
+ /*
+ * We add elements at the head, because if an entry in the list is
+ * never removed we would otherwise have to iterate over it every time.
+ */
+ list_add(&wait->list, &i2o_exec_wait_list);
+
+ /*
+ * Post the message to the controller. At some point later it will
+ * return. If we time out before it returns then complete will be zero.
+ */
+ i2o_msg_post(c, msg);
+
+ wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ);
+
+ spin_lock_irqsave(&wait->lock, flags);
+
+ wait->wq = NULL;
+
+ if (wait->complete)
+ rc = le32_to_cpu(wait->msg->body[0]) >> 24;
+ else {
+ /*
+ * We cannot remove it now. This is important. When it does
+ * terminate (which it must do if the controller has not
+ * died...) then it will otherwise scribble on stuff.
+ *
+ * FIXME: try abort message
+ */
+ if (dma)
+ dma->virt = NULL;
+
+ rc = -ETIMEDOUT;
+ }
+
+ spin_unlock_irqrestore(&wait->lock, flags);
+
+ if (rc != -ETIMEDOUT) {
+ i2o_flush_reply(c, wait->m);
+ i2o_exec_wait_free(wait);
+ }
+
+ return rc;
+};
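+
+/*
+ * Illustrative usage sketch (not part of the original driver): posting a
+ * message with an attached DMA buffer, following the pattern used by
+ * i2o_parm_issue() in device.c:
+ *
+ *	struct i2o_dma buf = { .virt = NULL };
+ *
+ *	if (i2o_dma_alloc(&c->pdev->dev, &buf, len))
+ *		return -ENOMEM;
+ *
+ *	// ... get a message frame and fill it in, pointing an SG
+ *	// element at buf.phys ...
+ *
+ *	rc = i2o_msg_post_wait_mem(c, msg, 10, &buf);
+ *	if (rc == -ETIMEDOUT)
+ *		return rc;	// buf is now owned by the completion path
+ *
+ *	// use buf.virt ...
+ *	i2o_dma_free(&c->pdev->dev, &buf);
+ */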
+
+/**
+ * i2o_msg_post_wait_complete - Reply to a i2o_msg_post request from IOP
+ * @c: I2O controller which answers
+ * @m: message id
+ * @msg: pointer to the I2O reply message
+ * @context: transaction context of request
+ *
+ * This function is called in interrupt context only. If the reply arrives
+ * before the timeout, the i2o_exec_wait struct is filled with the message
+ * and the task is woken up. The task is then responsible for returning
+ * the message m back to the controller! If the message reaches us after
+ * the timeout, the i2o_exec_wait struct (including the allocated DMA
+ * buffer) is cleaned up.
+ *
+ * Returns 0 on success if the message m should not be given back to the
+ * I2O controller, or >0 on success if the message should be given back
+ * afterwards. Returns a negative error code on failure; in this case the
+ * message must also be given back to the controller.
+ */
+static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
+ struct i2o_message *msg, u32 context)
+{
+ struct i2o_exec_wait *wait, *tmp;
+ unsigned long flags;
+ int rc = 1;
+
+ /*
+ * We need to search through the i2o_exec_wait_list to see if the given
+ * message is still outstanding. If not, it means that the IOP took
+ * longer to respond to the message than we had allowed and the timer has
+ * already expired. Not much we can do about that except log it for
+ * debugging purposes, increase the timeout, and recompile.
+ */
+ list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
+ if (wait->tcntxt == context) {
+ spin_lock_irqsave(&wait->lock, flags);
+
+ list_del(&wait->list);
+
+ wait->m = m;
+ wait->msg = msg;
+ wait->complete = 1;
+
+ if (wait->wq)
+ rc = 0;
+ else
+ rc = -1;
+
+ spin_unlock_irqrestore(&wait->lock, flags);
+
+ if (rc) {
+ struct device *dev;
+
+ dev = &c->pdev->dev;
+
+ pr_debug("%s: timedout reply received!\n",
+ c->name);
+ i2o_dma_free(dev, &wait->dma);
+ i2o_exec_wait_free(wait);
+ } else
+ wake_up_interruptible(wait->wq);
+
+ return rc;
+ }
+ }
+
+ osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
+ context);
+
+ return -1;
+};
+
+/**
+ * i2o_exec_show_vendor_id - Displays Vendor ID of controller
+ * @d: device of which the Vendor ID should be displayed
+ * @attr: device_attribute to display
+ * @buf: buffer into which the Vendor ID should be printed
+ *
+ * Returns number of bytes printed into buffer.
+ */
+static ssize_t i2o_exec_show_vendor_id(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2o_device *dev = to_i2o_device(d);
+ u16 id;
+
+ if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
+ sprintf(buf, "0x%04x", le16_to_cpu(id));
+ return strlen(buf) + 1;
+ }
+
+ return 0;
+};
+
+/**
+ * i2o_exec_show_product_id - Displays Product ID of controller
+ * @d: device of which the Product ID should be displayed
+ * @attr: device_attribute to display
+ * @buf: buffer into which the Product ID should be printed
+ *
+ * Returns number of bytes printed into buffer.
+ */
+static ssize_t i2o_exec_show_product_id(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2o_device *dev = to_i2o_device(d);
+ u16 id;
+
+ if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
+ sprintf(buf, "0x%04x", le16_to_cpu(id));
+ return strlen(buf) + 1;
+ }
+
+ return 0;
+};
+
+/* Exec-OSM device attributes */
+static DEVICE_ATTR(vendor_id, S_IRUGO, i2o_exec_show_vendor_id, NULL);
+static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
+
+/**
+ * i2o_exec_probe - Called if a new I2O device (executive class) appears
+ * @dev: I2O device which should be probed
+ *
+ * Registers event notification for every event from the Executive device;
+ * we want to handle all devices of class Executive.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int i2o_exec_probe(struct device *dev)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(dev);
+ int rc;
+
+ rc = i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
+ if (rc) goto err_out;
+
+ rc = device_create_file(dev, &dev_attr_vendor_id);
+ if (rc) goto err_evtreg;
+ rc = device_create_file(dev, &dev_attr_product_id);
+ if (rc) goto err_vid;
+
+ i2o_dev->iop->exec = i2o_dev;
+
+ return 0;
+
+err_vid:
+ device_remove_file(dev, &dev_attr_vendor_id);
+err_evtreg:
+ i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0);
+err_out:
+ return rc;
+};
+
+/**
+ * i2o_exec_remove - Called on I2O device removal
+ * @dev: I2O device which was removed
+ *
+ * Unregisters event notification from Executive I2O device.
+ *
+ * Returns 0 on success.
+ */
+static int i2o_exec_remove(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_product_id);
+ device_remove_file(dev, &dev_attr_vendor_id);
+
+ i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0);
+
+ return 0;
+};
+
+#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
+/**
+ * i2o_exec_lct_notify - Send an asynchronous LCT NOTIFY request
+ * @c: I2O controller to which the request should be sent
+ * @change_ind: change indicator
+ *
+ * This function sends an LCT NOTIFY request to the I2O controller with
+ * the change indicator change_ind. If change_ind == 0 the controller
+ * replies immediately after the request. If change_ind > 0 the reply is
+ * sent once the change indicator of the LCT exceeds change_ind.
+ */
+static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
+{
+ i2o_status_block *sb = c->status_block.virt;
+ struct device *dev;
+ struct i2o_message *msg;
+
+ mutex_lock(&c->lct_lock);
+
+ dev = &c->pdev->dev;
+
+ if (i2o_dma_realloc(dev, &c->dlct,
+ le32_to_cpu(sb->expected_lct_size))) {
+ mutex_unlock(&c->lct_lock);
+ return -ENOMEM;
+ }
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg)) {
+ mutex_unlock(&c->lct_lock);
+ return PTR_ERR(msg);
+ }
+
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0xffffffff);
+ msg->body[1] = cpu_to_le32(change_ind);
+ msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
+ msg->body[3] = cpu_to_le32(c->dlct.phys);
+
+ i2o_msg_post(c, msg);
+
+ mutex_unlock(&c->lct_lock);
+
+ return 0;
+}
+#endif
+
+/**
+ * i2o_exec_lct_modified - Called on LCT NOTIFY reply
+ * @_work: work struct for a specific controller
+ *
+ * This function handles asynchronous LCT NOTIFY replies. It parses the
+ * new LCT and, if the buffer for the LCT was too small, sends an LCT
+ * NOTIFY again; otherwise it sends an LCT NOTIFY to be informed of the
+ * next LCT change.
+ */
+static void i2o_exec_lct_modified(struct work_struct *_work)
+{
+ struct i2o_exec_lct_notify_work *work =
+ container_of(_work, struct i2o_exec_lct_notify_work, work);
+ u32 change_ind = 0;
+ struct i2o_controller *c = work->c;
+
+ kfree(work);
+
+ if (i2o_device_parse_lct(c) != -EAGAIN)
+ change_ind = c->lct->change_ind + 1;
+
+#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
+ i2o_exec_lct_notify(c, change_ind);
+#endif
+};
+
+/**
+ * i2o_exec_reply - I2O Executive reply handler
+ * @c: I2O controller from which the reply comes
+ * @m: message id
+ * @msg: pointer to the I2O reply message
+ *
+ * This function is always called from interrupt context. If a POST WAIT
+ * reply was received, pass it to the complete function. If a LCT NOTIFY
+ * reply was received, a new event is created to handle the update.
+ *
+ * Returns 0 on success if the reply should not be flushed, or > 0 on
+ * success if the reply should be flushed. Returns a negative error code
+ * on failure, in which case the reply is flushed as well.
+ */
+static int i2o_exec_reply(struct i2o_controller *c, u32 m,
+ struct i2o_message *msg)
+{
+ u32 context;
+
+ if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
+ struct i2o_message __iomem *pmsg;
+ u32 pm;
+
+ /*
+ * If Fail bit is set we must take the transaction context of
+ * the preserved message to find the right request again.
+ */
+
+ pm = le32_to_cpu(msg->body[3]);
+ pmsg = i2o_msg_in_to_virt(c, pm);
+ context = readl(&pmsg->u.s.tcntxt);
+
+ i2o_report_status(KERN_INFO, "i2o_core", msg);
+
+ /* Release the preserved msg */
+ i2o_msg_nop_mfa(c, pm);
+ } else
+ context = le32_to_cpu(msg->u.s.tcntxt);
+
+ if (context & 0x80000000)
+ return i2o_msg_post_wait_complete(c, m, msg, context);
+
+ if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) {
+ struct i2o_exec_lct_notify_work *work;
+
+ pr_debug("%s: LCT notify received\n", c->name);
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+
+ work->c = c;
+
+ INIT_WORK(&work->work, i2o_exec_lct_modified);
+ queue_work(i2o_exec_driver.event_queue, &work->work);
+ return 1;
+ }
+
+ /*
+ * If this happens, we want to dump the message to the syslog so
+ * it can be sent back to the card manufacturer by the end user
+ * to aid in debugging.
+ *
+ */
+ printk(KERN_WARNING "%s: Unsolicited message reply sent to core!"
+ "Message dumped to syslog\n", c->name);
+ i2o_dump_message(msg);
+
+ return -EFAULT;
+}
+
+/**
+ * i2o_exec_event - Event handling function
+ * @work: Work item in occurring event
+ *
+ * Handles events sent by the Executive device. At the moment it does not
+ * do anything useful.
+ */
+static void i2o_exec_event(struct work_struct *work)
+{
+ struct i2o_event *evt = container_of(work, struct i2o_event, work);
+
+ if (likely(evt->i2o_dev))
+ osm_debug("Event received from device: %d\n",
+ evt->i2o_dev->lct_data.tid);
+ kfree(evt);
+};
+
+/**
+ * i2o_exec_lct_get - Get the IOP's Logical Configuration Table
+ * @c: I2O controller from which the LCT should be fetched
+ *
+ * Send an LCT NOTIFY request to the controller and wait up to
+ * I2O_TIMEOUT_LCT_GET seconds for the response to arrive. If the LCT is
+ * too large, retry the request.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int i2o_exec_lct_get(struct i2o_controller *c)
+{
+ struct i2o_message *msg;
+ int i = 0;
+ int rc = -EAGAIN;
+
+ for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] =
+ cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->body[0] = cpu_to_le32(0xffffffff);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
+ msg->body[3] = cpu_to_le32(c->dlct.phys);
+
+ rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
+ if (rc < 0)
+ break;
+
+ rc = i2o_device_parse_lct(c);
+ if (rc != -EAGAIN)
+ break;
+ }
+
+ return rc;
+}
+
+/* Exec OSM driver struct */
+struct i2o_driver i2o_exec_driver = {
+ .name = OSM_NAME,
+ .reply = i2o_exec_reply,
+ .event = i2o_exec_event,
+ .classes = i2o_exec_class_id,
+ .driver = {
+ .probe = i2o_exec_probe,
+ .remove = i2o_exec_remove,
+ },
+};
+
+/**
+ * i2o_exec_init - Registers the Exec OSM
+ *
+ * Registers the Exec OSM in the I2O core.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int __init i2o_exec_init(void)
+{
+ return i2o_driver_register(&i2o_exec_driver);
+};
+
+/**
+ * i2o_exec_exit - Removes the Exec OSM
+ *
+ * Unregisters the Exec OSM from the I2O core.
+ */
+void i2o_exec_exit(void)
+{
+ i2o_driver_unregister(&i2o_exec_driver);
+};
+
+EXPORT_SYMBOL(i2o_msg_post_wait_mem);
+EXPORT_SYMBOL(i2o_exec_lct_get);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
new file mode 100644
index 00000000..4796bbf0
--- /dev/null
+++ b/drivers/message/i2o/i2o_block.c
@@ -0,0 +1,1232 @@
+/*
+ * Block OSM
+ *
+ * Copyright (C) 1999-2002 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * For the purpose of avoiding doubt the preferred form of the work
+ * for making modifications shall be a standards compliant form such
+ * gzipped tar and not one requiring a proprietary or patent encumbered
+ * tool to unpack.
+ *
+ * Fixes/additions:
+ * Steve Ralston:
+ * Multiple device handling error fixes,
+ * Added a queue depth.
+ * Alan Cox:
+ * FC920 has an rmw bug. Dont or in the end marker.
+ * Removed queue walk, fixed for 64bitness.
+ * Rewrote much of the code over time
+ * Added indirect block lists
+ * Handle 64K limits on many controllers
+ * Don't use indirects on the Promise (breaks)
+ * Heavily chop down the queue depths
+ * Deepak Saxena:
+ * Independent queues per IOP
+ * Support for dynamic device creation/deletion
+ * Code cleanup
+ * Support for larger I/Os through merge* functions
+ * (taken from DAC960 driver)
+ * Boji T Kannanthanam:
+ * Set the I2O Block devices to be detected in increasing
+ * order of TIDs during boot.
+ * Search and set the I2O block device that we boot off
+ * from as the first device to be claimed (as /dev/i2o/hda)
+ * Properly attach/detach I2O gendisk structure from the
+ * system gendisk list. The I2O block devices now appear in
+ * /proc/partitions.
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ * Minor bugfixes for 2.6.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2o.h>
+#include <linux/mutex.h>
+
+#include <linux/mempool.h>
+
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+
+#include <scsi/scsi.h>
+
+#include "i2o_block.h"
+
+#define OSM_NAME "block-osm"
+#define OSM_VERSION "1.325"
+#define OSM_DESCRIPTION "I2O Block Device OSM"
+
+static DEFINE_MUTEX(i2o_block_mutex);
+static struct i2o_driver i2o_block_driver;
+
+/* global Block OSM request mempool */
+static struct i2o_block_mempool i2o_blk_req_pool;
+
+/* Block OSM class handling definition */
+static struct i2o_class_id i2o_block_class_id[] = {
+ {I2O_CLASS_RANDOM_BLOCK_STORAGE},
+ {I2O_CLASS_END}
+};
+
+/**
+ * i2o_block_device_free - free the memory of the I2O Block device
+ * @dev: I2O Block device, which should be cleaned up
+ *
+ * Frees the request queue, gendisk and the i2o_block_device structure.
+ */
+static void i2o_block_device_free(struct i2o_block_device *dev)
+{
+ blk_cleanup_queue(dev->gd->queue);
+
+ put_disk(dev->gd);
+
+ kfree(dev);
+};
+
+/**
+ * i2o_block_remove - remove the I2O Block device from the system again
+ * @dev: I2O Block device which should be removed
+ *
+ * Remove gendisk from system and free all allocated memory.
+ *
+ * Always returns 0.
+ */
+static int i2o_block_remove(struct device *dev)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(dev);
+ struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);
+
+ osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid,
+ i2o_blk_dev->gd->disk_name);
+
+ i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);
+
+ del_gendisk(i2o_blk_dev->gd);
+
+ dev_set_drvdata(dev, NULL);
+
+ i2o_device_claim_release(i2o_dev);
+
+ i2o_block_device_free(i2o_blk_dev);
+
+ return 0;
+};
+
+/**
+ * i2o_block_device_flush - Flush all dirty data of I2O device dev
+ * @dev: I2O device which should be flushed
+ *
+ * Flushes all dirty data on device dev.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_device_flush(struct i2o_device *dev)
+{
+ struct i2o_message *msg;
+
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(60 << 16);
+ osm_debug("Flushing...\n");
+
+ return i2o_msg_post_wait(dev->iop, msg, 60);
+};
+
+/**
+ * i2o_block_device_mount - Mount (load) the media of device dev
+ * @dev: I2O device which should receive the mount request
+ * @media_id: Media Identifier
+ *
+ * Load media into the drive. The media identifier should be set to -1,
+ * because the spec does not support any other value.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
+{
+ struct i2o_message *msg;
+
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(-1);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ osm_debug("Mounting...\n");
+
+ return i2o_msg_post_wait(dev->iop, msg, 2);
+};
+
+/**
+ * i2o_block_device_lock - Locks the media of device dev
+ * @dev: I2O device which should receive the lock request
+ * @media_id: Media Identifier
+ *
+ * Lock media of device dev to prevent removal. The media identifier
+ * should be set to -1, because the spec does not support any other value.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
+{
+ struct i2o_message *msg;
+
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(-1);
+ osm_debug("Locking...\n");
+
+ return i2o_msg_post_wait(dev->iop, msg, 2);
+};
+
+/**
+ * i2o_block_device_unlock - Unlocks the media of device dev
+ * @dev: I2O device which should receive the unlock request
+ * @media_id: Media Identifier
+ *
+ * Unlocks the media in device dev. The media identifier should be set to
+ * -1, because the spec does not support any other value.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
+{
+ struct i2o_message *msg;
+
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(media_id);
+ osm_debug("Unlocking...\n");
+
+ return i2o_msg_post_wait(dev->iop, msg, 2);
+};
+
+/**
+ * i2o_block_device_power - Power management for device dev
+ * @dev: I2O device which should receive the power management request
+ * @op: Operation to send
+ *
+ * Send a power management request to the device dev.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
+{
+ struct i2o_device *i2o_dev = dev->i2o_dev;
+ struct i2o_controller *c = i2o_dev->iop;
+ struct i2o_message *msg;
+ int rc;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(op << 24);
+ osm_debug("Power...\n");
+
+ rc = i2o_msg_post_wait(c, msg, 60);
+ if (!rc)
+ dev->power = op;
+
+ return rc;
+};
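+
+/*
+ * Note (not part of the original driver): within this file the power
+ * operations are used as follows: i2o_block_open() sends operation 0x02
+ * to power the unit up when dev->power > 0x1f, and i2o_block_release()
+ * sends 0x21 for removable media or 0x24 otherwise.
+ */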
+
+/**
+ * i2o_block_request_alloc - Allocate an I2O block request struct
+ *
+ * Allocates an I2O block request struct and initializes it.
+ *
+ * Returns an i2o_block_request pointer on success or an ERR_PTR-encoded
+ * error on failure.
+ */
+static inline struct i2o_block_request *i2o_block_request_alloc(void)
+{
+ struct i2o_block_request *ireq;
+
+ ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
+ if (!ireq)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ireq->queue);
+ sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS);
+
+ return ireq;
+};
+
+/**
+ * i2o_block_request_free - Frees a I2O block request
+ * @ireq: I2O block request which should be freed
+ *
+ * Frees the allocated memory (give it back to the request mempool).
+ */
+static inline void i2o_block_request_free(struct i2o_block_request *ireq)
+{
+ mempool_free(ireq, i2o_blk_req_pool.pool);
+};
+
+/**
+ * i2o_block_sglist_alloc - Allocate the SG list and map it
+ * @c: I2O controller to which the request belongs
+ * @ireq: I2O block request
+ * @mptr: message body pointer
+ *
+ * Builds the SG list and maps it so that it is accessible by the controller.
+ *
+ * Returns 0 on failure or 1 on success.
+ */
+static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
+ struct i2o_block_request *ireq,
+ u32 ** mptr)
+{
+ int nents;
+ enum dma_data_direction direction;
+
+ ireq->dev = &c->pdev->dev;
+ nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);
+
+ if (rq_data_dir(ireq->req) == READ)
+ direction = PCI_DMA_FROMDEVICE;
+ else
+ direction = PCI_DMA_TODEVICE;
+
+ ireq->sg_nents = nents;
+
+ return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);
+};
+
+/**
+ * i2o_block_sglist_free - Frees the SG list
+ * @ireq: I2O block request from which the SG should be freed
+ *
+ * Frees the SG list from the I2O block request.
+ */
+static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
+{
+ enum dma_data_direction direction;
+
+ if (rq_data_dir(ireq->req) == READ)
+ direction = PCI_DMA_FROMDEVICE;
+ else
+ direction = PCI_DMA_TODEVICE;
+
+ dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
+};
+
+/**
+ * i2o_block_prep_req_fn - Allocates I2O block device specific struct
+ * @q: request queue for the request
+ * @req: the request to prepare
+ *
+ * Allocate the necessary i2o_block_request struct and connect it to
+ * the request. This is needed so that we do not lose the SG list later on.
+ *
+ * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
+ */
+static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
+{
+ struct i2o_block_device *i2o_blk_dev = q->queuedata;
+ struct i2o_block_request *ireq;
+
+ if (unlikely(!i2o_blk_dev)) {
+ osm_err("block device already removed\n");
+ return BLKPREP_KILL;
+ }
+
+ /* connect the i2o_block_request to the request */
+ if (!req->special) {
+ ireq = i2o_block_request_alloc();
+ if (IS_ERR(ireq)) {
+ osm_debug("unable to allocate i2o_block_request!\n");
+ return BLKPREP_DEFER;
+ }
+
+ ireq->i2o_blk_dev = i2o_blk_dev;
+ req->special = ireq;
+ ireq->req = req;
+ }
+ /* do not come back here */
+ req->cmd_flags |= REQ_DONTPREP;
+
+ return BLKPREP_OK;
+};
+
+/**
+ * i2o_block_delayed_request_fn - delayed request queue function
+ * @work: the delayed request with the queue to start
+ *
+ * If the request queue of a disk is stopped and there is no open
+ * request, a delayed work item is queued which calls this function to
+ * restart the queue after I2O_BLOCK_RETRY_TIME. Otherwise the queue
+ * would never be started again.
+ */
+static void i2o_block_delayed_request_fn(struct work_struct *work)
+{
+ struct i2o_block_delayed_request *dreq =
+ container_of(work, struct i2o_block_delayed_request,
+ work.work);
+ struct request_queue *q = dreq->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ kfree(dreq);
+};
+
+/**
+ * i2o_block_end_request - Post-processing of completed commands
+ * @req: request which should be completed
+ * @error: 0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Mark the request as complete. The lock must not be held when entering.
+ *
+ */
+static void i2o_block_end_request(struct request *req, int error,
+ int nr_bytes)
+{
+ struct i2o_block_request *ireq = req->special;
+ struct i2o_block_device *dev = ireq->i2o_blk_dev;
+ struct request_queue *q = req->q;
+ unsigned long flags;
+
+ if (blk_end_request(req, error, nr_bytes))
+ if (error)
+ blk_end_request_all(req, -EIO);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ if (likely(dev)) {
+ dev->open_queue_depth--;
+ list_del(&ireq->queue);
+ }
+
+ blk_start_queue(q);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ i2o_block_sglist_free(ireq);
+ i2o_block_request_free(ireq);
+};
+
+/**
+ * i2o_block_reply - Block OSM reply handler.
+ * @c: I2O controller from which the message arrives
+ * @m: message id of reply
+ * @msg: the actual I2O message reply
+ *
+ * This function gets all the message replies.
+ *
+ */
+static int i2o_block_reply(struct i2o_controller *c, u32 m,
+ struct i2o_message *msg)
+{
+ struct request *req;
+ int error = 0;
+
+ req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
+ if (unlikely(!req)) {
+ osm_err("NULL reply received!\n");
+ return -1;
+ }
+
+ /*
+ * Lets see what is cooking. We stuffed the
+ * request in the context.
+ */
+
+ if ((le32_to_cpu(msg->body[0]) >> 24) != 0) {
+ u32 status = le32_to_cpu(msg->body[0]);
+ /*
+ * Device not ready means two things. One is that the
+ * device went offline (but not removable media).
+ *
+ * The second is that you have a SuperTrak 100 and the
+ * firmware got constipated. Unlike standard i2o card
+ * setups the SuperTrak returns an error rather than
+ * blocking for the timeout in these cases.
+ *
+ * Don't stick a SuperTrak 100 into aggressive caching modes.
+ */
+
+ osm_err("TID %03x error status: 0x%02x, detailed status: "
+ "0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
+ status >> 24, status & 0xffff);
+
+ req->errors++;
+
+ error = -EIO;
+ }
+
+ i2o_block_end_request(req, error, le32_to_cpu(msg->body[1]));
+
+ return 1;
+};
+
+static void i2o_block_event(struct work_struct *work)
+{
+ struct i2o_event *evt = container_of(work, struct i2o_event, work);
+ osm_debug("event received\n");
+ kfree(evt);
+};
+
+/*
+ * SCSI-CAM for ioctl geometry mapping
+ * Duplicated with SCSI - this should be moved into somewhere common
+ * perhaps genhd ?
+ *
+ * LBA -> CHS mapping table taken from:
+ *
+ * "Incorporating the I2O Architecture into BIOS for Intel Architecture
+ * Platforms"
+ *
+ * This is an I2O document that is only available to I2O members,
+ * not developers.
+ *
+ * From my understanding, this is how all the I2O cards do this
+ *
+ * Disk Size       | Sectors | Heads | Cylinders
+ * ----------------+---------+-------+--------------------
+ * 1 < X <= 528M   | 63      | 16    | X/(63 * 16 * 512)
+ * 528M < X <= 1G  | 63      | 32    | X/(63 * 32 * 512)
+ * 1G < X <= 21G   | 63      | 64    | X/(63 * 64 * 512)
+ * 21G < X <= 42G  | 63      | 128   | X/(63 * 128 * 512)
+ * X > 42G         | 63      | 255   | X/(63 * 255 * 512)
+ *
+ */
+#define BLOCK_SIZE_528M 1081344
+#define BLOCK_SIZE_1G 2097152
+#define BLOCK_SIZE_21G 4403200
+#define BLOCK_SIZE_42G 8806400
+#define BLOCK_SIZE_84G 17612800
+
+static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
+ unsigned char *hds, unsigned char *secs)
+{
+ unsigned long heads, sectors, cylinders;
+
+ sectors = 63L; /* Maximize sectors per track */
+ if (capacity <= BLOCK_SIZE_528M)
+ heads = 16;
+ else if (capacity <= BLOCK_SIZE_1G)
+ heads = 32;
+ else if (capacity <= BLOCK_SIZE_21G)
+ heads = 64;
+ else if (capacity <= BLOCK_SIZE_42G)
+ heads = 128;
+ else
+ heads = 255;
+
+ cylinders = (unsigned long)capacity / (heads * sectors);
+
+ *cyls = (unsigned short)cylinders; /* Stuff return values */
+ *secs = (unsigned char)sectors;
+ *hds = (unsigned char)heads;
+}
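+
+/*
+ * Worked example (illustrative, not part of the original driver): for a
+ * capacity of 2097152 sectors (1 GiB of 512-byte sectors) the mapping
+ * above yields
+ *
+ *	heads     = 32	(capacity <= BLOCK_SIZE_1G)
+ *	sectors   = 63
+ *	cylinders = 2097152 / (32 * 63) = 1040
+ *
+ * i.e. i2o_block_biosparam(2097152, &c, &h, &s) returns C/H/S = 1040/32/63.
+ */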
+
+/**
+ * i2o_block_open - Open the block device
+ * @bdev: block device being opened
+ * @mode: file open mode
+ *
+ * Power up the device, then mount and lock the media. This function is
+ * called if the block device is opened for access.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_open(struct block_device *bdev, fmode_t mode)
+{
+ struct i2o_block_device *dev = bdev->bd_disk->private_data;
+
+ if (!dev->i2o_dev)
+ return -ENODEV;
+
+ mutex_lock(&i2o_block_mutex);
+ if (dev->power > 0x1f)
+ i2o_block_device_power(dev, 0x02);
+
+ i2o_block_device_mount(dev->i2o_dev, -1);
+
+ i2o_block_device_lock(dev->i2o_dev, -1);
+
+ osm_debug("Ready.\n");
+ mutex_unlock(&i2o_block_mutex);
+
+ return 0;
+};
+
+/**
+ * i2o_block_release - Release the I2O block device
+ * @disk: gendisk device being released
+ * @mode: file open mode
+ *
+ * Unlock and unmount the media, and power down the device. Gets called if
+ * the block device is closed.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_release(struct gendisk *disk, fmode_t mode)
+{
+ struct i2o_block_device *dev = disk->private_data;
+ u8 operation;
+
+ /*
+ * This is to deal with the case of an application
+ * opening a device, the device then disappearing while
+ * it is in use, and the application then trying to release
+ * it, e.g. unmounting a deleted RAID volume at reboot.
+ * If we send messages, it will just cause FAILs since
+ * the TID no longer exists.
+ */
+ if (!dev->i2o_dev)
+ return 0;
+
+ mutex_lock(&i2o_block_mutex);
+ i2o_block_device_flush(dev->i2o_dev);
+
+ i2o_block_device_unlock(dev->i2o_dev, -1);
+
+ if (dev->flags & (1 << 3 | 1 << 4)) /* Removable */
+ operation = 0x21;
+ else
+ operation = 0x24;
+
+ i2o_block_device_power(dev, operation);
+ mutex_unlock(&i2o_block_mutex);
+
+ return 0;
+}
+
+static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ i2o_block_biosparam(get_capacity(bdev->bd_disk),
+ &geo->cylinders, &geo->heads, &geo->sectors);
+ return 0;
+}
+
+/**
+ * i2o_block_ioctl - Issue device specific ioctl calls.
+ * @bdev: block device being opened
+ * @mode: file open mode
+ * @cmd: ioctl command
+ * @arg: arg
+ *
+ * Handles ioctl request for the block device.
+ *
+ * Return 0 on success or negative error on failure.
+ */
+static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct i2o_block_device *dev = disk->private_data;
+ int ret = -ENOTTY;
+
+ /* Anyone capable of this syscall can do *real bad* things */
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ mutex_lock(&i2o_block_mutex);
+ switch (cmd) {
+ case BLKI2OGRSTRAT:
+ ret = put_user(dev->rcache, (int __user *)arg);
+ break;
+ case BLKI2OGWSTRAT:
+ ret = put_user(dev->wcache, (int __user *)arg);
+ break;
+ case BLKI2OSRSTRAT:
+ ret = -EINVAL;
+ if (arg < 0 || arg > CACHE_SMARTFETCH)
+ break;
+ dev->rcache = arg;
+ ret = 0;
+ break;
+ case BLKI2OSWSTRAT:
+ ret = -EINVAL;
+ if (arg != 0
+ && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
+ break;
+ dev->wcache = arg;
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&i2o_block_mutex);
+
+ return ret;
+};
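+
+/*
+ * Illustrative usage sketch (not part of the original driver): the cache
+ * strategy ioctls take the value directly as the argument for the "set"
+ * variants and a pointer for the "get" variants. Assuming the BLKI2O*
+ * request numbers and CACHE_* values from the driver's i2o_block.h are
+ * visible to the caller:
+ *
+ *	int strategy;
+ *
+ *	ioctl(fd, BLKI2OGRSTRAT, &strategy);		// read strategy
+ *	ioctl(fd, BLKI2OSRSTRAT, CACHE_SMARTFETCH);	// set read strategy
+ *
+ * Both require CAP_SYS_ADMIN, as enforced above.
+ */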
+
+/**
+ * i2o_block_check_events - Have we seen a media change?
+ * @disk: gendisk which should be verified
+ * @clearing: events being cleared
+ *
+ * Checks whether the media has changed.
+ *
+ * Returns DISK_EVENT_MEDIA_CHANGE if the media was changed or 0 otherwise.
+ */
+static unsigned int i2o_block_check_events(struct gendisk *disk,
+ unsigned int clearing)
+{
+ struct i2o_block_device *p = disk->private_data;
+
+ if (p->media_change_flag) {
+ p->media_change_flag = 0;
+ return DISK_EVENT_MEDIA_CHANGE;
+ }
+ return 0;
+}
+
+/**
+ * i2o_block_transfer - Transfer a request to/from the I2O controller
+ * @req: the request which should be transferred
+ *
+ * This function converts the request into an I2O message. The necessary
+ * DMA buffers are allocated and, after everything is set up, the message
+ * is posted to the I2O controller. No cleanup is done by this function;
+ * that happens on the interrupt side when the reply arrives.
+ *
+ * Return 0 on success or negative error code on failure.
+ */
+static int i2o_block_transfer(struct request *req)
+{
+ struct i2o_block_device *dev = req->rq_disk->private_data;
+ struct i2o_controller *c;
+ u32 tid;
+ struct i2o_message *msg;
+ u32 *mptr;
+ struct i2o_block_request *ireq = req->special;
+ u32 tcntxt;
+ u32 sgl_offset = SGL_OFFSET_8;
+ u32 ctl_flags = 0x00000000;
+ int rc;
+ u32 cmd;
+
+ if (unlikely(!dev->i2o_dev)) {
+ osm_err("transfer to removed drive\n");
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ tid = dev->i2o_dev->lct_data.tid;
+ c = dev->i2o_dev->iop;
+
+ msg = i2o_msg_get(c);
+ if (IS_ERR(msg)) {
+ rc = PTR_ERR(msg);
+ goto exit;
+ }
+
+ tcntxt = i2o_cntxt_list_add(c, req);
+ if (!tcntxt) {
+ rc = -ENOMEM;
+ goto nop_msg;
+ }
+
+ msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(tcntxt);
+
+ mptr = &msg->body[0];
+
+ if (rq_data_dir(req) == READ) {
+ cmd = I2O_CMD_BLOCK_READ << 24;
+
+ switch (dev->rcache) {
+ case CACHE_PREFETCH:
+ ctl_flags = 0x201F0008;
+ break;
+
+ case CACHE_SMARTFETCH:
+ if (blk_rq_sectors(req) > 16)
+ ctl_flags = 0x201F0008;
+ else
+ ctl_flags = 0x001F0000;
+ break;
+
+ default:
+ break;
+ }
+ } else {
+ cmd = I2O_CMD_BLOCK_WRITE << 24;
+
+ switch (dev->wcache) {
+ case CACHE_WRITETHROUGH:
+ ctl_flags = 0x001F0008;
+ break;
+ case CACHE_WRITEBACK:
+ ctl_flags = 0x001F0010;
+ break;
+ case CACHE_SMARTBACK:
+ if (blk_rq_sectors(req) > 16)
+ ctl_flags = 0x001F0004;
+ else
+ ctl_flags = 0x001F0010;
+ break;
+ case CACHE_SMARTTHROUGH:
+ if (blk_rq_sectors(req) > 16)
+ ctl_flags = 0x001F0004;
+ else
+ ctl_flags = 0x001F0010;
+ default:
+ break;
+ }
+ }
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+ if (c->adaptec) {
+ u8 cmd[10];
+ u32 scsi_flags;
+ u16 hwsec;
+
+ hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
+ memset(cmd, 0, 10);
+
+ sgl_offset = SGL_OFFSET_12;
+
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);
+
+ *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
+ *mptr++ = cpu_to_le32(tid);
+
+ /*
+ * ENABLE_DISCONNECT
+ * SIMPLE_TAG
+ * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
+ */
+ if (rq_data_dir(req) == READ) {
+ cmd[0] = READ_10;
+ scsi_flags = 0x60a0000a;
+ } else {
+ cmd[0] = WRITE_10;
+ scsi_flags = 0xa0a0000a;
+ }
+
+ *mptr++ = cpu_to_le32(scsi_flags);
+
+ *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
+ *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
+
+ memcpy(mptr, cmd, 10);
+ mptr += 4;
+ *mptr++ = cpu_to_le32(blk_rq_bytes(req));
+ } else
+#endif
+ {
+ msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
+ *mptr++ = cpu_to_le32(ctl_flags);
+ *mptr++ = cpu_to_le32(blk_rq_bytes(req));
+ *mptr++ =
+ cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
+ *mptr++ =
+ cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
+ }
+
+ if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
+ rc = -ENOMEM;
+ goto context_remove;
+ }
+
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
+
+ list_add_tail(&ireq->queue, &dev->open_queue);
+ dev->open_queue_depth++;
+
+ i2o_msg_post(c, msg);
+
+ return 0;
+
+ context_remove:
+ i2o_cntxt_list_remove(c, req);
+
+ nop_msg:
+ i2o_msg_nop(c, msg);
+
+ exit:
+ return rc;
+};
+
+/**
+ * i2o_block_request_fn - request queue handling function
+ * @q: request queue from which the request could be fetched
+ *
+ * Takes the next request from the queue, transfers it and, if no error
+ * occurs, dequeues it from the queue. On arrival of the reply the message
+ * will be processed further. If an error occurs the request is retried later.
+ */
+static void i2o_block_request_fn(struct request_queue *q)
+{
+ struct request *req;
+
+ while ((req = blk_peek_request(q)) != NULL) {
+ if (req->cmd_type == REQ_TYPE_FS) {
+ struct i2o_block_delayed_request *dreq;
+ struct i2o_block_request *ireq = req->special;
+ unsigned int queue_depth;
+
+ queue_depth = ireq->i2o_blk_dev->open_queue_depth;
+
+ if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
+ if (!i2o_block_transfer(req)) {
+ blk_start_request(req);
+ continue;
+ } else
+ osm_info("transfer error\n");
+ }
+
+ if (queue_depth)
+ break;
+
+ /* stop the queue and retry later */
+ dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
+ if (!dreq)
+ continue;
+
+ dreq->queue = q;
+ INIT_DELAYED_WORK(&dreq->work,
+ i2o_block_delayed_request_fn);
+
+ if (!queue_delayed_work(i2o_block_driver.event_queue,
+ &dreq->work,
+ I2O_BLOCK_RETRY_TIME))
+ kfree(dreq);
+ else {
+ blk_stop_queue(q);
+ break;
+ }
+ } else {
+ blk_start_request(req);
+ __blk_end_request_all(req, -EIO);
+ }
+ }
+};
+
+/* I2O Block device operations definition */
+static const struct block_device_operations i2o_block_fops = {
+ .owner = THIS_MODULE,
+ .open = i2o_block_open,
+ .release = i2o_block_release,
+ .ioctl = i2o_block_ioctl,
+ .compat_ioctl = i2o_block_ioctl,
+ .getgeo = i2o_block_getgeo,
+ .check_events = i2o_block_check_events,
+};
+
+/**
+ * i2o_block_device_alloc - Allocate memory for a I2O Block device
+ *
+ * Allocate memory for the i2o_block_device struct, gendisk and request
+ * queue and initialize them as far as possible without additional
+ * device information.
+ *
+ * Returns a pointer to the allocated I2O Block device on success or a
+ * negative error code on failure.
+ */
+static struct i2o_block_device *i2o_block_device_alloc(void)
+{
+ struct i2o_block_device *dev;
+ struct gendisk *gd;
+ struct request_queue *queue;
+ int rc;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ osm_err("Insufficient memory to allocate I2O Block disk.\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ INIT_LIST_HEAD(&dev->open_queue);
+ spin_lock_init(&dev->lock);
+ dev->rcache = CACHE_PREFETCH;
+ dev->wcache = CACHE_WRITEBACK;
+
+ /* allocate a gendisk with 16 partitions */
+ gd = alloc_disk(16);
+ if (!gd) {
+ osm_err("Insufficient memory to allocate gendisk.\n");
+ rc = -ENOMEM;
+ goto cleanup_dev;
+ }
+
+ /* initialize the request queue */
+ queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
+ if (!queue) {
+ osm_err("Insufficient memory to allocate request queue.\n");
+ rc = -ENOMEM;
+ goto cleanup_queue;
+ }
+
+ blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
+
+ gd->major = I2O_MAJOR;
+ gd->queue = queue;
+ gd->fops = &i2o_block_fops;
+ gd->private_data = dev;
+
+ dev->gd = gd;
+
+ return dev;
+
+ cleanup_queue:
+ put_disk(gd);
+
+ cleanup_dev:
+ kfree(dev);
+
+ exit:
+ return ERR_PTR(rc);
+};
+
+/**
+ *	i2o_block_probe - verify if dev is an I2O Block device and install it
+ * @dev: device to verify if it is a I2O Block device
+ *
+ *	We only verify that the user_tid of the device is 0xfff and then install
+ *	the device. Otherwise it is used by some other device (e.g. RAID).
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_probe(struct device *dev)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(dev);
+ struct i2o_controller *c = i2o_dev->iop;
+ struct i2o_block_device *i2o_blk_dev;
+ struct gendisk *gd;
+ struct request_queue *queue;
+ static int unit = 0;
+ int rc;
+ u64 size;
+ u32 blocksize;
+ u16 body_size = 4;
+ u16 power;
+ unsigned short max_sectors;
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+ if (c->adaptec)
+ body_size = 8;
+#endif
+
+ if (c->limit_sectors)
+ max_sectors = I2O_MAX_SECTORS_LIMITED;
+ else
+ max_sectors = I2O_MAX_SECTORS;
+
+ /* skip devices which are used by IOP */
+ if (i2o_dev->lct_data.user_tid != 0xfff) {
+ osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
+ return -ENODEV;
+ }
+
+ if (i2o_device_claim(i2o_dev)) {
+ osm_warn("Unable to claim device. Installation aborted\n");
+ rc = -EFAULT;
+ goto exit;
+ }
+
+ i2o_blk_dev = i2o_block_device_alloc();
+ if (IS_ERR(i2o_blk_dev)) {
+ osm_err("could not alloc a new I2O block device");
+ rc = PTR_ERR(i2o_blk_dev);
+ goto claim_release;
+ }
+
+ i2o_blk_dev->i2o_dev = i2o_dev;
+ dev_set_drvdata(dev, i2o_blk_dev);
+
+ /* setup gendisk */
+ gd = i2o_blk_dev->gd;
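+	/*
+	 * The gendisk was allocated with 16 minors, so unit N occupies
+	 * minors N * 16 .. N * 16 + 15 and shows up as i2o/hd<letter>.
+	 */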
+ gd->first_minor = unit << 4;
+ sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
+ gd->driverfs_dev = &i2o_dev->device;
+
+ /* setup request queue */
+ queue = gd->queue;
+ queue->queuedata = i2o_blk_dev;
+
+ blk_queue_max_hw_sectors(queue, max_sectors);
+ blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size));
+
+	osm_debug("max sectors = %d\n", queue_max_sectors(queue));
+	/* the separate phys/hw segment limits are now a single limit */
+	osm_debug("max segments = %d\n", queue_max_segments(queue));
+
+ /*
+ * Ask for the current media data. If that isn't supported
+ * then we ask for the device capacity data
+ */
+ if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
+ !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
+ blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
+ } else
+ osm_warn("unable to get blocksize of %s\n", gd->disk_name);
+
+ if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
+ !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
+ set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
+ } else
+ osm_warn("could not get size of %s\n", gd->disk_name);
+
+ if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
+ i2o_blk_dev->power = power;
+
+ i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
+
+ add_disk(gd);
+
+ unit++;
+
+ osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid,
+ i2o_blk_dev->gd->disk_name);
+
+ return 0;
+
+ claim_release:
+ i2o_device_claim_release(i2o_dev);
+
+ exit:
+ return rc;
+};
+
+/* Block OSM driver struct */
+static struct i2o_driver i2o_block_driver = {
+ .name = OSM_NAME,
+ .event = i2o_block_event,
+ .reply = i2o_block_reply,
+ .classes = i2o_block_class_id,
+ .driver = {
+ .probe = i2o_block_probe,
+ .remove = i2o_block_remove,
+ },
+};
+
+/**
+ * i2o_block_init - Block OSM initialization function
+ *
+ *	Allocates the slab and mempool for request structs, registers the
+ *	i2o_block block device and finally registers the Block OSM with the
+ *	I2O core.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_block_init(void)
+{
+ int rc;
+ int size;
+
+ printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
+
+ /* Allocate request mempool and slab */
+ size = sizeof(struct i2o_block_request);
+ i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!i2o_blk_req_pool.slab) {
+ osm_err("can't init request slab\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ i2o_blk_req_pool.pool =
+ mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
+ i2o_blk_req_pool.slab);
+ if (!i2o_blk_req_pool.pool) {
+ osm_err("can't init request mempool\n");
+ rc = -ENOMEM;
+ goto free_slab;
+ }
+
+ /* Register the block device interfaces */
+ rc = register_blkdev(I2O_MAJOR, "i2o_block");
+ if (rc) {
+ osm_err("unable to register block device\n");
+ goto free_mempool;
+ }
+#ifdef MODULE
+ osm_info("registered device at major %d\n", I2O_MAJOR);
+#endif
+
+ /* Register Block OSM into I2O core */
+ rc = i2o_driver_register(&i2o_block_driver);
+ if (rc) {
+ osm_err("Could not register Block driver\n");
+ goto unregister_blkdev;
+ }
+
+ return 0;
+
+ unregister_blkdev:
+ unregister_blkdev(I2O_MAJOR, "i2o_block");
+
+ free_mempool:
+ mempool_destroy(i2o_blk_req_pool.pool);
+
+ free_slab:
+ kmem_cache_destroy(i2o_blk_req_pool.slab);
+
+ exit:
+ return rc;
+};
+
+/**
+ * i2o_block_exit - Block OSM exit function
+ *
+ *	Unregisters the Block OSM from the I2O core, unregisters the i2o_block
+ *	block device and frees the mempool and slab.
+ */
+static void __exit i2o_block_exit(void)
+{
+ /* Unregister I2O Block OSM from I2O core */
+ i2o_driver_unregister(&i2o_block_driver);
+
+ /* Unregister block device */
+ unregister_blkdev(I2O_MAJOR, "i2o_block");
+
+ /* Free request mempool and slab */
+ mempool_destroy(i2o_blk_req_pool.pool);
+ kmem_cache_destroy(i2o_blk_req_pool.slab);
+};
+
+MODULE_AUTHOR("Red Hat");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(OSM_DESCRIPTION);
+MODULE_VERSION(OSM_VERSION);
+
+module_init(i2o_block_init);
+module_exit(i2o_block_exit);
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h
new file mode 100644
index 00000000..cf8873cb
--- /dev/null
+++ b/drivers/message/i2o/i2o_block.h
@@ -0,0 +1,103 @@
+/*
+ * Block OSM structures/API
+ *
+ * Copyright (C) 1999-2002 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * For the purpose of avoiding doubt the preferred form of the work
+ * for making modifications shall be a standards compliant form such as
+ * gzipped tar and not one requiring a proprietary or patent encumbered
+ * tool to unpack.
+ *
+ * Fixes/additions:
+ * Steve Ralston:
+ * Multiple device handling error fixes,
+ * Added a queue depth.
+ * Alan Cox:
+ *		FC920 has an RMW bug. Don't OR in the end marker.
+ * Removed queue walk, fixed for 64bitness.
+ * Rewrote much of the code over time
+ * Added indirect block lists
+ * Handle 64K limits on many controllers
+ * Don't use indirects on the Promise (breaks)
+ * Heavily chop down the queue depths
+ * Deepak Saxena:
+ * Independent queues per IOP
+ * Support for dynamic device creation/deletion
+ * Code cleanup
+ * Support for larger I/Os through merge* functions
+ * (taken from DAC960 driver)
+ * Boji T Kannanthanam:
+ * Set the I2O Block devices to be detected in increasing
+ * order of TIDs during boot.
+ * Search and set the I2O block device that we boot off
+ * from as the first device to be claimed (as /dev/i2o/hda)
+ * Properly attach/detach I2O gendisk structure from the
+ * system gendisk list. The I2O block devices now appear in
+ * /proc/partitions.
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ * Minor bugfixes for 2.6.
+ */
+
+#ifndef I2O_BLOCK_OSM_H
+#define I2O_BLOCK_OSM_H
+
+#define I2O_BLOCK_RETRY_TIME HZ/4
+#define I2O_BLOCK_MAX_OPEN_REQUESTS 50
+
+/* request queue sizes */
+#define I2O_BLOCK_REQ_MEMPOOL_SIZE 32
+
+#define KERNEL_SECTOR_SHIFT 9
+#define KERNEL_SECTOR_SIZE (1 << KERNEL_SECTOR_SHIFT)
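+/*
+ * The block layer counts in 512-byte sectors while the I2O messages built in
+ * i2o_block_transfer() carry byte offsets, hence the shifts by
+ * KERNEL_SECTOR_SHIFT (e.g. sector 2048 becomes byte offset 2048 << 9 =
+ * 1048576).
+ */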
+
+/* I2O Block OSM mempool struct */
+struct i2o_block_mempool {
+ struct kmem_cache *slab;
+ mempool_t *pool;
+};
+
+/* I2O Block device descriptor */
+struct i2o_block_device {
+ struct i2o_device *i2o_dev; /* pointer to I2O device */
+ struct gendisk *gd;
+ spinlock_t lock; /* queue lock */
+ struct list_head open_queue; /* list of transferred, but unfinished
+ requests */
+ unsigned int open_queue_depth; /* number of requests in the queue */
+
+ int rcache; /* read cache flags */
+ int wcache; /* write cache flags */
+ int flags;
+ u16 power; /* power state */
+ int media_change_flag; /* media changed flag */
+};
+
+/* I2O Block device request */
+struct i2o_block_request {
+ struct list_head queue;
+ struct request *req; /* corresponding request */
+ struct i2o_block_device *i2o_blk_dev; /* I2O block device */
+ struct device *dev; /* device used for DMA */
+ int sg_nents; /* number of SG elements */
+ struct scatterlist sg_table[I2O_MAX_PHYS_SEGMENTS]; /* SG table */
+};
+
+/* I2O Block device delayed request */
+struct i2o_block_delayed_request {
+ struct delayed_work work;
+ struct request_queue *queue;
+};
+
+#endif
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
new file mode 100644
index 00000000..098de2b3
--- /dev/null
+++ b/drivers/message/i2o/i2o_config.c
@@ -0,0 +1,1146 @@
+/*
+ * I2O Configuration Interface Driver
+ *
+ * (C) Copyright 1999-2002 Red Hat
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * Fixes/additions:
+ * Deepak Saxena (04/20/1999):
+ * Added basic ioctl() support
+ * Deepak Saxena (06/07/1999):
+ * Added software download ioctl (still testing)
+ * Auvo Häkkinen (09/10/1999):
+ * Changes to i2o_cfg_reply(), ioctl_parms()
+ * Added ioct_validate()
+ *		Added ioctl_validate()
+ * Fixed ioctl_swdl()
+ * Taneli Vähäkangas (10/04/1999):
+ * Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
+ * Deepak Saxena (11/18/1999):
+ *		Added event management support
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>:
+ * 2.4 rewrite ported to 2.5
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ * Added pass-thru support for Adaptec's raidutils
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+
+#include "core.h"
+
+#define SG_TABLESIZE 30
+
+static DEFINE_MUTEX(i2o_cfg_mutex);
+static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long);
+
+static spinlock_t i2o_config_lock;
+
+#define MODINC(x,y) ((x) = ((x) + 1) % (y))
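+/*
+ * MODINC() advances the q_in/q_out indices of the per-file event queue
+ * modulo its length, turning event_q[] into a circular buffer of
+ * I2O_EVT_Q_LEN entries.
+ */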
+
+struct sg_simple_element {
+ u32 flag_count;
+ u32 addr_bus;
+};
+
+struct i2o_cfg_info {
+ struct file *fp;
+ struct fasync_struct *fasync;
+ struct i2o_evt_info event_q[I2O_EVT_Q_LEN];
+ u16 q_in; // Queue head index
+ u16 q_out; // Queue tail index
+ u16 q_len; // Queue length
+ u16 q_lost; // Number of lost events
+ ulong q_id; // Event queue ID...used as tx_context
+ struct i2o_cfg_info *next;
+};
+static struct i2o_cfg_info *open_files = NULL;
+static ulong i2o_cfg_info_id = 0;
+
+static int i2o_cfg_getiops(unsigned long arg)
+{
+ struct i2o_controller *c;
+ u8 __user *user_iop_table = (void __user *)arg;
+ u8 tmp[MAX_I2O_CONTROLLERS];
+ int ret = 0;
+
+ memset(tmp, 0, MAX_I2O_CONTROLLERS);
+
+ list_for_each_entry(c, &i2o_controllers, list)
+ tmp[c->unit] = 1;
+
+ if (copy_to_user(user_iop_table, tmp, MAX_I2O_CONTROLLERS))
+ ret = -EFAULT;
+
+ return ret;
+};
+
+static int i2o_cfg_gethrt(unsigned long arg)
+{
+ struct i2o_controller *c;
+ struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
+ struct i2o_cmd_hrtlct kcmd;
+ i2o_hrt *hrt;
+ int len;
+ u32 reslen;
+ int ret = 0;
+
+ if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
+ return -EFAULT;
+
+ if (get_user(reslen, kcmd.reslen) < 0)
+ return -EFAULT;
+
+ if (kcmd.resbuf == NULL)
+ return -EFAULT;
+
+ c = i2o_find_iop(kcmd.iop);
+ if (!c)
+ return -ENXIO;
+
+ hrt = (i2o_hrt *) c->hrt.virt;
+
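+	/*
+	 * The HRT starts with an 8-byte header; entry_len appears to be in
+	 * 32-bit words per entry, hence the shift by two to get bytes.
+	 */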
+ len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);
+
+ if (put_user(len, kcmd.reslen))
+ ret = -EFAULT;
+ else if (len > reslen)
+ ret = -ENOBUFS;
+ else if (copy_to_user(kcmd.resbuf, (void *)hrt, len))
+ ret = -EFAULT;
+
+ return ret;
+};
+
+static int i2o_cfg_getlct(unsigned long arg)
+{
+ struct i2o_controller *c;
+ struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
+ struct i2o_cmd_hrtlct kcmd;
+ i2o_lct *lct;
+ int len;
+ int ret = 0;
+ u32 reslen;
+
+ if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
+ return -EFAULT;
+
+ if (get_user(reslen, kcmd.reslen) < 0)
+ return -EFAULT;
+
+ if (kcmd.resbuf == NULL)
+ return -EFAULT;
+
+ c = i2o_find_iop(kcmd.iop);
+ if (!c)
+ return -ENXIO;
+
+ lct = (i2o_lct *) c->lct;
+
+ len = (unsigned int)lct->table_size << 2;
+ if (put_user(len, kcmd.reslen))
+ ret = -EFAULT;
+ else if (len > reslen)
+ ret = -ENOBUFS;
+ else if (copy_to_user(kcmd.resbuf, lct, len))
+ ret = -EFAULT;
+
+ return ret;
+};
+
+static int i2o_cfg_parms(unsigned long arg, unsigned int type)
+{
+ int ret = 0;
+ struct i2o_controller *c;
+ struct i2o_device *dev;
+ struct i2o_cmd_psetget __user *cmd =
+ (struct i2o_cmd_psetget __user *)arg;
+ struct i2o_cmd_psetget kcmd;
+ u32 reslen;
+ u8 *ops;
+ u8 *res;
+ int len = 0;
+
+ u32 i2o_cmd = (type == I2OPARMGET ?
+ I2O_CMD_UTIL_PARAMS_GET : I2O_CMD_UTIL_PARAMS_SET);
+
+ if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
+ return -EFAULT;
+
+ if (get_user(reslen, kcmd.reslen))
+ return -EFAULT;
+
+ c = i2o_find_iop(kcmd.iop);
+ if (!c)
+ return -ENXIO;
+
+ dev = i2o_iop_find_device(c, kcmd.tid);
+ if (!dev)
+ return -ENXIO;
+
+ ops = memdup_user(kcmd.opbuf, kcmd.oplen);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ /*
+ * It's possible to have a _very_ large table
+ * and that the user asks for all of it at once...
+ */
+ res = kmalloc(65536, GFP_KERNEL);
+ if (!res) {
+ kfree(ops);
+ return -ENOMEM;
+ }
+
+ len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536);
+ kfree(ops);
+
+ if (len < 0) {
+ kfree(res);
+ return -EAGAIN;
+ }
+
+ if (put_user(len, kcmd.reslen))
+ ret = -EFAULT;
+ else if (len > reslen)
+ ret = -ENOBUFS;
+ else if (copy_to_user(kcmd.resbuf, res, len))
+ ret = -EFAULT;
+
+ kfree(res);
+
+ return ret;
+};
+
+static int i2o_cfg_swdl(unsigned long arg)
+{
+ struct i2o_sw_xfer kxfer;
+ struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
+ unsigned char maxfrag = 0, curfrag = 1;
+ struct i2o_dma buffer;
+ struct i2o_message *msg;
+ unsigned int status = 0, swlen = 0, fragsize = 8192;
+ struct i2o_controller *c;
+
+ if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
+ return -EFAULT;
+
+ if (get_user(swlen, kxfer.swlen) < 0)
+ return -EFAULT;
+
+ if (get_user(maxfrag, kxfer.maxfrag) < 0)
+ return -EFAULT;
+
+ if (get_user(curfrag, kxfer.curfrag) < 0)
+ return -EFAULT;
+
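+	/*
+	 * The image is transferred in 8 KiB fragments; only the final
+	 * fragment carries the remainder, so shrink fragsize for it here.
+	 */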
+ if (curfrag == maxfrag)
+ fragsize = swlen - (maxfrag - 1) * 8192;
+
+ if (!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize))
+ return -EFAULT;
+
+ c = i2o_find_iop(kxfer.iop);
+ if (!c)
+ return -ENXIO;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
+ i2o_msg_nop(c, msg);
+ return -ENOMEM;
+ }
+
+ if (__copy_from_user(buffer.virt, kxfer.buf, fragsize)) {
+ i2o_msg_nop(c, msg);
+ i2o_dma_free(&c->pdev->dev, &buffer);
+ return -EFAULT;
+ }
+
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+	    cpu_to_le32((((u32) kxfer.flags) << 24) |
+			(((u32) kxfer.sw_type) << 16) |
+			(((u32) maxfrag) << 8) | (((u32) curfrag)));
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
+ msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
+ msg->body[4] = cpu_to_le32(buffer.phys);
+
+ osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
+ status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
+
+ if (status != -ETIMEDOUT)
+ i2o_dma_free(&c->pdev->dev, &buffer);
+
+ if (status != I2O_POST_WAIT_OK) {
+ // it fails if you try and send frags out of order
+ // and for some yet unknown reasons too
+ osm_info("swdl failed, DetailedStatus = %d\n", status);
+ return status;
+ }
+
+ return 0;
+};
+
+static int i2o_cfg_swul(unsigned long arg)
+{
+ struct i2o_sw_xfer kxfer;
+ struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
+ unsigned char maxfrag = 0, curfrag = 1;
+ struct i2o_dma buffer;
+ struct i2o_message *msg;
+ unsigned int status = 0, swlen = 0, fragsize = 8192;
+ struct i2o_controller *c;
+ int ret = 0;
+
+ if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
+ return -EFAULT;
+
+ if (get_user(swlen, kxfer.swlen) < 0)
+ return -EFAULT;
+
+ if (get_user(maxfrag, kxfer.maxfrag) < 0)
+ return -EFAULT;
+
+ if (get_user(curfrag, kxfer.curfrag) < 0)
+ return -EFAULT;
+
+ if (curfrag == maxfrag)
+ fragsize = swlen - (maxfrag - 1) * 8192;
+
+ if (!kxfer.buf)
+ return -EFAULT;
+
+ c = i2o_find_iop(kxfer.iop);
+ if (!c)
+ return -ENXIO;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
+ i2o_msg_nop(c, msg);
+ return -ENOMEM;
+ }
+
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+	    cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16 |
+			(u32) maxfrag << 8 | (u32) curfrag);
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
+ msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
+ msg->body[4] = cpu_to_le32(buffer.phys);
+
+ osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
+ status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
+
+ if (status != I2O_POST_WAIT_OK) {
+ if (status != -ETIMEDOUT)
+ i2o_dma_free(&c->pdev->dev, &buffer);
+
+ osm_info("swul failed, DetailedStatus = %d\n", status);
+ return status;
+ }
+
+ if (copy_to_user(kxfer.buf, buffer.virt, fragsize))
+ ret = -EFAULT;
+
+ i2o_dma_free(&c->pdev->dev, &buffer);
+
+ return ret;
+}
+
+static int i2o_cfg_swdel(unsigned long arg)
+{
+ struct i2o_controller *c;
+ struct i2o_sw_xfer kxfer;
+ struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
+ struct i2o_message *msg;
+ unsigned int swlen;
+ int token;
+
+ if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
+ return -EFAULT;
+
+ if (get_user(swlen, kxfer.swlen) < 0)
+ return -EFAULT;
+
+ c = i2o_find_iop(kxfer.iop);
+ if (!c)
+ return -ENXIO;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+ cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
+
+ token = i2o_msg_post_wait(c, msg, 10);
+
+ if (token != I2O_POST_WAIT_OK) {
+ osm_info("swdel failed, DetailedStatus = %d\n", token);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+};
+
+static int i2o_cfg_validate(unsigned long arg)
+{
+ int token;
+ int iop = (int)arg;
+ struct i2o_message *msg;
+ struct i2o_controller *c;
+
+ c = i2o_find_iop(iop);
+ if (!c)
+ return -ENXIO;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+
+ token = i2o_msg_post_wait(c, msg, 10);
+
+ if (token != I2O_POST_WAIT_OK) {
+ osm_info("Can't validate configuration, ErrorStatus = %d\n",
+ token);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+};
+
+static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
+{
+ struct i2o_message *msg;
+ struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
+ struct i2o_evt_id kdesc;
+ struct i2o_controller *c;
+ struct i2o_device *d;
+
+ if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id)))
+ return -EFAULT;
+
+ /* IOP exists? */
+ c = i2o_find_iop(kdesc.iop);
+ if (!c)
+ return -ENXIO;
+
+ /* Device exists? */
+ d = i2o_iop_find_device(c, kdesc.tid);
+ if (!d)
+ return -ENODEV;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
+ kdesc.tid);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
+ msg->body[0] = cpu_to_le32(kdesc.evt_mask);
+
+ i2o_msg_post(c, msg);
+
+ return 0;
+}
+
+static int i2o_cfg_evt_get(unsigned long arg, struct file *fp)
+{
+ struct i2o_cfg_info *p = NULL;
+ struct i2o_evt_get __user *uget = (struct i2o_evt_get __user *)arg;
+ struct i2o_evt_get kget;
+ unsigned long flags;
+
+ for (p = open_files; p; p = p->next)
+ if (p->q_id == (ulong) fp->private_data)
+ break;
+
+	if (!p || !p->q_len)
+		return -ENOENT;
+
+ memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info));
+ MODINC(p->q_out, I2O_EVT_Q_LEN);
+ spin_lock_irqsave(&i2o_config_lock, flags);
+ p->q_len--;
+ kget.pending = p->q_len;
+ kget.lost = p->q_lost;
+ spin_unlock_irqrestore(&i2o_config_lock, flags);
+
+ if (copy_to_user(uget, &kget, sizeof(struct i2o_evt_get)))
+ return -EFAULT;
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
+ unsigned long arg)
+{
+ struct i2o_cmd_passthru32 __user *cmd;
+ struct i2o_controller *c;
+ u32 __user *user_msg;
+ u32 *reply = NULL;
+ u32 __user *user_reply = NULL;
+ u32 size = 0;
+ u32 reply_size = 0;
+ u32 rcode = 0;
+ struct i2o_dma sg_list[SG_TABLESIZE];
+ u32 sg_offset = 0;
+ u32 sg_count = 0;
+ u32 i = 0;
+ u32 sg_index = 0;
+ i2o_status_block *sb;
+ struct i2o_message *msg;
+ unsigned int iop;
+
+ cmd = (struct i2o_cmd_passthru32 __user *)arg;
+
+ if (get_user(iop, &cmd->iop) || get_user(i, &cmd->msg))
+ return -EFAULT;
+
+ user_msg = compat_ptr(i);
+
+ c = i2o_find_iop(iop);
+ if (!c) {
+ osm_debug("controller %d not found\n", iop);
+ return -ENXIO;
+ }
+
+ sb = c->status_block.virt;
+
+ if (get_user(size, &user_msg[0])) {
+ osm_warn("unable to get size!\n");
+ return -EFAULT;
+ }
+ size = size >> 16;
+
+ if (size > sb->inbound_frame_size) {
+		osm_warn("size of message > inbound_frame_size\n");
+ return -EFAULT;
+ }
+
+ user_reply = &user_msg[size];
+
+ size <<= 2; // Convert to bytes
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ rcode = -EFAULT;
+ /* Copy in the user's I2O command */
+ if (copy_from_user(msg, user_msg, size)) {
+ osm_warn("unable to copy user message\n");
+ goto out;
+ }
+ i2o_dump_message(msg);
+
+ if (get_user(reply_size, &user_reply[0]) < 0)
+ goto out;
+
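+	/*
+	 * As with the request frame, the first word of the user's reply
+	 * buffer holds its size in 32-bit words in the upper 16 bits;
+	 * convert that to bytes before allocating the kernel-side copy.
+	 */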
+ reply_size >>= 16;
+ reply_size <<= 2;
+
+ rcode = -ENOMEM;
+ reply = kzalloc(reply_size, GFP_KERNEL);
+ if (!reply) {
+ printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
+ c->name);
+ goto out;
+ }
+
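+	/*
+	 * Bits 4-7 of the first header word give the offset (in 32-bit
+	 * words) of the scatter-gather list within the message; zero means
+	 * no SGL.  Each simple SG element below keeps its flags in the top
+	 * byte and the byte count in the low 24 bits of flag_count.
+	 */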
+ sg_offset = (msg->u.head[0] >> 4) & 0x0f;
+
+ memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
+ if (sg_offset) {
+ struct sg_simple_element *sg;
+
+ if (sg_offset * 4 >= size) {
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ // TODO 64bit fix
+ sg = (struct sg_simple_element *)((&msg->u.head[0]) +
+ sg_offset);
+ sg_count =
+ (size - sg_offset * 4) / sizeof(struct sg_simple_element);
+ if (sg_count > SG_TABLESIZE) {
+ printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
+ c->name, sg_count);
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+
+ for (i = 0; i < sg_count; i++) {
+ int sg_size;
+ struct i2o_dma *p;
+
+ if (!(sg[i].flag_count & 0x10000000
+ /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
+ printk(KERN_DEBUG
+ "%s:Bad SG element %d - not simple (%x)\n",
+ c->name, i, sg[i].flag_count);
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+ sg_size = sg[i].flag_count & 0xffffff;
+ p = &(sg_list[sg_index]);
+ /* Allocate memory for the transfer */
+ if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
+ printk(KERN_DEBUG
+ "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ c->name, sg_size, i, sg_count);
+ rcode = -ENOMEM;
+ goto sg_list_cleanup;
+ }
+ sg_index++;
+ /* Copy in the user's SG buffer if necessary */
+			/* I2O_SGL_FLAGS_DIR */
+			if (sg[i].flag_count & 0x04000000) {
+ // TODO 64bit fix
+ if (copy_from_user
+ (p->virt,
+ (void __user *)(unsigned long)sg[i].
+ addr_bus, sg_size)) {
+ printk(KERN_DEBUG
+ "%s: Could not copy SG buf %d FROM user\n",
+ c->name, i);
+ rcode = -EFAULT;
+ goto sg_list_cleanup;
+ }
+ }
+ //TODO 64bit fix
+ sg[i].addr_bus = (u32) p->phys;
+ }
+ }
+
+ rcode = i2o_msg_post_wait(c, msg, 60);
+ msg = NULL;
+ if (rcode) {
+ reply[4] = ((u32) rcode) << 24;
+ goto sg_list_cleanup;
+ }
+
+ if (sg_offset) {
+ u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
+ /* Copy back the Scatter Gather buffers back to user space */
+ u32 j;
+ // TODO 64bit fix
+ struct sg_simple_element *sg;
+ int sg_size;
+
+		// re-acquire the original message to handle the SG copy correctly
+ memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
+ // get user msg size in u32s
+ if (get_user(size, &user_msg[0])) {
+ rcode = -EFAULT;
+ goto sg_list_cleanup;
+ }
+ size = size >> 16;
+ size *= 4;
+ /* Copy in the user's I2O command */
+ if (copy_from_user(rmsg, user_msg, size)) {
+ rcode = -EFAULT;
+ goto sg_list_cleanup;
+ }
+ sg_count =
+ (size - sg_offset * 4) / sizeof(struct sg_simple_element);
+
+ // TODO 64bit fix
+ sg = (struct sg_simple_element *)(rmsg + sg_offset);
+ for (j = 0; j < sg_count; j++) {
+ /* Copy out the SG list to user's buffer if necessary */
+			/* !I2O_SGL_FLAGS_DIR */
+			if (!(sg[j].flag_count & 0x4000000)) {
+ sg_size = sg[j].flag_count & 0xffffff;
+ // TODO 64bit fix
+ if (copy_to_user
+ ((void __user *)(u64) sg[j].addr_bus,
+ sg_list[j].virt, sg_size)) {
+ printk(KERN_WARNING
+ "%s: Could not copy %p TO user %x\n",
+ c->name, sg_list[j].virt,
+ sg[j].addr_bus);
+ rcode = -EFAULT;
+ goto sg_list_cleanup;
+ }
+ }
+ }
+ }
+
+sg_list_cleanup:
+ /* Copy back the reply to user space */
+ if (reply_size) {
+ // we wrote our own values for context - now restore the user supplied ones
+ if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
+ printk(KERN_WARNING
+ "%s: Could not copy message context FROM user\n",
+ c->name);
+ rcode = -EFAULT;
+ }
+ if (copy_to_user(user_reply, reply, reply_size)) {
+ printk(KERN_WARNING
+ "%s: Could not copy reply TO user\n", c->name);
+ rcode = -EFAULT;
+ }
+ }
+ for (i = 0; i < sg_index; i++)
+ i2o_dma_free(&c->pdev->dev, &sg_list[i]);
+
+cleanup:
+ kfree(reply);
+out:
+ if (msg)
+ i2o_msg_nop(c, msg);
+ return rcode;
+}
+
+static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
+ unsigned long arg)
+{
+ int ret;
+ mutex_lock(&i2o_cfg_mutex);
+ switch (cmd) {
+ case I2OGETIOPS:
+ ret = i2o_cfg_ioctl(file, cmd, arg);
+ break;
+ case I2OPASSTHRU32:
+ ret = i2o_cfg_passthru32(file, cmd, arg);
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+ }
+ mutex_unlock(&i2o_cfg_mutex);
+ return ret;
+}
+
+#endif
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+static int i2o_cfg_passthru(unsigned long arg)
+{
+ struct i2o_cmd_passthru __user *cmd =
+ (struct i2o_cmd_passthru __user *)arg;
+ struct i2o_controller *c;
+ u32 __user *user_msg;
+ u32 *reply = NULL;
+ u32 __user *user_reply = NULL;
+ u32 size = 0;
+ u32 reply_size = 0;
+ u32 rcode = 0;
+ struct i2o_dma sg_list[SG_TABLESIZE];
+ u32 sg_offset = 0;
+ u32 sg_count = 0;
+ int sg_index = 0;
+ u32 i = 0;
+ i2o_status_block *sb;
+ struct i2o_message *msg;
+ unsigned int iop;
+
+ if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
+ return -EFAULT;
+
+ c = i2o_find_iop(iop);
+ if (!c) {
+ osm_warn("controller %d not found\n", iop);
+ return -ENXIO;
+ }
+
+ sb = c->status_block.virt;
+
+ if (get_user(size, &user_msg[0]))
+ return -EFAULT;
+ size = size >> 16;
+
+ if (size > sb->inbound_frame_size) {
+		osm_warn("size of message > inbound_frame_size\n");
+ return -EFAULT;
+ }
+
+ user_reply = &user_msg[size];
+
+ size <<= 2; // Convert to bytes
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ rcode = -EFAULT;
+ /* Copy in the user's I2O command */
+ if (copy_from_user(msg, user_msg, size))
+ goto out;
+
+ if (get_user(reply_size, &user_reply[0]) < 0)
+ goto out;
+
+ reply_size >>= 16;
+ reply_size <<= 2;
+
+ reply = kzalloc(reply_size, GFP_KERNEL);
+ if (!reply) {
+ printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
+ c->name);
+ rcode = -ENOMEM;
+ goto out;
+ }
+
+ sg_offset = (msg->u.head[0] >> 4) & 0x0f;
+
+ memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
+ if (sg_offset) {
+ struct sg_simple_element *sg;
+ struct i2o_dma *p;
+
+ if (sg_offset * 4 >= size) {
+ rcode = -EFAULT;
+ goto cleanup;
+ }
+ // TODO 64bit fix
+ sg = (struct sg_simple_element *)((&msg->u.head[0]) +
+ sg_offset);
+ sg_count =
+ (size - sg_offset * 4) / sizeof(struct sg_simple_element);
+ if (sg_count > SG_TABLESIZE) {
+ printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
+ c->name, sg_count);
+ rcode = -EINVAL;
+ goto cleanup;
+ }
+
+ for (i = 0; i < sg_count; i++) {
+ int sg_size;
+
+ if (!(sg[i].flag_count & 0x10000000
+ /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
+ printk(KERN_DEBUG
+ "%s:Bad SG element %d - not simple (%x)\n",
+ c->name, i, sg[i].flag_count);
+ rcode = -EINVAL;
+ goto sg_list_cleanup;
+ }
+ sg_size = sg[i].flag_count & 0xffffff;
+ p = &(sg_list[sg_index]);
+ if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
+ /* Allocate memory for the transfer */
+ printk(KERN_DEBUG
+ "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+ c->name, sg_size, i, sg_count);
+ rcode = -ENOMEM;
+ goto sg_list_cleanup;
+ }
+ sg_index++;
+ /* Copy in the user's SG buffer if necessary */
+			/* I2O_SGL_FLAGS_DIR */
+			if (sg[i].flag_count & 0x04000000) {
+ // TODO 64bit fix
+ if (copy_from_user
+ (p->virt, (void __user *)sg[i].addr_bus,
+ sg_size)) {
+ printk(KERN_DEBUG
+ "%s: Could not copy SG buf %d FROM user\n",
+ c->name, i);
+ rcode = -EFAULT;
+ goto sg_list_cleanup;
+ }
+ }
+ sg[i].addr_bus = p->phys;
+ }
+ }
+
+ rcode = i2o_msg_post_wait(c, msg, 60);
+ msg = NULL;
+ if (rcode) {
+ reply[4] = ((u32) rcode) << 24;
+ goto sg_list_cleanup;
+ }
+
+ if (sg_offset) {
+ u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
+ /* Copy back the Scatter Gather buffers back to user space */
+ u32 j;
+ // TODO 64bit fix
+ struct sg_simple_element *sg;
+ int sg_size;
+
+		// re-acquire the original message to handle the SG copy correctly
+ memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
+ // get user msg size in u32s
+ if (get_user(size, &user_msg[0])) {
+ rcode = -EFAULT;
+ goto sg_list_cleanup;
+ }
+ size = size >> 16;
+ size *= 4;
+ /* Copy in the user's I2O command */
+ if (copy_from_user(rmsg, user_msg, size)) {
+ rcode = -EFAULT;
+ goto sg_list_cleanup;
+ }
+ sg_count =
+ (size - sg_offset * 4) / sizeof(struct sg_simple_element);
+
+ // TODO 64bit fix
+ sg = (struct sg_simple_element *)(rmsg + sg_offset);
+ for (j = 0; j < sg_count; j++) {
+ /* Copy out the SG list to user's buffer if necessary */
+			/* !I2O_SGL_FLAGS_DIR */
+			if (!(sg[j].flag_count & 0x4000000)) {
+ sg_size = sg[j].flag_count & 0xffffff;
+ // TODO 64bit fix
+ if (copy_to_user
+ ((void __user *)sg[j].addr_bus, sg_list[j].virt,
+ sg_size)) {
+ printk(KERN_WARNING
+ "%s: Could not copy %p TO user %x\n",
+ c->name, sg_list[j].virt,
+ sg[j].addr_bus);
+ rcode = -EFAULT;
+ goto sg_list_cleanup;
+ }
+ }
+ }
+ }
+
+sg_list_cleanup:
+ /* Copy back the reply to user space */
+ if (reply_size) {
+ // we wrote our own values for context - now restore the user supplied ones
+ if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
+ printk(KERN_WARNING
+ "%s: Could not copy message context FROM user\n",
+ c->name);
+ rcode = -EFAULT;
+ }
+ if (copy_to_user(user_reply, reply, reply_size)) {
+ printk(KERN_WARNING
+ "%s: Could not copy reply TO user\n", c->name);
+ rcode = -EFAULT;
+ }
+ }
+
+ for (i = 0; i < sg_index; i++)
+ i2o_dma_free(&c->pdev->dev, &sg_list[i]);
+
+cleanup:
+ kfree(reply);
+out:
+ if (msg)
+ i2o_msg_nop(c, msg);
+ return rcode;
+}
+#endif
+
+/*
+ * IOCTL Handler
+ */
+static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&i2o_cfg_mutex);
+ switch (cmd) {
+ case I2OGETIOPS:
+ ret = i2o_cfg_getiops(arg);
+ break;
+
+ case I2OHRTGET:
+ ret = i2o_cfg_gethrt(arg);
+ break;
+
+ case I2OLCTGET:
+ ret = i2o_cfg_getlct(arg);
+ break;
+
+ case I2OPARMSET:
+ ret = i2o_cfg_parms(arg, I2OPARMSET);
+ break;
+
+ case I2OPARMGET:
+ ret = i2o_cfg_parms(arg, I2OPARMGET);
+ break;
+
+ case I2OSWDL:
+ ret = i2o_cfg_swdl(arg);
+ break;
+
+ case I2OSWUL:
+ ret = i2o_cfg_swul(arg);
+ break;
+
+ case I2OSWDEL:
+ ret = i2o_cfg_swdel(arg);
+ break;
+
+ case I2OVALIDATE:
+ ret = i2o_cfg_validate(arg);
+ break;
+
+ case I2OEVTREG:
+ ret = i2o_cfg_evt_reg(arg, fp);
+ break;
+
+ case I2OEVTGET:
+ ret = i2o_cfg_evt_get(arg, fp);
+ break;
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+ case I2OPASSTHRU:
+ ret = i2o_cfg_passthru(arg);
+ break;
+#endif
+
+ default:
+ osm_debug("unknown ioctl called!\n");
+ ret = -EINVAL;
+ }
+ mutex_unlock(&i2o_cfg_mutex);
+ return ret;
+}
+
+static int cfg_open(struct inode *inode, struct file *file)
+{
+ struct i2o_cfg_info *tmp = kmalloc(sizeof(struct i2o_cfg_info),
+ GFP_KERNEL);
+ unsigned long flags;
+
+ if (!tmp)
+ return -ENOMEM;
+
+ mutex_lock(&i2o_cfg_mutex);
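+	/*
+	 * private_data stores a small numeric queue id rather than a
+	 * pointer; cfg_fasync(), cfg_release() and the event ioctls use it
+	 * to find this entry again in the open_files list.
+	 */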
+ file->private_data = (void *)(i2o_cfg_info_id++);
+ tmp->fp = file;
+ tmp->fasync = NULL;
+ tmp->q_id = (ulong) file->private_data;
+ tmp->q_len = 0;
+ tmp->q_in = 0;
+ tmp->q_out = 0;
+ tmp->q_lost = 0;
+ tmp->next = open_files;
+
+ spin_lock_irqsave(&i2o_config_lock, flags);
+ open_files = tmp;
+ spin_unlock_irqrestore(&i2o_config_lock, flags);
+ mutex_unlock(&i2o_cfg_mutex);
+
+ return 0;
+}
+
+static int cfg_fasync(int fd, struct file *fp, int on)
+{
+ ulong id = (ulong) fp->private_data;
+ struct i2o_cfg_info *p;
+ int ret = -EBADF;
+
+ mutex_lock(&i2o_cfg_mutex);
+ for (p = open_files; p; p = p->next)
+ if (p->q_id == id)
+ break;
+
+ if (p)
+ ret = fasync_helper(fd, fp, on, &p->fasync);
+ mutex_unlock(&i2o_cfg_mutex);
+ return ret;
+}
+
+static int cfg_release(struct inode *inode, struct file *file)
+{
+ ulong id = (ulong) file->private_data;
+ struct i2o_cfg_info *p, **q;
+ unsigned long flags;
+
+ mutex_lock(&i2o_cfg_mutex);
+ spin_lock_irqsave(&i2o_config_lock, flags);
+ for (q = &open_files; (p = *q) != NULL; q = &p->next) {
+ if (p->q_id == id) {
+ *q = p->next;
+ kfree(p);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&i2o_config_lock, flags);
+ mutex_unlock(&i2o_cfg_mutex);
+
+ return 0;
+}
+
+static const struct file_operations config_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .unlocked_ioctl = i2o_cfg_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = i2o_cfg_compat_ioctl,
+#endif
+ .open = cfg_open,
+ .release = cfg_release,
+ .fasync = cfg_fasync,
+};
+
+static struct miscdevice i2o_miscdev = {
+ I2O_MINOR,
+ "i2octl",
+ &config_fops
+};
+
+static int __init i2o_config_old_init(void)
+{
+ spin_lock_init(&i2o_config_lock);
+
+ if (misc_register(&i2o_miscdev) < 0) {
+ osm_err("can't register device.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void i2o_config_old_exit(void)
+{
+ misc_deregister(&i2o_miscdev);
+}
+
+MODULE_AUTHOR("Red Hat Software");
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
new file mode 100644
index 00000000..07dbeaf9
--- /dev/null
+++ b/drivers/message/i2o/i2o_proc.c
@@ -0,0 +1,2104 @@
+/*
+ * procfs handler for Linux I2O subsystem
+ *
+ * (c) Copyright 1999 Deepak Saxena
+ *
+ * Originally written by Deepak Saxena(deepak@plexity.net)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This is an initial test release. The code is based on the design of the
+ * ide procfs system (drivers/block/ide-proc.c). Some code taken from
+ * i2o-core module by Alan Cox.
+ *
+ * DISCLAIMER: This code is still under development/test and may cause
+ * your system to behave unpredictably. Use at your own discretion.
+ *
+ *
+ * Fixes/additions:
+ * Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI),
+ * Auvo Häkkinen (Auvo.Hakkinen@cs.Helsinki.FI)
+ * University of Helsinki, Department of Computer Science
+ * LAN entries
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>
+ * Changes for new I2O API
+ */
+
+#define OSM_NAME "proc-osm"
+#define OSM_VERSION "1.316"
+#define OSM_DESCRIPTION "I2O ProcFS OSM"
+
+#define I2O_MAX_MODULES 4
+// FIXME!
+#define FMT_U64_HEX "0x%08x%08x"
+#define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64))
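+/*
+ * FMT_U64_HEX/U64_VAL print a u64 as its high and low 32-bit halves; note
+ * that U64_VAL assumes little-endian storage (hence the FIXME above).
+ */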
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/i2o.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+
+/* Structure used to define /proc entries */
+typedef struct _i2o_proc_entry_t {
+ char *name; /* entry name */
+ mode_t mode; /* mode */
+ const struct file_operations *fops; /* open function */
+} i2o_proc_entry;
+
+/* global I2O /proc/i2o entry */
+static struct proc_dir_entry *i2o_proc_dir_root;
+
+/* proc OSM driver struct */
+static struct i2o_driver i2o_proc_driver = {
+ .name = OSM_NAME,
+};
+
+static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
+{
+ int i;
+
+ /* 19990419 -sralston
+ * The I2O v1.5 (and v2.0 so far) "official specification"
+ * got serial numbers WRONG!
+ * Apparently, and despite what Section 3.4.4 says and
+ * Figure 3-35 shows (pg 3-39 in the pdf doc),
+ * the convention / consensus seems to be:
+ * + First byte is SNFormat
+ * + Second byte is SNLen (but only if SNFormat==7 (?))
+ * + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format
+ */
+ switch (serialno[0]) {
+ case I2O_SNFORMAT_BINARY: /* Binary */
+ seq_printf(seq, "0x");
+ for (i = 0; i < serialno[1]; i++) {
+ seq_printf(seq, "%02X", serialno[2 + i]);
+ }
+ break;
+
+ case I2O_SNFORMAT_ASCII: /* ASCII */
+ if (serialno[1] < ' ') { /* printable or SNLen? */
+ /* sanity */
+ max_len =
+ (max_len < serialno[1]) ? max_len : serialno[1];
+ serialno[1 + max_len] = '\0';
+
+ /* just print it */
+ seq_printf(seq, "%s", &serialno[2]);
+ } else {
+ /* print chars for specified length */
+ for (i = 0; i < serialno[1]; i++) {
+ seq_printf(seq, "%c", serialno[2 + i]);
+ }
+ }
+ break;
+
+ case I2O_SNFORMAT_UNICODE: /* UNICODE */
+ seq_printf(seq, "UNICODE Format. Can't Display\n");
+ break;
+
+ case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */
+ seq_printf(seq, "LAN-48 MAC address @ %pM", &serialno[2]);
+ break;
+
+ case I2O_SNFORMAT_WAN: /* WAN MAC Address */
+ /* FIXME: Figure out what a WAN access address looks like?? */
+ seq_printf(seq, "WAN Access Address");
+ break;
+
+/* plus new in v2.0 */
+ case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */
+ /* FIXME: Figure out what a LAN-64 address really looks like?? */
+ seq_printf(seq,
+ "LAN-64 MAC address @ [?:%02X:%02X:?] %pM",
+ serialno[8], serialno[9], &serialno[2]);
+ break;
+
+ case I2O_SNFORMAT_DDM: /* I2O DDM */
+ seq_printf(seq,
+ "DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh",
+ *(u16 *) & serialno[2],
+ *(u16 *) & serialno[4], *(u16 *) & serialno[6]);
+ break;
+
+ case I2O_SNFORMAT_IEEE_REG64: /* IEEE Registered (64-bit) */
+ case I2O_SNFORMAT_IEEE_REG128: /* IEEE Registered (128-bit) */
+ /* FIXME: Figure if this is even close?? */
+ seq_printf(seq,
+ "IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n",
+ *(u32 *) & serialno[2],
+ *(u32 *) & serialno[6],
+ *(u32 *) & serialno[10], *(u32 *) & serialno[14]);
+ break;
+
+ case I2O_SNFORMAT_UNKNOWN: /* Unknown 0 */
+ case I2O_SNFORMAT_UNKNOWN2: /* Unknown 0xff */
+ default:
+ seq_printf(seq, "Unknown data format (0x%02x)", serialno[0]);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * i2o_get_class_name - do i2o class name lookup
+ * @class: class number
+ *
+ * Return a descriptive string for an i2o class.
+ */
+static const char *i2o_get_class_name(int class)
+{
+ int idx = 16;
+ static char *i2o_class_name[] = {
+ "Executive",
+ "Device Driver Module",
+ "Block Device",
+ "Tape Device",
+ "LAN Interface",
+ "WAN Interface",
+ "Fibre Channel Port",
+ "Fibre Channel Device",
+ "SCSI Device",
+ "ATE Port",
+ "ATE Device",
+ "Floppy Controller",
+ "Floppy Device",
+ "Secondary Bus Port",
+ "Peer Transport Agent",
+ "Peer Transport",
+ "Unknown"
+ };
+
+ switch (class & 0xfff) {
+ case I2O_CLASS_EXECUTIVE:
+ idx = 0;
+ break;
+ case I2O_CLASS_DDM:
+ idx = 1;
+ break;
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ idx = 2;
+ break;
+ case I2O_CLASS_SEQUENTIAL_STORAGE:
+ idx = 3;
+ break;
+ case I2O_CLASS_LAN:
+ idx = 4;
+ break;
+ case I2O_CLASS_WAN:
+ idx = 5;
+ break;
+ case I2O_CLASS_FIBRE_CHANNEL_PORT:
+ idx = 6;
+ break;
+ case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
+ idx = 7;
+ break;
+ case I2O_CLASS_SCSI_PERIPHERAL:
+ idx = 8;
+ break;
+ case I2O_CLASS_ATE_PORT:
+ idx = 9;
+ break;
+ case I2O_CLASS_ATE_PERIPHERAL:
+ idx = 10;
+ break;
+ case I2O_CLASS_FLOPPY_CONTROLLER:
+ idx = 11;
+ break;
+ case I2O_CLASS_FLOPPY_DEVICE:
+ idx = 12;
+ break;
+ case I2O_CLASS_BUS_ADAPTER:
+ idx = 13;
+ break;
+ case I2O_CLASS_PEER_TRANSPORT_AGENT:
+ idx = 14;
+ break;
+ case I2O_CLASS_PEER_TRANSPORT:
+ idx = 15;
+ break;
+ }
+
+ return i2o_class_name[idx];
+}
+
+#define SCSI_TABLE_SIZE 13
+static char *scsi_devices[] = {
+ "Direct-Access Read/Write",
+ "Sequential-Access Storage",
+ "Printer",
+ "Processor",
+ "WORM Device",
+ "CD-ROM Device",
+ "Scanner Device",
+ "Optical Memory Device",
+ "Medium Changer Device",
+ "Communications Device",
+ "Graphics Art Pre-Press Device",
+ "Graphics Art Pre-Press Device",
+ "Array Controller Device"
+};
+
+static char *chtostr(u8 * chars, int n)
+{
+	/*
+	 * Use a static buffer so the returned pointer remains valid after
+	 * return (the original used an on-stack array, which is undefined
+	 * behaviour).  Not reentrant, but the seq_file users are serialized.
+	 */
+	static char tmp[256];
+
+	tmp[0] = 0;
+	return strncat(tmp, (char *)chars, n);
+}
+
+static int i2o_report_query_status(struct seq_file *seq, int block_status,
+ char *group)
+{
+ switch (block_status) {
+ case -ETIMEDOUT:
+ return seq_printf(seq, "Timeout reading group %s.\n", group);
+ case -ENOMEM:
+ return seq_printf(seq, "No free memory to read the table.\n");
+ case -I2O_PARAMS_STATUS_INVALID_GROUP_ID:
+ return seq_printf(seq, "Group %s not supported.\n", group);
+ default:
+ return seq_printf(seq,
+ "Error reading group %s. BlockStatus 0x%02X\n",
+ group, -block_status);
+ }
+}
+
+static char *bus_strings[] = {
+ "Local Bus",
+ "ISA",
+ "EISA",
+ "MCA",
+ "PCI",
+ "PCMCIA",
+ "NUBUS",
+ "CARDBUS"
+};
+
+static int i2o_seq_show_hrt(struct seq_file *seq, void *v)
+{
+ struct i2o_controller *c = (struct i2o_controller *)seq->private;
+ i2o_hrt *hrt = (i2o_hrt *) c->hrt.virt;
+ u32 bus;
+ int i;
+
+ if (hrt->hrt_version) {
+ seq_printf(seq,
+ "HRT table for controller is too new a version.\n");
+ return 0;
+ }
+
+ seq_printf(seq, "HRT has %d entries of %d bytes each.\n",
+ hrt->num_entries, hrt->entry_len << 2);
+
+ for (i = 0; i < hrt->num_entries; i++) {
+ seq_printf(seq, "Entry %d:\n", i);
+ seq_printf(seq, " Adapter ID: %0#10x\n",
+ hrt->hrt_entry[i].adapter_id);
+ seq_printf(seq, " Controlling tid: %0#6x\n",
+ hrt->hrt_entry[i].parent_tid);
+
+ if (hrt->hrt_entry[i].bus_type != 0x80) {
+ bus = hrt->hrt_entry[i].bus_type;
+ seq_printf(seq, " %s Information\n",
+ bus_strings[bus]);
+
+ switch (bus) {
+ case I2O_BUS_LOCAL:
+ seq_printf(seq, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.local_bus.
+ LbBaseIOPort);
+ seq_printf(seq, " MemoryBase: %0#10x\n",
+ hrt->hrt_entry[i].bus.local_bus.
+ LbBaseMemoryAddress);
+ break;
+
+ case I2O_BUS_ISA:
+ seq_printf(seq, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.isa_bus.
+ IsaBaseIOPort);
+ seq_printf(seq, " MemoryBase: %0#10x,",
+ hrt->hrt_entry[i].bus.isa_bus.
+ IsaBaseMemoryAddress);
+ seq_printf(seq, " CSN: %0#4x,",
+ hrt->hrt_entry[i].bus.isa_bus.CSN);
+ break;
+
+ case I2O_BUS_EISA:
+ seq_printf(seq, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.eisa_bus.
+ EisaBaseIOPort);
+ seq_printf(seq, " MemoryBase: %0#10x,",
+ hrt->hrt_entry[i].bus.eisa_bus.
+ EisaBaseMemoryAddress);
+ seq_printf(seq, " Slot: %0#4x,",
+ hrt->hrt_entry[i].bus.eisa_bus.
+ EisaSlotNumber);
+ break;
+
+ case I2O_BUS_MCA:
+ seq_printf(seq, " IOBase: %0#6x,",
+ hrt->hrt_entry[i].bus.mca_bus.
+ McaBaseIOPort);
+ seq_printf(seq, " MemoryBase: %0#10x,",
+ hrt->hrt_entry[i].bus.mca_bus.
+ McaBaseMemoryAddress);
+ seq_printf(seq, " Slot: %0#4x,",
+ hrt->hrt_entry[i].bus.mca_bus.
+ McaSlotNumber);
+ break;
+
+ case I2O_BUS_PCI:
+ seq_printf(seq, " Bus: %0#4x",
+ hrt->hrt_entry[i].bus.pci_bus.
+ PciBusNumber);
+ seq_printf(seq, " Dev: %0#4x",
+ hrt->hrt_entry[i].bus.pci_bus.
+ PciDeviceNumber);
+ seq_printf(seq, " Func: %0#4x",
+ hrt->hrt_entry[i].bus.pci_bus.
+ PciFunctionNumber);
+ seq_printf(seq, " Vendor: %0#6x",
+ hrt->hrt_entry[i].bus.pci_bus.
+ PciVendorID);
+ seq_printf(seq, " Device: %0#6x\n",
+ hrt->hrt_entry[i].bus.pci_bus.
+ PciDeviceID);
+ break;
+
+ default:
+ seq_printf(seq, " Unsupported Bus Type\n");
+ }
+ } else
+ seq_printf(seq, " Unknown Bus Type\n");
+ }
+
+ return 0;
+}
+
+static int i2o_seq_show_lct(struct seq_file *seq, void *v)
+{
+ struct i2o_controller *c = (struct i2o_controller *)seq->private;
+ i2o_lct *lct = (i2o_lct *) c->lct;
+ int entries;
+ int i;
+
+#define BUS_TABLE_SIZE 3
+ static char *bus_ports[] = {
+ "Generic Bus",
+ "SCSI Bus",
+ "Fibre Channel Bus"
+ };
+
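+	/*
+	 * table_size is in 32-bit words; subtract the 3-word LCT header and
+	 * divide by the 9 words each LCT entry is assumed to occupy.
+	 */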
+ entries = (lct->table_size - 3) / 9;
+
+ seq_printf(seq, "LCT contains %d %s\n", entries,
+ entries == 1 ? "entry" : "entries");
+ if (lct->boot_tid)
+ seq_printf(seq, "Boot Device @ ID %d\n", lct->boot_tid);
+
+ seq_printf(seq, "Current Change Indicator: %#10x\n", lct->change_ind);
+
+ for (i = 0; i < entries; i++) {
+ seq_printf(seq, "Entry %d\n", i);
+ seq_printf(seq, " Class, SubClass : %s",
+ i2o_get_class_name(lct->lct_entry[i].class_id));
+
+ /*
+ * Classes which we'll print subclass info for
+ */
+ switch (lct->lct_entry[i].class_id & 0xFFF) {
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ switch (lct->lct_entry[i].sub_class) {
+ case 0x00:
+ seq_printf(seq, ", Direct-Access Read/Write");
+ break;
+
+ case 0x04:
+ seq_printf(seq, ", WORM Drive");
+ break;
+
+ case 0x05:
+ seq_printf(seq, ", CD-ROM Drive");
+ break;
+
+ case 0x07:
+ seq_printf(seq, ", Optical Memory Device");
+ break;
+
+ default:
+ seq_printf(seq, ", Unknown (0x%02x)",
+ lct->lct_entry[i].sub_class);
+ break;
+ }
+ break;
+
+ case I2O_CLASS_LAN:
+ switch (lct->lct_entry[i].sub_class & 0xFF) {
+ case 0x30:
+ seq_printf(seq, ", Ethernet");
+ break;
+
+ case 0x40:
+ seq_printf(seq, ", 100base VG");
+ break;
+
+ case 0x50:
+ seq_printf(seq, ", IEEE 802.5/Token-Ring");
+ break;
+
+ case 0x60:
+ seq_printf(seq, ", ANSI X3T9.5 FDDI");
+ break;
+
+ case 0x70:
+ seq_printf(seq, ", Fibre Channel");
+ break;
+
+ default:
+ seq_printf(seq, ", Unknown Sub-Class (0x%02x)",
+ lct->lct_entry[i].sub_class & 0xFF);
+ break;
+ }
+ break;
+
+ case I2O_CLASS_SCSI_PERIPHERAL:
+ if (lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE)
+ seq_printf(seq, ", %s",
+ scsi_devices[lct->lct_entry[i].
+ sub_class]);
+ else
+ seq_printf(seq, ", Unknown Device Type");
+ break;
+
+ case I2O_CLASS_BUS_ADAPTER:
+ if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE)
+ seq_printf(seq, ", %s",
+ bus_ports[lct->lct_entry[i].
+ sub_class]);
+ else
+ seq_printf(seq, ", Unknown Bus Type");
+ break;
+ }
+ seq_printf(seq, "\n");
+
+ seq_printf(seq, " Local TID : 0x%03x\n",
+ lct->lct_entry[i].tid);
+ seq_printf(seq, " User TID : 0x%03x\n",
+ lct->lct_entry[i].user_tid);
+ seq_printf(seq, " Parent TID : 0x%03x\n",
+ lct->lct_entry[i].parent_tid);
+ seq_printf(seq, " Identity Tag : 0x%x%x%x%x%x%x%x%x\n",
+ lct->lct_entry[i].identity_tag[0],
+ lct->lct_entry[i].identity_tag[1],
+ lct->lct_entry[i].identity_tag[2],
+ lct->lct_entry[i].identity_tag[3],
+ lct->lct_entry[i].identity_tag[4],
+ lct->lct_entry[i].identity_tag[5],
+ lct->lct_entry[i].identity_tag[6],
+ lct->lct_entry[i].identity_tag[7]);
+ seq_printf(seq, " Change Indicator : %0#10x\n",
+ lct->lct_entry[i].change_ind);
+ seq_printf(seq, " Event Capab Mask : %0#10x\n",
+ lct->lct_entry[i].device_flags);
+ }
+
+ return 0;
+}
+
+static int i2o_seq_show_status(struct seq_file *seq, void *v)
+{
+ struct i2o_controller *c = (struct i2o_controller *)seq->private;
+ char prodstr[25];
+ int version;
+ i2o_status_block *sb = c->status_block.virt;
+
+ i2o_status_get(c); // reread the status block
+
+ seq_printf(seq, "Organization ID : %0#6x\n", sb->org_id);
+
+ version = sb->i2o_version;
+
+/* FIXME for Spec 2.0
+ if (version == 0x02) {
+ seq_printf(seq, "Lowest I2O version supported: ");
+ switch(workspace[2]) {
+ case 0x00:
+ seq_printf(seq, "1.0\n");
+ break;
+ case 0x01:
+ seq_printf(seq, "1.5\n");
+ break;
+ case 0x02:
+ seq_printf(seq, "2.0\n");
+ break;
+ }
+
+ seq_printf(seq, "Highest I2O version supported: ");
+ switch(workspace[3]) {
+ case 0x00:
+ seq_printf(seq, "1.0\n");
+ break;
+ case 0x01:
+ seq_printf(seq, "1.5\n");
+ break;
+ case 0x02:
+ seq_printf(seq, "2.0\n");
+ break;
+ }
+ }
+*/
+ seq_printf(seq, "IOP ID : %0#5x\n", sb->iop_id);
+ seq_printf(seq, "Host Unit ID : %0#6x\n", sb->host_unit_id);
+ seq_printf(seq, "Segment Number : %0#5x\n", sb->segment_number);
+
+ seq_printf(seq, "I2O version : ");
+ switch (version) {
+ case 0x00:
+ seq_printf(seq, "1.0\n");
+ break;
+ case 0x01:
+ seq_printf(seq, "1.5\n");
+ break;
+ case 0x02:
+ seq_printf(seq, "2.0\n");
+ break;
+ default:
+ seq_printf(seq, "Unknown version\n");
+ }
+
+ seq_printf(seq, "IOP State : ");
+ switch (sb->iop_state) {
+ case 0x01:
+ seq_printf(seq, "INIT\n");
+ break;
+
+ case 0x02:
+ seq_printf(seq, "RESET\n");
+ break;
+
+ case 0x04:
+ seq_printf(seq, "HOLD\n");
+ break;
+
+ case 0x05:
+ seq_printf(seq, "READY\n");
+ break;
+
+ case 0x08:
+ seq_printf(seq, "OPERATIONAL\n");
+ break;
+
+ case 0x10:
+ seq_printf(seq, "FAILED\n");
+ break;
+
+ case 0x11:
+ seq_printf(seq, "FAULTED\n");
+ break;
+
+ default:
+ seq_printf(seq, "Unknown\n");
+ break;
+ }
+
+ seq_printf(seq, "Messenger Type : ");
+ switch (sb->msg_type) {
+ case 0x00:
+ seq_printf(seq, "Memory mapped\n");
+ break;
+ case 0x01:
+ seq_printf(seq, "Memory mapped only\n");
+ break;
+ case 0x02:
+ seq_printf(seq, "Remote only\n");
+ break;
+ case 0x03:
+ seq_printf(seq, "Memory mapped and remote\n");
+ break;
+ default:
+ seq_printf(seq, "Unknown\n");
+ }
+
+ seq_printf(seq, "Inbound Frame Size : %d bytes\n",
+ sb->inbound_frame_size << 2);
+ seq_printf(seq, "Max Inbound Frames : %d\n",
+ sb->max_inbound_frames);
+ seq_printf(seq, "Current Inbound Frames : %d\n",
+ sb->cur_inbound_frames);
+ seq_printf(seq, "Max Outbound Frames : %d\n",
+ sb->max_outbound_frames);
+
+ /* Spec doesn't say if NULL terminated or not... */
+ memcpy(prodstr, sb->product_id, 24);
+ prodstr[24] = '\0';
+ seq_printf(seq, "Product ID : %s\n", prodstr);
+ seq_printf(seq, "Expected LCT Size : %d bytes\n",
+ sb->expected_lct_size);
+
+ seq_printf(seq, "IOP Capabilities\n");
+ seq_printf(seq, " Context Field Size Support : ");
+ switch (sb->iop_capabilities & 0x0000003) {
+ case 0:
+ seq_printf(seq, "Supports only 32-bit context fields\n");
+ break;
+ case 1:
+ seq_printf(seq, "Supports only 64-bit context fields\n");
+ break;
+ case 2:
+ seq_printf(seq, "Supports 32-bit and 64-bit context fields, "
+ "but not concurrently\n");
+ break;
+ case 3:
+ seq_printf(seq, "Supports 32-bit and 64-bit context fields "
+ "concurrently\n");
+ break;
+ default:
+ seq_printf(seq, "0x%08x\n", sb->iop_capabilities);
+ }
+ seq_printf(seq, " Current Context Field Size : ");
+ switch (sb->iop_capabilities & 0x0000000C) {
+ case 0:
+ seq_printf(seq, "not configured\n");
+ break;
+ case 4:
+ seq_printf(seq, "Supports only 32-bit context fields\n");
+ break;
+ case 8:
+ seq_printf(seq, "Supports only 64-bit context fields\n");
+ break;
+ case 12:
+		seq_printf(seq, "Supports both 32-bit and 64-bit context fields "
+ "concurrently\n");
+ break;
+ default:
+ seq_printf(seq, "\n");
+ }
+ seq_printf(seq, " Inbound Peer Support : %s\n",
+ (sb->
+ iop_capabilities & 0x00000010) ? "Supported" :
+ "Not supported");
+ seq_printf(seq, " Outbound Peer Support : %s\n",
+ (sb->
+ iop_capabilities & 0x00000020) ? "Supported" :
+ "Not supported");
+ seq_printf(seq, " Peer to Peer Support : %s\n",
+ (sb->
+ iop_capabilities & 0x00000040) ? "Supported" :
+ "Not supported");
+
+ seq_printf(seq, "Desired private memory size : %d kB\n",
+ sb->desired_mem_size >> 10);
+ seq_printf(seq, "Allocated private memory size : %d kB\n",
+ sb->current_mem_size >> 10);
+ seq_printf(seq, "Private memory base address : %0#10x\n",
+ sb->current_mem_base);
+ seq_printf(seq, "Desired private I/O size : %d kB\n",
+ sb->desired_io_size >> 10);
+ seq_printf(seq, "Allocated private I/O size : %d kB\n",
+ sb->current_io_size >> 10);
+ seq_printf(seq, "Private I/O base address : %0#10x\n",
+ sb->current_io_base);
+
+ return 0;
+}
+
+static int i2o_seq_show_hw(struct seq_file *seq, void *v)
+{
+ struct i2o_controller *c = (struct i2o_controller *)seq->private;
+ static u32 work32[5];
+ static u8 *work8 = (u8 *) work32;
+ static u16 *work16 = (u16 *) work32;
+ int token;
+ u32 hwcap;
+
+ static char *cpu_table[] = {
+ "Intel 80960 series",
+ "AMD2900 series",
+ "Motorola 68000 series",
+ "ARM series",
+ "MIPS series",
+ "Sparc series",
+ "PowerPC series",
+ "Intel x86 series"
+ };
+
+ token =
+ i2o_parm_field_get(c->exec, 0x0000, -1, &work32, sizeof(work32));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token, "0x0000 IOP Hardware");
+ return 0;
+ }
+
+ seq_printf(seq, "I2O Vendor ID : %0#6x\n", work16[0]);
+ seq_printf(seq, "Product ID : %0#6x\n", work16[1]);
+ seq_printf(seq, "CPU : ");
+	if (work8[16] >= ARRAY_SIZE(cpu_table))
+ seq_printf(seq, "Unknown\n");
+ else
+ seq_printf(seq, "%s\n", cpu_table[work8[16]]);
+ /* Anyone using ProcessorVersion? */
+
+ seq_printf(seq, "RAM : %dkB\n", work32[1] >> 10);
+ seq_printf(seq, "Non-Volatile Mem : %dkB\n", work32[2] >> 10);
+
+ hwcap = work32[3];
+ seq_printf(seq, "Capabilities : 0x%08x\n", hwcap);
+ seq_printf(seq, " [%s] Self booting\n",
+ (hwcap & 0x00000001) ? "+" : "-");
+ seq_printf(seq, " [%s] Upgradable IRTOS\n",
+ (hwcap & 0x00000002) ? "+" : "-");
+ seq_printf(seq, " [%s] Supports downloading DDMs\n",
+ (hwcap & 0x00000004) ? "+" : "-");
+ seq_printf(seq, " [%s] Supports installing DDMs\n",
+ (hwcap & 0x00000008) ? "+" : "-");
+ seq_printf(seq, " [%s] Battery-backed RAM\n",
+ (hwcap & 0x00000010) ? "+" : "-");
+
+ return 0;
+}
+
+/* Executive group 0003h - Executing DDM List (table) */
+static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
+{
+ struct i2o_controller *c = (struct i2o_controller *)seq->private;
+ int token;
+ int i;
+
+ typedef struct _i2o_exec_execute_ddm_table {
+ u16 ddm_tid;
+ u8 module_type;
+ u8 reserved;
+ u16 i2o_vendor_id;
+ u16 module_id;
+ u8 module_name_version[28];
+ u32 data_size;
+ u32 code_size;
+ } i2o_exec_execute_ddm_table;
+
+ struct {
+ u16 result_count;
+ u16 pad;
+ u16 block_size;
+ u8 block_status;
+ u8 error_info_size;
+ u16 row_count;
+ u16 more_flag;
+ i2o_exec_execute_ddm_table ddm_table[I2O_MAX_MODULES];
+ } *result;
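+	/*
+	 * The result buffer starts with the generic I2O params-get header
+	 * (result_count .. more_flag) and is followed by up to
+	 * I2O_MAX_MODULES rows of the executing DDM table.
+	 */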
+
+ i2o_exec_execute_ddm_table ddm_table;
+
+ result = kmalloc(sizeof(*result), GFP_KERNEL);
+ if (!result)
+ return -ENOMEM;
+
+ token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0003, -1,
+ NULL, 0, result, sizeof(*result));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token,
+ "0x0003 Executing DDM List");
+ goto out;
+ }
+
+ seq_printf(seq,
+ "Tid Module_type Vendor Mod_id Module_name Vrs Data_size Code_size\n");
+ ddm_table = result->ddm_table[0];
+
+ for (i = 0; i < result->row_count; ddm_table = result->ddm_table[++i]) {
+ seq_printf(seq, "0x%03x ", ddm_table.ddm_tid & 0xFFF);
+
+ switch (ddm_table.module_type) {
+ case 0x01:
+ seq_printf(seq, "Downloaded DDM ");
+ break;
+ case 0x22:
+ seq_printf(seq, "Embedded DDM ");
+ break;
+ default:
+ seq_printf(seq, " ");
+ }
+
+ seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
+ seq_printf(seq, "%-#8x", ddm_table.module_id);
+ seq_printf(seq, "%-29s",
+ chtostr(ddm_table.module_name_version, 28));
+ seq_printf(seq, "%9d ", ddm_table.data_size);
+ seq_printf(seq, "%8d", ddm_table.code_size);
+
+ seq_printf(seq, "\n");
+ }
+ out:
+ kfree(result);
+ return 0;
+}
+
+/* Executive group 0004h - Driver Store (scalar) */
+static int i2o_seq_show_driver_store(struct seq_file *seq, void *v)
+{
+ struct i2o_controller *c = (struct i2o_controller *)seq->private;
+ u32 work32[8];
+ int token;
+
+ token =
+ i2o_parm_field_get(c->exec, 0x0004, -1, &work32, sizeof(work32));
+ if (token < 0) {
+ i2o_report_query_status(seq, token, "0x0004 Driver Store");
+ return 0;
+ }
+
+ seq_printf(seq, "Module limit : %d\n"
+ "Module count : %d\n"
+ "Current space : %d kB\n"
+ "Free space : %d kB\n",
+ work32[0], work32[1], work32[2] >> 10, work32[3] >> 10);
+
+ return 0;
+}
+
+/* Executive group 0005h - Driver Store Table (table) */
+static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
+{
+ typedef struct _i2o_driver_store {
+ u16 stored_ddm_index;
+ u8 module_type;
+ u8 reserved;
+ u16 i2o_vendor_id;
+ u16 module_id;
+ u8 module_name_version[28];
+ u8 date[8];
+ u32 module_size;
+ u32 mpb_size;
+ u32 module_flags;
+ } i2o_driver_store_table;
+
+ struct i2o_controller *c = (struct i2o_controller *)seq->private;
+ int token;
+ int i;
+
+ typedef struct {
+ u16 result_count;
+ u16 pad;
+ u16 block_size;
+ u8 block_status;
+ u8 error_info_size;
+ u16 row_count;
+ u16 more_flag;
+ i2o_driver_store_table dst[I2O_MAX_MODULES];
+ } i2o_driver_result_table;
+
+ i2o_driver_result_table *result;
+ i2o_driver_store_table *dst;
+
+ result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
+ if (result == NULL)
+ return -ENOMEM;
+
+ token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0005, -1,
+ NULL, 0, result, sizeof(*result));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token,
+ "0x0005 DRIVER STORE TABLE");
+ kfree(result);
+ return 0;
+ }
+
+ seq_printf(seq,
+		   "# Module_type Vendor Mod_id Module_name Vrs "
+		   "Date Mod_size Par_size Flags\n");
+ for (i = 0, dst = &result->dst[0]; i < result->row_count;
+ dst = &result->dst[++i]) {
+ seq_printf(seq, "%-3d", dst->stored_ddm_index);
+ switch (dst->module_type) {
+ case 0x01:
+ seq_printf(seq, "Downloaded DDM ");
+ break;
+ case 0x22:
+ seq_printf(seq, "Embedded DDM ");
+ break;
+ default:
+ seq_printf(seq, " ");
+ }
+
+ seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
+ seq_printf(seq, "%-#8x", dst->module_id);
+ seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
+ seq_printf(seq, "%-9s", chtostr(dst->date, 8));
+ seq_printf(seq, "%8d ", dst->module_size);
+ seq_printf(seq, "%8d ", dst->mpb_size);
+ seq_printf(seq, "0x%04x", dst->module_flags);
+ seq_printf(seq, "\n");
+ }
+
+ kfree(result);
+ return 0;
+}
+
+/* Generic group F000h - Params Descriptor (table) */
+static int i2o_seq_show_groups(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+ int token;
+ int i;
+ u8 properties;
+
+ typedef struct _i2o_group_info {
+ u16 group_number;
+ u16 field_count;
+ u16 row_count;
+ u8 properties;
+ u8 reserved;
+ } i2o_group_info;
+
+ struct {
+ u16 result_count;
+ u16 pad;
+ u16 block_size;
+ u8 block_status;
+ u8 error_info_size;
+ u16 row_count;
+ u16 more_flag;
+ i2o_group_info group[256];
+ } *result;
+
+ result = kmalloc(sizeof(*result), GFP_KERNEL);
+ if (!result)
+ return -ENOMEM;
+
+ token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0,
+ result, sizeof(*result));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token, "0xF000 Params Descriptor");
+ goto out;
+ }
+
+ seq_printf(seq,
+ "# Group FieldCount RowCount Type Add Del Clear\n");
+
+ for (i = 0; i < result->row_count; i++) {
+ seq_printf(seq, "%-3d", i);
+ seq_printf(seq, "0x%04X ", result->group[i].group_number);
+ seq_printf(seq, "%10d ", result->group[i].field_count);
+ seq_printf(seq, "%8d ", result->group[i].row_count);
+
+ properties = result->group[i].properties;
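+		/* bit 0: table (vs. scalar) group; bits 1..3 drive the Add,
+		 * Del and Clear columns printed below */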
+ if (properties & 0x1)
+ seq_printf(seq, "Table ");
+ else
+ seq_printf(seq, "Scalar ");
+ if (properties & 0x2)
+ seq_printf(seq, " + ");
+ else
+ seq_printf(seq, " - ");
+ if (properties & 0x4)
+ seq_printf(seq, " + ");
+ else
+ seq_printf(seq, " - ");
+ if (properties & 0x8)
+ seq_printf(seq, " + ");
+ else
+ seq_printf(seq, " - ");
+
+ seq_printf(seq, "\n");
+ }
+
+ if (result->more_flag)
+ seq_printf(seq, "There is more...\n");
+ out:
+ kfree(result);
+ return 0;
+}
+
+/* Generic group F001h - Physical Device Table (table) */
+static int i2o_seq_show_phys_device(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+ int token;
+ int i;
+
+ struct {
+ u16 result_count;
+ u16 pad;
+ u16 block_size;
+ u8 block_status;
+ u8 error_info_size;
+ u16 row_count;
+ u16 more_flag;
+ u32 adapter_id[64];
+ } result;
+
+ token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF001, -1, NULL, 0,
+ &result, sizeof(result));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token,
+ "0xF001 Physical Device Table");
+ return 0;
+ }
+
+ if (result.row_count)
+ seq_printf(seq, "# AdapterId\n");
+
+ for (i = 0; i < result.row_count; i++) {
+ seq_printf(seq, "%-2d", i);
+ seq_printf(seq, "%#7x\n", result.adapter_id[i]);
+ }
+
+ if (result.more_flag)
+ seq_printf(seq, "There is more...\n");
+
+ return 0;
+}
+
+/* Generic group F002h - Claimed Table (table) */
+static int i2o_seq_show_claimed(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+ int token;
+ int i;
+
+ struct {
+ u16 result_count;
+ u16 pad;
+ u16 block_size;
+ u8 block_status;
+ u8 error_info_size;
+ u16 row_count;
+ u16 more_flag;
+ u16 claimed_tid[64];
+ } result;
+
+ token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF002, -1, NULL, 0,
+ &result, sizeof(result));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token, "0xF002 Claimed Table");
+ return 0;
+ }
+
+ if (result.row_count)
+ seq_printf(seq, "# ClaimedTid\n");
+
+ for (i = 0; i < result.row_count; i++) {
+ seq_printf(seq, "%-2d", i);
+ seq_printf(seq, "%#7x\n", result.claimed_tid[i]);
+ }
+
+ if (result.more_flag)
+ seq_printf(seq, "There is more...\n");
+
+ return 0;
+}
+
+/* Generic group F003h - User Table (table) */
+static int i2o_seq_show_users(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+ int token;
+ int i;
+
+ typedef struct _i2o_user_table {
+ u16 instance;
+ u16 user_tid;
+ u8 claim_type;
+ u8 reserved1;
+ u16 reserved2;
+ } i2o_user_table;
+
+ struct {
+ u16 result_count;
+ u16 pad;
+ u16 block_size;
+ u8 block_status;
+ u8 error_info_size;
+ u16 row_count;
+ u16 more_flag;
+ i2o_user_table user[64];
+ } *result;
+
+ result = kmalloc(sizeof(*result), GFP_KERNEL);
+ if (!result)
+ return -ENOMEM;
+
+ token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF003, -1, NULL, 0,
+ result, sizeof(*result));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token, "0xF003 User Table");
+ goto out;
+ }
+
+ seq_printf(seq, "# Instance UserTid ClaimType\n");
+
+ for (i = 0; i < result->row_count; i++) {
+ seq_printf(seq, "%-3d", i);
+ seq_printf(seq, "%#8x ", result->user[i].instance);
+ seq_printf(seq, "%#7x ", result->user[i].user_tid);
+ seq_printf(seq, "%#9x\n", result->user[i].claim_type);
+ }
+
+ if (result->more_flag)
+ seq_printf(seq, "There is more...\n");
+ out:
+ kfree(result);
+ return 0;
+}
+
+/* Generic group F005h - Private message extensions (table) (optional) */
+static int i2o_seq_show_priv_msgs(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+ int token;
+ int i;
+
+ typedef struct _i2o_private {
+ u16 ext_instance;
+ u16 organization_id;
+ u16 x_function_code;
+ } i2o_private;
+
+ struct {
+ u16 result_count;
+ u16 pad;
+ u16 block_size;
+ u8 block_status;
+ u8 error_info_size;
+ u16 row_count;
+ u16 more_flag;
+ i2o_private extension[64];
+ } result;
+
+	token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF005, -1, NULL, 0,
+ &result, sizeof(result));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token,
+ "0xF005 Private Message Extensions (optional)");
+ return 0;
+ }
+
+ seq_printf(seq, "Instance# OrgId FunctionCode\n");
+
+ for (i = 0; i < result.row_count; i++) {
+ seq_printf(seq, "%0#9x ", result.extension[i].ext_instance);
+ seq_printf(seq, "%0#6x ", result.extension[i].organization_id);
+ seq_printf(seq, "%0#6x", result.extension[i].x_function_code);
+
+ seq_printf(seq, "\n");
+ }
+
+ if (result.more_flag)
+ seq_printf(seq, "There is more...\n");
+
+ return 0;
+}
+
+/* Generic group F006h - Authorized User Table (table) */
+static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+ int token;
+ int i;
+
+ struct {
+ u16 result_count;
+ u16 pad;
+ u16 block_size;
+ u8 block_status;
+ u8 error_info_size;
+ u16 row_count;
+ u16 more_flag;
+ u32 alternate_tid[64];
+ } result;
+
+ token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF006, -1, NULL, 0,
+ &result, sizeof(result));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token,
+					"0xF006 Authorized User Table");
+ return 0;
+ }
+
+ if (result.row_count)
+ seq_printf(seq, "# AlternateTid\n");
+
+ for (i = 0; i < result.row_count; i++) {
+ seq_printf(seq, "%-2d", i);
+		seq_printf(seq, "%#7x\n", result.alternate_tid[i]);
+ }
+
+ if (result.more_flag)
+ seq_printf(seq, "There is more...\n");
+
+ return 0;
+}
+
+/* Generic group F100h - Device Identity (scalar) */
+static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+ static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
+ // == (allow) 512d bytes (max)
+ static u16 *work16 = (u16 *) work32;
+ int token;
+
+ token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token, "0xF100 Device Identity");
+ return 0;
+ }
+
+ seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
+ seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
+ seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
+ seq_printf(seq, "Vendor info : %s\n",
+ chtostr((u8 *) (work32 + 2), 16));
+ seq_printf(seq, "Product info : %s\n",
+ chtostr((u8 *) (work32 + 6), 16));
+ seq_printf(seq, "Description : %s\n",
+ chtostr((u8 *) (work32 + 10), 16));
+ seq_printf(seq, "Product rev. : %s\n",
+ chtostr((u8 *) (work32 + 14), 8));
+
+ seq_printf(seq, "Serial number : ");
+ print_serial_number(seq, (u8 *) (work32 + 16),
+ /* allow for SNLen plus
+ * possible trailing '\0'
+ */
+ sizeof(work32) - (16 * sizeof(u32)) - 2);
+ seq_printf(seq, "\n");
+
+ return 0;
+}
+
+static int i2o_seq_show_dev_name(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+
+ seq_printf(seq, "%s\n", dev_name(&d->device));
+
+ return 0;
+}
+
+/* Generic group F101h - DDM Identity (scalar) */
+static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+ int token;
+
+ struct {
+ u16 ddm_tid;
+ u8 module_name[24];
+ u8 module_rev[8];
+ u8 sn_format;
+ u8 serial_number[12];
+ u8 pad[256]; // allow up to 256 byte (max) serial number
+ } result;
+
+ token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token, "0xF101 DDM Identity");
+ return 0;
+ }
+
+ seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
+ seq_printf(seq, "Module name : %s\n",
+ chtostr(result.module_name, 24));
+ seq_printf(seq, "Module revision : %s\n",
+ chtostr(result.module_rev, 8));
+
+ seq_printf(seq, "Serial number : ");
+ print_serial_number(seq, result.serial_number, sizeof(result) - 36);
+ /* allow for SNLen plus possible trailing '\0' */
+
+ seq_printf(seq, "\n");
+
+ return 0;
+}
+
+/* Generic group F102h - User Information (scalar) */
+static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+ int token;
+
+ struct {
+ u8 device_name[64];
+ u8 service_name[64];
+ u8 physical_location[64];
+ u8 instance_number[4];
+ } result;
+
+ token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token, "0xF102 User Information");
+ return 0;
+ }
+
+ seq_printf(seq, "Device name : %s\n",
+ chtostr(result.device_name, 64));
+ seq_printf(seq, "Service name : %s\n",
+ chtostr(result.service_name, 64));
+ seq_printf(seq, "Physical name : %s\n",
+ chtostr(result.physical_location, 64));
+ seq_printf(seq, "Instance number : %s\n",
+ chtostr(result.instance_number, 4));
+
+ return 0;
+}
+
+/* Generic group F103h - SGL Operating Limits (scalar) */
+static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+ static u32 work32[12];
+ static u16 *work16 = (u16 *) work32;
+ static u8 *work8 = (u8 *) work32;
+ int token;
+
+ token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token,
+ "0xF103 SGL Operating Limits");
+ return 0;
+ }
+
+ seq_printf(seq, "SGL chain size : %d\n", work32[0]);
+ seq_printf(seq, "Max SGL chain size : %d\n", work32[1]);
+ seq_printf(seq, "SGL chain size target : %d\n", work32[2]);
+ seq_printf(seq, "SGL frag count : %d\n", work16[6]);
+ seq_printf(seq, "Max SGL frag count : %d\n", work16[7]);
+ seq_printf(seq, "SGL frag count target : %d\n", work16[8]);
+
+/* FIXME
+ if (d->i2oversion == 0x02)
+ {
+*/
+ seq_printf(seq, "SGL data alignment : %d\n", work16[8]);
+ seq_printf(seq, "SGL addr limit : %d\n", work8[20]);
+ seq_printf(seq, "SGL addr sizes supported : ");
+ if (work8[21] & 0x01)
+ seq_printf(seq, "32 bit ");
+ if (work8[21] & 0x02)
+ seq_printf(seq, "64 bit ");
+ if (work8[21] & 0x04)
+ seq_printf(seq, "96 bit ");
+ if (work8[21] & 0x08)
+ seq_printf(seq, "128 bit ");
+ seq_printf(seq, "\n");
+/*
+ }
+*/
+
+ return 0;
+}
+
+/* Generic group F200h - Sensors (scalar) */
+static int i2o_seq_show_sensors(struct seq_file *seq, void *v)
+{
+ struct i2o_device *d = (struct i2o_device *)seq->private;
+ int token;
+
+ struct {
+ u16 sensor_instance;
+ u8 component;
+ u16 component_instance;
+ u8 sensor_class;
+ u8 sensor_type;
+ u8 scaling_exponent;
+ u32 actual_reading;
+ u32 minimum_reading;
+ u32 low2lowcat_treshold;
+ u32 lowcat2low_treshold;
+ u32 lowwarn2low_treshold;
+ u32 low2lowwarn_treshold;
+ u32 norm2lowwarn_treshold;
+ u32 lowwarn2norm_treshold;
+ u32 nominal_reading;
+ u32 hiwarn2norm_treshold;
+ u32 norm2hiwarn_treshold;
+ u32 high2hiwarn_treshold;
+ u32 hiwarn2high_treshold;
+ u32 hicat2high_treshold;
+ u32 hi2hicat_treshold;
+ u32 maximum_reading;
+ u8 sensor_state;
+ u16 event_enable;
+ } result;
+
+ token = i2o_parm_field_get(d, 0xF200, -1, &result, sizeof(result));
+
+ if (token < 0) {
+ i2o_report_query_status(seq, token,
+ "0xF200 Sensors (optional)");
+ return 0;
+ }
+
+ seq_printf(seq, "Sensor instance : %d\n", result.sensor_instance);
+
+ seq_printf(seq, "Component : %d = ", result.component);
+ switch (result.component) {
+ case 0:
+ seq_printf(seq, "Other");
+ break;
+ case 1:
+ seq_printf(seq, "Planar logic Board");
+ break;
+ case 2:
+ seq_printf(seq, "CPU");
+ break;
+ case 3:
+ seq_printf(seq, "Chassis");
+ break;
+ case 4:
+ seq_printf(seq, "Power Supply");
+ break;
+ case 5:
+ seq_printf(seq, "Storage");
+ break;
+ case 6:
+ seq_printf(seq, "External");
+ break;
+ }
+ seq_printf(seq, "\n");
+
+ seq_printf(seq, "Component instance : %d\n",
+ result.component_instance);
+ seq_printf(seq, "Sensor class : %s\n",
+ result.sensor_class ? "Analog" : "Digital");
+
+ seq_printf(seq, "Sensor type : %d = ", result.sensor_type);
+ switch (result.sensor_type) {
+ case 0:
+ seq_printf(seq, "Other\n");
+ break;
+ case 1:
+ seq_printf(seq, "Thermal\n");
+ break;
+ case 2:
+ seq_printf(seq, "DC voltage (DC volts)\n");
+ break;
+ case 3:
+ seq_printf(seq, "AC voltage (AC volts)\n");
+ break;
+ case 4:
+ seq_printf(seq, "DC current (DC amps)\n");
+ break;
+ case 5:
+		seq_printf(seq, "AC current (AC amps)\n");
+ break;
+ case 6:
+ seq_printf(seq, "Door open\n");
+ break;
+ case 7:
+ seq_printf(seq, "Fan operational\n");
+ break;
+ }
+
+ seq_printf(seq, "Scaling exponent : %d\n",
+ result.scaling_exponent);
+ seq_printf(seq, "Actual reading : %d\n", result.actual_reading);
+ seq_printf(seq, "Minimum reading : %d\n", result.minimum_reading);
+	seq_printf(seq, "Low2LowCat threshold : %d\n",
+		   result.low2lowcat_treshold);
+	seq_printf(seq, "LowCat2Low threshold : %d\n",
+		   result.lowcat2low_treshold);
+	seq_printf(seq, "LowWarn2Low threshold : %d\n",
+		   result.lowwarn2low_treshold);
+	seq_printf(seq, "Low2LowWarn threshold : %d\n",
+		   result.low2lowwarn_treshold);
+	seq_printf(seq, "Norm2LowWarn threshold : %d\n",
+		   result.norm2lowwarn_treshold);
+	seq_printf(seq, "LowWarn2Norm threshold : %d\n",
+		   result.lowwarn2norm_treshold);
+ seq_printf(seq, "Nominal reading : %d\n", result.nominal_reading);
+	seq_printf(seq, "HiWarn2Norm threshold : %d\n",
+		   result.hiwarn2norm_treshold);
+	seq_printf(seq, "Norm2HiWarn threshold : %d\n",
+		   result.norm2hiwarn_treshold);
+	seq_printf(seq, "High2HiWarn threshold : %d\n",
+		   result.high2hiwarn_treshold);
+	seq_printf(seq, "HiWarn2High threshold : %d\n",
+		   result.hiwarn2high_treshold);
+	seq_printf(seq, "HiCat2High threshold : %d\n",
+		   result.hicat2high_treshold);
+	seq_printf(seq, "High2HiCat threshold : %d\n",
+		   result.hi2hicat_treshold);
+ seq_printf(seq, "Maximum reading : %d\n", result.maximum_reading);
+
+ seq_printf(seq, "Sensor state : %d = ", result.sensor_state);
+ switch (result.sensor_state) {
+ case 0:
+ seq_printf(seq, "Normal\n");
+ break;
+ case 1:
+ seq_printf(seq, "Abnormal\n");
+ break;
+ case 2:
+ seq_printf(seq, "Unknown\n");
+ break;
+ case 3:
+ seq_printf(seq, "Low Catastrophic (LoCat)\n");
+ break;
+ case 4:
+ seq_printf(seq, "Low (Low)\n");
+ break;
+ case 5:
+ seq_printf(seq, "Low Warning (LoWarn)\n");
+ break;
+ case 6:
+ seq_printf(seq, "High Warning (HiWarn)\n");
+ break;
+ case 7:
+ seq_printf(seq, "High (High)\n");
+ break;
+ case 8:
+ seq_printf(seq, "High Catastrophic (HiCat)\n");
+ break;
+ }
+
+ seq_printf(seq, "Event_enable : 0x%02X\n", result.event_enable);
+ seq_printf(seq, " [%s] Operational state change. \n",
+ (result.event_enable & 0x01) ? "+" : "-");
+ seq_printf(seq, " [%s] Low catastrophic. \n",
+ (result.event_enable & 0x02) ? "+" : "-");
+ seq_printf(seq, " [%s] Low reading. \n",
+ (result.event_enable & 0x04) ? "+" : "-");
+ seq_printf(seq, " [%s] Low warning. \n",
+ (result.event_enable & 0x08) ? "+" : "-");
+ seq_printf(seq,
+ " [%s] Change back to normal from out of range state. \n",
+ (result.event_enable & 0x10) ? "+" : "-");
+ seq_printf(seq, " [%s] High warning. \n",
+ (result.event_enable & 0x20) ? "+" : "-");
+ seq_printf(seq, " [%s] High reading. \n",
+ (result.event_enable & 0x40) ? "+" : "-");
+ seq_printf(seq, " [%s] High catastrophic. \n",
+ (result.event_enable & 0x80) ? "+" : "-");
+
+ return 0;
+}
+
+static int i2o_seq_open_hrt(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_hrt, PDE(inode)->data);
+};
+
+static int i2o_seq_open_lct(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_lct, PDE(inode)->data);
+};
+
+static int i2o_seq_open_status(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_status, PDE(inode)->data);
+};
+
+static int i2o_seq_open_hw(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_hw, PDE(inode)->data);
+};
+
+static int i2o_seq_open_ddm_table(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_ddm_table, PDE(inode)->data);
+};
+
+static int i2o_seq_open_driver_store(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_driver_store, PDE(inode)->data);
+};
+
+static int i2o_seq_open_drivers_stored(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_drivers_stored, PDE(inode)->data);
+};
+
+static int i2o_seq_open_groups(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_groups, PDE(inode)->data);
+};
+
+static int i2o_seq_open_phys_device(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_phys_device, PDE(inode)->data);
+};
+
+static int i2o_seq_open_claimed(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_claimed, PDE(inode)->data);
+};
+
+static int i2o_seq_open_users(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_users, PDE(inode)->data);
+};
+
+static int i2o_seq_open_priv_msgs(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_priv_msgs, PDE(inode)->data);
+};
+
+static int i2o_seq_open_authorized_users(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_authorized_users,
+ PDE(inode)->data);
+};
+
+static int i2o_seq_open_dev_identity(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_dev_identity, PDE(inode)->data);
+};
+
+static int i2o_seq_open_ddm_identity(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_ddm_identity, PDE(inode)->data);
+};
+
+static int i2o_seq_open_uinfo(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_uinfo, PDE(inode)->data);
+};
+
+static int i2o_seq_open_sgl_limits(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_sgl_limits, PDE(inode)->data);
+};
+
+static int i2o_seq_open_sensors(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_sensors, PDE(inode)->data);
+};
+
+static int i2o_seq_open_dev_name(struct inode *inode, struct file *file)
+{
+ return single_open(file, i2o_seq_show_dev_name, PDE(inode)->data);
+};
+
+static const struct file_operations i2o_seq_fops_lct = {
+ .open = i2o_seq_open_lct,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_hrt = {
+ .open = i2o_seq_open_hrt,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_status = {
+ .open = i2o_seq_open_status,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_hw = {
+ .open = i2o_seq_open_hw,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_ddm_table = {
+ .open = i2o_seq_open_ddm_table,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_driver_store = {
+ .open = i2o_seq_open_driver_store,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_drivers_stored = {
+ .open = i2o_seq_open_drivers_stored,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_groups = {
+ .open = i2o_seq_open_groups,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_phys_device = {
+ .open = i2o_seq_open_phys_device,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_claimed = {
+ .open = i2o_seq_open_claimed,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_users = {
+ .open = i2o_seq_open_users,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_priv_msgs = {
+ .open = i2o_seq_open_priv_msgs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_authorized_users = {
+ .open = i2o_seq_open_authorized_users,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_dev_name = {
+ .open = i2o_seq_open_dev_name,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_dev_identity = {
+ .open = i2o_seq_open_dev_identity,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_ddm_identity = {
+ .open = i2o_seq_open_ddm_identity,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_uinfo = {
+ .open = i2o_seq_open_uinfo,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_sgl_limits = {
+ .open = i2o_seq_open_sgl_limits,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations i2o_seq_fops_sensors = {
+ .open = i2o_seq_open_sensors,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * IOP specific entries...write field just in case someone
+ * ever wants one.
+ */
+static i2o_proc_entry i2o_proc_generic_iop_entries[] = {
+ {"hrt", S_IFREG | S_IRUGO, &i2o_seq_fops_hrt},
+ {"lct", S_IFREG | S_IRUGO, &i2o_seq_fops_lct},
+ {"status", S_IFREG | S_IRUGO, &i2o_seq_fops_status},
+ {"hw", S_IFREG | S_IRUGO, &i2o_seq_fops_hw},
+ {"ddm_table", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_table},
+ {"driver_store", S_IFREG | S_IRUGO, &i2o_seq_fops_driver_store},
+ {"drivers_stored", S_IFREG | S_IRUGO, &i2o_seq_fops_drivers_stored},
+ {NULL, 0, NULL}
+};
+
+/*
+ * Device specific entries
+ */
+static i2o_proc_entry generic_dev_entries[] = {
+ {"groups", S_IFREG | S_IRUGO, &i2o_seq_fops_groups},
+ {"phys_dev", S_IFREG | S_IRUGO, &i2o_seq_fops_phys_device},
+ {"claimed", S_IFREG | S_IRUGO, &i2o_seq_fops_claimed},
+ {"users", S_IFREG | S_IRUGO, &i2o_seq_fops_users},
+ {"priv_msgs", S_IFREG | S_IRUGO, &i2o_seq_fops_priv_msgs},
+ {"authorized_users", S_IFREG | S_IRUGO, &i2o_seq_fops_authorized_users},
+ {"dev_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_identity},
+ {"ddm_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_identity},
+ {"user_info", S_IFREG | S_IRUGO, &i2o_seq_fops_uinfo},
+ {"sgl_limits", S_IFREG | S_IRUGO, &i2o_seq_fops_sgl_limits},
+ {"sensors", S_IFREG | S_IRUGO, &i2o_seq_fops_sensors},
+ {NULL, 0, NULL}
+};
+
+/*
+ * Storage unit specific entries (SCSI Periph, BS) with device names
+ */
+static i2o_proc_entry rbs_dev_entries[] = {
+ {"dev_name", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_name},
+ {NULL, 0, NULL}
+};
+
+/**
+ * i2o_proc_create_entries - Creates proc dir entries
+ * @dir: proc dir entry under which the entries should be placed
+ * @i2o_pe: pointer to the entries which should be added
+ * @data: pointer to I2O controller or device
+ *
+ * Create proc dir entries for an I2O controller or I2O device.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_proc_create_entries(struct proc_dir_entry *dir,
+ i2o_proc_entry * i2o_pe, void *data)
+{
+ struct proc_dir_entry *tmp;
+
+ while (i2o_pe->name) {
+ tmp = proc_create_data(i2o_pe->name, i2o_pe->mode, dir,
+ i2o_pe->fops, data);
+ if (!tmp)
+ return -1;
+
+ i2o_pe++;
+ }
+
+ return 0;
+}
+
+/**
+ * i2o_proc_subdir_remove - Remove child entries from a proc entry
+ * @dir: proc dir entry from which the children should be removed
+ *
+ * Iterate over each i2o proc entry under dir and remove it. If the child
+ * also has entries, remove them too.
+ */
+static void i2o_proc_subdir_remove(struct proc_dir_entry *dir)
+{
+ struct proc_dir_entry *pe, *tmp;
+ pe = dir->subdir;
+ while (pe) {
+ tmp = pe->next;
+ i2o_proc_subdir_remove(pe);
+ remove_proc_entry(pe->name, dir);
+ pe = tmp;
+ }
+};
+
+/**
+ * i2o_proc_device_add - Add an I2O device to the proc dir
+ * @dir: proc dir entry to which the device should be added
+ * @dev: I2O device which should be added
+ *
+ * Add an I2O device to the proc dir entry dir and create the entries for
+ * the device depending on the class of the I2O device.
+ */
+static void i2o_proc_device_add(struct proc_dir_entry *dir,
+ struct i2o_device *dev)
+{
+ char buff[10];
+ struct proc_dir_entry *devdir;
+ i2o_proc_entry *i2o_pe = NULL;
+
+ sprintf(buff, "%03x", dev->lct_data.tid);
+
+ osm_debug("adding device /proc/i2o/%s/%s\n", dev->iop->name, buff);
+
+ devdir = proc_mkdir(buff, dir);
+ if (!devdir) {
+ osm_warn("Could not allocate procdir!\n");
+ return;
+ }
+
+ devdir->data = dev;
+
+ i2o_proc_create_entries(devdir, generic_dev_entries, dev);
+
+ /* Inform core that we want updates about this device's status */
+ switch (dev->lct_data.class_id) {
+ case I2O_CLASS_SCSI_PERIPHERAL:
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ i2o_pe = rbs_dev_entries;
+ break;
+ default:
+ break;
+ }
+ if (i2o_pe)
+ i2o_proc_create_entries(devdir, i2o_pe, dev);
+}
+
+/**
+ * i2o_proc_iop_add - Add an I2O controller to the i2o proc tree
+ * @dir: parent proc dir entry
+ * @c: I2O controller which should be added
+ *
+ * Add the entries to the parent proc dir entry. Also each device is added
+ * to the controller's proc dir entry.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_proc_iop_add(struct proc_dir_entry *dir,
+ struct i2o_controller *c)
+{
+ struct proc_dir_entry *iopdir;
+ struct i2o_device *dev;
+
+ osm_debug("adding IOP /proc/i2o/%s\n", c->name);
+
+ iopdir = proc_mkdir(c->name, dir);
+ if (!iopdir)
+ return -1;
+
+ iopdir->data = c;
+
+ i2o_proc_create_entries(iopdir, i2o_proc_generic_iop_entries, c);
+
+ list_for_each_entry(dev, &c->devices, list)
+ i2o_proc_device_add(iopdir, dev);
+
+ return 0;
+}
+
+/**
+ * i2o_proc_iop_remove - Removes an I2O controller from the i2o proc tree
+ * @dir: parent proc dir entry
+ * @c: I2O controller which should be removed
+ *
+ * Iterate over each i2o proc entry and search for controller c. If it is
+ * found, remove it from the tree.
+ */
+static void i2o_proc_iop_remove(struct proc_dir_entry *dir,
+ struct i2o_controller *c)
+{
+ struct proc_dir_entry *pe, *tmp;
+
+ pe = dir->subdir;
+ while (pe) {
+ tmp = pe->next;
+ if (pe->data == c) {
+ i2o_proc_subdir_remove(pe);
+ remove_proc_entry(pe->name, dir);
+ }
+ osm_debug("removing IOP /proc/i2o/%s\n", c->name);
+ pe = tmp;
+ }
+}
+
+/**
+ * i2o_proc_fs_create - Create the i2o proc fs.
+ *
+ * Iterate over each I2O controller and create the entries for it.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_proc_fs_create(void)
+{
+ struct i2o_controller *c;
+
+ i2o_proc_dir_root = proc_mkdir("i2o", NULL);
+ if (!i2o_proc_dir_root)
+ return -1;
+
+ list_for_each_entry(c, &i2o_controllers, list)
+ i2o_proc_iop_add(i2o_proc_dir_root, c);
+
+ return 0;
+};
+
+/**
+ * i2o_proc_fs_destroy - Clean up all i2o proc entries
+ *
+ * Iterate over each I2O controller and remove the entries for it.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int __exit i2o_proc_fs_destroy(void)
+{
+ struct i2o_controller *c;
+
+ list_for_each_entry(c, &i2o_controllers, list)
+ i2o_proc_iop_remove(i2o_proc_dir_root, c);
+
+ remove_proc_entry("i2o", NULL);
+
+ return 0;
+};
+
+/**
+ * i2o_proc_init - Init function for procfs
+ *
+ * Registers Proc OSM and creates procfs entries.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_proc_init(void)
+{
+ int rc;
+
+ printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
+
+ rc = i2o_driver_register(&i2o_proc_driver);
+ if (rc)
+ return rc;
+
+ rc = i2o_proc_fs_create();
+ if (rc) {
+ i2o_driver_unregister(&i2o_proc_driver);
+ return rc;
+ }
+
+ return 0;
+};
+
+/**
+ * i2o_proc_exit - Exit function for procfs
+ *
+ * Unregisters Proc OSM and removes procfs entries.
+ */
+static void __exit i2o_proc_exit(void)
+{
+ i2o_driver_unregister(&i2o_proc_driver);
+ i2o_proc_fs_destroy();
+};
+
+MODULE_AUTHOR("Deepak Saxena");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(OSM_DESCRIPTION);
+MODULE_VERSION(OSM_VERSION);
+
+module_init(i2o_proc_init);
+module_exit(i2o_proc_exit);
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
new file mode 100644
index 00000000..74fbe563
--- /dev/null
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -0,0 +1,816 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * For the avoidance of doubt the "preferred form" of this code is one which
+ * is in an open non patent encumbered format. Where cryptographic key signing
+ * forms part of the process of creating an executable the information
+ * including keys needed to generate an equivalently functional executable
+ * are deemed to be part of the source code.
+ *
+ * Complications for I2O scsi
+ *
+ * o Each (bus,lun) is a logical device in I2O. We keep a map
+ * table. We spoof failed selection for unmapped units
+ * o Request sense buffers can come back for free.
+ * o Scatter gather is a bit dynamic. We have to investigate at
+ * setup time.
+ * o Some of our resources are dynamically shared. The i2o core
+ * needs a message reservation protocol to avoid swap v net
+ * deadlocking. We need to back off queue requests.
+ *
+ * In general the firmware wants to help. Where its help isn't useful for
+ * performance we just ignore the aid. It's not worth the code in truth.
+ *
+ * Fixes/additions:
+ * Steve Ralston:
+ * Scatter gather now works
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ * Minor fixes for 2.6.
+ *
+ * To Do:
+ * 64bit cleanups
+ * Fix the resource management problems.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/prefetch.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/i2o.h>
+#include <linux/scatterlist.h>
+
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/sg.h>
+
+#define OSM_NAME "scsi-osm"
+#define OSM_VERSION "1.316"
+#define OSM_DESCRIPTION "I2O SCSI Peripheral OSM"
+
+static struct i2o_driver i2o_scsi_driver;
+
+static unsigned int i2o_scsi_max_id = 16;
+static unsigned int i2o_scsi_max_lun = 255;
+
+struct i2o_scsi_host {
+ struct Scsi_Host *scsi_host; /* pointer to the SCSI host */
+ struct i2o_controller *iop; /* pointer to the I2O controller */
+ unsigned int lun; /* lun's used for block devices */
+ struct i2o_device *channel[0]; /* channel->i2o_dev mapping table */
+};
+
+static struct scsi_host_template i2o_scsi_host_template;
+
+#define I2O_SCSI_CAN_QUEUE 4
+
+/* SCSI OSM class handling definition */
+static struct i2o_class_id i2o_scsi_class_id[] = {
+ {I2O_CLASS_SCSI_PERIPHERAL},
+ {I2O_CLASS_END}
+};
+
+static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
+{
+ struct i2o_scsi_host *i2o_shost;
+ struct i2o_device *i2o_dev;
+ struct Scsi_Host *scsi_host;
+ int max_channel = 0;
+ u8 type;
+ int i;
+ size_t size;
+ u16 body_size = 6;
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+ if (c->adaptec)
+ body_size = 8;
+#endif
+
+ list_for_each_entry(i2o_dev, &c->devices, list)
+ if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
+ if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
+ && (type == 0x01)) /* SCSI bus */
+ max_channel++;
+ }
+
+ if (!max_channel) {
+ osm_warn("no channels found on %s\n", c->name);
+ return ERR_PTR(-EFAULT);
+ }
+
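+	/*
+	 * The per-channel i2o_device pointer table declared as channel[0]
+	 * above is carved out of the SCSI host private data, so reserve
+	 * room for both in one allocation.
+	 */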
+ size = max_channel * sizeof(struct i2o_device *)
+ + sizeof(struct i2o_scsi_host);
+
+ scsi_host = scsi_host_alloc(&i2o_scsi_host_template, size);
+ if (!scsi_host) {
+ osm_warn("Could not allocate SCSI host\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ scsi_host->max_channel = max_channel - 1;
+ scsi_host->max_id = i2o_scsi_max_id;
+ scsi_host->max_lun = i2o_scsi_max_lun;
+ scsi_host->this_id = c->unit;
+ scsi_host->sg_tablesize = i2o_sg_tablesize(c, body_size);
+
+ i2o_shost = (struct i2o_scsi_host *)scsi_host->hostdata;
+ i2o_shost->scsi_host = scsi_host;
+ i2o_shost->iop = c;
+ i2o_shost->lun = 1;
+
+ i = 0;
+ list_for_each_entry(i2o_dev, &c->devices, list)
+ if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
+ if (!i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
+ && (type == 0x01)) /* only SCSI bus */
+ i2o_shost->channel[i++] = i2o_dev;
+
+ if (i >= max_channel)
+ break;
+ }
+
+ return i2o_shost;
+};
+
+/**
+ * i2o_scsi_get_host - Get an I2O SCSI host
+ * @c: I2O controller for which to get the SCSI host
+ *
+ * If the I2O controller already exists as a SCSI host, the SCSI host
+ * is returned, otherwise the I2O controller is added to the SCSI
+ * core.
+ *
+ * Returns pointer to the I2O SCSI host on success or NULL on failure.
+ */
+static struct i2o_scsi_host *i2o_scsi_get_host(struct i2o_controller *c)
+{
+ return c->driver_data[i2o_scsi_driver.context];
+};
+
+/**
+ * i2o_scsi_remove - Remove I2O device from SCSI core
+ * @dev: device which should be removed
+ *
+ * Removes the I2O device from the SCSI core again.
+ *
+ * Returns 0 on success.
+ */
+static int i2o_scsi_remove(struct device *dev)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(dev);
+ struct i2o_controller *c = i2o_dev->iop;
+ struct i2o_scsi_host *i2o_shost;
+ struct scsi_device *scsi_dev;
+
+ osm_info("device removed (TID: %03x)\n", i2o_dev->lct_data.tid);
+
+ i2o_shost = i2o_scsi_get_host(c);
+
+ shost_for_each_device(scsi_dev, i2o_shost->scsi_host)
+ if (scsi_dev->hostdata == i2o_dev) {
+ sysfs_remove_link(&i2o_dev->device.kobj, "scsi");
+ scsi_remove_device(scsi_dev);
+ scsi_device_put(scsi_dev);
+ break;
+ }
+
+ return 0;
+};
+
+/**
+ * i2o_scsi_probe - verify if dev is an I2O SCSI device and install it
+ * @dev: device to verify if it is an I2O SCSI device
+ *
+ * Retrieve channel, id and lun for I2O device. If everything goes well
+ * register the I2O device as SCSI device on the I2O SCSI controller.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_scsi_probe(struct device *dev)
+{
+ struct i2o_device *i2o_dev = to_i2o_device(dev);
+ struct i2o_controller *c = i2o_dev->iop;
+ struct i2o_scsi_host *i2o_shost;
+ struct Scsi_Host *scsi_host;
+ struct i2o_device *parent;
+ struct scsi_device *scsi_dev;
+ u32 id = -1;
+ u64 lun = -1;
+ int channel = -1;
+ int i, rc;
+
+ i2o_shost = i2o_scsi_get_host(c);
+ if (!i2o_shost)
+ return -EFAULT;
+
+ scsi_host = i2o_shost->scsi_host;
+
+ switch (i2o_dev->lct_data.class_id) {
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ case I2O_CLASS_EXECUTIVE:
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+ if (c->adaptec) {
+ u8 type;
+ struct i2o_device *d = i2o_shost->channel[0];
+
+ if (!i2o_parm_field_get(d, 0x0000, 0, &type, 1)
+ && (type == 0x01)) /* SCSI bus */
+ if (!i2o_parm_field_get(d, 0x0200, 4, &id, 4)) {
+ channel = 0;
+ if (i2o_dev->lct_data.class_id ==
+ I2O_CLASS_RANDOM_BLOCK_STORAGE)
+ lun =
+ cpu_to_le64(i2o_shost->
+ lun++);
+ else
+ lun = 0;
+ }
+ }
+#endif
+ break;
+
+ case I2O_CLASS_SCSI_PERIPHERAL:
+ if (i2o_parm_field_get(i2o_dev, 0x0000, 3, &id, 4))
+ return -EFAULT;
+
+ if (i2o_parm_field_get(i2o_dev, 0x0000, 4, &lun, 8))
+ return -EFAULT;
+
+ parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid);
+ if (!parent) {
+ osm_warn("can not find parent of device %03x\n",
+ i2o_dev->lct_data.tid);
+ return -EFAULT;
+ }
+
+ for (i = 0; i <= i2o_shost->scsi_host->max_channel; i++)
+ if (i2o_shost->channel[i] == parent)
+ channel = i;
+ break;
+
+ default:
+ return -EFAULT;
+ }
+
+ if (channel == -1) {
+ osm_warn("can not find channel of device %03x\n",
+ i2o_dev->lct_data.tid);
+ return -EFAULT;
+ }
+
+ if (le32_to_cpu(id) >= scsi_host->max_id) {
+ osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)",
+ le32_to_cpu(id), scsi_host->max_id);
+ return -EFAULT;
+ }
+
+ if (le64_to_cpu(lun) >= scsi_host->max_lun) {
+ osm_warn("SCSI device lun (%lu) >= max_lun of I2O host (%d)",
+ (long unsigned int)le64_to_cpu(lun),
+ scsi_host->max_lun);
+ return -EFAULT;
+ }
+
+ scsi_dev =
+ __scsi_add_device(i2o_shost->scsi_host, channel, le32_to_cpu(id),
+ le64_to_cpu(lun), i2o_dev);
+
+ if (IS_ERR(scsi_dev)) {
+ osm_warn("can not add SCSI device %03x\n",
+ i2o_dev->lct_data.tid);
+ return PTR_ERR(scsi_dev);
+ }
+
+ rc = sysfs_create_link(&i2o_dev->device.kobj,
+ &scsi_dev->sdev_gendev.kobj, "scsi");
+ if (rc)
+ goto err;
+
+ osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %ld\n",
+ i2o_dev->lct_data.tid, channel, le32_to_cpu(id),
+ (long unsigned int)le64_to_cpu(lun));
+
+ return 0;
+
+err:
+ scsi_remove_device(scsi_dev);
+ return rc;
+};
+
+static const char *i2o_scsi_info(struct Scsi_Host *SChost)
+{
+ struct i2o_scsi_host *hostdata;
+ hostdata = (struct i2o_scsi_host *)SChost->hostdata;
+ return hostdata->iop->name;
+}
+
+/**
+ * i2o_scsi_reply - SCSI OSM message reply handler
+ * @c: controller issuing the reply
+ * @m: message id for flushing
+ * @msg: the message from the controller
+ *
+ * Process reply messages (the equivalent of interrupts on a normal SCSI controller).
+ * We can get a variety of messages to process. The normal path is
+ * scsi command completions. We must also deal with IOP failures,
+ * the reply to a bus reset and the reply to a LUN query.
+ *
+ * Returns 0 on success and if the reply should not be flushed or > 0
+ * on success and if the reply should be flushed. Returns negative error
+ * code on failure and if the reply should be flushed.
+ */
+static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
+ struct i2o_message *msg)
+{
+ struct scsi_cmnd *cmd;
+ u32 error;
+ struct device *dev;
+
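+	/*
+	 * Map the transaction context saved at queuecommand time back to the
+	 * originating scsi_cmnd (via the context list on 64-bit builds).
+	 */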
+ cmd = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
+ if (unlikely(!cmd)) {
+ osm_err("NULL reply received!\n");
+ return -1;
+ }
+
+ /*
+ * Low byte is device status, next is adapter status,
+ * (then one byte reserved), then request status.
+ */
+ error = le32_to_cpu(msg->body[0]);
+
+	osm_debug("Completed %p\n", cmd);
+
+ cmd->result = error & 0xff;
+ /*
+ * if DeviceStatus is not SCSI_SUCCESS copy over the sense data and let
+ * the SCSI layer handle the error
+ */
+ if (cmd->result)
+ memcpy(cmd->sense_buffer, &msg->body[3],
+ min(SCSI_SENSE_BUFFERSIZE, 40));
+
+ /* only output error code if AdapterStatus is not HBA_SUCCESS */
+ if ((error >> 8) & 0xff)
+ osm_err("SCSI error %08x\n", error);
+
+ dev = &c->pdev->dev;
+
+ scsi_dma_unmap(cmd);
+
+ cmd->scsi_done(cmd);
+
+ return 1;
+};
+
+/**
+ * i2o_scsi_notify_device_add - Retrieve notifications of added devices
+ * @i2o_dev: the I2O device which was added
+ *
+ * If an I2O device is added we catch the notification, because I2O classes
+ * other than SCSI peripheral will not be received through
+ * i2o_scsi_probe().
+ */
+static void i2o_scsi_notify_device_add(struct i2o_device *i2o_dev)
+{
+ switch (i2o_dev->lct_data.class_id) {
+ case I2O_CLASS_EXECUTIVE:
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ i2o_scsi_probe(&i2o_dev->device);
+ break;
+
+ default:
+ break;
+ }
+};
+
+/**
+ * i2o_scsi_notify_device_remove - Retrieve notifications of removed devices
+ * @i2o_dev: the I2O device which was removed
+ *
+ * If an I2O device is removed, we catch the notification to remove the
+ * corresponding SCSI device.
+ */
+static void i2o_scsi_notify_device_remove(struct i2o_device *i2o_dev)
+{
+ switch (i2o_dev->lct_data.class_id) {
+ case I2O_CLASS_EXECUTIVE:
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ i2o_scsi_remove(&i2o_dev->device);
+ break;
+
+ default:
+ break;
+ }
+};
+
+/**
+ * i2o_scsi_notify_controller_add - Retrieve notifications of added controllers
+ * @c: the controller which was added
+ *
+ * If an I2O controller is added, we catch the notification to add a
+ * corresponding Scsi_Host.
+ */
+static void i2o_scsi_notify_controller_add(struct i2o_controller *c)
+{
+ struct i2o_scsi_host *i2o_shost;
+ int rc;
+
+ i2o_shost = i2o_scsi_host_alloc(c);
+ if (IS_ERR(i2o_shost)) {
+ osm_err("Could not initialize SCSI host\n");
+ return;
+ }
+
+ rc = scsi_add_host(i2o_shost->scsi_host, &c->device);
+ if (rc) {
+ osm_err("Could not add SCSI host\n");
+ scsi_host_put(i2o_shost->scsi_host);
+ return;
+ }
+
+ c->driver_data[i2o_scsi_driver.context] = i2o_shost;
+
+ osm_debug("new I2O SCSI host added\n");
+};
+
+/**
+ * i2o_scsi_notify_controller_remove - Retrieve notifications of removed controllers
+ * @c: the controller which was removed
+ *
+ * If an I2O controller is removed, we catch the notification to remove the
+ * corresponding Scsi_Host.
+ */
+static void i2o_scsi_notify_controller_remove(struct i2o_controller *c)
+{
+ struct i2o_scsi_host *i2o_shost;
+ i2o_shost = i2o_scsi_get_host(c);
+ if (!i2o_shost)
+ return;
+
+ c->driver_data[i2o_scsi_driver.context] = NULL;
+
+ scsi_remove_host(i2o_shost->scsi_host);
+ scsi_host_put(i2o_shost->scsi_host);
+ osm_debug("I2O SCSI host removed\n");
+};
+
+/* SCSI OSM driver struct */
+static struct i2o_driver i2o_scsi_driver = {
+ .name = OSM_NAME,
+ .reply = i2o_scsi_reply,
+ .classes = i2o_scsi_class_id,
+ .notify_device_add = i2o_scsi_notify_device_add,
+ .notify_device_remove = i2o_scsi_notify_device_remove,
+ .notify_controller_add = i2o_scsi_notify_controller_add,
+ .notify_controller_remove = i2o_scsi_notify_controller_remove,
+ .driver = {
+ .probe = i2o_scsi_probe,
+ .remove = i2o_scsi_remove,
+ },
+};
+
+/**
+ * i2o_scsi_queuecommand - queue a SCSI command
+ * @SCpnt: scsi command pointer
+ * @done: callback for completion
+ *
+ * Issue a scsi command asynchronously. Return 0 on success or 1 if
+ * we hit an error (normally message queue congestion). The only
+ * minor complication here is that I2O deals with the device addressing
+ * so we have to map the bus/dev/lun back to an I2O handle as well
+ * as faking absent devices ourselves.
+ *
+ * Locks: takes the controller lock on error path only
+ */
+
+static int i2o_scsi_queuecommand_lck(struct scsi_cmnd *SCpnt,
+ void (*done) (struct scsi_cmnd *))
+{
+ struct i2o_controller *c;
+ struct i2o_device *i2o_dev;
+ int tid;
+ struct i2o_message *msg;
+ /*
+ * ENABLE_DISCONNECT
+ * SIMPLE_TAG
+ * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
+ */
+ u32 scsi_flags = 0x20a00000;
+ u32 sgl_offset;
+ u32 *mptr;
+ u32 cmd = I2O_CMD_SCSI_EXEC << 24;
+ int rc = 0;
+
+ /*
+ * Do the incoming paperwork
+ */
+ i2o_dev = SCpnt->device->hostdata;
+
+ SCpnt->scsi_done = done;
+
+ if (unlikely(!i2o_dev)) {
+ osm_warn("no I2O device in request\n");
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ goto exit;
+ }
+ c = i2o_dev->iop;
+ tid = i2o_dev->lct_data.tid;
+
+ osm_debug("qcmd: Tid = %03x\n", tid);
+ osm_debug("Real scsi messages.\n");
+
+ /*
+ * Put together a scsi execscb message
+ */
+ switch (SCpnt->sc_data_direction) {
+ case PCI_DMA_NONE:
+ /* DATA NO XFER */
+ sgl_offset = SGL_OFFSET_0;
+ break;
+
+ case PCI_DMA_TODEVICE:
+ /* DATA OUT (iop-->dev) */
+ scsi_flags |= 0x80000000;
+ sgl_offset = SGL_OFFSET_10;
+ break;
+
+ case PCI_DMA_FROMDEVICE:
+ /* DATA IN (iop<--dev) */
+ scsi_flags |= 0x40000000;
+ sgl_offset = SGL_OFFSET_10;
+ break;
+
+ default:
+ /* Unknown - kill the command */
+ SCpnt->result = DID_NO_CONNECT << 16;
+ done(SCpnt);
+ goto exit;
+ }
+
+ /*
+ * Obtain an I2O message. If there are none free then
+ * throw it back to the scsi layer
+ */
+
+ msg = i2o_msg_get(c);
+ if (IS_ERR(msg)) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto exit;
+ }
+
+ mptr = &msg->body[0];
+
+#if 0 /* this code can't work */
+#ifdef CONFIG_I2O_EXT_ADAPTEC
+ if (c->adaptec) {
+ u32 adpt_flags = 0;
+
+ if (SCpnt->sc_request && SCpnt->sc_request->upper_private_data) {
+ i2o_sg_io_hdr_t __user *usr_ptr =
+ ((Sg_request *) (SCpnt->sc_request->
+ upper_private_data))->header.
+ usr_ptr;
+
+ if (usr_ptr)
+ get_user(adpt_flags, &usr_ptr->flags);
+ }
+
+ switch (i2o_dev->lct_data.class_id) {
+ case I2O_CLASS_EXECUTIVE:
+ case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+ /* interpret flag has to be set for executive */
+ adpt_flags ^= I2O_DPT_SG_FLAG_INTERPRET;
+ break;
+
+ default:
+ break;
+ }
+
+ /*
+ * for Adaptec controllers we use the PRIVATE command, because
+ * the normal SCSI EXEC doesn't support all SCSI commands on
+ * all controllers (for example READ CAPACITY).
+ */
+ if (sgl_offset == SGL_OFFSET_10)
+ sgl_offset = SGL_OFFSET_12;
+ cmd = I2O_CMD_PRIVATE << 24;
+ *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
+ *mptr++ = cpu_to_le32(adpt_flags | tid);
+ }
+#endif
+#endif
+
+ msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
+ msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context);
+
+ /* We want the SCSI control block back */
+ msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt));
+
+ /* LSI_920_PCI_QUIRK
+ *
+	 * Intermittent observations of msg frame word data corruption
+ * observed on msg[4] after:
+ * WRITE, READ-MODIFY-WRITE
+ * operations. 19990606 -sralston
+ *
+	 * (Hence we build this word via tag. It's good practice anyway;
+ * we don't want fetches over PCI needlessly)
+ */
+
+ /* Attach tags to the devices */
+ /* FIXME: implement
+ if(SCpnt->device->tagged_supported) {
+ if(SCpnt->tag == HEAD_OF_QUEUE_TAG)
+ scsi_flags |= 0x01000000;
+ else if(SCpnt->tag == ORDERED_QUEUE_TAG)
+ scsi_flags |= 0x01800000;
+ }
+ */
+
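+	/* low byte carries the CDB length; the upper bits are the SCSI flags
+	 * assembled above */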
+ *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len);
+
+ /* Write SCSI command into the message - always 16 byte block */
+ memcpy(mptr, SCpnt->cmnd, 16);
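+	/* mptr is a u32 pointer, so advancing it by 4 steps over the CDB */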
+ mptr += 4;
+
+ if (sgl_offset != SGL_OFFSET_0) {
+ /* write size of data addressed by SGL */
+ *mptr++ = cpu_to_le32(scsi_bufflen(SCpnt));
+
+ /* Now fill in the SGList and command */
+
+ if (scsi_sg_count(SCpnt)) {
+ if (!i2o_dma_map_sg(c, scsi_sglist(SCpnt),
+ scsi_sg_count(SCpnt),
+ SCpnt->sc_data_direction, &mptr))
+ goto nomem;
+ }
+ }
+
+ /* Stick the headers on */
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
+
+ /* Queue the message */
+ i2o_msg_post(c, msg);
+
+	osm_debug("Issued %p\n", SCpnt);
+
+ return 0;
+
+ nomem:
+ rc = -ENOMEM;
+ i2o_msg_nop(c, msg);
+
+ exit:
+ return rc;
+}
+
+static DEF_SCSI_QCMD(i2o_scsi_queuecommand)
+
+/**
+ * i2o_scsi_abort - abort a running command
+ * @SCpnt: command to abort
+ *
+ * Ask the I2O controller to abort a command. This is an asynchronous
+ * process and our callback handler will see the command complete with an
+ * aborted message if it succeeds.
+ *
+ * Returns 0 if the command is successfully aborted or negative error code
+ * on failure.
+ */
+static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
+{
+ struct i2o_device *i2o_dev;
+ struct i2o_controller *c;
+ struct i2o_message *msg;
+ int tid;
+ int status = FAILED;
+
+ osm_warn("Aborting command block.\n");
+
+ i2o_dev = SCpnt->device->hostdata;
+ c = i2o_dev->iop;
+ tid = i2o_dev->lct_data.tid;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
+ msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
+
+ if (!i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
+ status = SUCCESS;
+
+ return status;
+}
+
+/**
+ * i2o_scsi_bios_param - Invent disk geometry
+ * @sdev: scsi device
+ * @dev: block layer device
+ * @capacity: size in sectors
+ * @ip: geometry array
+ *
+ * This is anyone's guess quite frankly. We use the same rules everyone
+ * else appears to and hope. It seems to work.
+ */
+
+static int i2o_scsi_bios_param(struct scsi_device *sdev,
+ struct block_device *dev, sector_t capacity,
+ int *ip)
+{
+ int size;
+
+ size = capacity;
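+	/*
+	 * Worked example: an 8388608-sector (4 GiB) disk would give
+	 * 8388608 >> 11 = 4096 cylinders with 64/32 geometry, which exceeds
+	 * 1024, so the 255/63 translation is used instead:
+	 * 8388608 / (255 * 63) ~= 522 cylinders.
+	 */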
+ ip[0] = 64; /* heads */
+ ip[1] = 32; /* sectors */
+ if ((ip[2] = size >> 11) > 1024) { /* cylinders, test for big disk */
+ ip[0] = 255; /* heads */
+ ip[1] = 63; /* sectors */
+ ip[2] = size / (255 * 63); /* cylinders */
+ }
+ return 0;
+}
+
+static struct scsi_host_template i2o_scsi_host_template = {
+ .proc_name = OSM_NAME,
+ .name = OSM_DESCRIPTION,
+ .info = i2o_scsi_info,
+ .queuecommand = i2o_scsi_queuecommand,
+ .eh_abort_handler = i2o_scsi_abort,
+ .bios_param = i2o_scsi_bios_param,
+ .can_queue = I2O_SCSI_CAN_QUEUE,
+ .sg_tablesize = 8,
+ .cmd_per_lun = 6,
+ .use_clustering = ENABLE_CLUSTERING,
+};
+
+/**
+ * i2o_scsi_init - SCSI OSM initialization function
+ *
+ * Register SCSI OSM into I2O core.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_scsi_init(void)
+{
+ int rc;
+
+ printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
+
+ /* Register SCSI OSM into I2O core */
+ rc = i2o_driver_register(&i2o_scsi_driver);
+ if (rc) {
+ osm_err("Could not register SCSI driver\n");
+ return rc;
+ }
+
+ return 0;
+};
+
+/**
+ * i2o_scsi_exit - SCSI OSM exit function
+ *
+ * Unregisters SCSI OSM from I2O core.
+ */
+static void __exit i2o_scsi_exit(void)
+{
+ /* Unregister I2O SCSI OSM from I2O core */
+ i2o_driver_unregister(&i2o_scsi_driver);
+};
+
+MODULE_AUTHOR("Red Hat Software");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(OSM_DESCRIPTION);
+MODULE_VERSION(OSM_VERSION);
+
+module_init(i2o_scsi_init);
+module_exit(i2o_scsi_exit);
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
new file mode 100644
index 00000000..090d2a3a
--- /dev/null
+++ b/drivers/message/i2o/iop.c
@@ -0,0 +1,1248 @@
+/*
+ * Functions to handle I2O controllers and I2O message handling
+ *
+ * Copyright (C) 1999-2002 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * A lot of the I2O message side code from this is taken from the
+ * Red Creek RCPCI45 adapter driver by Red Creek Communications
+ *
+ * Fixes/additions:
+ * Philipp Rumpf
+ * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
+ * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
+ * Deepak Saxena <deepak@plexity.net>
+ * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>:
+ * Ported to Linux 2.5.
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ * Minor fixes for 2.6.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "core.h"
+
+#define OSM_NAME "i2o"
+#define OSM_VERSION "1.325"
+#define OSM_DESCRIPTION "I2O subsystem"
+
+/* global I2O controller list */
+LIST_HEAD(i2o_controllers);
+
+/*
+ * global I2O System Table. Contains information about all the IOPs in the
+ * system. Used to inform IOPs about each other's existence.
+ */
+static struct i2o_dma i2o_systab;
+
+static int i2o_hrt_get(struct i2o_controller *c);
+
+/**
+ * i2o_msg_get_wait - obtain an I2O message from the IOP
+ * @c: I2O controller
+ * @wait: how long to wait until timeout
+ *
+ * This function waits up to @wait seconds for a message slot to become
+ * available on the IOP.
+ *
+ * Returns a pointer to the message frame on success, or ERR_PTR(-ETIMEDOUT)
+ * if no message frame became available before the timeout expired.
+ */
+struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait)
+{
+ unsigned long timeout = jiffies + wait * HZ;
+ struct i2o_message *msg;
+
+ while (IS_ERR(msg = i2o_msg_get(c))) {
+ if (time_after(jiffies, timeout)) {
+ osm_debug("%s: Timeout waiting for message frame.\n",
+ c->name);
+ return ERR_PTR(-ETIMEDOUT);
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+
+ return msg;
+};
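+
+/*
+ * Typical calling pattern (a minimal sketch mirroring the users later in
+ * this file; the command written into the header is whatever the caller
+ * actually needs):
+ *
+ *	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ *	if (IS_ERR(msg))
+ *		return PTR_ERR(msg);
+ *	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ *	msg->u.head[1] = cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 |
+ *				     HOST_TID << 12 | ADAPTER_TID);
+ *	rc = i2o_msg_post_wait(c, msg, 240);
+ */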
+
+#if BITS_PER_LONG == 64
+/**
+ * i2o_cntxt_list_add - Append a pointer to the context list and return an id
+ * @c: controller to which the context list belongs
+ * @ptr: pointer to add to the context list
+ *
+ * Because the context field in I2O is only 32 bits wide, a pointer on a
+ * 64-bit system is too large to fit into it. The i2o_cntxt_list functions
+ * therefore map pointers to 32-bit context ids.
+ *
+ * Returns context id > 0 on success or 0 on failure.
+ */
+u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
+{
+ struct i2o_context_list_element *entry;
+ unsigned long flags;
+
+ if (!ptr)
+ osm_err("%s: couldn't add NULL pointer to context list!\n",
+ c->name);
+
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry) {
+ osm_err("%s: Could not allocate memory for context list element"
+ "\n", c->name);
+ return 0;
+ }
+
+ entry->ptr = ptr;
+ entry->timestamp = jiffies;
+ INIT_LIST_HEAD(&entry->list);
+
+ spin_lock_irqsave(&c->context_list_lock, flags);
+
+ if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
+ atomic_inc(&c->context_list_counter);
+
+ entry->context = atomic_read(&c->context_list_counter);
+
+ list_add(&entry->list, &c->context_list);
+
+ spin_unlock_irqrestore(&c->context_list_lock, flags);
+
+ osm_debug("%s: Add context to list %p -> %d\n", c->name, ptr, context);
+
+ return entry->context;
+};
+
+/**
+ * i2o_cntxt_list_remove - Remove a pointer from the context list
+ * @c: controller to which the context list belongs
+ * @ptr: pointer which should be removed from the context list
+ *
+ * Removes a previously added pointer from the context list and returns
+ * the matching context id.
+ *
+ * Returns context id on success or 0 on failure.
+ */
+u32 i2o_cntxt_list_remove(struct i2o_controller * c, void *ptr)
+{
+ struct i2o_context_list_element *entry;
+ u32 context = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->context_list_lock, flags);
+ list_for_each_entry(entry, &c->context_list, list)
+ if (entry->ptr == ptr) {
+ list_del(&entry->list);
+ context = entry->context;
+ kfree(entry);
+ break;
+ }
+ spin_unlock_irqrestore(&c->context_list_lock, flags);
+
+ if (!context)
+ osm_warn("%s: Could not remove nonexistent ptr %p\n", c->name,
+ ptr);
+
+ osm_debug("%s: remove ptr from context list %d -> %p\n", c->name,
+ context, ptr);
+
+ return context;
+};
+
+/**
+ * i2o_cntxt_list_get - Get a pointer from the context list and remove it
+ * @c: controller to which the context list belongs
+ * @context: context id of the pointer to look up
+ *
+ * Returns pointer to the matching context id on success or NULL on
+ * failure.
+ */
+void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context)
+{
+ struct i2o_context_list_element *entry;
+ unsigned long flags;
+ void *ptr = NULL;
+
+ spin_lock_irqsave(&c->context_list_lock, flags);
+ list_for_each_entry(entry, &c->context_list, list)
+ if (entry->context == context) {
+ list_del(&entry->list);
+ ptr = entry->ptr;
+ kfree(entry);
+ break;
+ }
+ spin_unlock_irqrestore(&c->context_list_lock, flags);
+
+ if (!ptr)
+ osm_warn("%s: context id %d not found\n", c->name, context);
+
+ osm_debug("%s: get ptr from context list %d -> %p\n", c->name, context,
+ ptr);
+
+ return ptr;
+};
+
+/**
+ * i2o_cntxt_list_get_ptr - Get a context id from the context list
+ * @c: controller to which the context list belongs
+ * @ptr: pointer for which the context id should be looked up
+ *
+ * Returns the context id which matches the pointer on success or 0 on
+ * failure.
+ */
+u32 i2o_cntxt_list_get_ptr(struct i2o_controller * c, void *ptr)
+{
+ struct i2o_context_list_element *entry;
+ u32 context = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->context_list_lock, flags);
+ list_for_each_entry(entry, &c->context_list, list)
+ if (entry->ptr == ptr) {
+ context = entry->context;
+ break;
+ }
+ spin_unlock_irqrestore(&c->context_list_lock, flags);
+
+ if (!context)
+ osm_warn("%s: Could not find nonexistent ptr %p\n", c->name,
+ ptr);
+
+ osm_debug("%s: get context id from context list %p -> %d\n", c->name,
+ ptr, context);
+
+ return context;
+};
+#endif
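+
+/*
+ * Sketch of the intended round trip on 64-bit builds ("my_object" stands
+ * for whatever per-request data an OSM needs to track):
+ *
+ *	send:	msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, my_object));
+ *	reply:	my_object = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
+ */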
+
+/**
+ * i2o_find_iop - Find an I2O controller by id
+ * @unit: unit number of the I2O controller to search for
+ *
+ * Lookup the I2O controller on the controller list.
+ *
+ * Returns pointer to the I2O controller on success or NULL if not found.
+ */
+struct i2o_controller *i2o_find_iop(int unit)
+{
+ struct i2o_controller *c;
+
+ list_for_each_entry(c, &i2o_controllers, list) {
+ if (c->unit == unit)
+ return c;
+ }
+
+ return NULL;
+};
+
+/**
+ * i2o_iop_find_device - Find an I2O device on an I2O controller
+ * @c: I2O controller on which the I2O device resides
+ * @tid: TID of the I2O device to search for
+ *
+ * Searches the devices of the I2O controller for a device with TID @tid
+ * and returns it.
+ *
+ * Returns a pointer to the I2O device if found, otherwise NULL.
+ */
+struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
+{
+ struct i2o_device *dev;
+
+ list_for_each_entry(dev, &c->devices, list)
+ if (dev->lct_data.tid == tid)
+ return dev;
+
+ return NULL;
+};
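+
+/*
+ * Lookup sketch (the unit and TID values are purely illustrative; both
+ * calls return NULL if nothing matches):
+ *
+ *	struct i2o_controller *c = i2o_find_iop(0);
+ *	struct i2o_device *d = c ? i2o_iop_find_device(c, 0x208) : NULL;
+ */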
+
+/**
+ * i2o_iop_quiesce - quiesce controller
+ * @c: controller
+ *
+ * Quiesce an IOP. Causes IOP to make external operation quiescent
+ * (i2o 'READY' state). Internal operation of the IOP continues normally.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_iop_quiesce(struct i2o_controller *c)
+{
+ struct i2o_message *msg;
+ i2o_status_block *sb = c->status_block.virt;
+ int rc;
+
+ i2o_status_get(c);
+
+ /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
+ if ((sb->iop_state != ADAPTER_STATE_READY) &&
+ (sb->iop_state != ADAPTER_STATE_OPERATIONAL))
+ return 0;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+
+ /* Long timeout needed for quiesce if lots of devices */
+ if ((rc = i2o_msg_post_wait(c, msg, 240)))
+ osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc);
+ else
+ osm_debug("%s: Quiesced.\n", c->name);
+
+ i2o_status_get(c); // Entered READY state
+
+ return rc;
+};
+
+/**
+ * i2o_iop_enable - move controller from ready to OPERATIONAL
+ * @c: I2O controller
+ *
+ * Enable IOP. This allows the IOP to resume external operations and
+ * reverses the effect of a quiesce. Returns zero or an error code if
+ * an error occurs.
+ */
+static int i2o_iop_enable(struct i2o_controller *c)
+{
+ struct i2o_message *msg;
+ i2o_status_block *sb = c->status_block.virt;
+ int rc;
+
+ i2o_status_get(c);
+
+ /* Enable only allowed on READY state */
+ if (sb->iop_state != ADAPTER_STATE_READY)
+ return -EINVAL;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+
+ /* How long of a timeout do we need? */
+ if ((rc = i2o_msg_post_wait(c, msg, 240)))
+ osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc);
+ else
+ osm_debug("%s: Enabled.\n", c->name);
+
+ i2o_status_get(c); // entered OPERATIONAL state
+
+ return rc;
+};
+
+/**
+ * i2o_iop_quiesce_all - Quiesce all I2O controllers on the system
+ *
+ * Quiesce all I2O controllers which are connected to the system.
+ */
+static inline void i2o_iop_quiesce_all(void)
+{
+ struct i2o_controller *c, *tmp;
+
+ list_for_each_entry_safe(c, tmp, &i2o_controllers, list) {
+ if (!c->no_quiesce)
+ i2o_iop_quiesce(c);
+ }
+};
+
+/**
+ * i2o_iop_enable_all - Enables all controllers on the system
+ *
+ * Enables all I2O controllers which are connected to the system.
+ */
+static inline void i2o_iop_enable_all(void)
+{
+ struct i2o_controller *c, *tmp;
+
+ list_for_each_entry_safe(c, tmp, &i2o_controllers, list)
+ i2o_iop_enable(c);
+};
+
+/**
+ * i2o_iop_clear - Bring I2O controller into HOLD state
+ * @c: controller
+ *
+ * Clear an IOP to HOLD state, ie. terminate external operations, clear all
+ * input queues and prepare for a system restart. IOP's internal operation
+ * continues normally and the outbound queue is alive. The IOP is not
+ * expected to rebuild its LCT.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_iop_clear(struct i2o_controller *c)
+{
+ struct i2o_message *msg;
+ int rc;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ /* Quiesce all IOPs first */
+ i2o_iop_quiesce_all();
+
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+
+ if ((rc = i2o_msg_post_wait(c, msg, 30)))
+ osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc);
+ else
+ osm_debug("%s: Cleared.\n", c->name);
+
+ /* Enable all IOPs */
+ i2o_iop_enable_all();
+
+ return rc;
+}
+
+/**
+ * i2o_iop_init_outbound_queue - setup the outbound message queue
+ * @c: I2O controller
+ *
+ * Clear and (re)initialize IOP's outbound queue and post the message
+ * frames to the IOP.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
+{
+ u32 m;
+ volatile u8 *status = c->status.virt;
+ struct i2o_message *msg;
+ ulong timeout;
+ int i;
+
+ osm_debug("%s: Initializing Outbound Queue...\n", c->name);
+
+ memset(c->status.virt, 0, 4);
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(PAGE_SIZE);
+ /* Outbound msg frame size in words and Initcode */
+ msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80);
+ msg->body[2] = cpu_to_le32(0xd0000004);
+ msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys));
+ msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys));
+
+ i2o_msg_post(c, msg);
+
+ timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
+ while (*status <= I2O_CMD_IN_PROGRESS) {
+ if (time_after(jiffies, timeout)) {
+ osm_warn("%s: Timeout Initializing\n", c->name);
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+
+ m = c->out_queue.phys;
+
+ /* Post frames */
+ for (i = 0; i < I2O_MAX_OUTBOUND_MSG_FRAMES; i++) {
+ i2o_flush_reply(c, m);
+ udelay(1); /* Promise */
+ m += I2O_OUTBOUND_MSG_FRAME_SIZE * sizeof(u32);
+ }
+
+ return 0;
+}
+
+/**
+ * i2o_iop_reset - reset an I2O controller
+ * @c: controller to reset
+ *
+ * Reset the IOP into INIT state and wait until IOP gets into RESET state.
+ * Terminate all external operations, clear IOP's inbound and outbound
+ * queues, terminate all DDMs, and reload the IOP's operating environment
+ * and all local DDMs. The IOP rebuilds its LCT.
+ */
+static int i2o_iop_reset(struct i2o_controller *c)
+{
+ volatile u8 *status = c->status.virt;
+ struct i2o_message *msg;
+ unsigned long timeout;
+ i2o_status_block *sb = c->status_block.virt;
+ int rc = 0;
+
+ osm_debug("%s: Resetting controller\n", c->name);
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ memset(c->status_block.virt, 0, 8);
+
+ /* Quiesce all IOPs first */
+ i2o_iop_quiesce_all();
+
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0x00000000);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys));
+ msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys));
+
+ i2o_msg_post(c, msg);
+
+ /* Wait for a reply */
+ timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
+ while (!*status) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ schedule_timeout_uninterruptible(1);
+ }
+
+ switch (*status) {
+ case I2O_CMD_REJECTED:
+ osm_warn("%s: IOP reset rejected\n", c->name);
+ rc = -EPERM;
+ break;
+
+ case I2O_CMD_IN_PROGRESS:
+ /*
+ * Once the reset is sent, the IOP goes into the INIT state
+ * which is indeterminate. We need to wait until the IOP has
+ * rebooted before we can let the system talk to it. We read
+ * the inbound Free_List until a message is available. If we
+ * can't read one in the given amount of time, we assume the
+ * IOP could not reboot properly.
+ */
+ osm_debug("%s: Reset in progress, waiting for reboot...\n",
+ c->name);
+
+ while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) {
+ if (time_after(jiffies, timeout)) {
+ osm_err("%s: IOP reset timeout.\n", c->name);
+ rc = PTR_ERR(msg);
+ goto exit;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+ i2o_msg_nop(c, msg);
+
+ /* from here all quiesce commands are safe */
+ c->no_quiesce = 0;
+
+ /* verify if controller is in state RESET */
+ i2o_status_get(c);
+
+ if (!c->promise && (sb->iop_state != ADAPTER_STATE_RESET))
+ osm_warn("%s: reset completed, but adapter not in RESET"
+ " state.\n", c->name);
+ else
+ osm_debug("%s: reset completed.\n", c->name);
+
+ break;
+
+ default:
+ osm_err("%s: IOP reset timeout.\n", c->name);
+ rc = -ETIMEDOUT;
+ break;
+ }
+
+ exit:
+ /* Enable all IOPs */
+ i2o_iop_enable_all();
+
+ return rc;
+};
+
+/**
+ * i2o_iop_activate - Bring controller up to HOLD
+ * @c: controller
+ *
+ * This function brings an I2O controller into HOLD state. The adapter
+ * is reset if necessary and then the queues and resource table are read.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_iop_activate(struct i2o_controller *c)
+{
+ i2o_status_block *sb = c->status_block.virt;
+ int rc;
+ int state;
+
+ /* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
+ /* In READY state, Get status */
+
+ rc = i2o_status_get(c);
+ if (rc) {
+ osm_info("%s: Unable to obtain status, attempting a reset.\n",
+ c->name);
+ rc = i2o_iop_reset(c);
+ if (rc)
+ return rc;
+ }
+
+ if (sb->i2o_version > I2OVER15) {
+ osm_err("%s: Not running version 1.5 of the I2O Specification."
+ "\n", c->name);
+ return -ENODEV;
+ }
+
+ switch (sb->iop_state) {
+ case ADAPTER_STATE_FAULTED:
+ osm_err("%s: hardware fault\n", c->name);
+ return -EFAULT;
+
+ case ADAPTER_STATE_READY:
+ case ADAPTER_STATE_OPERATIONAL:
+ case ADAPTER_STATE_HOLD:
+ case ADAPTER_STATE_FAILED:
+ osm_debug("%s: already running, trying to reset...\n", c->name);
+ rc = i2o_iop_reset(c);
+ if (rc)
+ return rc;
+ }
+
+ /* preserve state */
+ state = sb->iop_state;
+
+ rc = i2o_iop_init_outbound_queue(c);
+ if (rc)
+ return rc;
+
+ /* if adapter was not in RESET state clear now */
+ if (state != ADAPTER_STATE_RESET)
+ i2o_iop_clear(c);
+
+ i2o_status_get(c);
+
+ if (sb->iop_state != ADAPTER_STATE_HOLD) {
+ osm_err("%s: failed to bring IOP into HOLD state\n", c->name);
+ return -EIO;
+ }
+
+ return i2o_hrt_get(c);
+};
+
+/**
+ * i2o_iop_systab_set - Set the I2O System Table of the specified IOP
+ * @c: I2O controller to which the system table should be sent
+ *
+ * i2o_systab_build() must be called before the system table can be set.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_iop_systab_set(struct i2o_controller *c)
+{
+ struct i2o_message *msg;
+ i2o_status_block *sb = c->status_block.virt;
+ struct device *dev = &c->pdev->dev;
+ struct resource *root;
+ int rc;
+
+ if (sb->current_mem_size < sb->desired_mem_size) {
+ struct resource *res = &c->mem_resource;
+ res->name = c->pdev->bus->name;
+ res->flags = IORESOURCE_MEM;
+ res->start = 0;
+ res->end = 0;
+ osm_info("%s: requires private memory resources.\n", c->name);
+ root = pci_find_parent_resource(c->pdev, res);
+ if (root == NULL)
+ osm_warn("%s: Can't find parent resource!\n", c->name);
+ if (root && allocate_resource(root, res, sb->desired_mem_size, sb->desired_mem_size, sb->desired_mem_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
+ NULL, NULL) >= 0) {
+ c->mem_alloc = 1;
+ sb->current_mem_size = 1 + res->end - res->start;
+ sb->current_mem_base = res->start;
+ osm_info("%s: allocated %llu bytes of PCI memory at "
+ "0x%016llX.\n", c->name,
+ (unsigned long long)(1 + res->end - res->start),
+ (unsigned long long)res->start);
+ }
+ }
+
+ if (sb->current_io_size < sb->desired_io_size) {
+ struct resource *res = &c->io_resource;
+ res->name = c->pdev->bus->name;
+ res->flags = IORESOURCE_IO;
+ res->start = 0;
+ res->end = 0;
+ osm_info("%s: requires private memory resources.\n", c->name);
+ root = pci_find_parent_resource(c->pdev, res);
+ if (root == NULL)
+ osm_warn("%s: Can't find parent resource!\n", c->name);
+ if (root && allocate_resource(root, res, sb->desired_io_size, sb->desired_io_size, sb->desired_io_size, 1 << 20, /* Unspecified, so use 1Mb and play safe */
+ NULL, NULL) >= 0) {
+ c->io_alloc = 1;
+ sb->current_io_size = 1 + res->end - res->start;
+			sb->current_io_base = res->start;
+ osm_info("%s: allocated %llu bytes of PCI I/O at "
+ "0x%016llX.\n", c->name,
+ (unsigned long long)(1 + res->end - res->start),
+ (unsigned long long)res->start);
+ }
+ }
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
+ PCI_DMA_TODEVICE);
+ if (!i2o_systab.phys) {
+ i2o_msg_nop(c, msg);
+ return -ENOMEM;
+ }
+
+ msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+
+ /*
+ * Provide three SGL-elements:
+ * System table (SysTab), Private memory space declaration and
+ * Private i/o space declaration
+ */
+
+ msg->body[0] = cpu_to_le32(c->unit + 2);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len);
+ msg->body[3] = cpu_to_le32(i2o_systab.phys);
+ msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size);
+ msg->body[5] = cpu_to_le32(sb->current_mem_base);
+ msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size);
+	msg->body[7] = cpu_to_le32(sb->current_io_base);
+
+ rc = i2o_msg_post_wait(c, msg, 120);
+
+ dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
+ PCI_DMA_TODEVICE);
+
+ if (rc < 0)
+ osm_err("%s: Unable to set SysTab (status=%#x).\n", c->name,
+ -rc);
+ else
+ osm_debug("%s: SysTab set.\n", c->name);
+
+ return rc;
+}
+
+/**
+ * i2o_iop_online - Bring a controller online into OPERATIONAL state.
+ * @c: I2O controller
+ *
+ * Send the system table and enable the I2O controller.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_iop_online(struct i2o_controller *c)
+{
+ int rc;
+
+ rc = i2o_iop_systab_set(c);
+ if (rc)
+ return rc;
+
+ /* In READY state */
+ osm_debug("%s: Attempting to enable...\n", c->name);
+ rc = i2o_iop_enable(c);
+ if (rc)
+ return rc;
+
+ return 0;
+};
+
+/**
+ * i2o_iop_remove - Remove the I2O controller from the I2O core
+ * @c: I2O controller
+ *
+ * Remove the I2O controller from the I2O core. If devices are attached to
+ * the controller, remove them as well, and finally reset the controller.
+ */
+void i2o_iop_remove(struct i2o_controller *c)
+{
+ struct i2o_device *dev, *tmp;
+
+ osm_debug("%s: deleting controller\n", c->name);
+
+ i2o_driver_notify_controller_remove_all(c);
+
+ list_del(&c->list);
+
+ list_for_each_entry_safe(dev, tmp, &c->devices, list)
+ i2o_device_remove(dev);
+
+ device_del(&c->device);
+
+ /* Ask the IOP to switch to RESET state */
+ i2o_iop_reset(c);
+}
+
+/**
+ * i2o_systab_build - Build system table
+ *
+ * The system table contains information about all the IOPs in the system
+ * (duh) and is used by the Executives on the IOPs to establish peer2peer
+ * connections. We're not supporting peer2peer at the moment, but this
+ * will be needed down the road for things like lan2lan forwarding.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_systab_build(void)
+{
+ struct i2o_controller *c, *tmp;
+ int num_controllers = 0;
+ u32 change_ind = 0;
+ int count = 0;
+ struct i2o_sys_tbl *systab = i2o_systab.virt;
+
+ list_for_each_entry_safe(c, tmp, &i2o_controllers, list)
+ num_controllers++;
+
+ if (systab) {
+ change_ind = systab->change_ind;
+ kfree(i2o_systab.virt);
+ }
+
+ /* Header + IOPs */
+ i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers *
+ sizeof(struct i2o_sys_tbl_entry);
+
+ systab = i2o_systab.virt = kzalloc(i2o_systab.len, GFP_KERNEL);
+ if (!systab) {
+ osm_err("unable to allocate memory for System Table\n");
+ return -ENOMEM;
+ }
+
+ systab->version = I2OVERSION;
+ systab->change_ind = change_ind + 1;
+
+ list_for_each_entry_safe(c, tmp, &i2o_controllers, list) {
+ i2o_status_block *sb;
+
+ if (count >= num_controllers) {
+ osm_err("controller added while building system table"
+ "\n");
+ break;
+ }
+
+ sb = c->status_block.virt;
+
+ /*
+ * Get updated IOP state so we have the latest information
+ *
+ * We should delete the controller at this point if it
+ * doesn't respond since if it's not on the system table
+		 * it is technically not part of the I2O subsystem...
+ */
+ if (unlikely(i2o_status_get(c))) {
+ osm_err("%s: Deleting b/c could not get status while "
+ "attempting to build system table\n", c->name);
+ i2o_iop_remove(c);
+ continue; // try the next one
+ }
+
+ systab->iops[count].org_id = sb->org_id;
+ systab->iops[count].iop_id = c->unit + 2;
+ systab->iops[count].seg_num = 0;
+ systab->iops[count].i2o_version = sb->i2o_version;
+ systab->iops[count].iop_state = sb->iop_state;
+ systab->iops[count].msg_type = sb->msg_type;
+ systab->iops[count].frame_size = sb->inbound_frame_size;
+ systab->iops[count].last_changed = change_ind;
+ systab->iops[count].iop_capabilities = sb->iop_capabilities;
+ systab->iops[count].inbound_low =
+ i2o_dma_low(c->base.phys + I2O_IN_PORT);
+ systab->iops[count].inbound_high =
+ i2o_dma_high(c->base.phys + I2O_IN_PORT);
+
+ count++;
+ }
+
+ systab->num_entries = count;
+
+ return 0;
+};
+
+/**
+ * i2o_parse_hrt - Parse the hardware resource table.
+ * @c: I2O controller
+ *
+ * We don't do anything with it except dumping it (in debug mode).
+ *
+ * Returns 0.
+ */
+static int i2o_parse_hrt(struct i2o_controller *c)
+{
+ i2o_dump_hrt(c);
+ return 0;
+};
+
+/**
+ * i2o_status_get - Get the status block from the I2O controller
+ * @c: I2O controller
+ *
+ * Issue a status query on the controller. This updates the attached
+ * status block, which can then be accessed through c->status_block.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int i2o_status_get(struct i2o_controller *c)
+{
+ struct i2o_message *msg;
+ volatile u8 *status_block;
+ unsigned long timeout;
+
+ status_block = (u8 *) c->status_block.virt;
+ memset(c->status_block.virt, 0, sizeof(i2o_status_block));
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0x00000000);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys));
+ msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys));
+ msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */
+
+ i2o_msg_post(c, msg);
+
+ /* Wait for a reply */
+ timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
+ while (status_block[87] != 0xFF) {
+ if (time_after(jiffies, timeout)) {
+ osm_err("%s: Get status timeout.\n", c->name);
+ return -ETIMEDOUT;
+ }
+
+ schedule_timeout_uninterruptible(1);
+ }
+
+#ifdef DEBUG
+ i2o_debug_state(c);
+#endif
+
+ return 0;
+}
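+
+/*
+ * Typical use, as in the quiesce/enable helpers above (a sketch; sb is the
+ * caller's pointer to c->status_block.virt): refresh the status block and
+ * then act on the reported IOP state.
+ *
+ *	i2o_status_get(c);
+ *	if (sb->iop_state != ADAPTER_STATE_READY)
+ *		return -EINVAL;
+ */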
+
+/**
+ * i2o_hrt_get - Get the Hardware Resource Table from the I2O controller
+ * @c: I2O controller from which the HRT should be fetched
+ *
+ * The HRT contains information about possible hidden devices but is
+ * mostly useless to us.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_hrt_get(struct i2o_controller *c)
+{
+ int rc;
+ int i;
+ i2o_hrt *hrt = c->hrt.virt;
+ u32 size = sizeof(i2o_hrt);
+ struct device *dev = &c->pdev->dev;
+
+ for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
+ struct i2o_message *msg;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len);
+ msg->body[1] = cpu_to_le32(c->hrt.phys);
+
+ rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt);
+
+ if (rc < 0) {
+ osm_err("%s: Unable to get HRT (status=%#x)\n", c->name,
+ -rc);
+ return rc;
+ }
+
+ size = hrt->num_entries * hrt->entry_len << 2;
+ if (size > c->hrt.len) {
+ if (i2o_dma_realloc(dev, &c->hrt, size))
+ return -ENOMEM;
+ else
+ hrt = c->hrt.virt;
+ } else
+ return i2o_parse_hrt(c);
+ }
+
+ osm_err("%s: Unable to get HRT after %d tries, giving up\n", c->name,
+ I2O_HRT_GET_TRIES);
+
+ return -EBUSY;
+}
+
+/**
+ * i2o_iop_release - release the memory for an I2O controller
+ * @dev: I2O controller which should be released
+ *
+ * Release the allocated memory. This function is called automatically when
+ * the refcount of the device reaches 0.
+ */
+static void i2o_iop_release(struct device *dev)
+{
+ struct i2o_controller *c = to_i2o_controller(dev);
+
+ i2o_iop_free(c);
+};
+
+/**
+ * i2o_iop_alloc - Allocate and initialize an i2o_controller struct
+ *
+ * Allocate the necessary memory for an i2o_controller struct and
+ * initialize the lists and message mempool.
+ *
+ * Returns a pointer to the I2O controller on success or an ERR_PTR()
+ * encoded error code on failure.
+ */
+struct i2o_controller *i2o_iop_alloc(void)
+{
+ static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */
+ struct i2o_controller *c;
+ char poolname[32];
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c) {
+ osm_err("i2o: Insufficient memory to allocate a I2O controller."
+ "\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ c->unit = unit++;
+ sprintf(c->name, "iop%d", c->unit);
+
+ snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
+ if (i2o_pool_alloc
+ (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32),
+ I2O_MSG_INPOOL_MIN)) {
+ kfree(c);
+ return ERR_PTR(-ENOMEM);
+ };
+
+ INIT_LIST_HEAD(&c->devices);
+ spin_lock_init(&c->lock);
+ mutex_init(&c->lct_lock);
+
+ device_initialize(&c->device);
+
+ c->device.release = &i2o_iop_release;
+
+ dev_set_name(&c->device, "iop%d", c->unit);
+
+#if BITS_PER_LONG == 64
+ spin_lock_init(&c->context_list_lock);
+ atomic_set(&c->context_list_counter, 0);
+ INIT_LIST_HEAD(&c->context_list);
+#endif
+
+ return c;
+};
+
+/**
+ * i2o_iop_add - Initialize the I2O controller and add it to the I2O core
+ * @c: controller
+ *
+ * Initialize the I2O controller and, if no error occurs, add it to the
+ * I2O core.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int i2o_iop_add(struct i2o_controller *c)
+{
+ int rc;
+
+ if ((rc = device_add(&c->device))) {
+ osm_err("%s: could not add controller\n", c->name);
+ goto iop_reset;
+ }
+
+ osm_info("%s: Activating I2O controller...\n", c->name);
+ osm_info("%s: This may take a few minutes if there are many devices\n",
+ c->name);
+
+ if ((rc = i2o_iop_activate(c))) {
+ osm_err("%s: could not activate controller\n", c->name);
+ goto device_del;
+ }
+
+ osm_debug("%s: building sys table...\n", c->name);
+
+ if ((rc = i2o_systab_build()))
+ goto device_del;
+
+ osm_debug("%s: online controller...\n", c->name);
+
+ if ((rc = i2o_iop_online(c)))
+ goto device_del;
+
+ osm_debug("%s: getting LCT...\n", c->name);
+
+ if ((rc = i2o_exec_lct_get(c)))
+ goto device_del;
+
+ list_add(&c->list, &i2o_controllers);
+
+ i2o_driver_notify_controller_add_all(c);
+
+ osm_info("%s: Controller added\n", c->name);
+
+ return 0;
+
+ device_del:
+ device_del(&c->device);
+
+ iop_reset:
+ i2o_iop_reset(c);
+
+ return rc;
+};
+
+/**
+ * i2o_event_register - Turn on/off event notification for an I2O device
+ * @dev: I2O device which should receive the event registration request
+ * @drv: driver which wants to be notified
+ * @tcntxt: transaction context to use with this notifier
+ * @evt_mask: mask of events
+ *
+ * Creates and posts an event registration message to the device. No reply
+ * is waited for, or expected. If you do not want further notifications,
+ * call i2o_event_register() again with an evt_mask of 0.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
+ int tcntxt, u32 evt_mask)
+{
+ struct i2o_controller *c = dev->iop;
+ struct i2o_message *msg;
+
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->u.s.icntxt = cpu_to_le32(drv->context);
+ msg->u.s.tcntxt = cpu_to_le32(tcntxt);
+ msg->body[0] = cpu_to_le32(evt_mask);
+
+ i2o_msg_post(c, msg);
+
+ return 0;
+};
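+
+/*
+ * Sketch of how an OSM might subscribe to all events of a device and later
+ * switch notification off again ("my_driver" and "tcntxt" stand for
+ * whatever the OSM already uses; 0xffffffff simply sets every mask bit):
+ *
+ *	i2o_event_register(i2o_dev, &my_driver, tcntxt, 0xffffffff);
+ *	...
+ *	i2o_event_register(i2o_dev, &my_driver, tcntxt, 0);
+ */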
+
+/**
+ * i2o_iop_init - I2O main initialization function
+ *
+ * Initialize the I2O drivers (OSM) functions, register the Executive OSM,
+ * initialize the I2O PCI part and finally initialize I2O device stuff.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_iop_init(void)
+{
+ int rc = 0;
+
+ printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
+
+ if ((rc = i2o_driver_init()))
+ goto exit;
+
+ if ((rc = i2o_exec_init()))
+ goto driver_exit;
+
+ if ((rc = i2o_pci_init()))
+ goto exec_exit;
+
+ return 0;
+
+ exec_exit:
+ i2o_exec_exit();
+
+ driver_exit:
+ i2o_driver_exit();
+
+ exit:
+ return rc;
+}
+
+/**
+ * i2o_iop_exit - I2O main exit function
+ *
+ * Removes I2O controllers from PCI subsystem and shut down OSMs.
+ */
+static void __exit i2o_iop_exit(void)
+{
+ i2o_pci_exit();
+ i2o_exec_exit();
+ i2o_driver_exit();
+};
+
+module_init(i2o_iop_init);
+module_exit(i2o_iop_exit);
+
+MODULE_AUTHOR("Red Hat Software");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(OSM_DESCRIPTION);
+MODULE_VERSION(OSM_VERSION);
+
+#if BITS_PER_LONG == 64
+EXPORT_SYMBOL(i2o_cntxt_list_add);
+EXPORT_SYMBOL(i2o_cntxt_list_get);
+EXPORT_SYMBOL(i2o_cntxt_list_remove);
+EXPORT_SYMBOL(i2o_cntxt_list_get_ptr);
+#endif
+EXPORT_SYMBOL(i2o_msg_get_wait);
+EXPORT_SYMBOL(i2o_find_iop);
+EXPORT_SYMBOL(i2o_iop_find_device);
+EXPORT_SYMBOL(i2o_event_register);
+EXPORT_SYMBOL(i2o_status_get);
+EXPORT_SYMBOL(i2o_controllers);
diff --git a/drivers/message/i2o/memory.c b/drivers/message/i2o/memory.c
new file mode 100644
index 00000000..292b41e4
--- /dev/null
+++ b/drivers/message/i2o/memory.c
@@ -0,0 +1,313 @@
+/*
+ * Functions to handle I2O memory
+ *
+ * Pulled from the inlines in i2o headers and uninlined
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include "core.h"
+
+/* Protects our 32/64bit mask switching */
+static DEFINE_MUTEX(mem_lock);
+
+/**
+ * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
+ * @c: I2O controller for which the calculation should be done
+ * @body_size: maximum body size used for message in 32-bit words.
+ *
+ * Returns the maximum number of SG elements in an SG list.
+ */
+u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
+{
+ i2o_status_block *sb = c->status_block.virt;
+ u16 sg_count =
+ (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
+ body_size;
+
+ if (c->pae_support) {
+ /*
+		 * For 64-bit, an SG attribute element must be added and each
+		 * SG element needs 12 bytes instead of 8.
+ */
+ sg_count -= 2;
+ sg_count /= 3;
+ } else
+ sg_count /= 2;
+
+ if (c->short_req && (sg_count > 8))
+ sg_count = 8;
+
+ return sg_count;
+}
+EXPORT_SYMBOL_GPL(i2o_sg_tablesize);
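+
+/*
+ * Sketch of the intended use (the body size of 6 words and the host
+ * template pointer "sht" are illustrative; an OSM passes the size, in
+ * 32-bit words, of its own command header that precedes the SGL):
+ *
+ *	sht->sg_tablesize = i2o_sg_tablesize(c, 6);
+ */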
+
+
+/**
+ * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
+ * @c: I2O controller
+ * @ptr: pointer to the data which should be mapped
+ * @size: size of data in bytes
+ * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
+ * @sg_ptr: pointer to the SG list inside the I2O message
+ *
+ * This function does all necessary DMA handling and also writes the I2O
+ * SGL elements into the I2O message. For details on DMA handling see also
+ * dma_map_single(). The pointer sg_ptr will only be set to the end of the
+ * SG list if the allocation was successful.
+ *
+ * Returns DMA address which must be checked for failures using
+ * dma_mapping_error().
+ */
+dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
+ size_t size,
+ enum dma_data_direction direction,
+ u32 ** sg_ptr)
+{
+ u32 sg_flags;
+ u32 *mptr = *sg_ptr;
+ dma_addr_t dma_addr;
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ sg_flags = 0xd4000000;
+ break;
+ case DMA_FROM_DEVICE:
+ sg_flags = 0xd0000000;
+ break;
+ default:
+ return 0;
+ }
+
+ dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
+ if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+ if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
+ *mptr++ = cpu_to_le32(0x7C020002);
+ *mptr++ = cpu_to_le32(PAGE_SIZE);
+ }
+#endif
+
+ *mptr++ = cpu_to_le32(sg_flags | size);
+ *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+ if ((sizeof(dma_addr_t) > 4) && c->pae_support)
+ *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
+#endif
+ *sg_ptr = mptr;
+ }
+ return dma_addr;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_map_single);
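+
+/*
+ * Sketch of mapping a caller buffer into a message ("buf" and "len" are
+ * the caller's data; mptr starts at the first free SGL slot in the body):
+ *
+ *	u32 *mptr = &msg->body[0];
+ *	dma_addr_t addr = i2o_dma_map_single(c, buf, len, DMA_TO_DEVICE, &mptr);
+ *
+ *	if (dma_mapping_error(&c->pdev->dev, addr))
+ *		return -ENOMEM;
+ */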
+
+/**
+ * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
+ * @c: I2O controller
+ * @sg: SG list to be mapped
+ * @sg_count: number of elements in the SG list
+ * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
+ * @sg_ptr: pointer to the SG list inside the I2O message
+ *
+ * This function does all necessary DMA handling and also writes the I2O
+ * SGL elements into the I2O message. For details on DMA handling see also
+ * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
+ * list if the allocation was successful.
+ *
+ * Returns 0 on failure or 1 on success.
+ */
+int i2o_dma_map_sg(struct i2o_controller *c, struct scatterlist *sg,
+ int sg_count, enum dma_data_direction direction, u32 ** sg_ptr)
+{
+ u32 sg_flags;
+ u32 *mptr = *sg_ptr;
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ sg_flags = 0x14000000;
+ break;
+ case DMA_FROM_DEVICE:
+ sg_flags = 0x10000000;
+ break;
+ default:
+ return 0;
+ }
+
+ sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
+ if (!sg_count)
+ return 0;
+
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+ if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
+ *mptr++ = cpu_to_le32(0x7C020002);
+ *mptr++ = cpu_to_le32(PAGE_SIZE);
+ }
+#endif
+
+ while (sg_count-- > 0) {
+ if (!sg_count)
+ sg_flags |= 0xC0000000;
+ *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
+ *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+ if ((sizeof(dma_addr_t) > 4) && c->pae_support)
+ *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
+#endif
+ sg = sg_next(sg);
+ }
+ *sg_ptr = mptr;
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_map_sg);
+
+/**
+ * i2o_dma_alloc - Allocate DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: i2o_dma struct which should get the DMA buffer
+ * @len: length of the new DMA memory
+ *
+ * Allocate coherent DMA memory and write the pointers into addr.
+ *
+ * Returns 0 on success or -ENOMEM on failure.
+ */
+int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int dma_64 = 0;
+
+ mutex_lock(&mem_lock);
+ if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_BIT_MASK(64))) {
+ dma_64 = 1;
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ mutex_unlock(&mem_lock);
+ return -ENOMEM;
+ }
+ }
+
+ addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);
+
+ if ((sizeof(dma_addr_t) > 4) && dma_64)
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+ printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
+ mutex_unlock(&mem_lock);
+
+ if (!addr->virt)
+ return -ENOMEM;
+
+ memset(addr->virt, 0, len);
+ addr->len = len;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_alloc);
+
+
+/**
+ * i2o_dma_free - Free DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: i2o_dma struct which contains the DMA buffer
+ *
+ * Free coherent DMA memory and set the virtual address of addr to NULL.
+ */
+void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
+{
+ if (addr->virt) {
+ if (addr->phys)
+ dma_free_coherent(dev, addr->len, addr->virt,
+ addr->phys);
+ else
+ kfree(addr->virt);
+ addr->virt = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(i2o_dma_free);
+
+
+/**
+ * i2o_dma_realloc - Realloc DMA memory
+ * @dev: struct device pointer to the PCI device of the I2O controller
+ * @addr: pointer to a i2o_dma struct DMA buffer
+ * @len: new length of memory
+ *
+ * If there was something allocated in the addr, free it first. If len > 0
+ * then try to allocate it and write the addresses back to the addr
+ * structure. If len == 0 the virtual address is set to NULL.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len)
+{
+ i2o_dma_free(dev, addr);
+
+ if (len)
+ return i2o_dma_alloc(dev, addr, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i2o_dma_realloc);
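+
+/*
+ * Minimal usage sketch (pci.c allocates the HRT, LCT and status buffers
+ * this way; the length of 8192 bytes is illustrative):
+ *
+ *	struct i2o_dma buf;
+ *
+ *	if (i2o_dma_alloc(&c->pdev->dev, &buf, 8192))
+ *		return -ENOMEM;
+ *	...
+ *	i2o_dma_free(&c->pdev->dev, &buf);
+ */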
+
+/**
+ * i2o_pool_alloc - Allocate a slab cache and mempool
+ * @pool: pointer to struct i2o_pool to write data into
+ * @name: name which is used to identify cache
+ * @size: size of each object
+ * @min_nr: minimum number of objects
+ *
+ * First allocates a slab cache with name and size. Then allocates a
+ * mempool which uses the slab cache for allocation and freeing.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
+ size_t size, int min_nr)
+{
+ pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ if (!pool->name)
+ goto exit;
+ strcpy(pool->name, name);
+
+ pool->slab =
+ kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!pool->slab)
+ goto free_name;
+
+ pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
+ if (!pool->mempool)
+ goto free_slab;
+
+ return 0;
+
+free_slab:
+ kmem_cache_destroy(pool->slab);
+
+free_name:
+ kfree(pool->name);
+
+exit:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(i2o_pool_alloc);
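+
+/*
+ * Minimal usage sketch (the object type and GFP flag depend on the caller;
+ * i2o_iop_alloc() in iop.c creates such a pool for inbound message frames):
+ *
+ *	void *obj = mempool_alloc(pool->mempool, GFP_ATOMIC);
+ *	...
+ *	mempool_free(obj, pool->mempool);
+ *	i2o_pool_free(pool);
+ */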
+
+/**
+ * i2o_pool_free - Free slab cache and mempool again
+ * @pool: pointer to struct i2o_pool which should be freed
+ *
+ * Note that you have to return all objects to the mempool again before
+ * calling i2o_pool_free().
+ */
+void i2o_pool_free(struct i2o_pool *pool)
+{
+ mempool_destroy(pool->mempool);
+ kmem_cache_destroy(pool->slab);
+ kfree(pool->name);
+};
+EXPORT_SYMBOL_GPL(i2o_pool_free);
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
new file mode 100644
index 00000000..73e4658a
--- /dev/null
+++ b/drivers/message/i2o/pci.c
@@ -0,0 +1,497 @@
+/*
+ * PCI handling of I2O controller
+ *
+ * Copyright (C) 1999-2002 Red Hat Software
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * A lot of the I2O message side code from this is taken from the Red
+ * Creek RCPCI45 adapter driver by Red Creek Communications
+ *
+ * Fixes/additions:
+ * Philipp Rumpf
+ * Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
+ * Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
+ * Deepak Saxena <deepak@plexity.net>
+ * Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>:
+ * Ported to Linux 2.5.
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ * Minor fixes for 2.6.
+ * Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ * Support for sysfs included.
+ */
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/i2o.h>
+#include "core.h"
+
+#define OSM_DESCRIPTION "I2O-subsystem"
+
+/* PCI device id table for all I2O controllers */
+static struct pci_device_id __devinitdata i2o_pci_ids[] = {
+ {PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)},
+ {PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)},
+ {.vendor = PCI_VENDOR_ID_INTEL,.device = 0x1962,
+ .subvendor = PCI_VENDOR_ID_PROMISE,.subdevice = PCI_ANY_ID},
+ {0}
+};
+
+/**
+ * i2o_pci_free - Frees the DMA memory for the I2O controller
+ * @c: I2O controller to free
+ *
+ * Remove all allocated DMA memory and unmap the memory IO regions.
+ */
+static void i2o_pci_free(struct i2o_controller *c)
+{
+ struct device *dev;
+
+ dev = &c->pdev->dev;
+
+ i2o_dma_free(dev, &c->out_queue);
+ i2o_dma_free(dev, &c->status_block);
+ kfree(c->lct);
+ i2o_dma_free(dev, &c->dlct);
+ i2o_dma_free(dev, &c->hrt);
+ i2o_dma_free(dev, &c->status);
+
+ if (c->raptor && c->in_queue.virt)
+ iounmap(c->in_queue.virt);
+
+ if (c->base.virt)
+ iounmap(c->base.virt);
+
+ pci_release_regions(c->pdev);
+}
+
+/**
+ * i2o_pci_alloc - Allocate DMA memory, map IO memory for I2O controller
+ * @c: I2O controller
+ *
+ * Allocate DMA memory for a PCI (or in theory AGP) I2O controller. All
+ * IO mappings are also done here.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int __devinit i2o_pci_alloc(struct i2o_controller *c)
+{
+ struct pci_dev *pdev = c->pdev;
+ struct device *dev = &pdev->dev;
+ int i;
+
+ if (pci_request_regions(pdev, OSM_DESCRIPTION)) {
+ printk(KERN_ERR "%s: device already claimed\n", c->name);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < 6; i++) {
+ /* Skip I/O spaces */
+ if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
+ if (!c->base.phys) {
+ c->base.phys = pci_resource_start(pdev, i);
+ c->base.len = pci_resource_len(pdev, i);
+
+ /*
+ * If we know what card it is, set the size
+ * correctly. Code is taken from dpt_i2o.c
+ */
+ if (pdev->device == 0xa501) {
+ if (pdev->subsystem_device >= 0xc032 &&
+ pdev->subsystem_device <= 0xc03b) {
+ if (c->base.len > 0x400000)
+ c->base.len = 0x400000;
+ } else {
+ if (c->base.len > 0x100000)
+ c->base.len = 0x100000;
+ }
+ }
+ if (!c->raptor)
+ break;
+ } else {
+ c->in_queue.phys = pci_resource_start(pdev, i);
+ c->in_queue.len = pci_resource_len(pdev, i);
+ break;
+ }
+ }
+ }
+
+ if (i == 6) {
+ printk(KERN_ERR "%s: I2O controller has no memory regions"
+ " defined.\n", c->name);
+ i2o_pci_free(c);
+ return -EINVAL;
+ }
+
+ /* Map the I2O controller */
+ if (c->raptor) {
+ printk(KERN_INFO "%s: PCI I2O controller\n", c->name);
+ printk(KERN_INFO " BAR0 at 0x%08lX size=%ld\n",
+ (unsigned long)c->base.phys, (unsigned long)c->base.len);
+ printk(KERN_INFO " BAR1 at 0x%08lX size=%ld\n",
+ (unsigned long)c->in_queue.phys,
+ (unsigned long)c->in_queue.len);
+ } else
+ printk(KERN_INFO "%s: PCI I2O controller at %08lX size=%ld\n",
+ c->name, (unsigned long)c->base.phys,
+ (unsigned long)c->base.len);
+
+ c->base.virt = ioremap_nocache(c->base.phys, c->base.len);
+ if (!c->base.virt) {
+ printk(KERN_ERR "%s: Unable to map controller.\n", c->name);
+ i2o_pci_free(c);
+ return -ENOMEM;
+ }
+
+ if (c->raptor) {
+ c->in_queue.virt =
+ ioremap_nocache(c->in_queue.phys, c->in_queue.len);
+ if (!c->in_queue.virt) {
+ printk(KERN_ERR "%s: Unable to map controller.\n",
+ c->name);
+ i2o_pci_free(c);
+ return -ENOMEM;
+ }
+ } else
+ c->in_queue = c->base;
+
+ c->irq_status = c->base.virt + I2O_IRQ_STATUS;
+ c->irq_mask = c->base.virt + I2O_IRQ_MASK;
+ c->in_port = c->base.virt + I2O_IN_PORT;
+ c->out_port = c->base.virt + I2O_OUT_PORT;
+
+ /* Motorola/Freescale chip does not follow spec */
+ if (pdev->vendor == PCI_VENDOR_ID_MOTOROLA && pdev->device == 0x18c0) {
+ /* Check if CPU is enabled */
+ if (be32_to_cpu(readl(c->base.virt + 0x10000)) & 0x10000000) {
+ printk(KERN_INFO "%s: MPC82XX needs CPU running to "
+ "service I2O.\n", c->name);
+ i2o_pci_free(c);
+ return -ENODEV;
+ } else {
+ c->irq_status += I2O_MOTOROLA_PORT_OFFSET;
+ c->irq_mask += I2O_MOTOROLA_PORT_OFFSET;
+ c->in_port += I2O_MOTOROLA_PORT_OFFSET;
+ c->out_port += I2O_MOTOROLA_PORT_OFFSET;
+ printk(KERN_INFO "%s: MPC82XX workarounds activated.\n",
+ c->name);
+ }
+ }
+
+ if (i2o_dma_alloc(dev, &c->status, 8)) {
+ i2o_pci_free(c);
+ return -ENOMEM;
+ }
+
+ if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt))) {
+ i2o_pci_free(c);
+ return -ENOMEM;
+ }
+
+ if (i2o_dma_alloc(dev, &c->dlct, 8192)) {
+ i2o_pci_free(c);
+ return -ENOMEM;
+ }
+
+ if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block))) {
+ i2o_pci_free(c);
+ return -ENOMEM;
+ }
+
+ if (i2o_dma_alloc(dev, &c->out_queue,
+ I2O_MAX_OUTBOUND_MSG_FRAMES * I2O_OUTBOUND_MSG_FRAME_SIZE *
+ sizeof(u32))) {
+ i2o_pci_free(c);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, c);
+
+ return 0;
+}
+
+/**
+ * i2o_pci_interrupt - Interrupt handler for I2O controller
+ * @irq: interrupt line
+ * @dev_id: pointer to the I2O controller
+ *
+ * Handle an interrupt from a PCI based I2O controller. This turns out
+ * to be rather simple. We keep the controller pointer in the cookie.
+ */
+static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id)
+{
+ struct i2o_controller *c = dev_id;
+ u32 m;
+ irqreturn_t rc = IRQ_NONE;
+
+ while (readl(c->irq_status) & I2O_IRQ_OUTBOUND_POST) {
+ m = readl(c->out_port);
+ if (m == I2O_QUEUE_EMPTY) {
+ /*
+ * Old 960 steppings had a bug in the I2O unit that
+ * caused the queue to appear empty when it wasn't.
+ */
+ m = readl(c->out_port);
+ if (unlikely(m == I2O_QUEUE_EMPTY))
+ break;
+ }
+
+ /* dispatch it */
+ if (i2o_driver_dispatch(c, m))
+ /* flush it if result != 0 */
+ i2o_flush_reply(c, m);
+
+ rc = IRQ_HANDLED;
+ }
+
+ return rc;
+}
+
+/**
+ * i2o_pci_irq_enable - Allocate interrupt for I2O controller
+ * @c: i2o_controller that the request is for
+ *
+ * Allocate an interrupt for the I2O controller, and activate interrupts
+ * on the I2O controller.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int i2o_pci_irq_enable(struct i2o_controller *c)
+{
+ struct pci_dev *pdev = c->pdev;
+ int rc;
+
+ writel(0xffffffff, c->irq_mask);
+
+ if (pdev->irq) {
+ rc = request_irq(pdev->irq, i2o_pci_interrupt, IRQF_SHARED,
+ c->name, c);
+ if (rc < 0) {
+ printk(KERN_ERR "%s: unable to allocate interrupt %d."
+ "\n", c->name, pdev->irq);
+ return rc;
+ }
+ }
+
+ writel(0x00000000, c->irq_mask);
+
+ printk(KERN_INFO "%s: Installed at IRQ %d\n", c->name, pdev->irq);
+
+ return 0;
+}
+
+/**
+ * i2o_pci_irq_disable - Free interrupt for I2O controller
+ * @c: I2O controller
+ *
+ * Disable interrupts in I2O controller and then free interrupt.
+ */
+static void i2o_pci_irq_disable(struct i2o_controller *c)
+{
+ writel(0xffffffff, c->irq_mask);
+
+ if (c->pdev->irq > 0)
+ free_irq(c->pdev->irq, c);
+}
+
+/**
+ * i2o_pci_probe - Probe the PCI device for an I2O controller
+ * @pdev: PCI device to test
+ * @id: id which matched with the PCI device id table
+ *
+ * Probe the PCI device for any device which is a member of the
+ * Intelligent I/O (I2O) class or an Adaptec Zero Channel Controller. We
+ * attempt to set up each such device and register it with the core.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static int __devinit i2o_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct i2o_controller *c;
+ int rc;
+ struct pci_dev *i960 = NULL;
+
+ printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");
+
+ if ((pdev->class & 0xff) > 1) {
+ printk(KERN_WARNING "i2o: %s does not support I2O 1.5 "
+ "(skipping).\n", pci_name(pdev));
+ return -ENODEV;
+ }
+
+ if ((rc = pci_enable_device(pdev))) {
+ printk(KERN_WARNING "i2o: couldn't enable device %s\n",
+ pci_name(pdev));
+ return rc;
+ }
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ printk(KERN_WARNING "i2o: no suitable DMA found for %s\n",
+ pci_name(pdev));
+ rc = -ENODEV;
+ goto disable;
+ }
+
+ pci_set_master(pdev);
+
+ c = i2o_iop_alloc();
+ if (IS_ERR(c)) {
+ printk(KERN_ERR "i2o: couldn't allocate memory for %s\n",
+ pci_name(pdev));
+ rc = PTR_ERR(c);
+ goto disable;
+ } else
+ printk(KERN_INFO "%s: controller found (%s)\n", c->name,
+ pci_name(pdev));
+
+ c->pdev = pdev;
+ c->device.parent = &pdev->dev;
+
+ /* Cards that fall apart if you hit them with large I/O loads... */
+ if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) {
+ c->short_req = 1;
+ printk(KERN_INFO "%s: Symbios FC920 workarounds activated.\n",
+ c->name);
+ }
+
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) {
+		/*
+		 * Expose the chip behind the i960 for initialization, or it
+		 * will fail.
+		 */
+ i960 = pci_get_slot(c->pdev->bus,
+ PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0));
+
+ if (i960) {
+ pci_write_config_word(i960, 0x42, 0);
+ pci_dev_put(i960);
+ }
+
+ c->promise = 1;
+ c->limit_sectors = 1;
+ }
+
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_DPT)
+ c->adaptec = 1;
+
+ /* Cards that go bananas if you quiesce them before you reset them. */
+ if (pdev->vendor == PCI_VENDOR_ID_DPT) {
+ c->no_quiesce = 1;
+ if (pdev->device == 0xa511)
+ c->raptor = 1;
+
+ if (pdev->subsystem_device == 0xc05a) {
+ c->limit_sectors = 1;
+ printk(KERN_INFO
+ "%s: limit sectors per request to %d\n", c->name,
+ I2O_MAX_SECTORS_LIMITED);
+ }
+#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
+ if (sizeof(dma_addr_t) > 4) {
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+ printk(KERN_INFO "%s: 64-bit DMA unavailable\n",
+ c->name);
+ else {
+ c->pae_support = 1;
+ printk(KERN_INFO "%s: using 64-bit DMA\n",
+ c->name);
+ }
+ }
+#endif
+ }
+
+ if ((rc = i2o_pci_alloc(c))) {
+ printk(KERN_ERR "%s: DMA / IO allocation for I2O controller "
+ "failed\n", c->name);
+ goto free_controller;
+ }
+
+ if (i2o_pci_irq_enable(c)) {
+ printk(KERN_ERR "%s: unable to enable interrupts for I2O "
+ "controller\n", c->name);
+ goto free_pci;
+ }
+
+ if ((rc = i2o_iop_add(c)))
+ goto uninstall;
+
+ if (i960)
+ pci_write_config_word(i960, 0x42, 0x03ff);
+
+ return 0;
+
+ uninstall:
+ i2o_pci_irq_disable(c);
+
+ free_pci:
+ i2o_pci_free(c);
+
+ free_controller:
+ i2o_iop_free(c);
+
+ disable:
+ pci_disable_device(pdev);
+
+ return rc;
+}
+
+/**
+ * i2o_pci_remove - Removes an I2O controller from the system
+ * @pdev: I2O controller which should be removed
+ *
+ * Reset the I2O controller, disable interrupts and remove all allocated
+ * resources.
+ */
+static void __devexit i2o_pci_remove(struct pci_dev *pdev)
+{
+ struct i2o_controller *c;
+ c = pci_get_drvdata(pdev);
+
+ i2o_iop_remove(c);
+ i2o_pci_irq_disable(c);
+ i2o_pci_free(c);
+
+ pci_disable_device(pdev);
+
+ printk(KERN_INFO "%s: Controller removed.\n", c->name);
+
+ put_device(&c->device);
+};
+
+/* PCI driver for I2O controller */
+static struct pci_driver i2o_pci_driver = {
+ .name = "PCI_I2O",
+ .id_table = i2o_pci_ids,
+ .probe = i2o_pci_probe,
+ .remove = __devexit_p(i2o_pci_remove),
+};
+
+/**
+ * i2o_pci_init - registers I2O PCI driver in PCI subsystem
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+int __init i2o_pci_init(void)
+{
+ return pci_register_driver(&i2o_pci_driver);
+};
+
+/**
+ * i2o_pci_exit - unregisters I2O PCI driver from PCI subsystem
+ */
+void __exit i2o_pci_exit(void)
+{
+ pci_unregister_driver(&i2o_pci_driver);
+};
+
+MODULE_DEVICE_TABLE(pci, i2o_pci_ids);