From 870678baa7f8b9a65945237704d84d7724f42459 Mon Sep 17 00:00:00 2001
From: Marek Vasut
Date: Mon, 20 Jun 2016 15:28:36 +0200
Subject: target: socfpga: Add support for QSPI NOR boot

Add necessary kernel backports to support the Cadence QSPI controller
present on the Altera SoCFPGA SoC.

Signed-off-by: Marek Vasut
---
 ...-Add-driver-for-Cadence-Quad-SPI-Flash-Co.patch | 1431 ++++++++++++++++++++
 1 file changed, 1431 insertions(+)
 create mode 100644 target/linux/socfpga/patches-4.4/0031-mtd-spi-nor-Add-driver-for-Cadence-Quad-SPI-Flash-Co.patch

diff --git a/target/linux/socfpga/patches-4.4/0031-mtd-spi-nor-Add-driver-for-Cadence-Quad-SPI-Flash-Co.patch b/target/linux/socfpga/patches-4.4/0031-mtd-spi-nor-Add-driver-for-Cadence-Quad-SPI-Flash-Co.patch
new file mode 100644
index 0000000000..1a55ee2dd6
--- /dev/null
+++ b/target/linux/socfpga/patches-4.4/0031-mtd-spi-nor-Add-driver-for-Cadence-Quad-SPI-Flash-Co.patch
@@ -0,0 +1,1431 @@
+From 30e33517815b3c518fc2483a23bfe1445c0ae92d Mon Sep 17 00:00:00 2001
+From: Graham Moore
+Date: Tue, 28 Jul 2015 12:38:03 -0500
+Subject: [PATCH 31/33] mtd: spi-nor: Add driver for Cadence Quad SPI Flash
+ Controller.
+
+Add support for the Cadence QSPI controller. This controller is
+present in the Altera SoCFPGA SoCs and this driver has been tested
+on the Cyclone V SoC.
+
+Signed-off-by: Graham Moore
+Signed-off-by: Marek Vasut
+Cc: Alan Tull
+Cc: Brian Norris
+Cc: David Woodhouse
+Cc: Dinh Nguyen
+Cc: Graham Moore
+Cc: Vignesh R
+Cc: Yves Vandervennet
+Cc: devicetree@vger.kernel.org
+
+V2: use NULL instead of modalias in spi_nor_scan call
+V3: Use existing property is-decoded-cs instead of creating duplicate.
+V4: Support Micron quad mode by snooping command stream for EVCR command
+    and subsequently configuring Cadence controller for quad mode.
+V5: Clean up sparse and smatch complaints. Remove snooping of Micron
+    quad mode. Add comment on XIP mode bit and dummy clock cycles. Set
+    up SRAM partition at 1:1 during init.
+V6: Remove dts patch that was included by mistake. Incorporate Vikas's
+    comments regarding fifo width, SRAM partition setting, and trigger
+    address. Trigger address was added as an unsigned int, as it is not
+    an IO resource per se, and does not need to be mapped. Also add
+    Marek Vasut's workaround for picking up OF properties on subnodes.
+V7: - Perform coding-style cleanup and type fixes. Remove ugly QSPI_*()
+      macros and replace them with functions. Get rid of unused variables.
+    - Implement support for nor->set_protocol() to handle Quad-command,
+      this patch now depends on the following patch:
+      mtd: spi-nor: notify (Q)SPI controller about protocol change
+    - Replace that cqspi_fifo_read() disaster with plain old readsl()
+      and cqspi_fifo_write() tentacle horror with pretty writesl().
+    - Remove CQSPI_SUPPORT_XIP_CHIPS, which is broken.
+    - Get rid of cqspi_find_chipselect() mess, instead just place the
+      struct cqspi_st and chipselect number into struct cqspi_flash_pdata
+      and set nor->priv to the struct cqspi_flash_pdata of that particular
+      chip.
+    - Replace the odd math in calculate_ticks_for_ns() with DIV_ROUND_UP().
+    - Make variables const where applicable.
+V8: - Implement a function to wait for bit being set/unset for a given
+      period of time and use it to replace the ad-hoc bits of code.
+    - Configure the write underflow watermark to be 1/8 of FIFO size.
+    - Extract out the SPI NOR flash probing code into separate function
+      to clearly mark what will soon be considered a boilerplate code.
+    - Repair the handling of mode bits, which caused instability in V7.
+    - Clean up the interrupt handling
+    - Fix Kconfig help text and make the patch depend on OF and COMPILE_TEST.
+V9: - Rename CQSPI_REG_IRQ_IND_RD_OVERFLOW to CQSPI_REG_IRQ_IND_SRAM_FULL
+    - Merge cqspi_controller_disable() into cqspi_controller_enable() and
+      make the mode selectable via parameter.
+V10: - Update against Cyrille's new patchset and changes to linux-mtd.
+     - Repair problem with multiple QSPI NOR devices having the same mtd->name,
+       they are now named devname.cs , where cs is the chipselect ID.
+V11: - Replace dependency on ARCH_SOCFPGA with dependency on ARM
+---
+ drivers/mtd/spi-nor/Kconfig           |   11 +
+ drivers/mtd/spi-nor/Makefile          |    1 +
+ drivers/mtd/spi-nor/cadence-quadspi.c | 1324 +++++++++++++++++++++++++++++++++
+ 3 files changed, 1336 insertions(+)
+ create mode 100644 drivers/mtd/spi-nor/cadence-quadspi.c
+
+diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
+index 2fe2a7e..02082ae 100644
+--- a/drivers/mtd/spi-nor/Kconfig
++++ b/drivers/mtd/spi-nor/Kconfig
+@@ -41,4 +41,15 @@ config SPI_NXP_SPIFI
+ 	  Flash. Enable this option if you have a device with a SPIFI
+ 	  controller and want to access the Flash as a mtd device.
+
++config SPI_CADENCE_QUADSPI
++	tristate "Cadence Quad SPI controller"
++	depends on OF && (ARM || COMPILE_TEST)
++	help
++	  Enable support for the Cadence Quad SPI Flash controller.
++
++	  Cadence QSPI is a specialized controller for connecting an SPI
++	  Flash over 1/2/4-bit wide bus. Enable this option if you have a
++	  device with a Cadence QSPI controller and want to access the
++	  Flash as an MTD device.
++
+ endif # MTD_SPI_NOR
+diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
+index e53333e..446c6b9 100644
+--- a/drivers/mtd/spi-nor/Makefile
++++ b/drivers/mtd/spi-nor/Makefile
+@@ -1,3 +1,4 @@
+ obj-$(CONFIG_MTD_SPI_NOR)	+= spi-nor.o
++obj-$(CONFIG_SPI_CADENCE_QUADSPI)	+= cadence-quadspi.o
+ obj-$(CONFIG_SPI_FSL_QUADSPI)	+= fsl-quadspi.o
+ obj-$(CONFIG_SPI_NXP_SPIFI)	+= nxp-spifi.o
+diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
+new file mode 100644
+index 0000000..7e61fba
+--- /dev/null
++++ b/drivers/mtd/spi-nor/cadence-quadspi.c
+@@ -0,0 +1,1324 @@
++/*
++ * Driver for Cadence QSPI Controller
++ *
++ * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program. If not, see <http://www.gnu.org/licenses/>.
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define CQSPI_NAME "cadence-qspi" ++#define CQSPI_MAX_CHIPSELECT 16 ++ ++struct cqspi_st; ++ ++struct cqspi_flash_pdata { ++ struct spi_nor nor; ++ struct cqspi_st *cqspi; ++ u32 clk_rate; ++ u32 read_delay; ++ u32 tshsl_ns; ++ u32 tsd2d_ns; ++ u32 tchsh_ns; ++ u32 tslch_ns; ++ u8 inst_width; ++ u8 addr_width; ++ u8 cs; ++}; ++ ++struct cqspi_st { ++ struct platform_device *pdev; ++ ++ struct clk *clk; ++ unsigned int sclk; ++ ++ void __iomem *iobase; ++ void __iomem *ahb_base; ++ struct completion transfer_complete; ++ struct mutex bus_mutex; ++ ++ int current_cs; ++ unsigned long master_ref_clk_hz; ++ bool is_decoded_cs; ++ u32 fifo_depth; ++ u32 fifo_width; ++ u32 trigger_address; ++ struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; ++}; ++ ++/* Operation timeout value */ ++#define CQSPI_TIMEOUT_MS 500 ++#define CQSPI_READ_TIMEOUT_MS 10 ++ ++/* Instruction type */ ++#define CQSPI_INST_TYPE_SINGLE 0 ++#define CQSPI_INST_TYPE_DUAL 1 ++#define CQSPI_INST_TYPE_QUAD 2 ++ ++#define CQSPI_DUMMY_CLKS_PER_BYTE 8 ++#define CQSPI_DUMMY_BYTES_MAX 4 ++#define CQSPI_DUMMY_CLKS_MAX 31 ++ ++#define CQSPI_STIG_DATA_LEN_MAX 8 ++ ++/* Register map */ ++#define CQSPI_REG_CONFIG 0x00 ++#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0) ++#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9) ++#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10 ++#define CQSPI_REG_CONFIG_DMA_MASK BIT(15) ++#define CQSPI_REG_CONFIG_BAUD_LSB 19 ++#define CQSPI_REG_CONFIG_IDLE_LSB 31 ++#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF ++#define CQSPI_REG_CONFIG_BAUD_MASK 0xF ++ ++#define CQSPI_REG_RD_INSTR 0x04 ++#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0 ++#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8 ++#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12 ++#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16 ++#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20 ++#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24 ++#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3 ++#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3 ++#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3 ++#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F ++ ++#define CQSPI_REG_WR_INSTR 0x08 ++#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0 ++#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12 ++#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16 ++ ++#define CQSPI_REG_DELAY 0x0C ++#define CQSPI_REG_DELAY_TSLCH_LSB 0 ++#define CQSPI_REG_DELAY_TCHSH_LSB 8 ++#define CQSPI_REG_DELAY_TSD2D_LSB 16 ++#define CQSPI_REG_DELAY_TSHSL_LSB 24 ++#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF ++#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF ++#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF ++#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF ++ ++#define CQSPI_REG_READCAPTURE 0x10 ++#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0 ++#define CQSPI_REG_READCAPTURE_DELAY_LSB 1 ++#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF ++ ++#define CQSPI_REG_SIZE 0x14 ++#define CQSPI_REG_SIZE_ADDRESS_LSB 0 ++#define CQSPI_REG_SIZE_PAGE_LSB 4 ++#define CQSPI_REG_SIZE_BLOCK_LSB 16 ++#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF ++#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF ++#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F ++ ++#define CQSPI_REG_SRAMPARTITION 0x18 ++#define CQSPI_REG_INDIRECTTRIGGER 0x1C ++ ++#define CQSPI_REG_DMA 0x20 ++#define CQSPI_REG_DMA_SINGLE_LSB 0 ++#define CQSPI_REG_DMA_BURST_LSB 8 ++#define CQSPI_REG_DMA_SINGLE_MASK 0xFF ++#define CQSPI_REG_DMA_BURST_MASK 0xFF ++ ++#define CQSPI_REG_REMAP 0x24 ++#define 
CQSPI_REG_MODE_BIT 0x28 ++ ++#define CQSPI_REG_SDRAMLEVEL 0x2C ++#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0 ++#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16 ++#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF ++#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF ++ ++#define CQSPI_REG_IRQSTATUS 0x40 ++#define CQSPI_REG_IRQMASK 0x44 ++ ++#define CQSPI_REG_INDIRECTRD 0x60 ++#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0) ++#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1) ++#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5) ++ ++#define CQSPI_REG_INDIRECTRDWATERMARK 0x64 ++#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68 ++#define CQSPI_REG_INDIRECTRDBYTES 0x6C ++ ++#define CQSPI_REG_CMDCTRL 0x90 ++#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0) ++#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1) ++#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12 ++#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15 ++#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16 ++#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19 ++#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20 ++#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23 ++#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24 ++#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7 ++#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3 ++#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7 ++ ++#define CQSPI_REG_INDIRECTWR 0x70 ++#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0) ++#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1) ++#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5) ++ ++#define CQSPI_REG_INDIRECTWRWATERMARK 0x74 ++#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78 ++#define CQSPI_REG_INDIRECTWRBYTES 0x7C ++ ++#define CQSPI_REG_CMDADDRESS 0x94 ++#define CQSPI_REG_CMDREADDATALOWER 0xA0 ++#define CQSPI_REG_CMDREADDATAUPPER 0xA4 ++#define CQSPI_REG_CMDWRITEDATALOWER 0xA8 ++#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC ++ ++/* Interrupt status bits */ ++#define CQSPI_REG_IRQ_MODE_ERR BIT(0) ++#define CQSPI_REG_IRQ_UNDERFLOW BIT(1) ++#define CQSPI_REG_IRQ_IND_COMP BIT(2) ++#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3) ++#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4) ++#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5) ++#define CQSPI_REG_IRQ_WATERMARK BIT(6) ++#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12) ++ ++#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \ ++ CQSPI_REG_IRQ_IND_SRAM_FULL | \ ++ CQSPI_REG_IRQ_IND_COMP) ++ ++#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \ ++ CQSPI_REG_IRQ_WATERMARK | \ ++ CQSPI_REG_IRQ_UNDERFLOW) ++ ++#define CQSPI_IRQ_STATUS_MASK 0x1FFFF ++ ++static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear) ++{ ++ unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS); ++ u32 val; ++ ++ while (1) { ++ val = readl(reg); ++ if (clear) ++ val = ~val; ++ val &= mask; ++ ++ if (val == mask) ++ return 0; ++ ++ if (time_after(jiffies, end)) ++ return -ETIMEDOUT; ++ } ++} ++ ++static bool cqspi_is_idle(struct cqspi_st *cqspi) ++{ ++ u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); ++ ++ return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB); ++} ++ ++static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi) ++{ ++ u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL); ++ ++ reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB; ++ return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK; ++} ++ ++static irqreturn_t cqspi_irq_handler(int this_irq, void *dev) ++{ ++ struct cqspi_st *cqspi = dev; ++ unsigned int irq_status; ++ ++ /* Read interrupt status */ ++ irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS); ++ ++ /* Clear interrupt */ ++ writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS); ++ ++ irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR; ++ ++ if (irq_status) ++ 
complete(&cqspi->transfer_complete); ++ ++ return IRQ_HANDLED; ++} ++ ++static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode) ++{ ++ unsigned int rdreg = 0; ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ ++ rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB; ++ rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB; ++ ++ if (nor->flash_read == SPI_NOR_QUAD) ++ rdreg |= CQSPI_INST_TYPE_QUAD ++ << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB; ++ return rdreg; ++} ++ ++static int cqspi_wait_idle(struct cqspi_st *cqspi) ++{ ++ const unsigned int poll_idle_retry = 3; ++ unsigned int count = 0; ++ unsigned long timeout; ++ ++ timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS); ++ while (1) { ++ /* ++ * Read few times in succession to ensure the controller ++ * is indeed idle, that is, the bit does not transition ++ * low again. ++ */ ++ if (cqspi_is_idle(cqspi)) ++ count++; ++ else ++ count = 0; ++ ++ if (count >= poll_idle_retry) ++ return 0; ++ ++ if (time_after(jiffies, timeout)) { ++ /* Timeout, in busy mode. */ ++ dev_err(&cqspi->pdev->dev, ++ "QSPI is still busy after %dms timeout.\n", ++ CQSPI_TIMEOUT_MS); ++ return -ETIMEDOUT; ++ } ++ ++ cpu_relax(); ++ } ++} ++ ++static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg) ++{ ++ void __iomem *reg_base = cqspi->iobase; ++ int ret; ++ ++ /* Write the CMDCTRL without start execution. */ ++ writel(reg, reg_base + CQSPI_REG_CMDCTRL); ++ /* Start execute */ ++ reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK; ++ writel(reg, reg_base + CQSPI_REG_CMDCTRL); ++ ++ /* Polling for completion. */ ++ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL, ++ CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1); ++ if (ret) { ++ dev_err(&cqspi->pdev->dev, ++ "Flash command execution timed out.\n"); ++ return ret; ++ } ++ ++ /* Polling QSPI idle status. */ ++ return cqspi_wait_idle(cqspi); ++} ++ ++static int cqspi_command_read(struct spi_nor *nor, ++ const u8 *txbuf, const unsigned n_tx, ++ u8 *rxbuf, const unsigned n_rx) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ void __iomem *reg_base = cqspi->iobase; ++ unsigned int rdreg; ++ unsigned int reg; ++ unsigned int read_len; ++ int status; ++ ++ if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) { ++ dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n", ++ n_rx, rxbuf); ++ return -EINVAL; ++ } ++ ++ reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB; ++ ++ rdreg = cqspi_calc_rdreg(nor, txbuf[0]); ++ writel(rdreg, reg_base + CQSPI_REG_RD_INSTR); ++ ++ reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB); ++ ++ /* 0 means 1 byte. */ ++ reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK) ++ << CQSPI_REG_CMDCTRL_RD_BYTES_LSB); ++ status = cqspi_exec_flash_cmd(cqspi, reg); ++ if (status) ++ return status; ++ ++ reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER); ++ ++ /* Put the read value into rx_buf */ ++ read_len = (n_rx > 4) ? 
4 : n_rx; ++ memcpy(rxbuf, ®, read_len); ++ rxbuf += read_len; ++ ++ if (n_rx > 4) { ++ reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER); ++ ++ read_len = n_rx - read_len; ++ memcpy(rxbuf, ®, read_len); ++ } ++ ++ return 0; ++} ++ ++static int cqspi_command_write(struct spi_nor *nor, const u8 opcode, ++ const u8 *txbuf, const unsigned n_tx) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ void __iomem *reg_base = cqspi->iobase; ++ unsigned int reg; ++ unsigned int data; ++ int ret; ++ ++ if (n_tx > 4 || (n_tx && !txbuf)) { ++ dev_err(nor->dev, ++ "Invalid input argument, cmdlen %d txbuf 0x%p\n", ++ n_tx, txbuf); ++ return -EINVAL; ++ } ++ ++ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; ++ if (n_tx) { ++ reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB); ++ reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK) ++ << CQSPI_REG_CMDCTRL_WR_BYTES_LSB; ++ data = 0; ++ memcpy(&data, txbuf, n_tx); ++ writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER); ++ } ++ ++ ret = cqspi_exec_flash_cmd(cqspi, reg); ++ return ret; ++} ++ ++static int cqspi_command_write_addr(struct spi_nor *nor, ++ const u8 opcode, const unsigned int addr) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ void __iomem *reg_base = cqspi->iobase; ++ unsigned int reg; ++ ++ reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; ++ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB); ++ reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) ++ << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB; ++ ++ writel(addr, reg_base + CQSPI_REG_CMDADDRESS); ++ ++ return cqspi_exec_flash_cmd(cqspi, reg); ++} ++ ++static int cqspi_indirect_read_setup(struct spi_nor *nor, ++ const unsigned int from_addr) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ void __iomem *reg_base = cqspi->iobase; ++ unsigned int dummy_clk = 0; ++ unsigned int reg; ++ ++ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); ++ ++ reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB; ++ reg |= cqspi_calc_rdreg(nor, nor->read_opcode); ++ ++ /* Setup dummy clock cycles */ ++ dummy_clk = nor->read_dummy; ++ if (dummy_clk > CQSPI_DUMMY_CLKS_MAX) ++ dummy_clk = CQSPI_DUMMY_CLKS_MAX; ++ ++ if (dummy_clk / 8) { ++ reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB); ++ /* Set mode bits high to ensure chip doesn't enter XIP */ ++ writel(0xFF, reg_base + CQSPI_REG_MODE_BIT); ++ ++ /* Need to subtract the mode byte (8 clocks). */ ++ if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD) ++ dummy_clk -= 8; ++ ++ if (dummy_clk) ++ reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK) ++ << CQSPI_REG_RD_INSTR_DUMMY_LSB; ++ } ++ ++ writel(reg, reg_base + CQSPI_REG_RD_INSTR); ++ ++ /* Set address width */ ++ reg = readl(reg_base + CQSPI_REG_SIZE); ++ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; ++ reg |= (nor->addr_width - 1); ++ writel(reg, reg_base + CQSPI_REG_SIZE); ++ return 0; ++} ++ ++static int cqspi_indirect_read_execute(struct spi_nor *nor, ++ u8 *rxbuf, const unsigned n_rx) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ void __iomem *reg_base = cqspi->iobase; ++ void __iomem *ahb_base = cqspi->ahb_base; ++ unsigned int remaining = n_rx; ++ unsigned int bytes_to_read = 0; ++ int ret = 0; ++ ++ writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES); ++ ++ /* Clear all interrupts. 
*/ ++ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); ++ ++ writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK); ++ ++ reinit_completion(&cqspi->transfer_complete); ++ writel(CQSPI_REG_INDIRECTRD_START_MASK, ++ reg_base + CQSPI_REG_INDIRECTRD); ++ ++ while (remaining > 0) { ++ ret = wait_for_completion_timeout(&cqspi->transfer_complete, ++ msecs_to_jiffies ++ (CQSPI_READ_TIMEOUT_MS)); ++ ++ bytes_to_read = cqspi_get_rd_sram_level(cqspi); ++ ++ if (!ret && bytes_to_read == 0) { ++ dev_err(nor->dev, "Indirect read timeout, no bytes\n"); ++ ret = -ETIMEDOUT; ++ goto failrd; ++ } ++ ++ while (bytes_to_read != 0) { ++ bytes_to_read *= cqspi->fifo_width; ++ bytes_to_read = bytes_to_read > remaining ? ++ remaining : bytes_to_read; ++ readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4)); ++ rxbuf += bytes_to_read; ++ remaining -= bytes_to_read; ++ bytes_to_read = cqspi_get_rd_sram_level(cqspi); ++ } ++ ++ if (remaining > 0) ++ reinit_completion(&cqspi->transfer_complete); ++ } ++ ++ /* Check indirect done status */ ++ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD, ++ CQSPI_REG_INDIRECTRD_DONE_MASK, 0); ++ if (ret) { ++ dev_err(nor->dev, ++ "Indirect read completion error (%i)\n", ret); ++ goto failrd; ++ } ++ ++ /* Disable interrupt */ ++ writel(0, reg_base + CQSPI_REG_IRQMASK); ++ ++ /* Clear indirect completion status */ ++ writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD); ++ ++ return 0; ++ ++failrd: ++ /* Disable interrupt */ ++ writel(0, reg_base + CQSPI_REG_IRQMASK); ++ ++ /* Cancel the indirect read */ ++ writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, ++ reg_base + CQSPI_REG_INDIRECTRD); ++ return ret; ++} ++ ++static int cqspi_indirect_write_setup(struct spi_nor *nor, ++ const unsigned int to_addr) ++{ ++ unsigned int reg; ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ void __iomem *reg_base = cqspi->iobase; ++ ++ /* Set opcode. */ ++ reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB; ++ writel(reg, reg_base + CQSPI_REG_WR_INSTR); ++ reg = cqspi_calc_rdreg(nor, nor->program_opcode); ++ writel(reg, reg_base + CQSPI_REG_RD_INSTR); ++ ++ writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR); ++ ++ reg = readl(reg_base + CQSPI_REG_SIZE); ++ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; ++ reg |= (nor->addr_width - 1); ++ writel(reg, reg_base + CQSPI_REG_SIZE); ++ return 0; ++} ++ ++static int cqspi_indirect_write_execute(struct spi_nor *nor, ++ const u8 *txbuf, const unsigned n_tx) ++{ ++ const unsigned int page_size = nor->page_size; ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ void __iomem *reg_base = cqspi->iobase; ++ unsigned int remaining = n_tx; ++ unsigned int write_bytes; ++ int ret; ++ ++ writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES); ++ ++ /* Clear all interrupts. */ ++ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); ++ ++ writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK); ++ ++ reinit_completion(&cqspi->transfer_complete); ++ writel(CQSPI_REG_INDIRECTWR_START_MASK, ++ reg_base + CQSPI_REG_INDIRECTWR); ++ ++ while (remaining > 0) { ++ write_bytes = remaining > page_size ? 
page_size : remaining; ++ writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4)); ++ ++ ret = wait_for_completion_timeout(&cqspi->transfer_complete, ++ msecs_to_jiffies ++ (CQSPI_TIMEOUT_MS)); ++ if (!ret) { ++ dev_err(nor->dev, "Indirect write timeout\n"); ++ ret = -ETIMEDOUT; ++ goto failwr; ++ } ++ ++ txbuf += write_bytes; ++ remaining -= write_bytes; ++ ++ if (remaining > 0) ++ reinit_completion(&cqspi->transfer_complete); ++ } ++ ++ /* Check indirect done status */ ++ ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR, ++ CQSPI_REG_INDIRECTWR_DONE_MASK, 0); ++ if (ret) { ++ dev_err(nor->dev, ++ "Indirect write completion error (%i)\n", ret); ++ goto failwr; ++ } ++ ++ /* Disable interrupt. */ ++ writel(0, reg_base + CQSPI_REG_IRQMASK); ++ ++ /* Clear indirect completion status */ ++ writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR); ++ ++ cqspi_wait_idle(cqspi); ++ ++ return 0; ++ ++failwr: ++ /* Disable interrupt. */ ++ writel(0, reg_base + CQSPI_REG_IRQMASK); ++ ++ /* Cancel the indirect write */ ++ writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, ++ reg_base + CQSPI_REG_INDIRECTWR); ++ return ret; ++} ++ ++static int cqspi_set_protocol(struct spi_nor *nor, enum spi_nor_protocol proto) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ ++ switch (proto) { ++ case SNOR_PROTO_1_1_1: ++ case SNOR_PROTO_1_1_2: ++ case SNOR_PROTO_1_1_4: ++ case SNOR_PROTO_1_2_2: ++ case SNOR_PROTO_1_4_4: ++ f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE; ++ break; ++ case SNOR_PROTO_2_2_2: ++ f_pdata->inst_width = CQSPI_INST_TYPE_DUAL; ++ break; ++ case SNOR_PROTO_4_4_4: ++ f_pdata->inst_width = CQSPI_INST_TYPE_QUAD; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ switch (proto) { ++ case SNOR_PROTO_1_1_1: ++ case SNOR_PROTO_1_1_2: ++ case SNOR_PROTO_1_1_4: ++ f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE; ++ break; ++ case SNOR_PROTO_1_2_2: ++ case SNOR_PROTO_2_2_2: ++ f_pdata->addr_width = CQSPI_INST_TYPE_DUAL; ++ break; ++ case SNOR_PROTO_1_4_4: ++ case SNOR_PROTO_4_4_4: ++ f_pdata->addr_width = CQSPI_INST_TYPE_QUAD; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static void cqspi_write(struct spi_nor *nor, loff_t to, ++ size_t len, size_t *retlen, const u_char *buf) ++{ ++ int ret; ++ ++ ret = cqspi_set_protocol(nor, nor->write_proto); ++ if (ret) ++ return; ++ ++ ret = cqspi_indirect_write_setup(nor, to); ++ if (ret) ++ return; ++ ++ ret = cqspi_indirect_write_execute(nor, buf, len); ++ if (ret) ++ return; ++ ++ *retlen += len; ++} ++ ++static int cqspi_read(struct spi_nor *nor, loff_t from, ++ size_t len, size_t *retlen, u_char *buf) ++{ ++ int ret; ++ ++ ret = cqspi_set_protocol(nor, nor->read_proto); ++ if (ret) ++ return ret; ++ ++ ret = cqspi_indirect_read_setup(nor, from); ++ if (ret) ++ return ret; ++ ++ ret = cqspi_indirect_read_execute(nor, buf, len); ++ if (ret) ++ return ret; ++ ++ *retlen += len; ++ return ret; ++} ++ ++static int cqspi_erase(struct spi_nor *nor, loff_t offs) ++{ ++ int ret; ++ ++ ret = cqspi_set_protocol(nor, nor->erase_proto); ++ if (ret) ++ return ret; ++ ++ /* Send write enable, then erase commands. */ ++ ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0); ++ if (ret) ++ return ret; ++ ++ /* Set up command buffer. 
*/ ++ ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs); ++ if (ret) ++ return ret; ++ ++ return 0; ++} ++ ++static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz, ++ const unsigned int ns_val) ++{ ++ unsigned int ticks; ++ ++ ticks = ref_clk_hz / 1000; /* kHz */ ++ ticks = DIV_ROUND_UP(ticks * ns_val, 1000000); ++ ++ return ticks; ++} ++ ++static void cqspi_delay(struct spi_nor *nor, const unsigned int sclk_hz) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ void __iomem *iobase = cqspi->iobase; ++ const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; ++ unsigned int tshsl, tchsh, tslch, tsd2d; ++ unsigned int reg; ++ unsigned int tsclk; ++ ++ /* calculate the number of ref ticks for one sclk tick */ ++ tsclk = (ref_clk_hz + sclk_hz - 1) / sclk_hz; ++ ++ tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns); ++ /* this particular value must be at least one sclk */ ++ if (tshsl < tsclk) ++ tshsl = tsclk; ++ ++ tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns); ++ tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns); ++ tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns); ++ ++ reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK) ++ << CQSPI_REG_DELAY_TSHSL_LSB; ++ reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK) ++ << CQSPI_REG_DELAY_TCHSH_LSB; ++ reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK) ++ << CQSPI_REG_DELAY_TSLCH_LSB; ++ reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK) ++ << CQSPI_REG_DELAY_TSD2D_LSB; ++ writel(reg, iobase + CQSPI_REG_DELAY); ++} ++ ++static void cqspi_config_baudrate_div(struct cqspi_st *cqspi, ++ const unsigned int sclk_hz) ++{ ++ const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; ++ void __iomem *reg_base = cqspi->iobase; ++ unsigned int reg; ++ unsigned int div; ++ ++ reg = readl(reg_base + CQSPI_REG_CONFIG); ++ reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB); ++ ++ div = ref_clk_hz / sclk_hz; ++ ++ /* Recalculate the baudrate divisor based on QSPI specification. */ ++ if (div > 32) ++ div = 32; ++ ++ /* Check if even number. */ ++ if (div & 1) ++ div = (div / 2); ++ else ++ div = (div / 2) - 1; ++ ++ div = (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB; ++ reg |= div; ++ writel(reg, reg_base + CQSPI_REG_CONFIG); ++} ++ ++static void cqspi_readdata_capture(struct cqspi_st *cqspi, ++ const unsigned int bypass, ++ const unsigned int delay) ++{ ++ void __iomem *reg_base = cqspi->iobase; ++ unsigned int reg; ++ ++ reg = readl(reg_base + CQSPI_REG_READCAPTURE); ++ ++ if (bypass) ++ reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB); ++ else ++ reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB); ++ ++ reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK ++ << CQSPI_REG_READCAPTURE_DELAY_LSB); ++ ++ reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK) ++ << CQSPI_REG_READCAPTURE_DELAY_LSB; ++ ++ writel(reg, reg_base + CQSPI_REG_READCAPTURE); ++} ++ ++static void cqspi_chipselect(struct spi_nor *nor) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ void __iomem *reg_base = cqspi->iobase; ++ unsigned int chip_select = f_pdata->cs; ++ unsigned int reg; ++ ++ reg = readl(reg_base + CQSPI_REG_CONFIG); ++ if (cqspi->is_decoded_cs) { ++ reg |= CQSPI_REG_CONFIG_DECODE_MASK; ++ } else { ++ reg &= ~CQSPI_REG_CONFIG_DECODE_MASK; ++ ++ /* Convert CS if without decoder. 
++ * CS0 to 4b'1110 ++ * CS1 to 4b'1101 ++ * CS2 to 4b'1011 ++ * CS3 to 4b'0111 ++ */ ++ chip_select = 0xF & ~(1 << chip_select); ++ } ++ ++ reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK ++ << CQSPI_REG_CONFIG_CHIPSELECT_LSB); ++ reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK) ++ << CQSPI_REG_CONFIG_CHIPSELECT_LSB; ++ writel(reg, reg_base + CQSPI_REG_CONFIG); ++} ++ ++static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable) ++{ ++ void __iomem *reg_base = cqspi->iobase; ++ unsigned int reg; ++ ++ reg = readl(reg_base + CQSPI_REG_CONFIG); ++ ++ if (enable) ++ reg |= CQSPI_REG_CONFIG_ENABLE_MASK; ++ else ++ reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK; ++ ++ writel(reg, reg_base + CQSPI_REG_CONFIG); ++} ++ ++static void cqspi_switch_cs(struct spi_nor *nor) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ void __iomem *iobase = cqspi->iobase; ++ unsigned int reg; ++ ++ /* configure page size and block size. */ ++ reg = readl(iobase + CQSPI_REG_SIZE); ++ reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB); ++ reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB); ++ reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; ++ reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB); ++ reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB); ++ reg |= (nor->addr_width - 1); ++ writel(reg, iobase + CQSPI_REG_SIZE); ++ ++ /* configure the chip select */ ++ cqspi_chipselect(nor); ++} ++ ++static int cqspi_prep_unlocked(struct spi_nor *nor, enum spi_nor_ops ops) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ const unsigned int sclk = f_pdata->clk_rate; ++ const int switch_cs = (cqspi->current_cs != f_pdata->cs); ++ const int switch_ck = (cqspi->sclk != sclk); ++ ++ if (switch_cs || switch_ck) ++ cqspi_controller_enable(cqspi, 0); ++ ++ /* Switch chip select. 
*/ ++ if (switch_cs) { ++ cqspi->current_cs = f_pdata->cs; ++ cqspi_switch_cs(nor); ++ } ++ ++ /* Setup baudrate divisor and delays */ ++ if (switch_ck) { ++ cqspi->sclk = sclk; ++ cqspi_config_baudrate_div(cqspi, sclk); ++ cqspi_delay(nor, sclk); ++ cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay); ++ } ++ ++ if (switch_cs || switch_ck) ++ cqspi_controller_enable(cqspi, 1); ++ ++ return 0; ++} ++ ++static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ ++ mutex_lock(&cqspi->bus_mutex); ++ ++ return cqspi_prep_unlocked(nor, ops); ++} ++ ++static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) ++{ ++ struct cqspi_flash_pdata *f_pdata = nor->priv; ++ struct cqspi_st *cqspi = f_pdata->cqspi; ++ ++ mutex_unlock(&cqspi->bus_mutex); ++} ++ ++static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) ++{ ++ int ret; ++ ++ ret = cqspi_set_protocol(nor, nor->reg_proto); ++ if (ret) ++ goto exit; ++ ++ cqspi_prep_unlocked(nor, SPI_NOR_OPS_READ); ++ ++ ret = cqspi_command_read(nor, &opcode, 1, buf, len); ++exit: ++ return ret; ++} ++ ++static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) ++{ ++ int ret; ++ ++ ret = cqspi_set_protocol(nor, nor->reg_proto); ++ if (ret) ++ goto exit; ++ ++ cqspi_prep_unlocked(nor, SPI_NOR_OPS_WRITE); ++ ++ ret = cqspi_command_write(nor, opcode, buf, len); ++exit: ++ return ret; ++} ++ ++static int cqspi_of_get_flash_pdata(struct platform_device *pdev, ++ struct cqspi_flash_pdata *f_pdata, ++ struct device_node *np) ++{ ++ if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) { ++ dev_err(&pdev->dev, "couldn't determine read-delay\n"); ++ return -ENXIO; ++ } ++ ++ if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) { ++ dev_err(&pdev->dev, "couldn't determine tshsl-ns\n"); ++ return -ENXIO; ++ } ++ ++ if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) { ++ dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n"); ++ return -ENXIO; ++ } ++ ++ if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) { ++ dev_err(&pdev->dev, "couldn't determine tchsh-ns\n"); ++ return -ENXIO; ++ } ++ ++ if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) { ++ dev_err(&pdev->dev, "couldn't determine tslch-ns\n"); ++ return -ENXIO; ++ } ++ ++ if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) { ++ dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n"); ++ return -ENXIO; ++ } ++ ++ return 0; ++} ++ ++static int cqspi_of_get_pdata(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct cqspi_st *cqspi = platform_get_drvdata(pdev); ++ ++ cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs"); ++ ++ if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) { ++ dev_err(&pdev->dev, "couldn't determine fifo-depth\n"); ++ return -ENXIO; ++ } ++ ++ if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) { ++ dev_err(&pdev->dev, "couldn't determine fifo-width\n"); ++ return -ENXIO; ++ } ++ ++ if (of_property_read_u32(np, "cdns,trigger-address", ++ &cqspi->trigger_address)) { ++ dev_err(&pdev->dev, "couldn't determine trigger-address\n"); ++ return -ENXIO; ++ } ++ ++ return 0; ++} ++ ++static void cqspi_controller_init(struct cqspi_st *cqspi) ++{ ++ cqspi_controller_enable(cqspi, 0); ++ ++ /* Configure the remap address register, no remap */ ++ writel(0, cqspi->iobase 
+ CQSPI_REG_REMAP); ++ ++ /* Disable all interrupts. */ ++ writel(0, cqspi->iobase + CQSPI_REG_IRQMASK); ++ ++ /* Configure the SRAM split to 1:1 . */ ++ writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION); ++ ++ /* Load indirect trigger address. */ ++ writel(cqspi->trigger_address, ++ cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER); ++ ++ /* Program read watermark -- 1/2 of the FIFO. */ ++ writel(cqspi->fifo_depth * cqspi->fifo_width / 2, ++ cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK); ++ /* Program write watermark -- 1/8 of the FIFO. */ ++ writel(cqspi->fifo_depth * cqspi->fifo_width / 8, ++ cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK); ++ ++ cqspi_controller_enable(cqspi, 1); ++} ++ ++static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) ++{ ++ struct platform_device *pdev = cqspi->pdev; ++ struct device *dev = &pdev->dev; ++ struct cqspi_flash_pdata *f_pdata; ++ struct spi_nor *nor; ++ struct mtd_info *mtd; ++ unsigned int cs; ++ int i, ret; ++ ++ /* Get flash device data */ ++ for_each_available_child_of_node(dev->of_node, np) { ++ if (of_property_read_u32(np, "reg", &cs)) { ++ dev_err(dev, "Couldn't determine chip select.\n"); ++ goto err; ++ } ++ ++ if (cs > CQSPI_MAX_CHIPSELECT) { ++ dev_err(dev, "Chip select %d out of range.\n", cs); ++ goto err; ++ } ++ ++ f_pdata = &cqspi->f_pdata[cs]; ++ f_pdata->cqspi = cqspi; ++ f_pdata->cs = cs; ++ ++ ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np); ++ if (ret) ++ goto err; ++ ++ nor = &f_pdata->nor; ++ mtd = &nor->mtd; ++ ++ mtd->priv = nor; ++ ++ nor->dev = dev; ++ spi_nor_set_flash_node(nor, np); ++ nor->priv = f_pdata; ++ ++ nor->read_reg = cqspi_read_reg; ++ nor->write_reg = cqspi_write_reg; ++ nor->read = cqspi_read; ++ nor->write = cqspi_write; ++ nor->erase = cqspi_erase; ++ nor->prepare = cqspi_prep; ++ nor->unprepare = cqspi_unprep; ++ ++ mtd->name = kasprintf(GFP_KERNEL, "%s.%d", dev_name(dev), cs); ++ if (!mtd->name) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); ++ if (ret) ++ goto err; ++ ++ ret = mtd_device_register(mtd, NULL, 0); ++ if (ret) ++ goto err; ++ } ++ ++ return 0; ++ ++err: ++ for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++) ++ if (cqspi->f_pdata[i].nor.mtd.name) { ++ mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd); ++ kfree(cqspi->f_pdata[i].nor.mtd.name); ++ } ++ return ret; ++} ++ ++static int cqspi_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct device *dev = &pdev->dev; ++ struct cqspi_st *cqspi; ++ struct resource *res; ++ struct resource *res_ahb; ++ int ret; ++ int irq; ++ ++ cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL); ++ if (!cqspi) ++ return -ENOMEM; ++ ++ mutex_init(&cqspi->bus_mutex); ++ cqspi->pdev = pdev; ++ platform_set_drvdata(pdev, cqspi); ++ ++ /* Obtain configuration from OF. */ ++ ret = cqspi_of_get_pdata(pdev); ++ if (ret) { ++ dev_err(dev, "Cannot get mandatory OF data.\n"); ++ return -ENODEV; ++ } ++ ++ /* Obtain QSPI clock. */ ++ cqspi->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(cqspi->clk)) { ++ dev_err(dev, "Cannot claim QSPI clock.\n"); ++ return PTR_ERR(cqspi->clk); ++ } ++ ++ /* Obtain and remap controller address. */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ cqspi->iobase = devm_ioremap_resource(dev, res); ++ if (IS_ERR(cqspi->iobase)) { ++ dev_err(dev, "Cannot remap controller address.\n"); ++ return PTR_ERR(cqspi->iobase); ++ } ++ ++ /* Obtain and remap AHB address. 
 */
++	res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++	cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
++	if (IS_ERR(cqspi->ahb_base)) {
++		dev_err(dev, "Cannot remap AHB address.\n");
++		return PTR_ERR(cqspi->ahb_base);
++	}
++
++	init_completion(&cqspi->transfer_complete);
++
++	/* Obtain IRQ line. */
++	irq = platform_get_irq(pdev, 0);
++	if (irq < 0) {
++		dev_err(dev, "Cannot obtain IRQ.\n");
++		return -ENXIO;
++	}
++
++	ret = clk_prepare_enable(cqspi->clk);
++	if (ret) {
++		dev_err(dev, "Cannot enable QSPI clock.\n");
++		return ret;
++	}
++
++	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
++
++	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
++			       pdev->name, cqspi);
++	if (ret) {
++		dev_err(dev, "Cannot request IRQ.\n");
++		goto probe_irq_failed;
++	}
++
++	cqspi_wait_idle(cqspi);
++	cqspi_controller_init(cqspi);
++	cqspi->current_cs = -1;
++	cqspi->sclk = 0;
++
++	ret = cqspi_setup_flash(cqspi, np);
++	if (ret) {
++		dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
++		goto probe_setup_failed;
++	}
++
++	return ret;
++probe_irq_failed:
++	cqspi_controller_enable(cqspi, 0);
++probe_setup_failed:
++	clk_disable_unprepare(cqspi->clk);
++	return ret;
++}
++
++static int cqspi_remove(struct platform_device *pdev)
++{
++	struct cqspi_st *cqspi = platform_get_drvdata(pdev);
++	int i;
++
++	cqspi_controller_enable(cqspi, 0);
++
++	for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
++		if (cqspi->f_pdata[i].nor.mtd.name) {
++			mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
++			kfree(cqspi->f_pdata[i].nor.mtd.name);
++		}
++
++	clk_disable_unprepare(cqspi->clk);
++
++	return 0;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int cqspi_suspend(struct device *dev)
++{
++	struct cqspi_st *cqspi = dev_get_drvdata(dev);
++
++	cqspi_controller_enable(cqspi, 0);
++	return 0;
++}
++
++static int cqspi_resume(struct device *dev)
++{
++	struct cqspi_st *cqspi = dev_get_drvdata(dev);
++
++	cqspi_controller_enable(cqspi, 1);
++	return 0;
++}
++
++static const struct dev_pm_ops cqspi__dev_pm_ops = {
++	.suspend = cqspi_suspend,
++	.resume = cqspi_resume,
++};
++
++#define CQSPI_DEV_PM_OPS	(&cqspi__dev_pm_ops)
++#else
++#define CQSPI_DEV_PM_OPS	NULL
++#endif
++
++static struct of_device_id const cqspi_dt_ids[] = {
++	{.compatible = "cdns,qspi-nor",},
++	{ /* end of table */ }
++};
++
++MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
++
++static struct platform_driver cqspi_platform_driver = {
++	.probe = cqspi_probe,
++	.remove = cqspi_remove,
++	.driver = {
++		.name = CQSPI_NAME,
++		.pm = CQSPI_DEV_PM_OPS,
++		.of_match_table = cqspi_dt_ids,
++	},
++};
++
++module_platform_driver(cqspi_platform_driver);
++
++MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:" CQSPI_NAME);
++MODULE_AUTHOR("Ley Foon Tan ");
++MODULE_AUTHOR("Graham Moore ");
+--
+2.8.1
+
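The baud-rate divisor and delay programming done by cqspi_config_baudrate_div() and calculate_ticks_for_ns() in the patch above can be sanity-checked outside the kernel. The following standalone C sketch mirrors that arithmetic; the helper names and the 400 MHz reference clock / 50 MHz flash clock in main() are illustrative assumptions for this example, not values taken from the patch or from any board.

/*
 * Standalone sketch of the baud-rate divisor and delay-tick arithmetic
 * used by cqspi_config_baudrate_div() and calculate_ticks_for_ns() in
 * the patch above.  The clock and timing values in main() are made-up
 * examples.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * Returns the 4-bit field placed at CQSPI_REG_CONFIG_BAUD_LSB; per the
 * driver's math the controller then divides the reference clock by
 * 2 * (field + 1) to produce SCLK.
 */
static unsigned int cqspi_baud_field(unsigned int ref_clk_hz,
				     unsigned int sclk_hz)
{
	unsigned int div = ref_clk_hz / sclk_hz;

	if (div > 32)
		div = 32;

	/* Odd ratios are rounded up to the next even divider. */
	if (div & 1)
		div = div / 2;
	else
		div = div / 2 - 1;

	return div & 0xF;
}

/* Mirror of calculate_ticks_for_ns(): nanoseconds -> reference-clock ticks. */
static unsigned int ticks_for_ns(unsigned int ref_clk_hz, unsigned int ns_val)
{
	unsigned int ticks = ref_clk_hz / 1000;	/* kHz */

	return DIV_ROUND_UP(ticks * ns_val, 1000000);
}

int main(void)
{
	/* Assumed example: 400 MHz QSPI reference clock, 50 MHz flash clock. */
	const unsigned int ref = 400000000, sclk = 50000000;
	const unsigned int field = cqspi_baud_field(ref, sclk);

	printf("baud field %u -> SCLK %u Hz\n", field, ref / (2 * (field + 1)));
	printf("50 ns tshsl -> %u ref ticks\n", ticks_for_ns(ref, 50));
	return 0;
}

Compiled with any C compiler, this prints the divisor field that the driver shifts into CQSPI_REG_CONFIG and the number of reference-clock ticks it would program into CQSPI_REG_DELAY for a 50 ns tshsl value.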