path: root/target/linux/mvebu
author      Felix Fietkau <nbd@nbd.name>    2016-05-21 15:15:51 +0200
committer   Felix Fietkau <nbd@nbd.name>    2016-05-21 15:16:38 +0200
commit      566343246f6558fc2a3a485064c7b8c6f22ec261 (patch)
tree        3653942d836ac84c8c04fc151573c068141c6bc9 /target/linux/mvebu
parent      5166732a4e08bd905f3df958da29e62a0a08cc27 (diff)
download    master-31e0f0ae-566343246f6558fc2a3a485064c7b8c6f22ec261.tar.gz
            master-31e0f0ae-566343246f6558fc2a3a485064c7b8c6f22ec261.tar.bz2
            master-31e0f0ae-566343246f6558fc2a3a485064c7b8c6f22ec261.zip
mvebu: backport an upstream NAND flash driver fix
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'target/linux/mvebu')
-rw-r--r--  target/linux/mvebu/patches-4.4/010-mtd-nand-pxa3xx_nand-add-support-for-partial-chunks.patch  428
1 file changed, 428 insertions, 0 deletions
diff --git a/target/linux/mvebu/patches-4.4/010-mtd-nand-pxa3xx_nand-add-support-for-partial-chunks.patch b/target/linux/mvebu/patches-4.4/010-mtd-nand-pxa3xx_nand-add-support-for-partial-chunks.patch
new file mode 100644
index 0000000000..2e6709781f
--- /dev/null
+++ b/target/linux/mvebu/patches-4.4/010-mtd-nand-pxa3xx_nand-add-support-for-partial-chunks.patch
@@ -0,0 +1,428 @@
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Wed, 10 Feb 2016 14:54:21 +0100
+Subject: [PATCH] mtd: nand: pxa3xx_nand: add support for partial chunks
+
+This commit is needed to properly support the 8-bit ECC configuration
+with 4 KB pages.
+
+When pages larger than 2 KB are used on platforms using the PXA3xx
+NAND controller, the reading/programming operations need to be split
+into chunks of 2 KB or less because the controller FIFO is limited to
+about 2 KB (i.e. a bit more than 2 KB, to accommodate the OOB data).
+Due to this requirement, the data layout on the NAND is a bit unusual,
+with the ECC interleaved with the data at the end of each chunk.
+
+When a 4-bit ECC configuration is used with 4 KB pages, the physical
+data layout on the NAND looks like this:
+
+| 2048 data | 32 spare | 30 ECC | 2048 data | 32 spare | 30 ECC |
+
+So the data chunks all have the same size, 2080 bytes each (2048 bytes
+of data plus 32 bytes of spare), which the driver supports properly.
+
+When an 8-bit ECC configuration is used with 4 KB pages, the physical
+data layout on the NAND looks like this:
+
+| 1024 data | 30 ECC | 1024 data | 30 ECC | 1024 data | 30 ECC | 1024 data | 30 ECC | 64 spare | 30 ECC |
+
+Here the spare area is stored in its own chunk, which has a different
+size from the other chunks. Since the OOB area is not used by UBIFS,
+the initial implementation of the driver chose not to support reading
+this additional "spare" chunk of data.
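+
+As a minimal standalone sketch (not driver code), the 8-bit ECC / 4 KB
+layout above maps onto the new chunk-geometry fields as follows, using
+the values set for this case in pxa_ecc_init() further down in this
+patch:
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        unsigned int nfullchunks     = 4;    /* four full data chunks      */
+        unsigned int ntotalchunks    = 5;    /* plus one partial chunk     */
+        unsigned int chunk_size      = 1024; /* data bytes per full chunk  */
+        unsigned int spare_size      = 0;    /* spare bytes per full chunk */
+        unsigned int last_chunk_size = 0;    /* last chunk carries no data */
+        unsigned int last_spare_size = 64;   /* only the 64-byte spare     */
+
+        unsigned int data  = nfullchunks * chunk_size + last_chunk_size;
+        unsigned int spare = nfullchunks * spare_size + last_spare_size;
+
+        printf("chunks per page: %u (%u full + 1 partial)\n",
+               ntotalchunks, nfullchunks);
+        printf("data per page:   %u bytes\n", data);  /* 4096 = page size */
+        printf("spare per page:  %u bytes\n", spare); /* 64 */
+        return 0;
+    }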
+
+Unfortunately, Marvell has chosen to store the BBT signature in the
+OOB area. Therefore, if the driver doesn't read this spare area, Linux
+has no way of finding the BBT: it assumes there is no BBT and writes a
+new one, which U-Boot does not recognize, causing compatibility
+problems between the bootloader and the kernel in terms of NAND usage.
+
+To fix this, this commit implements support for reading a partial
+last chunk. This support is currently only useful for the case of
+8-bit ECC with 4 KB pages, but in the future it will allow other
+configurations to be enabled, such as 12-bit and 16-bit ECC with 4 KB
+pages, or 8-bit ECC with 8 KB pages, etc. All those configurations
+have a "last" chunk whose size differs from that of the other chunks.
+
+In order to implement reading of the last chunk, this commit:
+
+ - Adds a number of new fields to struct pxa3xx_nand_info describing
+   how many full chunks and how many total chunks there are, and the
+   size of the full chunks and of the partial chunk, both in terms of
+   data area and spare area.
+
+ - Fills in the step_chunk_size and step_spare_size variables to
+   describe how much data and spare should be read/written for the
+   current read/program step (see the sketch after this list).
+
+ - Reworks the state machine to accommodate doing the additional read
+ or program step when a last partial chunk is used.
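+
+A minimal sketch of that per-step selection, using an illustrative
+struct rather than the driver's own pxa3xx_nand_info:
+
+    /* Chunk geometry as introduced by this patch (illustrative copy). */
+    struct chunk_geometry {
+        unsigned int nfullchunks, ntotalchunks;
+        unsigned int chunk_size, spare_size;
+        unsigned int last_chunk_size, last_spare_size;
+    };
+
+    /*
+     * Amount of data and spare to transfer for chunk 'cur_chunk':
+     * full chunks use chunk_size/spare_size, while the trailing
+     * partial chunk (if any) uses the last_* sizes.
+     */
+    static void step_sizes(const struct chunk_geometry *g,
+                           unsigned int cur_chunk,
+                           unsigned int *step_chunk_size,
+                           unsigned int *step_spare_size)
+    {
+        if (cur_chunk < g->nfullchunks) {
+            *step_chunk_size = g->chunk_size;
+            *step_spare_size = g->spare_size;
+        } else {
+            *step_chunk_size = g->last_chunk_size;
+            *step_spare_size = g->last_spare_size;
+        }
+    }
+
+The command loop then advances cur_chunk after each step until it
+reaches ntotalchunks, as can be seen in the nand_cmdfunc_extended()
+hunks below.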
+
+This commit has been tested on a Marvell Armada 398 DB board with a
+4 KB page NAND flash, in both the 4-bit ECC and 8-bit ECC
+configurations. Robert Jarzmik has tested it on some PXA platforms.
+
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Tested-by: Robert Jarzmik <robert.jarzmik@free.fr>
+Acked-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
+Signed-off-by: Brian Norris <computersforpeace@gmail.com>
+---
+
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -228,15 +228,44 @@ struct pxa3xx_nand_info {
+ int use_spare; /* use spare ? */
+ int need_wait;
+
+- unsigned int data_size; /* data to be read from FIFO */
+- unsigned int chunk_size; /* split commands chunk size */
+- unsigned int oob_size;
++ /* Amount of real data per full chunk */
++ unsigned int chunk_size;
++
++ /* Amount of spare data per full chunk */
+ unsigned int spare_size;
++
++ /* Number of full chunks (i.e chunk_size + spare_size) */
++ unsigned int nfullchunks;
++
++ /*
++ * Total number of chunks. If equal to nfullchunks, then there
++ * are only full chunks. Otherwise, there is one last chunk of
++ * size (last_chunk_size + last_spare_size)
++ */
++ unsigned int ntotalchunks;
++
++ /* Amount of real data in the last chunk */
++ unsigned int last_chunk_size;
++
++ /* Amount of spare data in the last chunk */
++ unsigned int last_spare_size;
++
+ unsigned int ecc_size;
+ unsigned int ecc_err_cnt;
+ unsigned int max_bitflips;
+ int retcode;
+
++ /*
++ * Variables only valid during command
++ * execution. step_chunk_size and step_spare_size is the
++ * amount of real data and spare data in the current
++ * chunk. cur_chunk is the current chunk being
++ * read/programmed.
++ */
++ unsigned int step_chunk_size;
++ unsigned int step_spare_size;
++ unsigned int cur_chunk;
++
+ /* cached register value */
+ uint32_t reg_ndcr;
+ uint32_t ndtr0cs0;
+@@ -531,25 +560,6 @@ static int pxa3xx_nand_init(struct pxa3x
+ return 0;
+ }
+
+-/*
+- * Set the data and OOB size, depending on the selected
+- * spare and ECC configuration.
+- * Only applicable to READ0, READOOB and PAGEPROG commands.
+- */
+-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
+- struct mtd_info *mtd)
+-{
+- int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
+-
+- info->data_size = mtd->writesize;
+- if (!oob_enable)
+- return;
+-
+- info->oob_size = info->spare_size;
+- if (!info->use_ecc)
+- info->oob_size += info->ecc_size;
+-}
+-
+ /**
+ * NOTE: it is a must to set ND_RUN firstly, then write
+ * command buffer, otherwise, it does not work.
+@@ -665,28 +675,28 @@ static void drain_fifo(struct pxa3xx_nan
+
+ static void handle_data_pio(struct pxa3xx_nand_info *info)
+ {
+- unsigned int do_bytes = min(info->data_size, info->chunk_size);
+-
+ switch (info->state) {
+ case STATE_PIO_WRITING:
+- writesl(info->mmio_base + NDDB,
+- info->data_buff + info->data_buff_pos,
+- DIV_ROUND_UP(do_bytes, 4));
++ if (info->step_chunk_size)
++ writesl(info->mmio_base + NDDB,
++ info->data_buff + info->data_buff_pos,
++ DIV_ROUND_UP(info->step_chunk_size, 4));
+
+- if (info->oob_size > 0)
++ if (info->step_spare_size)
+ writesl(info->mmio_base + NDDB,
+ info->oob_buff + info->oob_buff_pos,
+- DIV_ROUND_UP(info->oob_size, 4));
++ DIV_ROUND_UP(info->step_spare_size, 4));
+ break;
+ case STATE_PIO_READING:
+- drain_fifo(info,
+- info->data_buff + info->data_buff_pos,
+- DIV_ROUND_UP(do_bytes, 4));
++ if (info->step_chunk_size)
++ drain_fifo(info,
++ info->data_buff + info->data_buff_pos,
++ DIV_ROUND_UP(info->step_chunk_size, 4));
+
+- if (info->oob_size > 0)
++ if (info->step_spare_size)
+ drain_fifo(info,
+ info->oob_buff + info->oob_buff_pos,
+- DIV_ROUND_UP(info->oob_size, 4));
++ DIV_ROUND_UP(info->step_spare_size, 4));
+ break;
+ default:
+ dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
+@@ -695,9 +705,8 @@ static void handle_data_pio(struct pxa3x
+ }
+
+ /* Update buffer pointers for multi-page read/write */
+- info->data_buff_pos += do_bytes;
+- info->oob_buff_pos += info->oob_size;
+- info->data_size -= do_bytes;
++ info->data_buff_pos += info->step_chunk_size;
++ info->oob_buff_pos += info->step_spare_size;
+ }
+
+ static void pxa3xx_nand_data_dma_irq(void *data)
+@@ -738,8 +747,9 @@ static void start_data_dma(struct pxa3xx
+ info->state);
+ BUG();
+ }
+- info->sg.length = info->data_size +
+- (info->oob_size ? info->spare_size + info->ecc_size : 0);
++ info->sg.length = info->chunk_size;
++ if (info->use_spare)
++ info->sg.length += info->spare_size + info->ecc_size;
+ dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
+
+ tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
+@@ -900,9 +910,11 @@ static void prepare_start_command(struct
+ /* reset data and oob column point to handle data */
+ info->buf_start = 0;
+ info->buf_count = 0;
+- info->oob_size = 0;
+ info->data_buff_pos = 0;
+ info->oob_buff_pos = 0;
++ info->step_chunk_size = 0;
++ info->step_spare_size = 0;
++ info->cur_chunk = 0;
+ info->use_ecc = 0;
+ info->use_spare = 1;
+ info->retcode = ERR_NONE;
+@@ -914,8 +926,6 @@ static void prepare_start_command(struct
+ case NAND_CMD_READ0:
+ case NAND_CMD_PAGEPROG:
+ info->use_ecc = 1;
+- case NAND_CMD_READOOB:
+- pxa3xx_set_datasize(info, mtd);
+ break;
+ case NAND_CMD_PARAM:
+ info->use_spare = 0;
+@@ -974,6 +984,14 @@ static int prepare_set_command(struct px
+ if (command == NAND_CMD_READOOB)
+ info->buf_start += mtd->writesize;
+
++ if (info->cur_chunk < info->nfullchunks) {
++ info->step_chunk_size = info->chunk_size;
++ info->step_spare_size = info->spare_size;
++ } else {
++ info->step_chunk_size = info->last_chunk_size;
++ info->step_spare_size = info->last_spare_size;
++ }
++
+ /*
+ * Multiple page read needs an 'extended command type' field,
+ * which is either naked-read or last-read according to the
+@@ -985,8 +1003,8 @@ static int prepare_set_command(struct px
+ info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+- info->ndcb3 = info->chunk_size +
+- info->oob_size;
++ info->ndcb3 = info->step_chunk_size +
++ info->step_spare_size;
+ }
+
+ set_command_address(info, mtd->writesize, column, page_addr);
+@@ -1006,8 +1024,6 @@ static int prepare_set_command(struct px
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | addr_cycle
+ | command;
+- /* No data transfer in this case */
+- info->data_size = 0;
+ exec_cmd = 1;
+ }
+ break;
+@@ -1019,6 +1035,14 @@ static int prepare_set_command(struct px
+ break;
+ }
+
++ if (info->cur_chunk < info->nfullchunks) {
++ info->step_chunk_size = info->chunk_size;
++ info->step_spare_size = info->spare_size;
++ } else {
++ info->step_chunk_size = info->last_chunk_size;
++ info->step_spare_size = info->last_spare_size;
++ }
++
+ /* Second command setting for large pages */
+ if (mtd->writesize > PAGE_CHUNK_SIZE) {
+ /*
+@@ -1029,14 +1053,14 @@ static int prepare_set_command(struct px
+ info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+ | NDCB0_LEN_OVRD
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+- info->ndcb3 = info->chunk_size +
+- info->oob_size;
++ info->ndcb3 = info->step_chunk_size +
++ info->step_spare_size;
+
+ /*
+ * This is the command dispatch that completes a chunked
+ * page program operation.
+ */
+- if (info->data_size == 0) {
++ if (info->cur_chunk == info->ntotalchunks) {
+ info->ndcb0 = NDCB0_CMD_TYPE(0x1)
+ | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+ | command;
+@@ -1063,7 +1087,7 @@ static int prepare_set_command(struct px
+ | command;
+ info->ndcb1 = (column & 0xFF);
+ info->ndcb3 = INIT_BUFFER_SIZE;
+- info->data_size = INIT_BUFFER_SIZE;
++ info->step_chunk_size = INIT_BUFFER_SIZE;
+ break;
+
+ case NAND_CMD_READID:
+@@ -1073,7 +1097,7 @@ static int prepare_set_command(struct px
+ | command;
+ info->ndcb1 = (column & 0xFF);
+
+- info->data_size = 8;
++ info->step_chunk_size = 8;
+ break;
+ case NAND_CMD_STATUS:
+ info->buf_count = 1;
+@@ -1081,7 +1105,7 @@ static int prepare_set_command(struct px
+ | NDCB0_ADDR_CYC(1)
+ | command;
+
+- info->data_size = 8;
++ info->step_chunk_size = 8;
+ break;
+
+ case NAND_CMD_ERASE1:
+@@ -1220,6 +1244,7 @@ static void nand_cmdfunc_extended(struct
+ init_completion(&info->dev_ready);
+ do {
+ info->state = STATE_PREPARED;
++
+ exec_cmd = prepare_set_command(info, command, ext_cmd_type,
+ column, page_addr);
+ if (!exec_cmd) {
+@@ -1239,22 +1264,30 @@ static void nand_cmdfunc_extended(struct
+ break;
+ }
+
++ /* Only a few commands need several steps */
++ if (command != NAND_CMD_PAGEPROG &&
++ command != NAND_CMD_READ0 &&
++ command != NAND_CMD_READOOB)
++ break;
++
++ info->cur_chunk++;
++
+ /* Check if the sequence is complete */
+- if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
++ if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
+ break;
+
+ /*
+ * After a splitted program command sequence has issued
+ * the command dispatch, the command sequence is complete.
+ */
+- if (info->data_size == 0 &&
++ if (info->cur_chunk == (info->ntotalchunks + 1) &&
+ command == NAND_CMD_PAGEPROG &&
+ ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
+ break;
+
+ if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
+ /* Last read: issue a 'last naked read' */
+- if (info->data_size == info->chunk_size)
++ if (info->cur_chunk == info->ntotalchunks - 1)
+ ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
+ else
+ ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+@@ -1264,7 +1297,7 @@ static void nand_cmdfunc_extended(struct
+ * the command dispatch must be issued to complete.
+ */
+ } else if (command == NAND_CMD_PAGEPROG &&
+- info->data_size == 0) {
++ info->cur_chunk == info->ntotalchunks) {
+ ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+ }
+ } while (1);
+@@ -1514,6 +1547,8 @@ static int pxa_ecc_init(struct pxa3xx_na
+ int strength, int ecc_stepsize, int page_size)
+ {
+ if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
++ info->nfullchunks = 1;
++ info->ntotalchunks = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 40;
+ info->ecc_size = 24;
+@@ -1522,6 +1557,8 @@ static int pxa_ecc_init(struct pxa3xx_na
+ ecc->strength = 1;
+
+ } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
++ info->nfullchunks = 1;
++ info->ntotalchunks = 1;
+ info->chunk_size = 512;
+ info->spare_size = 8;
+ info->ecc_size = 8;
+@@ -1535,6 +1572,8 @@ static int pxa_ecc_init(struct pxa3xx_na
+ */
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
+ info->ecc_bch = 1;
++ info->nfullchunks = 1;
++ info->ntotalchunks = 1;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+@@ -1545,6 +1584,8 @@ static int pxa_ecc_init(struct pxa3xx_na
+
+ } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
++ info->nfullchunks = 2;
++ info->ntotalchunks = 2;
+ info->chunk_size = 2048;
+ info->spare_size = 32;
+ info->ecc_size = 32;
+@@ -1559,8 +1600,12 @@ static int pxa_ecc_init(struct pxa3xx_na
+ */
+ } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
+ info->ecc_bch = 1;
++ info->nfullchunks = 4;
++ info->ntotalchunks = 5;
+ info->chunk_size = 1024;
+ info->spare_size = 0;
++ info->last_chunk_size = 0;
++ info->last_spare_size = 64;
+ info->ecc_size = 32;
+ ecc->mode = NAND_ECC_HW;
+ ecc->size = info->chunk_size;