diff options
| field | value | date |
|---|---|---|
| author | fishsoupisgood <github@madingley.org> | 2019-04-29 01:17:54 +0100 |
| committer | fishsoupisgood <github@madingley.org> | 2019-05-27 03:43:43 +0100 |
| commit | 3f2546b2ef55b661fd8dd69682b38992225e86f6 (patch) | |
| tree | 65ca85f13617aee1dce474596800950f266a456c /roms/u-boot/drivers/mtd | |
| download | qemu-master.tar.gz qemu-master.tar.bz2 qemu-master.zip | |
Diffstat (limited to 'roms/u-boot/drivers/mtd')
88 files changed, 45691 insertions, 0 deletions
diff --git a/roms/u-boot/drivers/mtd/Makefile b/roms/u-boot/drivers/mtd/Makefile new file mode 100644 index 00000000..5467a951 --- /dev/null +++ b/roms/u-boot/drivers/mtd/Makefile @@ -0,0 +1,20 @@ +# +# (C) Copyright 2000-2007 +# Wolfgang Denk, DENX Software Engineering, wd@denx.de. +# +# SPDX-License-Identifier:	GPL-2.0+ +# + +ifneq (,$(findstring y,$(CONFIG_MTD_DEVICE)$(CONFIG_CMD_NAND)$(CONFIG_CMD_ONENAND))) +obj-y += mtdcore.o +endif +obj-$(CONFIG_MTD_PARTITIONS) += mtdpart.o +obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o +obj-$(CONFIG_HAS_DATAFLASH) += at45.o +obj-$(CONFIG_FLASH_CFI_DRIVER) += cfi_flash.o +obj-$(CONFIG_FLASH_CFI_MTD) += cfi_mtd.o +obj-$(CONFIG_HAS_DATAFLASH) += dataflash.o +obj-$(CONFIG_FTSMC020) += ftsmc020.o +obj-$(CONFIG_FLASH_CFI_LEGACY) += jedec_flash.o +obj-$(CONFIG_MW_EEPROM) += mw_eeprom.o +obj-$(CONFIG_ST_SMI) += st_smi.o diff --git a/roms/u-boot/drivers/mtd/at45.c b/roms/u-boot/drivers/mtd/at45.c new file mode 100644 index 00000000..2f49be38 --- /dev/null +++ b/roms/u-boot/drivers/mtd/at45.c @@ -0,0 +1,545 @@ +/* Driver for ATMEL DataFlash support + * Author : Hamid Ikdoumi (Atmel) + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <config.h> +#include <common.h> +#include <dataflash.h> + +/* + * spi.c API + */ +extern unsigned int AT91F_SpiWrite(AT91PS_DataflashDesc pDesc); +extern void AT91F_SpiEnable(int cs); + +#define AT91C_TIMEOUT_WRDY			200000 + +/*----------------------------------------------------------------------*/ +/* \fn    AT91F_DataFlashSendCommand					*/ +/* \brief Generic function to send a command to the dataflash		*/ +/*----------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_DataFlashSendCommand(AT91PS_DataFlash pDataFlash, +						 unsigned char OpCode, +						 unsigned int CmdSize, +						 unsigned int DataflashAddress) +{ +	unsigned int adr; + +	if ((pDataFlash->pDataFlashDesc->state) != IDLE) +		return DATAFLASH_BUSY; + +	/* process the address to obtain page 
address and byte address */ +	adr = ((DataflashAddress / (pDataFlash->pDevice->pages_size)) << +		pDataFlash->pDevice->page_offset) + +			(DataflashAddress % (pDataFlash->pDevice->pages_size)); + +	/* fill the command buffer */ +	pDataFlash->pDataFlashDesc->command[0] = OpCode; +	if (pDataFlash->pDevice->pages_number >= 16384) { +		pDataFlash->pDataFlashDesc->command[1] = +			(unsigned char)((adr & 0x0F000000) >> 24); +		pDataFlash->pDataFlashDesc->command[2] = +			(unsigned char)((adr & 0x00FF0000) >> 16); +		pDataFlash->pDataFlashDesc->command[3] = +			(unsigned char)((adr & 0x0000FF00) >> 8); +		pDataFlash->pDataFlashDesc->command[4] = +			(unsigned char)(adr & 0x000000FF); +	} else { +		pDataFlash->pDataFlashDesc->command[1] = +			(unsigned char)((adr & 0x00FF0000) >> 16); +		pDataFlash->pDataFlashDesc->command[2] = +			(unsigned char)((adr & 0x0000FF00) >> 8); +		pDataFlash->pDataFlashDesc->command[3] = +			(unsigned char)(adr & 0x000000FF); +		pDataFlash->pDataFlashDesc->command[4] = 0; +	} +	pDataFlash->pDataFlashDesc->command[5] = 0; +	pDataFlash->pDataFlashDesc->command[6] = 0; +	pDataFlash->pDataFlashDesc->command[7] = 0; + +	/* Initialize the SpiData structure for the spi write fuction */ +	pDataFlash->pDataFlashDesc->tx_cmd_pt = +		pDataFlash->pDataFlashDesc->command; +	pDataFlash->pDataFlashDesc->tx_cmd_size = CmdSize; +	pDataFlash->pDataFlashDesc->rx_cmd_pt = +		pDataFlash->pDataFlashDesc->command; +	pDataFlash->pDataFlashDesc->rx_cmd_size = CmdSize; + +	/* send the command and read the data */ +	return AT91F_SpiWrite(pDataFlash->pDataFlashDesc); +} + +/*----------------------------------------------------------------------*/ +/* \fn    AT91F_DataFlashGetStatus					*/ +/* \brief Read the status register of the dataflash			*/ +/*----------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_DataFlashGetStatus(AT91PS_DataflashDesc pDesc) +{ +	AT91S_DataFlashStatus status; + +	/* if a transfert is in progress ==> 
return 0 */ +	if ((pDesc->state) != IDLE) +		return DATAFLASH_BUSY; + +	/* first send the read status command (D7H) */ +	pDesc->command[0] = DB_STATUS; +	pDesc->command[1] = 0; + +	pDesc->DataFlash_state = GET_STATUS; +	pDesc->tx_data_size = 0;	/* Transmit the command */ +	/* and receive response */ +	pDesc->tx_cmd_pt = pDesc->command; +	pDesc->rx_cmd_pt = pDesc->command; +	pDesc->rx_cmd_size = 2; +	pDesc->tx_cmd_size = 2; +	status = AT91F_SpiWrite(pDesc); + +	pDesc->DataFlash_state = *((unsigned char *)(pDesc->rx_cmd_pt) + 1); + +	return status; +} + +/*----------------------------------------------------------------------*/ +/* \fn    AT91F_DataFlashWaitReady					*/ +/* \brief wait for dataflash ready (bit7 of the status register == 1)	*/ +/*----------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_DataFlashWaitReady(AT91PS_DataflashDesc +						pDataFlashDesc, +						unsigned int timeout) +{ +	pDataFlashDesc->DataFlash_state = IDLE; + +	do { +		AT91F_DataFlashGetStatus(pDataFlashDesc); +		timeout--; +	} while (((pDataFlashDesc->DataFlash_state & 0x80) != 0x80) && +		 (timeout > 0)); + +	if ((pDataFlashDesc->DataFlash_state & 0x80) != 0x80) +		return DATAFLASH_ERROR; + +	return DATAFLASH_OK; +} + +/*--------------------------------------------------------------------------*/ +/* Function Name       : AT91F_DataFlashContinuousRead			    */ +/* Object              : Continuous stream Read			    */ +/* Input Parameters    : DataFlash Service				    */ +/*						: <src> = dataflash address */ +/*                     : <*dataBuffer> = data buffer pointer		    */ +/*                     : <sizeToRead> = data buffer size		    */ +/* Return value		: State of the dataflash			    */ +/*--------------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_DataFlashContinuousRead( +				AT91PS_DataFlash pDataFlash, +				int src, +				unsigned char *dataBuffer, +				int sizeToRead) +{ +	
AT91S_DataFlashStatus status; +	/* Test the size to read in the device */ +	if ((src + sizeToRead) > +			(pDataFlash->pDevice->pages_size * +				(pDataFlash->pDevice->pages_number))) +		return DATAFLASH_MEMORY_OVERFLOW; + +	pDataFlash->pDataFlashDesc->rx_data_pt = dataBuffer; +	pDataFlash->pDataFlashDesc->rx_data_size = sizeToRead; +	pDataFlash->pDataFlashDesc->tx_data_pt = dataBuffer; +	pDataFlash->pDataFlashDesc->tx_data_size = sizeToRead; + +	status = AT91F_DataFlashSendCommand( +			pDataFlash, DB_CONTINUOUS_ARRAY_READ, 8, src); +	/* Send the command to the dataflash */ +	return (status); +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : AT91F_DataFlashPagePgmBuf			     */ +/* Object              : Main memory page program thru buffer 1 or buffer 2  */ +/* Input Parameters    : DataFlash Service				     */ +/*						: <*src> = Source buffer     */ +/*                     : <dest> = dataflash destination address		     */ +/*                     : <SizeToWrite> = data buffer size		     */ +/* Return value		: State of the dataflash			     */ +/*---------------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_DataFlashPagePgmBuf(AT91PS_DataFlash pDataFlash, +						unsigned char *src, +						unsigned int dest, +						unsigned int SizeToWrite) +{ +	int cmdsize; +	pDataFlash->pDataFlashDesc->tx_data_pt = src; +	pDataFlash->pDataFlashDesc->tx_data_size = SizeToWrite; +	pDataFlash->pDataFlashDesc->rx_data_pt = src; +	pDataFlash->pDataFlashDesc->rx_data_size = SizeToWrite; + +	cmdsize = 4; +	/* Send the command to the dataflash */ +	if (pDataFlash->pDevice->pages_number >= 16384) +		cmdsize = 5; +	return (AT91F_DataFlashSendCommand( +			pDataFlash, DB_PAGE_PGM_BUF1, cmdsize, dest)); +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : AT91F_MainMemoryToBufferTransfert		     */ +/* Object              : Read a 
page in the SRAM Buffer 1 or 2		     */ +/* Input Parameters    : DataFlash Service				     */ +/*                     : Page concerned					     */ +/*                     :						     */ +/* Return value		: State of the dataflash			     */ +/*---------------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_MainMemoryToBufferTransfert( +					AT91PS_DataFlash +					pDataFlash, +					unsigned char +					BufferCommand, +					unsigned int page) +{ +	int cmdsize; +	/* Test if the buffer command is legal */ +	if ((BufferCommand != DB_PAGE_2_BUF1_TRF) && +			(BufferCommand != DB_PAGE_2_BUF2_TRF)) { +		return DATAFLASH_BAD_COMMAND; +	} + +	/* no data to transmit or receive */ +	pDataFlash->pDataFlashDesc->tx_data_size = 0; +	cmdsize = 4; +	if (pDataFlash->pDevice->pages_number >= 16384) +		cmdsize = 5; +	return (AT91F_DataFlashSendCommand( +			pDataFlash, BufferCommand, cmdsize, +			page * pDataFlash->pDevice->pages_size)); +} + +/*-------------------------------------------------------------------------- */ +/* Function Name       : AT91F_DataFlashWriteBuffer			     */ +/* Object              : Write data to the internal sram buffer 1 or 2	     */ +/* Input Parameters    : DataFlash Service				     */ +/*			: <BufferCommand> = command to write buffer1 or 2    */ +/*                     : <*dataBuffer> = data buffer to write		     */ +/*                     : <bufferAddress> = address in the internal buffer    */ +/*                     : <SizeToWrite> = data buffer size		     */ +/* Return value		: State of the dataflash			     */ +/*---------------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_DataFlashWriteBuffer( +					AT91PS_DataFlash pDataFlash, +					unsigned char BufferCommand, +					unsigned char *dataBuffer, +					unsigned int bufferAddress, +					int SizeToWrite) +{ +	int cmdsize; +	/* Test if the buffer command is legal */ +	if ((BufferCommand != DB_BUF1_WRITE) && +			
(BufferCommand != DB_BUF2_WRITE)) { +		return DATAFLASH_BAD_COMMAND; +	} + +	/* buffer address must be lower than page size */ +	if (bufferAddress > pDataFlash->pDevice->pages_size) +		return DATAFLASH_BAD_ADDRESS; + +	if ((pDataFlash->pDataFlashDesc->state) != IDLE) +		return DATAFLASH_BUSY; + +	/* Send first Write Command */ +	pDataFlash->pDataFlashDesc->command[0] = BufferCommand; +	pDataFlash->pDataFlashDesc->command[1] = 0; +	if (pDataFlash->pDevice->pages_number >= 16384) { +		pDataFlash->pDataFlashDesc->command[2] = 0; +		pDataFlash->pDataFlashDesc->command[3] = +			(unsigned char)(((unsigned int)(bufferAddress & +							pDataFlash->pDevice-> +							byte_mask)) >> 8); +		pDataFlash->pDataFlashDesc->command[4] = +			(unsigned char)((unsigned int)bufferAddress & 0x00FF); +		cmdsize = 5; +	} else { +		pDataFlash->pDataFlashDesc->command[2] = +			(unsigned char)(((unsigned int)(bufferAddress & +							pDataFlash->pDevice-> +							byte_mask)) >> 8); +		pDataFlash->pDataFlashDesc->command[3] = +			(unsigned char)((unsigned int)bufferAddress & 0x00FF); +		pDataFlash->pDataFlashDesc->command[4] = 0; +		cmdsize = 4; +	} + +	pDataFlash->pDataFlashDesc->tx_cmd_pt = +		pDataFlash->pDataFlashDesc->command; +	pDataFlash->pDataFlashDesc->tx_cmd_size = cmdsize; +	pDataFlash->pDataFlashDesc->rx_cmd_pt = +		pDataFlash->pDataFlashDesc->command; +	pDataFlash->pDataFlashDesc->rx_cmd_size = cmdsize; + +	pDataFlash->pDataFlashDesc->rx_data_pt = dataBuffer; +	pDataFlash->pDataFlashDesc->tx_data_pt = dataBuffer; +	pDataFlash->pDataFlashDesc->rx_data_size = SizeToWrite; +	pDataFlash->pDataFlashDesc->tx_data_size = SizeToWrite; + +	return AT91F_SpiWrite(pDataFlash->pDataFlashDesc); +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : AT91F_PageErase                                     */ +/* Object              : Erase a page					     */ +/* Input Parameters    : DataFlash Service				     */ +/*                     : Page 
concerned					     */ +/*                     :						     */ +/* Return value		: State of the dataflash			     */ +/*---------------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_PageErase( +					AT91PS_DataFlash pDataFlash, +					unsigned int page) +{ +	int cmdsize; +	/* Test if the buffer command is legal */ +	/* no data to transmit or receive */ +	pDataFlash->pDataFlashDesc->tx_data_size = 0; + +	cmdsize = 4; +	if (pDataFlash->pDevice->pages_number >= 16384) +		cmdsize = 5; +	return (AT91F_DataFlashSendCommand(pDataFlash, +				DB_PAGE_ERASE, cmdsize, +				page * pDataFlash->pDevice->pages_size)); +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : AT91F_BlockErase                                    */ +/* Object              : Erase a Block					     */ +/* Input Parameters    : DataFlash Service				     */ +/*                     : Page concerned					     */ +/*                     :						     */ +/* Return value		: State of the dataflash			     */ +/*---------------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_BlockErase( +				AT91PS_DataFlash pDataFlash, +				unsigned int block) +{ +	int cmdsize; +	/* Test if the buffer command is legal */ +	/* no data to transmit or receive */ +	pDataFlash->pDataFlashDesc->tx_data_size = 0; +	cmdsize = 4; +	if (pDataFlash->pDevice->pages_number >= 16384) +		cmdsize = 5; +	return (AT91F_DataFlashSendCommand(pDataFlash, DB_BLOCK_ERASE, cmdsize, +					block * 8 * +					pDataFlash->pDevice->pages_size)); +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : AT91F_WriteBufferToMain			     */ +/* Object              : Write buffer to the main memory		     */ +/* Input Parameters    : DataFlash Service				     */ +/*		: <BufferCommand> = command to send to buffer1 or buffer2    */ +/*                     : <dest> = main memory 
address			     */ +/* Return value		: State of the dataflash			     */ +/*---------------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_WriteBufferToMain(AT91PS_DataFlash pDataFlash, +					unsigned char BufferCommand, +					unsigned int dest) +{ +	int cmdsize; +	/* Test if the buffer command is correct */ +	if ((BufferCommand != DB_BUF1_PAGE_PGM) && +			(BufferCommand != DB_BUF1_PAGE_ERASE_PGM) && +			(BufferCommand != DB_BUF2_PAGE_PGM) && +			(BufferCommand != DB_BUF2_PAGE_ERASE_PGM)) +		return DATAFLASH_BAD_COMMAND; + +	/* no data to transmit or receive */ +	pDataFlash->pDataFlashDesc->tx_data_size = 0; + +	cmdsize = 4; +	if (pDataFlash->pDevice->pages_number >= 16384) +		cmdsize = 5; +	/* Send the command to the dataflash */ +	return (AT91F_DataFlashSendCommand(pDataFlash, BufferCommand, +						cmdsize, dest)); +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : AT91F_PartialPageWrite				     */ +/* Object              : Erase partielly a page				     */ +/* Input Parameters    : <page> = page number				     */ +/*			: <AdrInpage> = adr to begin the fading		     */ +/*                     : <length> = Number of bytes to erase		     */ +/*---------------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_PartialPageWrite(AT91PS_DataFlash pDataFlash, +					unsigned char *src, +					unsigned int dest, +					unsigned int size) +{ +	unsigned int page; +	unsigned int AdrInPage; + +	page = dest / (pDataFlash->pDevice->pages_size); +	AdrInPage = dest % (pDataFlash->pDevice->pages_size); + +	/* Read the contents of the page in the Sram Buffer */ +	AT91F_MainMemoryToBufferTransfert(pDataFlash, DB_PAGE_2_BUF1_TRF, page); +	AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc, +				 AT91C_TIMEOUT_WRDY); +	/*Update the SRAM buffer */ +	AT91F_DataFlashWriteBuffer(pDataFlash, DB_BUF1_WRITE, src, +					AdrInPage, size); + +	
AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc, +					AT91C_TIMEOUT_WRDY); + +	/* Erase page if a 128 Mbits device */ +	if (pDataFlash->pDevice->pages_number >= 16384) { +		AT91F_PageErase(pDataFlash, page); +		/* Rewrite the modified Sram Buffer in the main memory */ +		AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc, +					 AT91C_TIMEOUT_WRDY); +	} + +	/* Rewrite the modified Sram Buffer in the main memory */ +	return (AT91F_WriteBufferToMain(pDataFlash, DB_BUF1_PAGE_ERASE_PGM, +					(page * +					 pDataFlash->pDevice->pages_size))); +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : AT91F_DataFlashWrite				     */ +/* Object              :						     */ +/* Input Parameters    : <*src> = Source buffer				     */ +/*                     : <dest> = dataflash adress			     */ +/*                     : <size> = data buffer size			     */ +/*---------------------------------------------------------------------------*/ +AT91S_DataFlashStatus AT91F_DataFlashWrite(AT91PS_DataFlash pDataFlash, +						unsigned char *src, +						int dest, int size) +{ +	unsigned int length; +	unsigned int page; +	unsigned int status; + +	AT91F_SpiEnable(pDataFlash->pDevice->cs); + +	if ((dest + size) > (pDataFlash->pDevice->pages_size * +			(pDataFlash->pDevice->pages_number))) +		return DATAFLASH_MEMORY_OVERFLOW; + +	/* If destination does not fit a page start address */ +	if ((dest % ((unsigned int)(pDataFlash->pDevice->pages_size))) != 0) { +		length = +			pDataFlash->pDevice->pages_size - +			(dest % ((unsigned int)(pDataFlash->pDevice->pages_size))); + +		if (size < length) +			length = size; + +		if (!AT91F_PartialPageWrite(pDataFlash, src, dest, length)) +			return DATAFLASH_ERROR; + +		AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc, +					 AT91C_TIMEOUT_WRDY); + +		/* Update size, source and destination pointers */ +		size -= length; +		dest += length; +		src += length; +	} + +	while ((size - 
pDataFlash->pDevice->pages_size) >= 0) { +		/* program dataflash page */ +		page = (unsigned int)dest / (pDataFlash->pDevice->pages_size); + +		status = AT91F_DataFlashWriteBuffer(pDataFlash, +					DB_BUF1_WRITE, src, 0, +					pDataFlash->pDevice-> +					pages_size); +		AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc, +					 AT91C_TIMEOUT_WRDY); + +		status = AT91F_PageErase(pDataFlash, page); +		AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc, +					 AT91C_TIMEOUT_WRDY); +		if (!status) +			return DATAFLASH_ERROR; + +		status = AT91F_WriteBufferToMain(pDataFlash, +					 DB_BUF1_PAGE_PGM, dest); +		if (!status) +			return DATAFLASH_ERROR; + +		AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc, +					 AT91C_TIMEOUT_WRDY); + +		/* Update size, source and destination pointers */ +		size -= pDataFlash->pDevice->pages_size; +		dest += pDataFlash->pDevice->pages_size; +		src += pDataFlash->pDevice->pages_size; +	} + +	/* If still some bytes to read */ +	if (size > 0) { +		/* program dataflash page */ +		if (!AT91F_PartialPageWrite(pDataFlash, src, dest, size)) +			return DATAFLASH_ERROR; + +		AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc, +					 AT91C_TIMEOUT_WRDY); +	} +	return DATAFLASH_OK; +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : AT91F_DataFlashRead				     */ +/* Object              : Read a block in dataflash			     */ +/* Input Parameters    :						     */ +/* Return value		:						     */ +/*---------------------------------------------------------------------------*/ +int AT91F_DataFlashRead(AT91PS_DataFlash pDataFlash, +			unsigned long addr, unsigned long size, char *buffer) +{ +	unsigned long SizeToRead; + +	AT91F_SpiEnable(pDataFlash->pDevice->cs); + +	if (AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc, +					AT91C_TIMEOUT_WRDY) != DATAFLASH_OK) +		return -1; + +	while (size) { +		SizeToRead = (size < 0x8000) ? 
size : 0x8000; + +		if (AT91F_DataFlashWaitReady(pDataFlash->pDataFlashDesc, +					AT91C_TIMEOUT_WRDY) != +						DATAFLASH_OK) +			return -1; + +		if (AT91F_DataFlashContinuousRead(pDataFlash, addr, +						(uchar *) buffer, +						SizeToRead) != DATAFLASH_OK) +			return -1; + +		size -= SizeToRead; +		addr += SizeToRead; +		buffer += SizeToRead; +	} + +	return DATAFLASH_OK; +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : AT91F_DataflashProbe				     */ +/* Object              :						     */ +/* Input Parameters    :						     */ +/* Return value	       : Dataflash status register			     */ +/*---------------------------------------------------------------------------*/ +int AT91F_DataflashProbe(int cs, AT91PS_DataflashDesc pDesc) +{ +	AT91F_SpiEnable(cs); +	AT91F_DataFlashGetStatus(pDesc); +	return ((pDesc->command[1] == 0xFF) ? 0 : pDesc->command[1] & 0x3C); +} diff --git a/roms/u-boot/drivers/mtd/cfi_flash.c b/roms/u-boot/drivers/mtd/cfi_flash.c new file mode 100644 index 00000000..a389cd10 --- /dev/null +++ b/roms/u-boot/drivers/mtd/cfi_flash.c @@ -0,0 +1,2418 @@ +/* + * (C) Copyright 2002-2004 + * Brad Kemp, Seranoa Networks, Brad.Kemp@seranoa.com + * + * Copyright (C) 2003 Arabella Software Ltd. + * Yuli Barcohen <yuli@arabellasw.com> + * + * Copyright (C) 2004 + * Ed Okerson + * + * Copyright (C) 2006 + * Tolunay Orkun <listmember@orkun.us> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +/* The DEBUG define must be before common to enable debugging */ +/* #define DEBUG	*/ + +#include <common.h> +#include <asm/processor.h> +#include <asm/io.h> +#include <asm/byteorder.h> +#include <asm/unaligned.h> +#include <environment.h> +#include <mtd/cfi_flash.h> +#include <watchdog.h> + +/* + * This file implements a Common Flash Interface (CFI) driver for + * U-Boot. + * + * The width of the port and the width of the chips are determined at + * initialization.  
These widths are used to calculate the address for + * access CFI data structures. + * + * References + * JEDEC Standard JESD68 - Common Flash Interface (CFI) + * JEDEC Standard JEP137-A Common Flash Interface (CFI) ID Codes + * Intel Application Note 646 Common Flash Interface (CFI) and Command Sets + * Intel 290667-008 3 Volt Intel StrataFlash Memory datasheet + * AMD CFI Specification, Release 2.0 December 1, 2001 + * AMD/Spansion Application Note: Migration from Single-byte to Three-byte + *   Device IDs, Publication Number 25538 Revision A, November 8, 2001 + * + * Define CONFIG_SYS_WRITE_SWAPPED_DATA, if you have to swap the Bytes between + * reading and writing ... (yes there is such a Hardware). + */ + +static uint flash_offset_cfi[2] = { FLASH_OFFSET_CFI, FLASH_OFFSET_CFI_ALT }; +#ifdef CONFIG_FLASH_CFI_MTD +static uint flash_verbose = 1; +#else +#define flash_verbose 1 +#endif + +flash_info_t flash_info[CFI_MAX_FLASH_BANKS];	/* FLASH chips info */ + +/* + * Check if chip width is defined. If not, start detecting with 8bit. + */ +#ifndef CONFIG_SYS_FLASH_CFI_WIDTH +#define CONFIG_SYS_FLASH_CFI_WIDTH	FLASH_CFI_8BIT +#endif + +/* + * 0xffff is an undefined value for the configuration register. When + * this value is returned, the configuration register shall not be + * written at all (default mode). 
+ */ +static u16 cfi_flash_config_reg(int i) +{ +#ifdef CONFIG_SYS_CFI_FLASH_CONFIG_REGS +	return ((u16 [])CONFIG_SYS_CFI_FLASH_CONFIG_REGS)[i]; +#else +	return 0xffff; +#endif +} + +#if defined(CONFIG_SYS_MAX_FLASH_BANKS_DETECT) +int cfi_flash_num_flash_banks = CONFIG_SYS_MAX_FLASH_BANKS_DETECT; +#endif + +static phys_addr_t __cfi_flash_bank_addr(int i) +{ +	return ((phys_addr_t [])CONFIG_SYS_FLASH_BANKS_LIST)[i]; +} +phys_addr_t cfi_flash_bank_addr(int i) +	__attribute__((weak, alias("__cfi_flash_bank_addr"))); + +static unsigned long __cfi_flash_bank_size(int i) +{ +#ifdef CONFIG_SYS_FLASH_BANKS_SIZES +	return ((unsigned long [])CONFIG_SYS_FLASH_BANKS_SIZES)[i]; +#else +	return 0; +#endif +} +unsigned long cfi_flash_bank_size(int i) +	__attribute__((weak, alias("__cfi_flash_bank_size"))); + +static void __flash_write8(u8 value, void *addr) +{ +	__raw_writeb(value, addr); +} + +static void __flash_write16(u16 value, void *addr) +{ +	__raw_writew(value, addr); +} + +static void __flash_write32(u32 value, void *addr) +{ +	__raw_writel(value, addr); +} + +static void __flash_write64(u64 value, void *addr) +{ +	/* No architectures currently implement __raw_writeq() */ +	*(volatile u64 *)addr = value; +} + +static u8 __flash_read8(void *addr) +{ +	return __raw_readb(addr); +} + +static u16 __flash_read16(void *addr) +{ +	return __raw_readw(addr); +} + +static u32 __flash_read32(void *addr) +{ +	return __raw_readl(addr); +} + +static u64 __flash_read64(void *addr) +{ +	/* No architectures currently implement __raw_readq() */ +	return *(volatile u64 *)addr; +} + +#ifdef CONFIG_CFI_FLASH_USE_WEAK_ACCESSORS +void flash_write8(u8 value, void *addr)__attribute__((weak, alias("__flash_write8"))); +void flash_write16(u16 value, void *addr)__attribute__((weak, alias("__flash_write16"))); +void flash_write32(u32 value, void *addr)__attribute__((weak, alias("__flash_write32"))); +void flash_write64(u64 value, void *addr)__attribute__((weak, alias("__flash_write64"))); +u8 
flash_read8(void *addr)__attribute__((weak, alias("__flash_read8"))); +u16 flash_read16(void *addr)__attribute__((weak, alias("__flash_read16"))); +u32 flash_read32(void *addr)__attribute__((weak, alias("__flash_read32"))); +u64 flash_read64(void *addr)__attribute__((weak, alias("__flash_read64"))); +#else +#define flash_write8	__flash_write8 +#define flash_write16	__flash_write16 +#define flash_write32	__flash_write32 +#define flash_write64	__flash_write64 +#define flash_read8	__flash_read8 +#define flash_read16	__flash_read16 +#define flash_read32	__flash_read32 +#define flash_read64	__flash_read64 +#endif + +/*----------------------------------------------------------------------- + */ +#if defined(CONFIG_ENV_IS_IN_FLASH) || defined(CONFIG_ENV_ADDR_REDUND) || (CONFIG_SYS_MONITOR_BASE >= CONFIG_SYS_FLASH_BASE) +flash_info_t *flash_get_info(ulong base) +{ +	int i; +	flash_info_t *info; + +	for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) { +		info = &flash_info[i]; +		if (info->size && info->start[0] <= base && +		    base <= info->start[0] + info->size - 1) +			return info; +	} + +	return NULL; +} +#endif + +unsigned long flash_sector_size(flash_info_t *info, flash_sect_t sect) +{ +	if (sect != (info->sector_count - 1)) +		return info->start[sect + 1] - info->start[sect]; +	else +		return info->start[0] + info->size - info->start[sect]; +} + +/*----------------------------------------------------------------------- + * create an address based on the offset and the port width + */ +static inline void * +flash_map (flash_info_t * info, flash_sect_t sect, uint offset) +{ +	unsigned int byte_offset = offset * info->portwidth; + +	return (void *)(info->start[sect] + byte_offset); +} + +static inline void flash_unmap(flash_info_t *info, flash_sect_t sect, +		unsigned int offset, void *addr) +{ +} + +/*----------------------------------------------------------------------- + * make a proper sized command based on the port and chip widths + */ +static void 
flash_make_cmd(flash_info_t *info, u32 cmd, void *cmdbuf) +{ +	int i; +	int cword_offset; +	int cp_offset; +#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA) +	u32 cmd_le = cpu_to_le32(cmd); +#endif +	uchar val; +	uchar *cp = (uchar *) cmdbuf; + +	for (i = info->portwidth; i > 0; i--){ +		cword_offset = (info->portwidth-i)%info->chipwidth; +#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA) +		cp_offset = info->portwidth - i; +		val = *((uchar*)&cmd_le + cword_offset); +#else +		cp_offset = i - 1; +		val = *((uchar*)&cmd + sizeof(u32) - cword_offset - 1); +#endif +		cp[cp_offset] = (cword_offset >= sizeof(u32)) ? 0x00 : val; +	} +} + +#ifdef DEBUG +/*----------------------------------------------------------------------- + * Debug support + */ +static void print_longlong (char *str, unsigned long long data) +{ +	int i; +	char *cp; + +	cp = (char *) &data; +	for (i = 0; i < 8; i++) +		sprintf (&str[i * 2], "%2.2x", *cp++); +} + +static void flash_printqry (struct cfi_qry *qry) +{ +	u8 *p = (u8 *)qry; +	int x, y; + +	for (x = 0; x < sizeof(struct cfi_qry); x += 16) { +		debug("%02x : ", x); +		for (y = 0; y < 16; y++) +			debug("%2.2x ", p[x + y]); +		debug(" "); +		for (y = 0; y < 16; y++) { +			unsigned char c = p[x + y]; +			if (c >= 0x20 && c <= 0x7e) +				debug("%c", c); +			else +				debug("."); +		} +		debug("\n"); +	} +} +#endif + + +/*----------------------------------------------------------------------- + * read a character at a port width address + */ +static inline uchar flash_read_uchar (flash_info_t * info, uint offset) +{ +	uchar *cp; +	uchar retval; + +	cp = flash_map (info, 0, offset); +#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA) +	retval = flash_read8(cp); +#else +	retval = flash_read8(cp + info->portwidth - 1); +#endif +	flash_unmap (info, 0, offset, cp); +	return retval; +} + +/*----------------------------------------------------------------------- + * read a word at a port 
width address, assume 16bit bus + */ +static inline ushort flash_read_word (flash_info_t * info, uint offset) +{ +	ushort *addr, retval; + +	addr = flash_map (info, 0, offset); +	retval = flash_read16 (addr); +	flash_unmap (info, 0, offset, addr); +	return retval; +} + + +/*----------------------------------------------------------------------- + * read a long word by picking the least significant byte of each maximum + * port size word. Swap for ppc format. + */ +static ulong flash_read_long (flash_info_t * info, flash_sect_t sect, +			      uint offset) +{ +	uchar *addr; +	ulong retval; + +#ifdef DEBUG +	int x; +#endif +	addr = flash_map (info, sect, offset); + +#ifdef DEBUG +	debug ("long addr is at %p info->portwidth = %d\n", addr, +	       info->portwidth); +	for (x = 0; x < 4 * info->portwidth; x++) { +		debug ("addr[%x] = 0x%x\n", x, flash_read8(addr + x)); +	} +#endif +#if defined(__LITTLE_ENDIAN) || defined(CONFIG_SYS_WRITE_SWAPPED_DATA) +	retval = ((flash_read8(addr) << 16) | +		  (flash_read8(addr + info->portwidth) << 24) | +		  (flash_read8(addr + 2 * info->portwidth)) | +		  (flash_read8(addr + 3 * info->portwidth) << 8)); +#else +	retval = ((flash_read8(addr + 2 * info->portwidth - 1) << 24) | +		  (flash_read8(addr + info->portwidth - 1) << 16) | +		  (flash_read8(addr + 4 * info->portwidth - 1) << 8) | +		  (flash_read8(addr + 3 * info->portwidth - 1))); +#endif +	flash_unmap(info, sect, offset, addr); + +	return retval; +} + +/* + * Write a proper sized command to the correct address + */ +void flash_write_cmd (flash_info_t * info, flash_sect_t sect, +		      uint offset, u32 cmd) +{ + +	void *addr; +	cfiword_t cword; + +	addr = flash_map (info, sect, offset); +	flash_make_cmd (info, cmd, &cword); +	switch (info->portwidth) { +	case FLASH_CFI_8BIT: +		debug ("fwc addr %p cmd %x %x 8bit x %d bit\n", addr, cmd, +		       cword.c, info->chipwidth << CFI_FLASH_SHIFT_WIDTH); +		flash_write8(cword.c, addr); +		break; +	case FLASH_CFI_16BIT: +		debug 
("fwc addr %p cmd %x %4.4x 16bit x %d bit\n", addr, +		       cmd, cword.w, +		       info->chipwidth << CFI_FLASH_SHIFT_WIDTH); +		flash_write16(cword.w, addr); +		break; +	case FLASH_CFI_32BIT: +		debug ("fwc addr %p cmd %x %8.8lx 32bit x %d bit\n", addr, +		       cmd, cword.l, +		       info->chipwidth << CFI_FLASH_SHIFT_WIDTH); +		flash_write32(cword.l, addr); +		break; +	case FLASH_CFI_64BIT: +#ifdef DEBUG +		{ +			char str[20]; + +			print_longlong (str, cword.ll); + +			debug ("fwrite addr %p cmd %x %s 64 bit x %d bit\n", +			       addr, cmd, str, +			       info->chipwidth << CFI_FLASH_SHIFT_WIDTH); +		} +#endif +		flash_write64(cword.ll, addr); +		break; +	} + +	/* Ensure all the instructions are fully finished */ +	sync(); + +	flash_unmap(info, sect, offset, addr); +} + +static void flash_unlock_seq (flash_info_t * info, flash_sect_t sect) +{ +	flash_write_cmd (info, sect, info->addr_unlock1, AMD_CMD_UNLOCK_START); +	flash_write_cmd (info, sect, info->addr_unlock2, AMD_CMD_UNLOCK_ACK); +} + +/*----------------------------------------------------------------------- + */ +static int flash_isequal (flash_info_t * info, flash_sect_t sect, +			  uint offset, uchar cmd) +{ +	void *addr; +	cfiword_t cword; +	int retval; + +	addr = flash_map (info, sect, offset); +	flash_make_cmd (info, cmd, &cword); + +	debug ("is= cmd %x(%c) addr %p ", cmd, cmd, addr); +	switch (info->portwidth) { +	case FLASH_CFI_8BIT: +		debug ("is= %x %x\n", flash_read8(addr), cword.c); +		retval = (flash_read8(addr) == cword.c); +		break; +	case FLASH_CFI_16BIT: +		debug ("is= %4.4x %4.4x\n", flash_read16(addr), cword.w); +		retval = (flash_read16(addr) == cword.w); +		break; +	case FLASH_CFI_32BIT: +		debug ("is= %8.8x %8.8lx\n", flash_read32(addr), cword.l); +		retval = (flash_read32(addr) == cword.l); +		break; +	case FLASH_CFI_64BIT: +#ifdef DEBUG +		{ +			char str1[20]; +			char str2[20]; + +			print_longlong (str1, flash_read64(addr)); +			print_longlong (str2, cword.ll); +			debug 
("is= %s %s\n", str1, str2); +		} +#endif +		retval = (flash_read64(addr) == cword.ll); +		break; +	default: +		retval = 0; +		break; +	} +	flash_unmap(info, sect, offset, addr); + +	return retval; +} + +/*----------------------------------------------------------------------- + */ +static int flash_isset (flash_info_t * info, flash_sect_t sect, +			uint offset, uchar cmd) +{ +	void *addr; +	cfiword_t cword; +	int retval; + +	addr = flash_map (info, sect, offset); +	flash_make_cmd (info, cmd, &cword); +	switch (info->portwidth) { +	case FLASH_CFI_8BIT: +		retval = ((flash_read8(addr) & cword.c) == cword.c); +		break; +	case FLASH_CFI_16BIT: +		retval = ((flash_read16(addr) & cword.w) == cword.w); +		break; +	case FLASH_CFI_32BIT: +		retval = ((flash_read32(addr) & cword.l) == cword.l); +		break; +	case FLASH_CFI_64BIT: +		retval = ((flash_read64(addr) & cword.ll) == cword.ll); +		break; +	default: +		retval = 0; +		break; +	} +	flash_unmap(info, sect, offset, addr); + +	return retval; +} + +/*----------------------------------------------------------------------- + */ +static int flash_toggle (flash_info_t * info, flash_sect_t sect, +			 uint offset, uchar cmd) +{ +	void *addr; +	cfiword_t cword; +	int retval; + +	addr = flash_map (info, sect, offset); +	flash_make_cmd (info, cmd, &cword); +	switch (info->portwidth) { +	case FLASH_CFI_8BIT: +		retval = flash_read8(addr) != flash_read8(addr); +		break; +	case FLASH_CFI_16BIT: +		retval = flash_read16(addr) != flash_read16(addr); +		break; +	case FLASH_CFI_32BIT: +		retval = flash_read32(addr) != flash_read32(addr); +		break; +	case FLASH_CFI_64BIT: +		retval = ( (flash_read32( addr ) != flash_read32( addr )) || +			   (flash_read32(addr+4) != flash_read32(addr+4)) ); +		break; +	default: +		retval = 0; +		break; +	} +	flash_unmap(info, sect, offset, addr); + +	return retval; +} + +/* + * flash_is_busy - check to see if the flash is busy + * + * This routine checks the status of the chip and returns true if the + * 
chip is busy. + */ +static int flash_is_busy (flash_info_t * info, flash_sect_t sect) +{ +	int retval; + +	switch (info->vendor) { +	case CFI_CMDSET_INTEL_PROG_REGIONS: +	case CFI_CMDSET_INTEL_STANDARD: +	case CFI_CMDSET_INTEL_EXTENDED: +		retval = !flash_isset (info, sect, 0, FLASH_STATUS_DONE); +		break; +	case CFI_CMDSET_AMD_STANDARD: +	case CFI_CMDSET_AMD_EXTENDED: +#ifdef CONFIG_FLASH_CFI_LEGACY +	case CFI_CMDSET_AMD_LEGACY: +#endif +		retval = flash_toggle (info, sect, 0, AMD_STATUS_TOGGLE); +		break; +	default: +		retval = 0; +	} +	debug ("flash_is_busy: %d\n", retval); +	return retval; +} + +/*----------------------------------------------------------------------- + *  wait for XSR.7 to be set. Time out with an error if it does not. + *  This routine does not set the flash to read-array mode. + */ +static int flash_status_check (flash_info_t * info, flash_sect_t sector, +			       ulong tout, char *prompt) +{ +	ulong start; + +#if CONFIG_SYS_HZ != 1000 +	if ((ulong)CONFIG_SYS_HZ > 100000) +		tout *= (ulong)CONFIG_SYS_HZ / 1000;  /* for a big HZ, avoid overflow */ +	else +		tout = DIV_ROUND_UP(tout * (ulong)CONFIG_SYS_HZ, 1000); +#endif + +	/* Wait for command completion */ +#ifdef CONFIG_SYS_LOW_RES_TIMER +	reset_timer(); +#endif +	start = get_timer (0); +	WATCHDOG_RESET(); +	while (flash_is_busy (info, sector)) { +		if (get_timer (start) > tout) { +			printf ("Flash %s timeout at address %lx data %lx\n", +				prompt, info->start[sector], +				flash_read_long (info, sector, 0)); +			flash_write_cmd (info, sector, 0, info->cmd_reset); +			udelay(1); +			return ERR_TIMOUT; +		} +		udelay (1);		/* also triggers watchdog */ +	} +	return ERR_OK; +} + +/*----------------------------------------------------------------------- + * Wait for XSR.7 to be set, if it times out print an error, otherwise + * do a full status check. + * + * This routine sets the flash to read-array mode. 
+ */ +static int flash_full_status_check (flash_info_t * info, flash_sect_t sector, +				    ulong tout, char *prompt) +{ +	int retcode; + +	retcode = flash_status_check (info, sector, tout, prompt); +	switch (info->vendor) { +	case CFI_CMDSET_INTEL_PROG_REGIONS: +	case CFI_CMDSET_INTEL_EXTENDED: +	case CFI_CMDSET_INTEL_STANDARD: +		if ((retcode != ERR_OK) +		    && !flash_isequal (info, sector, 0, FLASH_STATUS_DONE)) { +			retcode = ERR_INVAL; +			printf ("Flash %s error at address %lx\n", prompt, +				info->start[sector]); +			if (flash_isset (info, sector, 0, FLASH_STATUS_ECLBS | +					 FLASH_STATUS_PSLBS)) { +				puts ("Command Sequence Error.\n"); +			} else if (flash_isset (info, sector, 0, +						FLASH_STATUS_ECLBS)) { +				puts ("Block Erase Error.\n"); +				retcode = ERR_NOT_ERASED; +			} else if (flash_isset (info, sector, 0, +						FLASH_STATUS_PSLBS)) { +				puts ("Locking Error\n"); +			} +			if (flash_isset (info, sector, 0, FLASH_STATUS_DPS)) { +				puts ("Block locked.\n"); +				retcode = ERR_PROTECTED; +			} +			if (flash_isset (info, sector, 0, FLASH_STATUS_VPENS)) +				puts ("Vpp Low Error.\n"); +		} +		flash_write_cmd (info, sector, 0, info->cmd_reset); +		udelay(1); +		break; +	default: +		break; +	} +	return retcode; +} + +static int use_flash_status_poll(flash_info_t *info) +{ +#ifdef CONFIG_SYS_CFI_FLASH_STATUS_POLL +	if (info->vendor == CFI_CMDSET_AMD_EXTENDED || +	    info->vendor == CFI_CMDSET_AMD_STANDARD) +		return 1; +#endif +	return 0; +} + +static int flash_status_poll(flash_info_t *info, void *src, void *dst, +			     ulong tout, char *prompt) +{ +#ifdef CONFIG_SYS_CFI_FLASH_STATUS_POLL +	ulong start; +	int ready; + +#if CONFIG_SYS_HZ != 1000 +	if ((ulong)CONFIG_SYS_HZ > 100000) +		tout *= (ulong)CONFIG_SYS_HZ / 1000;  /* for a big HZ, avoid overflow */ +	else +		tout = DIV_ROUND_UP(tout * (ulong)CONFIG_SYS_HZ, 1000); +#endif + +	/* Wait for command completion */ +#ifdef CONFIG_SYS_LOW_RES_TIMER +	reset_timer(); +#endif +	start = 
get_timer(0); +	WATCHDOG_RESET(); +	while (1) { +		switch (info->portwidth) { +		case FLASH_CFI_8BIT: +			ready = flash_read8(dst) == flash_read8(src); +			break; +		case FLASH_CFI_16BIT: +			ready = flash_read16(dst) == flash_read16(src); +			break; +		case FLASH_CFI_32BIT: +			ready = flash_read32(dst) == flash_read32(src); +			break; +		case FLASH_CFI_64BIT: +			ready = flash_read64(dst) == flash_read64(src); +			break; +		default: +			ready = 0; +			break; +		} +		if (ready) +			break; +		if (get_timer(start) > tout) { +			printf("Flash %s timeout at address %lx data %lx\n", +			       prompt, (ulong)dst, (ulong)flash_read8(dst)); +			return ERR_TIMOUT; +		} +		udelay(1);		/* also triggers watchdog */ +	} +#endif /* CONFIG_SYS_CFI_FLASH_STATUS_POLL */ +	return ERR_OK; +} + +/*----------------------------------------------------------------------- + */ +static void flash_add_byte (flash_info_t * info, cfiword_t * cword, uchar c) +{ +#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA) +	unsigned short	w; +	unsigned int	l; +	unsigned long long ll; +#endif + +	switch (info->portwidth) { +	case FLASH_CFI_8BIT: +		cword->c = c; +		break; +	case FLASH_CFI_16BIT: +#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA) +		w = c; +		w <<= 8; +		cword->w = (cword->w >> 8) | w; +#else +		cword->w = (cword->w << 8) | c; +#endif +		break; +	case FLASH_CFI_32BIT: +#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA) +		l = c; +		l <<= 24; +		cword->l = (cword->l >> 8) | l; +#else +		cword->l = (cword->l << 8) | c; +#endif +		break; +	case FLASH_CFI_64BIT: +#if defined(__LITTLE_ENDIAN) && !defined(CONFIG_SYS_WRITE_SWAPPED_DATA) +		ll = c; +		ll <<= 56; +		cword->ll = (cword->ll >> 8) | ll; +#else +		cword->ll = (cword->ll << 8) | c; +#endif +		break; +	} +} + +/* + * Loop through the sector table starting from the previously found sector. + * Searches forwards or backwards, dependent on the passed address. 
+ */ +static flash_sect_t find_sector (flash_info_t * info, ulong addr) +{ +	static flash_sect_t saved_sector; /* previously found sector */ +	static flash_info_t *saved_info; /* previously used flash bank */ +	flash_sect_t sector = saved_sector; + +	if ((info != saved_info) || (sector >= info->sector_count)) +		sector = 0; + +	while ((info->start[sector] < addr) +			&& (sector < info->sector_count - 1)) +		sector++; +	while ((info->start[sector] > addr) && (sector > 0)) +		/* +		 * also decrements the sector in case of an overshot +		 * in the first loop +		 */ +		sector--; + +	saved_sector = sector; +	saved_info = info; +	return sector; +} + +/*----------------------------------------------------------------------- + */ +static int flash_write_cfiword (flash_info_t * info, ulong dest, +				cfiword_t cword) +{ +	void *dstaddr = (void *)dest; +	int flag; +	flash_sect_t sect = 0; +	char sect_found = 0; + +	/* Check if Flash is (sufficiently) erased */ +	switch (info->portwidth) { +	case FLASH_CFI_8BIT: +		flag = ((flash_read8(dstaddr) & cword.c) == cword.c); +		break; +	case FLASH_CFI_16BIT: +		flag = ((flash_read16(dstaddr) & cword.w) == cword.w); +		break; +	case FLASH_CFI_32BIT: +		flag = ((flash_read32(dstaddr) & cword.l) == cword.l); +		break; +	case FLASH_CFI_64BIT: +		flag = ((flash_read64(dstaddr) & cword.ll) == cword.ll); +		break; +	default: +		flag = 0; +		break; +	} +	if (!flag) +		return ERR_NOT_ERASED; + +	/* Disable interrupts which might cause a timeout here */ +	flag = disable_interrupts (); + +	switch (info->vendor) { +	case CFI_CMDSET_INTEL_PROG_REGIONS: +	case CFI_CMDSET_INTEL_EXTENDED: +	case CFI_CMDSET_INTEL_STANDARD: +		flash_write_cmd (info, 0, 0, FLASH_CMD_CLEAR_STATUS); +		flash_write_cmd (info, 0, 0, FLASH_CMD_WRITE); +		break; +	case CFI_CMDSET_AMD_EXTENDED: +	case CFI_CMDSET_AMD_STANDARD: +		sect = find_sector(info, dest); +		flash_unlock_seq (info, sect); +		flash_write_cmd (info, sect, info->addr_unlock1, AMD_CMD_WRITE); +		sect_found 
= 1; +		break; +#ifdef CONFIG_FLASH_CFI_LEGACY +	case CFI_CMDSET_AMD_LEGACY: +		sect = find_sector(info, dest); +		flash_unlock_seq (info, 0); +		flash_write_cmd (info, 0, info->addr_unlock1, AMD_CMD_WRITE); +		sect_found = 1; +		break; +#endif +	} + +	switch (info->portwidth) { +	case FLASH_CFI_8BIT: +		flash_write8(cword.c, dstaddr); +		break; +	case FLASH_CFI_16BIT: +		flash_write16(cword.w, dstaddr); +		break; +	case FLASH_CFI_32BIT: +		flash_write32(cword.l, dstaddr); +		break; +	case FLASH_CFI_64BIT: +		flash_write64(cword.ll, dstaddr); +		break; +	} + +	/* re-enable interrupts if necessary */ +	if (flag) +		enable_interrupts (); + +	if (!sect_found) +		sect = find_sector (info, dest); + +	if (use_flash_status_poll(info)) +		return flash_status_poll(info, &cword, dstaddr, +					 info->write_tout, "write"); +	else +		return flash_full_status_check(info, sect, +					       info->write_tout, "write"); +} + +#ifdef CONFIG_SYS_FLASH_USE_BUFFER_WRITE + +static int flash_write_cfibuffer (flash_info_t * info, ulong dest, uchar * cp, +				  int len) +{ +	flash_sect_t sector; +	int cnt; +	int retcode; +	void *src = cp; +	void *dst = (void *)dest; +	void *dst2 = dst; +	int flag = 1; +	uint offset = 0; +	unsigned int shift; +	uchar write_cmd; + +	switch (info->portwidth) { +	case FLASH_CFI_8BIT: +		shift = 0; +		break; +	case FLASH_CFI_16BIT: +		shift = 1; +		break; +	case FLASH_CFI_32BIT: +		shift = 2; +		break; +	case FLASH_CFI_64BIT: +		shift = 3; +		break; +	default: +		retcode = ERR_INVAL; +		goto out_unmap; +	} + +	cnt = len >> shift; + +	while ((cnt-- > 0) && (flag == 1)) { +		switch (info->portwidth) { +		case FLASH_CFI_8BIT: +			flag = ((flash_read8(dst2) & flash_read8(src)) == +				flash_read8(src)); +			src += 1, dst2 += 1; +			break; +		case FLASH_CFI_16BIT: +			flag = ((flash_read16(dst2) & flash_read16(src)) == +				flash_read16(src)); +			src += 2, dst2 += 2; +			break; +		case FLASH_CFI_32BIT: +			flag = ((flash_read32(dst2) & flash_read32(src)) == +				
flash_read32(src)); +			src += 4, dst2 += 4; +			break; +		case FLASH_CFI_64BIT: +			flag = ((flash_read64(dst2) & flash_read64(src)) == +				flash_read64(src)); +			src += 8, dst2 += 8; +			break; +		} +	} +	if (!flag) { +		retcode = ERR_NOT_ERASED; +		goto out_unmap; +	} + +	src = cp; +	sector = find_sector (info, dest); + +	switch (info->vendor) { +	case CFI_CMDSET_INTEL_PROG_REGIONS: +	case CFI_CMDSET_INTEL_STANDARD: +	case CFI_CMDSET_INTEL_EXTENDED: +		write_cmd = (info->vendor == CFI_CMDSET_INTEL_PROG_REGIONS) ? +					FLASH_CMD_WRITE_BUFFER_PROG : FLASH_CMD_WRITE_TO_BUFFER; +		flash_write_cmd (info, sector, 0, FLASH_CMD_CLEAR_STATUS); +		flash_write_cmd (info, sector, 0, FLASH_CMD_READ_STATUS); +		flash_write_cmd (info, sector, 0, write_cmd); +		retcode = flash_status_check (info, sector, +					      info->buffer_write_tout, +					      "write to buffer"); +		if (retcode == ERR_OK) { +			/* reduce the number of loops by the width of +			 * the port */ +			cnt = len >> shift; +			flash_write_cmd (info, sector, 0, cnt - 1); +			while (cnt-- > 0) { +				switch (info->portwidth) { +				case FLASH_CFI_8BIT: +					flash_write8(flash_read8(src), dst); +					src += 1, dst += 1; +					break; +				case FLASH_CFI_16BIT: +					flash_write16(flash_read16(src), dst); +					src += 2, dst += 2; +					break; +				case FLASH_CFI_32BIT: +					flash_write32(flash_read32(src), dst); +					src += 4, dst += 4; +					break; +				case FLASH_CFI_64BIT: +					flash_write64(flash_read64(src), dst); +					src += 8, dst += 8; +					break; +				default: +					retcode = ERR_INVAL; +					goto out_unmap; +				} +			} +			flash_write_cmd (info, sector, 0, +					 FLASH_CMD_WRITE_BUFFER_CONFIRM); +			retcode = flash_full_status_check ( +				info, sector, info->buffer_write_tout, +				"buffer write"); +		} + +		break; + +	case CFI_CMDSET_AMD_STANDARD: +	case CFI_CMDSET_AMD_EXTENDED: +		flash_unlock_seq(info,0); + +#ifdef CONFIG_FLASH_SPANSION_S29WS_N +		offset = ((unsigned long)dst - 
info->start[sector]) >> shift; +#endif +		flash_write_cmd(info, sector, offset, AMD_CMD_WRITE_TO_BUFFER); +		cnt = len >> shift; +		flash_write_cmd(info, sector, offset, cnt - 1); + +		switch (info->portwidth) { +		case FLASH_CFI_8BIT: +			while (cnt-- > 0) { +				flash_write8(flash_read8(src), dst); +				src += 1, dst += 1; +			} +			break; +		case FLASH_CFI_16BIT: +			while (cnt-- > 0) { +				flash_write16(flash_read16(src), dst); +				src += 2, dst += 2; +			} +			break; +		case FLASH_CFI_32BIT: +			while (cnt-- > 0) { +				flash_write32(flash_read32(src), dst); +				src += 4, dst += 4; +			} +			break; +		case FLASH_CFI_64BIT: +			while (cnt-- > 0) { +				flash_write64(flash_read64(src), dst); +				src += 8, dst += 8; +			} +			break; +		default: +			retcode = ERR_INVAL; +			goto out_unmap; +		} + +		flash_write_cmd (info, sector, 0, AMD_CMD_WRITE_BUFFER_CONFIRM); +		if (use_flash_status_poll(info)) +			retcode = flash_status_poll(info, src - (1 << shift), +						    dst - (1 << shift), +						    info->buffer_write_tout, +						    "buffer write"); +		else +			retcode = flash_full_status_check(info, sector, +							  info->buffer_write_tout, +							  "buffer write"); +		break; + +	default: +		debug ("Unknown Command Set\n"); +		retcode = ERR_INVAL; +		break; +	} + +out_unmap: +	return retcode; +} +#endif /* CONFIG_SYS_FLASH_USE_BUFFER_WRITE */ + + +/*----------------------------------------------------------------------- + */ +int flash_erase (flash_info_t * info, int s_first, int s_last) +{ +	int rcode = 0; +	int prot; +	flash_sect_t sect; +	int st; + +	if (info->flash_id != FLASH_MAN_CFI) { +		puts ("Can't erase unknown flash type - aborted\n"); +		return 1; +	} +	if ((s_first < 0) || (s_first > s_last)) { +		puts ("- no sectors to erase\n"); +		return 1; +	} + +	prot = 0; +	for (sect = s_first; sect <= s_last; ++sect) { +		if (info->protect[sect]) { +			prot++; +		} +	} +	if (prot) { +		printf ("- Warning: %d protected sectors will not be erased!\n", +			
prot); +	} else if (flash_verbose) { +		putc ('\n'); +	} + + +	for (sect = s_first; sect <= s_last; sect++) { +		if (ctrlc()) { +			printf("\n"); +			return 1; +		} + +		if (info->protect[sect] == 0) { /* not protected */ +#ifdef CONFIG_SYS_FLASH_CHECK_BLANK_BEFORE_ERASE +			int k; +			int size; +			int erased; +			u32 *flash; + +			/* +			 * Check if whole sector is erased +			 */ +			size = flash_sector_size(info, sect); +			erased = 1; +			flash = (u32 *)info->start[sect]; +			/* divide by 4 for longword access */ +			size = size >> 2; +			for (k = 0; k < size; k++) { +				if (flash_read32(flash++) != 0xffffffff) { +					erased = 0; +					break; +				} +			} +			if (erased) { +				if (flash_verbose) +					putc(','); +				continue; +			} +#endif +			switch (info->vendor) { +			case CFI_CMDSET_INTEL_PROG_REGIONS: +			case CFI_CMDSET_INTEL_STANDARD: +			case CFI_CMDSET_INTEL_EXTENDED: +				flash_write_cmd (info, sect, 0, +						 FLASH_CMD_CLEAR_STATUS); +				flash_write_cmd (info, sect, 0, +						 FLASH_CMD_BLOCK_ERASE); +				flash_write_cmd (info, sect, 0, +						 FLASH_CMD_ERASE_CONFIRM); +				break; +			case CFI_CMDSET_AMD_STANDARD: +			case CFI_CMDSET_AMD_EXTENDED: +				flash_unlock_seq (info, sect); +				flash_write_cmd (info, sect, +						info->addr_unlock1, +						AMD_CMD_ERASE_START); +				flash_unlock_seq (info, sect); +				flash_write_cmd (info, sect, 0, +						 info->cmd_erase_sector); +				break; +#ifdef CONFIG_FLASH_CFI_LEGACY +			case CFI_CMDSET_AMD_LEGACY: +				flash_unlock_seq (info, 0); +				flash_write_cmd (info, 0, info->addr_unlock1, +						AMD_CMD_ERASE_START); +				flash_unlock_seq (info, 0); +				flash_write_cmd (info, sect, 0, +						AMD_CMD_ERASE_SECTOR); +				break; +#endif +			default: +				debug ("Unkown flash vendor %d\n", +				       info->vendor); +				break; +			} + +			if (use_flash_status_poll(info)) { +				cfiword_t cword; +				void *dest; +				cword.ll = 0xffffffffffffffffULL; +				dest = flash_map(info, sect, 0); +				st = 
flash_status_poll(info, &cword, dest, +						       info->erase_blk_tout, "erase"); +				flash_unmap(info, sect, 0, dest); +			} else +				st = flash_full_status_check(info, sect, +							     info->erase_blk_tout, +							     "erase"); +			if (st) +				rcode = 1; +			else if (flash_verbose) +				putc ('.'); +		} +	} + +	if (flash_verbose) +		puts (" done\n"); + +	return rcode; +} + +#ifdef CONFIG_SYS_FLASH_EMPTY_INFO +static int sector_erased(flash_info_t *info, int i) +{ +	int k; +	int size; +	u32 *flash; + +	/* +	 * Check if whole sector is erased +	 */ +	size = flash_sector_size(info, i); +	flash = (u32 *)info->start[i]; +	/* divide by 4 for longword access */ +	size = size >> 2; + +	for (k = 0; k < size; k++) { +		if (flash_read32(flash++) != 0xffffffff) +			return 0;	/* not erased */ +	} + +	return 1;			/* erased */ +} +#endif /* CONFIG_SYS_FLASH_EMPTY_INFO */ + +void flash_print_info (flash_info_t * info) +{ +	int i; + +	if (info->flash_id != FLASH_MAN_CFI) { +		puts ("missing or unknown FLASH type\n"); +		return; +	} + +	printf ("%s flash (%d x %d)", +		info->name, +		(info->portwidth << 3), (info->chipwidth << 3)); +	if (info->size < 1024*1024) +		printf ("  Size: %ld kB in %d Sectors\n", +			info->size >> 10, info->sector_count); +	else +		printf ("  Size: %ld MB in %d Sectors\n", +			info->size >> 20, info->sector_count); +	printf ("  "); +	switch (info->vendor) { +		case CFI_CMDSET_INTEL_PROG_REGIONS: +			printf ("Intel Prog Regions"); +			break; +		case CFI_CMDSET_INTEL_STANDARD: +			printf ("Intel Standard"); +			break; +		case CFI_CMDSET_INTEL_EXTENDED: +			printf ("Intel Extended"); +			break; +		case CFI_CMDSET_AMD_STANDARD: +			printf ("AMD Standard"); +			break; +		case CFI_CMDSET_AMD_EXTENDED: +			printf ("AMD Extended"); +			break; +#ifdef CONFIG_FLASH_CFI_LEGACY +		case CFI_CMDSET_AMD_LEGACY: +			printf ("AMD Legacy"); +			break; +#endif +		default: +			printf ("Unknown (%d)", info->vendor); +			break; +	} +	printf (" command set, 
Manufacturer ID: 0x%02X, Device ID: 0x", +		info->manufacturer_id); +	printf (info->chipwidth == FLASH_CFI_16BIT ? "%04X" : "%02X", +		info->device_id); +	if ((info->device_id & 0xff) == 0x7E) { +		printf(info->chipwidth == FLASH_CFI_16BIT ? "%04X" : "%02X", +		info->device_id2); +	} +	if ((info->vendor == CFI_CMDSET_AMD_STANDARD) && (info->legacy_unlock)) +		printf("\n  Advanced Sector Protection (PPB) enabled"); +	printf ("\n  Erase timeout: %ld ms, write timeout: %ld ms\n", +		info->erase_blk_tout, +		info->write_tout); +	if (info->buffer_size > 1) { +		printf ("  Buffer write timeout: %ld ms, " +			"buffer size: %d bytes\n", +		info->buffer_write_tout, +		info->buffer_size); +	} + +	puts ("\n  Sector Start Addresses:"); +	for (i = 0; i < info->sector_count; ++i) { +		if (ctrlc()) +			break; +		if ((i % 5) == 0) +			putc('\n'); +#ifdef CONFIG_SYS_FLASH_EMPTY_INFO +		/* print empty and read-only info */ +		printf ("  %08lX %c %s ", +			info->start[i], +			sector_erased(info, i) ? 'E' : ' ', +			info->protect[i] ? "RO" : "  "); +#else	/* ! CONFIG_SYS_FLASH_EMPTY_INFO */ +		printf ("  %08lX   %s ", +			info->start[i], +			info->protect[i] ? "RO" : "  "); +#endif +	} +	putc ('\n'); +	return; +} + +/*----------------------------------------------------------------------- + * This is used in a few places in write_buf() to show programming + * progress.  Making it a function is nasty because it needs to do side + * effect updates to digit and dots.  Repeated code is nasty too, so + * we define it once here. 
+ */ +#ifdef CONFIG_FLASH_SHOW_PROGRESS +#define FLASH_SHOW_PROGRESS(scale, dots, digit, dots_sub) \ +	if (flash_verbose) { \ +		dots -= dots_sub; \ +		if ((scale > 0) && (dots <= 0)) { \ +			if ((digit % 5) == 0) \ +				printf ("%d", digit / 5); \ +			else \ +				putc ('.'); \ +			digit--; \ +			dots += scale; \ +		} \ +	} +#else +#define FLASH_SHOW_PROGRESS(scale, dots, digit, dots_sub) +#endif + +/*----------------------------------------------------------------------- + * Copy memory to flash, returns: + * 0 - OK + * 1 - write timeout + * 2 - Flash not erased + */ +int write_buff (flash_info_t * info, uchar * src, ulong addr, ulong cnt) +{ +	ulong wp; +	uchar *p; +	int aln; +	cfiword_t cword; +	int i, rc; +#ifdef CONFIG_SYS_FLASH_USE_BUFFER_WRITE +	int buffered_size; +#endif +#ifdef CONFIG_FLASH_SHOW_PROGRESS +	int digit = CONFIG_FLASH_SHOW_PROGRESS; +	int scale = 0; +	int dots  = 0; + +	/* +	 * Suppress if there are fewer than CONFIG_FLASH_SHOW_PROGRESS writes. +	 */ +	if (cnt >= CONFIG_FLASH_SHOW_PROGRESS) { +		scale = (int)((cnt + CONFIG_FLASH_SHOW_PROGRESS - 1) / +			CONFIG_FLASH_SHOW_PROGRESS); +	} +#endif + +	/* get lower aligned address */ +	wp = (addr & ~(info->portwidth - 1)); + +	/* handle unaligned start */ +	if ((aln = addr - wp) != 0) { +		cword.l = 0; +		p = (uchar *)wp; +		for (i = 0; i < aln; ++i) +			flash_add_byte (info, &cword, flash_read8(p + i)); + +		for (; (i < info->portwidth) && (cnt > 0); i++) { +			flash_add_byte (info, &cword, *src++); +			cnt--; +		} +		for (; (cnt == 0) && (i < info->portwidth); ++i) +			flash_add_byte (info, &cword, flash_read8(p + i)); + +		rc = flash_write_cfiword (info, wp, cword); +		if (rc != 0) +			return rc; + +		wp += i; +		FLASH_SHOW_PROGRESS(scale, dots, digit, i); +	} + +	/* handle the aligned part */ +#ifdef CONFIG_SYS_FLASH_USE_BUFFER_WRITE +	buffered_size = (info->portwidth / info->chipwidth); +	buffered_size *= info->buffer_size; +	while (cnt >= info->portwidth) { +		/* prohibit buffer write when 
buffer_size is 1 */ +		if (info->buffer_size == 1) { +			cword.l = 0; +			for (i = 0; i < info->portwidth; i++) +				flash_add_byte (info, &cword, *src++); +			if ((rc = flash_write_cfiword (info, wp, cword)) != 0) +				return rc; +			wp += info->portwidth; +			cnt -= info->portwidth; +			continue; +		} + +		/* write buffer until next buffered_size aligned boundary */ +		i = buffered_size - (wp % buffered_size); +		if (i > cnt) +			i = cnt; +		if ((rc = flash_write_cfibuffer (info, wp, src, i)) != ERR_OK) +			return rc; +		i -= i & (info->portwidth - 1); +		wp += i; +		src += i; +		cnt -= i; +		FLASH_SHOW_PROGRESS(scale, dots, digit, i); +		/* Only check every once in a while */ +		if ((cnt & 0xFFFF) < buffered_size && ctrlc()) +			return ERR_ABORTED; +	} +#else +	while (cnt >= info->portwidth) { +		cword.l = 0; +		for (i = 0; i < info->portwidth; i++) { +			flash_add_byte (info, &cword, *src++); +		} +		if ((rc = flash_write_cfiword (info, wp, cword)) != 0) +			return rc; +		wp += info->portwidth; +		cnt -= info->portwidth; +		FLASH_SHOW_PROGRESS(scale, dots, digit, info->portwidth); +		/* Only check every once in a while */ +		if ((cnt & 0xFFFF) < info->portwidth && ctrlc()) +			return ERR_ABORTED; +	} +#endif /* CONFIG_SYS_FLASH_USE_BUFFER_WRITE */ + +	if (cnt == 0) { +		return (0); +	} + +	/* +	 * handle unaligned tail bytes +	 */ +	cword.l = 0; +	p = (uchar *)wp; +	for (i = 0; (i < info->portwidth) && (cnt > 0); ++i) { +		flash_add_byte (info, &cword, *src++); +		--cnt; +	} +	for (; i < info->portwidth; ++i) +		flash_add_byte (info, &cword, flash_read8(p + i)); + +	return flash_write_cfiword (info, wp, cword); +} + +static inline int manufact_match(flash_info_t *info, u32 manu) +{ +	return info->manufacturer_id == ((manu & FLASH_VENDMASK) >> 16); +} + +/*----------------------------------------------------------------------- + */ +#ifdef CONFIG_SYS_FLASH_PROTECTION + +static int cfi_protect_bugfix(flash_info_t *info, long sector, int prot) +{ +	if 
(manufact_match(info, INTEL_MANUFACT) +	    && info->device_id == NUMONYX_256MBIT) { +		/* +		 * see errata called +		 * "Numonyx Axcell P33/P30 Specification Update" :) +		 */ +		flash_write_cmd(info, sector, 0, FLASH_CMD_READ_ID); +		if (!flash_isequal(info, sector, FLASH_OFFSET_PROTECT, +				   prot)) { +			/* +			 * cmd must come before FLASH_CMD_PROTECT + 20us +			 * Disable interrupts which might cause a timeout here. +			 */ +			int flag = disable_interrupts(); +			unsigned short cmd; + +			if (prot) +				cmd = FLASH_CMD_PROTECT_SET; +			else +				cmd = FLASH_CMD_PROTECT_CLEAR; +				flash_write_cmd(info, sector, 0, +					  FLASH_CMD_PROTECT); +			flash_write_cmd(info, sector, 0, cmd); +			/* re-enable interrupts if necessary */ +			if (flag) +				enable_interrupts(); +		} +		return 1; +	} +	return 0; +} + +int flash_real_protect (flash_info_t * info, long sector, int prot) +{ +	int retcode = 0; + +	switch (info->vendor) { +		case CFI_CMDSET_INTEL_PROG_REGIONS: +		case CFI_CMDSET_INTEL_STANDARD: +		case CFI_CMDSET_INTEL_EXTENDED: +			if (!cfi_protect_bugfix(info, sector, prot)) { +				flash_write_cmd(info, sector, 0, +					 FLASH_CMD_CLEAR_STATUS); +				flash_write_cmd(info, sector, 0, +					FLASH_CMD_PROTECT); +				if (prot) +					flash_write_cmd(info, sector, 0, +						FLASH_CMD_PROTECT_SET); +				else +					flash_write_cmd(info, sector, 0, +						FLASH_CMD_PROTECT_CLEAR); + +			} +			break; +		case CFI_CMDSET_AMD_EXTENDED: +		case CFI_CMDSET_AMD_STANDARD: +			/* U-Boot only checks the first byte */ +			if (manufact_match(info, ATM_MANUFACT)) { +				if (prot) { +					flash_unlock_seq (info, 0); +					flash_write_cmd (info, 0, +							info->addr_unlock1, +							ATM_CMD_SOFTLOCK_START); +					flash_unlock_seq (info, 0); +					flash_write_cmd (info, sector, 0, +							ATM_CMD_LOCK_SECT); +				} else { +					flash_write_cmd (info, 0, +							info->addr_unlock1, +							AMD_CMD_UNLOCK_START); +					if (info->device_id == ATM_ID_BV6416) +						flash_write_cmd 
(info, sector, +							0, ATM_CMD_UNLOCK_SECT); +				} +			} +			if (info->legacy_unlock) { +				int flag = disable_interrupts(); +				int lock_flag; + +				flash_unlock_seq(info, 0); +				flash_write_cmd(info, 0, info->addr_unlock1, +						AMD_CMD_SET_PPB_ENTRY); +				lock_flag = flash_isset(info, sector, 0, 0x01); +				if (prot) { +					if (lock_flag) { +						flash_write_cmd(info, sector, 0, +							AMD_CMD_PPB_LOCK_BC1); +						flash_write_cmd(info, sector, 0, +							AMD_CMD_PPB_LOCK_BC2); +					} +					debug("sector %ld %slocked\n", sector, +						lock_flag ? "" : "already "); +				} else { +					if (!lock_flag) { +						debug("unlock %ld\n", sector); +						flash_write_cmd(info, 0, 0, +							AMD_CMD_PPB_UNLOCK_BC1); +						flash_write_cmd(info, 0, 0, +							AMD_CMD_PPB_UNLOCK_BC2); +					} +					debug("sector %ld %sunlocked\n", sector, +						!lock_flag ? "" : "already "); +				} +				if (flag) +					enable_interrupts(); + +				if (flash_status_check(info, sector, +						info->erase_blk_tout, +						prot ? "protect" : "unprotect")) +					printf("status check error\n"); + +				flash_write_cmd(info, 0, 0, +						AMD_CMD_SET_PPB_EXIT_BC1); +				flash_write_cmd(info, 0, 0, +						AMD_CMD_SET_PPB_EXIT_BC2); +			} +			break; +#ifdef CONFIG_FLASH_CFI_LEGACY +		case CFI_CMDSET_AMD_LEGACY: +			flash_write_cmd (info, sector, 0, FLASH_CMD_CLEAR_STATUS); +			flash_write_cmd (info, sector, 0, FLASH_CMD_PROTECT); +			if (prot) +				flash_write_cmd (info, sector, 0, FLASH_CMD_PROTECT_SET); +			else +				flash_write_cmd (info, sector, 0, FLASH_CMD_PROTECT_CLEAR); +#endif +	}; + +	/* +	 * Flash needs to be in status register read mode for +	 * flash_full_status_check() to work correctly +	 */ +	flash_write_cmd(info, sector, 0, FLASH_CMD_READ_STATUS); +	if ((retcode = +	     flash_full_status_check (info, sector, info->erase_blk_tout, +				      prot ? 
"protect" : "unprotect")) == 0) { + +		info->protect[sector] = prot; + +		/* +		 * On some of Intel's flash chips (marked via legacy_unlock) +		 * unprotect unprotects all locking. +		 */ +		if ((prot == 0) && (info->legacy_unlock)) { +			flash_sect_t i; + +			for (i = 0; i < info->sector_count; i++) { +				if (info->protect[i]) +					flash_real_protect (info, i, 1); +			} +		} +	} +	return retcode; +} + +/*----------------------------------------------------------------------- + * flash_read_user_serial - read the OneTimeProgramming cells + */ +void flash_read_user_serial (flash_info_t * info, void *buffer, int offset, +			     int len) +{ +	uchar *src; +	uchar *dst; + +	dst = buffer; +	src = flash_map (info, 0, FLASH_OFFSET_USER_PROTECTION); +	flash_write_cmd (info, 0, 0, FLASH_CMD_READ_ID); +	memcpy (dst, src + offset, len); +	flash_write_cmd (info, 0, 0, info->cmd_reset); +	udelay(1); +	flash_unmap(info, 0, FLASH_OFFSET_USER_PROTECTION, src); +} + +/* + * flash_read_factory_serial - read the device Id from the protection area + */ +void flash_read_factory_serial (flash_info_t * info, void *buffer, int offset, +				int len) +{ +	uchar *src; + +	src = flash_map (info, 0, FLASH_OFFSET_INTEL_PROTECTION); +	flash_write_cmd (info, 0, 0, FLASH_CMD_READ_ID); +	memcpy (buffer, src + offset, len); +	flash_write_cmd (info, 0, 0, info->cmd_reset); +	udelay(1); +	flash_unmap(info, 0, FLASH_OFFSET_INTEL_PROTECTION, src); +} + +#endif /* CONFIG_SYS_FLASH_PROTECTION */ + +/*----------------------------------------------------------------------- + * Reverse the order of the erase regions in the CFI QRY structure. + * This is needed for chips that are either a) correctly detected as + * top-boot, or b) buggy. 
+ */ +static void cfi_reverse_geometry(struct cfi_qry *qry) +{ +	unsigned int i, j; +	u32 tmp; + +	for (i = 0, j = qry->num_erase_regions - 1; i < j; i++, j--) { +		tmp = get_unaligned(&(qry->erase_region_info[i])); +		put_unaligned(get_unaligned(&(qry->erase_region_info[j])), +			      &(qry->erase_region_info[i])); +		put_unaligned(tmp, &(qry->erase_region_info[j])); +	} +} + +/*----------------------------------------------------------------------- + * read jedec ids from device and set corresponding fields in info struct + * + * Note: assume cfi->vendor, cfi->portwidth and cfi->chipwidth are correct + * + */ +static void cmdset_intel_read_jedec_ids(flash_info_t *info) +{ +	flash_write_cmd(info, 0, 0, FLASH_CMD_RESET); +	udelay(1); +	flash_write_cmd(info, 0, 0, FLASH_CMD_READ_ID); +	udelay(1000); /* some flash are slow to respond */ +	info->manufacturer_id = flash_read_uchar (info, +					FLASH_OFFSET_MANUFACTURER_ID); +	info->device_id = (info->chipwidth == FLASH_CFI_16BIT) ? +			flash_read_word (info, FLASH_OFFSET_DEVICE_ID) : +			flash_read_uchar (info, FLASH_OFFSET_DEVICE_ID); +	flash_write_cmd(info, 0, 0, FLASH_CMD_RESET); +} + +static int cmdset_intel_init(flash_info_t *info, struct cfi_qry *qry) +{ +	info->cmd_reset = FLASH_CMD_RESET; + +	cmdset_intel_read_jedec_ids(info); +	flash_write_cmd(info, 0, info->cfi_offset, FLASH_CMD_CFI); + +#ifdef CONFIG_SYS_FLASH_PROTECTION +	/* read legacy lock/unlock bit from intel flash */ +	if (info->ext_addr) { +		info->legacy_unlock = flash_read_uchar (info, +				info->ext_addr + 5) & 0x08; +	} +#endif + +	return 0; +} + +static void cmdset_amd_read_jedec_ids(flash_info_t *info) +{ +	ushort bankId = 0; +	uchar  manuId; + +	flash_write_cmd(info, 0, 0, AMD_CMD_RESET); +	flash_unlock_seq(info, 0); +	flash_write_cmd(info, 0, info->addr_unlock1, FLASH_CMD_READ_ID); +	udelay(1000); /* some flash are slow to respond */ + +	manuId = flash_read_uchar (info, FLASH_OFFSET_MANUFACTURER_ID); +	/* JEDEC JEP106Z specifies ID codes up 
to bank 7 */ +	while (manuId == FLASH_CONTINUATION_CODE && bankId < 0x800) { +		bankId += 0x100; +		manuId = flash_read_uchar (info, +			bankId | FLASH_OFFSET_MANUFACTURER_ID); +	} +	info->manufacturer_id = manuId; + +	switch (info->chipwidth){ +	case FLASH_CFI_8BIT: +		info->device_id = flash_read_uchar (info, +						FLASH_OFFSET_DEVICE_ID); +		if (info->device_id == 0x7E) { +			/* AMD 3-byte (expanded) device ids */ +			info->device_id2 = flash_read_uchar (info, +						FLASH_OFFSET_DEVICE_ID2); +			info->device_id2 <<= 8; +			info->device_id2 |= flash_read_uchar (info, +						FLASH_OFFSET_DEVICE_ID3); +		} +		break; +	case FLASH_CFI_16BIT: +		info->device_id = flash_read_word (info, +						FLASH_OFFSET_DEVICE_ID); +		if ((info->device_id & 0xff) == 0x7E) { +			/* AMD 3-byte (expanded) device ids */ +			info->device_id2 = flash_read_uchar (info, +						FLASH_OFFSET_DEVICE_ID2); +			info->device_id2 <<= 8; +			info->device_id2 |= flash_read_uchar (info, +						FLASH_OFFSET_DEVICE_ID3); +		} +		break; +	default: +		break; +	} +	flash_write_cmd(info, 0, 0, AMD_CMD_RESET); +	udelay(1); +} + +static int cmdset_amd_init(flash_info_t *info, struct cfi_qry *qry) +{ +	info->cmd_reset = AMD_CMD_RESET; +	info->cmd_erase_sector = AMD_CMD_ERASE_SECTOR; + +	cmdset_amd_read_jedec_ids(info); +	flash_write_cmd(info, 0, info->cfi_offset, FLASH_CMD_CFI); + +#ifdef CONFIG_SYS_FLASH_PROTECTION +	if (info->ext_addr) { +		/* read sector protect/unprotect scheme (at 0x49) */ +		if (flash_read_uchar(info, info->ext_addr + 9) == 0x8) +			info->legacy_unlock = 1; +	} +#endif + +	return 0; +} + +#ifdef CONFIG_FLASH_CFI_LEGACY +static void flash_read_jedec_ids (flash_info_t * info) +{ +	info->manufacturer_id = 0; +	info->device_id       = 0; +	info->device_id2      = 0; + +	switch (info->vendor) { +	case CFI_CMDSET_INTEL_PROG_REGIONS: +	case CFI_CMDSET_INTEL_STANDARD: +	case CFI_CMDSET_INTEL_EXTENDED: +		cmdset_intel_read_jedec_ids(info); +		break; +	case CFI_CMDSET_AMD_STANDARD: +	case 
CFI_CMDSET_AMD_EXTENDED: +		cmdset_amd_read_jedec_ids(info); +		break; +	default: +		break; +	} +} + +/*----------------------------------------------------------------------- + * Call board code to request info about non-CFI flash. + * board_flash_get_legacy needs to fill in at least: + * info->portwidth, info->chipwidth and info->interface for Jedec probing. + */ +static int flash_detect_legacy(phys_addr_t base, int banknum) +{ +	flash_info_t *info = &flash_info[banknum]; + +	if (board_flash_get_legacy(base, banknum, info)) { +		/* board code may have filled info completely. If not, we +		   use JEDEC ID probing. */ +		if (!info->vendor) { +			int modes[] = { +				CFI_CMDSET_AMD_STANDARD, +				CFI_CMDSET_INTEL_STANDARD +			}; +			int i; + +			for (i = 0; i < ARRAY_SIZE(modes); i++) { +				info->vendor = modes[i]; +				info->start[0] = +					(ulong)map_physmem(base, +							   info->portwidth, +							   MAP_NOCACHE); +				if (info->portwidth == FLASH_CFI_8BIT +					&& info->interface == FLASH_CFI_X8X16) { +					info->addr_unlock1 = 0x2AAA; +					info->addr_unlock2 = 0x5555; +				} else { +					info->addr_unlock1 = 0x5555; +					info->addr_unlock2 = 0x2AAA; +				} +				flash_read_jedec_ids(info); +				debug("JEDEC PROBE: ID %x %x %x\n", +						info->manufacturer_id, +						info->device_id, +						info->device_id2); +				if (jedec_flash_match(info, info->start[0])) +					break; +				else +					unmap_physmem((void *)info->start[0], +						      info->portwidth); +			} +		} + +		switch(info->vendor) { +		case CFI_CMDSET_INTEL_PROG_REGIONS: +		case CFI_CMDSET_INTEL_STANDARD: +		case CFI_CMDSET_INTEL_EXTENDED: +			info->cmd_reset = FLASH_CMD_RESET; +			break; +		case CFI_CMDSET_AMD_STANDARD: +		case CFI_CMDSET_AMD_EXTENDED: +		case CFI_CMDSET_AMD_LEGACY: +			info->cmd_reset = AMD_CMD_RESET; +			break; +		} +		info->flash_id = FLASH_MAN_CFI; +		return 1; +	} +	return 0; /* use CFI */ +} +#else +static inline int flash_detect_legacy(phys_addr_t base, int banknum) +{ +	
return 0; /* use CFI */ +} +#endif + +/*----------------------------------------------------------------------- + * detect if flash is compatible with the Common Flash Interface (CFI) + * http://www.jedec.org/download/search/jesd68.pdf + */ +static void flash_read_cfi (flash_info_t *info, void *buf, +		unsigned int start, size_t len) +{ +	u8 *p = buf; +	unsigned int i; + +	for (i = 0; i < len; i++) +		p[i] = flash_read_uchar(info, start + i); +} + +static void __flash_cmd_reset(flash_info_t *info) +{ +	/* +	 * We do not yet know what kind of commandset to use, so we issue +	 * the reset command in both Intel and AMD variants, in the hope +	 * that AMD flash roms ignore the Intel command. +	 */ +	flash_write_cmd(info, 0, 0, AMD_CMD_RESET); +	udelay(1); +	flash_write_cmd(info, 0, 0, FLASH_CMD_RESET); +} +void flash_cmd_reset(flash_info_t *info) +	__attribute__((weak,alias("__flash_cmd_reset"))); + +static int __flash_detect_cfi (flash_info_t * info, struct cfi_qry *qry) +{ +	int cfi_offset; + +	/* Issue FLASH reset command */ +	flash_cmd_reset(info); + +	for (cfi_offset = 0; cfi_offset < ARRAY_SIZE(flash_offset_cfi); +	     cfi_offset++) { +		flash_write_cmd (info, 0, flash_offset_cfi[cfi_offset], +				 FLASH_CMD_CFI); +		if (flash_isequal (info, 0, FLASH_OFFSET_CFI_RESP, 'Q') +		    && flash_isequal (info, 0, FLASH_OFFSET_CFI_RESP + 1, 'R') +		    && flash_isequal (info, 0, FLASH_OFFSET_CFI_RESP + 2, 'Y')) { +			flash_read_cfi(info, qry, FLASH_OFFSET_CFI_RESP, +					sizeof(struct cfi_qry)); +			info->interface	= le16_to_cpu(qry->interface_desc); + +			info->cfi_offset = flash_offset_cfi[cfi_offset]; +			debug ("device interface is %d\n", +			       info->interface); +			debug ("found port %d chip %d ", +			       info->portwidth, info->chipwidth); +			debug ("port %d bits chip %d bits\n", +			       info->portwidth << CFI_FLASH_SHIFT_WIDTH, +			       info->chipwidth << CFI_FLASH_SHIFT_WIDTH); + +			/* calculate command offsets as in the Linux driver */ +			
info->addr_unlock1 = 0x555; +			info->addr_unlock2 = 0x2aa; + +			/* +			 * modify the unlock address if we are +			 * in compatibility mode +			 */ +			if (	/* x8/x16 in x8 mode */ +				((info->chipwidth == FLASH_CFI_BY8) && +					(info->interface == FLASH_CFI_X8X16)) || +				/* x16/x32 in x16 mode */ +				((info->chipwidth == FLASH_CFI_BY16) && +					(info->interface == FLASH_CFI_X16X32))) +			{ +				info->addr_unlock1 = 0xaaa; +				info->addr_unlock2 = 0x555; +			} + +			info->name = "CFI conformant"; +			return 1; +		} +	} + +	return 0; +} + +static int flash_detect_cfi (flash_info_t * info, struct cfi_qry *qry) +{ +	debug ("flash detect cfi\n"); + +	for (info->portwidth = CONFIG_SYS_FLASH_CFI_WIDTH; +	     info->portwidth <= FLASH_CFI_64BIT; info->portwidth <<= 1) { +		for (info->chipwidth = FLASH_CFI_BY8; +		     info->chipwidth <= info->portwidth; +		     info->chipwidth <<= 1) +			if (__flash_detect_cfi(info, qry)) +				return 1; +	} +	debug ("not found\n"); +	return 0; +} + +/* + * Manufacturer-specific quirks. Add workarounds for geometry + * reversal, etc. here. + */ +static void flash_fixup_amd(flash_info_t *info, struct cfi_qry *qry) +{ +	/* check if flash geometry needs reversal */ +	if (qry->num_erase_regions > 1) { +		/* reverse geometry if top boot part */ +		if (info->cfi_version < 0x3131) { +			/* CFI < 1.1, try to guess from device id */ +			if ((info->device_id & 0x80) != 0) +				cfi_reverse_geometry(qry); +		} else if (flash_read_uchar(info, info->ext_addr + 0xf) == 3) { +			/* CFI >= 1.1, deduct from top/bottom flag */ +			/* note: ext_addr is valid since cfi_version > 0 */ +			cfi_reverse_geometry(qry); +		} +	} +} + +static void flash_fixup_atmel(flash_info_t *info, struct cfi_qry *qry) +{ +	int reverse_geometry = 0; + +	/* Check the "top boot" bit in the PRI */ +	if (info->ext_addr && !(flash_read_uchar(info, info->ext_addr + 6) & 1)) +		reverse_geometry = 1; + +	/* AT49BV6416(T) list the erase regions in the wrong order. 
+	 * However, the device ID is identical with the non-broken +	 * AT49BV642D they differ in the high byte. +	 */ +	if (info->device_id == 0xd6 || info->device_id == 0xd2) +		reverse_geometry = !reverse_geometry; + +	if (reverse_geometry) +		cfi_reverse_geometry(qry); +} + +static void flash_fixup_stm(flash_info_t *info, struct cfi_qry *qry) +{ +	/* check if flash geometry needs reversal */ +	if (qry->num_erase_regions > 1) { +		/* reverse geometry if top boot part */ +		if (info->cfi_version < 0x3131) { +			/* CFI < 1.1, guess by device id */ +			if (info->device_id == 0x22CA || /* M29W320DT */ +			    info->device_id == 0x2256 || /* M29W320ET */ +			    info->device_id == 0x22D7) { /* M29W800DT */ +				cfi_reverse_geometry(qry); +			} +		} else if (flash_read_uchar(info, info->ext_addr + 0xf) == 3) { +			/* CFI >= 1.1, deduct from top/bottom flag */ +			/* note: ext_addr is valid since cfi_version > 0 */ +			cfi_reverse_geometry(qry); +		} +	} +} + +static void flash_fixup_sst(flash_info_t *info, struct cfi_qry *qry) +{ +	/* +	 * SST, for many recent nor parallel flashes, says they are +	 * CFI-conformant. This is not true, since qry struct. +	 * reports a std. AMD command set (0x0002), while SST allows to +	 * erase two different sector sizes for the same memory. +	 * 64KB sector (SST call it block)  needs 0x30 to be erased. +	 * 4KB  sector (SST call it sector) needs 0x50 to be erased. +	 * Since CFI query detect the 4KB number of sectors, users expects +	 * a sector granularity of 4KB, and it is here set. +	 */ +	if (info->device_id == 0x5D23 || /* SST39VF3201B */ +	    info->device_id == 0x5C23) { /* SST39VF3202B */ +		/* set sector granularity to 4KB */ +		info->cmd_erase_sector=0x50; +	} +} + +static void flash_fixup_num(flash_info_t *info, struct cfi_qry *qry) +{ +	/* +	 * The M29EW devices seem to report the CFI information wrong +	 * when it's in 8 bit mode. +	 * There's an app note from Numonyx on this issue. 
+	 * So adjust the buffer size for M29EW while operating in 8-bit mode +	 */ +	if (((qry->max_buf_write_size) > 0x8) && +			(info->device_id == 0x7E) && +			(info->device_id2 == 0x2201 || +			info->device_id2 == 0x2301 || +			info->device_id2 == 0x2801 || +			info->device_id2 == 0x4801)) { +		debug("Adjusted buffer size on Numonyx flash" +			" M29EW family in 8 bit mode\n"); +		qry->max_buf_write_size = 0x8; +	} +} + +/* + * The following code cannot be run from FLASH! + * + */ +ulong flash_get_size (phys_addr_t base, int banknum) +{ +	flash_info_t *info = &flash_info[banknum]; +	int i, j; +	flash_sect_t sect_cnt; +	phys_addr_t sector; +	unsigned long tmp; +	int size_ratio; +	uchar num_erase_regions; +	int erase_region_size; +	int erase_region_count; +	struct cfi_qry qry; +	unsigned long max_size; + +	memset(&qry, 0, sizeof(qry)); + +	info->ext_addr = 0; +	info->cfi_version = 0; +#ifdef CONFIG_SYS_FLASH_PROTECTION +	info->legacy_unlock = 0; +#endif + +	info->start[0] = (ulong)map_physmem(base, info->portwidth, MAP_NOCACHE); + +	if (flash_detect_cfi (info, &qry)) { +		info->vendor = le16_to_cpu(get_unaligned(&(qry.p_id))); +		info->ext_addr = le16_to_cpu(get_unaligned(&(qry.p_adr))); +		num_erase_regions = qry.num_erase_regions; + +		if (info->ext_addr) { +			info->cfi_version = (ushort) flash_read_uchar (info, +						info->ext_addr + 3) << 8; +			info->cfi_version |= (ushort) flash_read_uchar (info, +						info->ext_addr + 4); +		} + +#ifdef DEBUG +		flash_printqry (&qry); +#endif + +		switch (info->vendor) { +		case CFI_CMDSET_INTEL_PROG_REGIONS: +		case CFI_CMDSET_INTEL_STANDARD: +		case CFI_CMDSET_INTEL_EXTENDED: +			cmdset_intel_init(info, &qry); +			break; +		case CFI_CMDSET_AMD_STANDARD: +		case CFI_CMDSET_AMD_EXTENDED: +			cmdset_amd_init(info, &qry); +			break; +		default: +			printf("CFI: Unknown command set 0x%x\n", +					info->vendor); +			/* +			 * Unfortunately, this means we don't know how +			 * to get the chip back to Read mode. 
Might +			 * as well try an Intel-style reset... +			 */ +			flash_write_cmd(info, 0, 0, FLASH_CMD_RESET); +			return 0; +		} + +		/* Do manufacturer-specific fixups */ +		switch (info->manufacturer_id) { +		case 0x0001: /* AMD */ +		case 0x0037: /* AMIC */ +			flash_fixup_amd(info, &qry); +			break; +		case 0x001f: +			flash_fixup_atmel(info, &qry); +			break; +		case 0x0020: +			flash_fixup_stm(info, &qry); +			break; +		case 0x00bf: /* SST */ +			flash_fixup_sst(info, &qry); +			break; +		case 0x0089: /* Numonyx */ +			flash_fixup_num(info, &qry); +			break; +		} + +		debug ("manufacturer is %d\n", info->vendor); +		debug ("manufacturer id is 0x%x\n", info->manufacturer_id); +		debug ("device id is 0x%x\n", info->device_id); +		debug ("device id2 is 0x%x\n", info->device_id2); +		debug ("cfi version is 0x%04x\n", info->cfi_version); + +		size_ratio = info->portwidth / info->chipwidth; +		/* if the chip is x8/x16 reduce the ratio by half */ +		if ((info->interface == FLASH_CFI_X8X16) +		    && (info->chipwidth == FLASH_CFI_BY8)) { +			size_ratio >>= 1; +		} +		debug ("size_ratio %d port %d bits chip %d bits\n", +		       size_ratio, info->portwidth << CFI_FLASH_SHIFT_WIDTH, +		       info->chipwidth << CFI_FLASH_SHIFT_WIDTH); +		info->size = 1 << qry.dev_size; +		/* multiply the size by the number of chips */ +		info->size *= size_ratio; +		max_size = cfi_flash_bank_size(banknum); +		if (max_size && (info->size > max_size)) { +			debug("[truncated from %ldMiB]", info->size >> 20); +			info->size = max_size; +		} +		debug ("found %d erase regions\n", num_erase_regions); +		sect_cnt = 0; +		sector = base; +		for (i = 0; i < num_erase_regions; i++) { +			if (i > NUM_ERASE_REGIONS) { +				printf ("%d erase regions found, only %d used\n", +					num_erase_regions, NUM_ERASE_REGIONS); +				break; +			} + +			tmp = le32_to_cpu(get_unaligned( +						&(qry.erase_region_info[i]))); +			debug("erase region %u: 0x%08lx\n", i, tmp); + +			erase_region_count = (tmp & 0xffff) + 
1; +			tmp >>= 16; +			erase_region_size = +				(tmp & 0xffff) ? ((tmp & 0xffff) * 256) : 128; +			debug ("erase_region_count = %d erase_region_size = %d\n", +				erase_region_count, erase_region_size); +			for (j = 0; j < erase_region_count; j++) { +				if (sector - base >= info->size) +					break; +				if (sect_cnt >= CONFIG_SYS_MAX_FLASH_SECT) { +					printf("ERROR: too many flash sectors\n"); +					break; +				} +				info->start[sect_cnt] = +					(ulong)map_physmem(sector, +							   info->portwidth, +							   MAP_NOCACHE); +				sector += (erase_region_size * size_ratio); + +				/* +				 * Only read protection status from +				 * supported devices (intel...) +				 */ +				switch (info->vendor) { +				case CFI_CMDSET_INTEL_PROG_REGIONS: +				case CFI_CMDSET_INTEL_EXTENDED: +				case CFI_CMDSET_INTEL_STANDARD: +					/* +					 * Set flash to read-id mode. Otherwise +					 * reading protected status is not +					 * guaranteed. +					 */ +					flash_write_cmd(info, sect_cnt, 0, +							FLASH_CMD_READ_ID); +					info->protect[sect_cnt] = +						flash_isset (info, sect_cnt, +							     FLASH_OFFSET_PROTECT, +							     FLASH_STATUS_PROTECT); +					break; +				case CFI_CMDSET_AMD_EXTENDED: +				case CFI_CMDSET_AMD_STANDARD: +					if (!info->legacy_unlock) { +						/* default: not protected */ +						info->protect[sect_cnt] = 0; +						break; +					} + +					/* Read protection (PPB) from sector */ +					flash_write_cmd(info, 0, 0, +							info->cmd_reset); +					flash_unlock_seq(info, 0); +					flash_write_cmd(info, 0, +							info->addr_unlock1, +							FLASH_CMD_READ_ID); +					info->protect[sect_cnt] = +						flash_isset( +							info, sect_cnt, +							FLASH_OFFSET_PROTECT, +							FLASH_STATUS_PROTECT); +					break; +				default: +					/* default: not protected */ +					info->protect[sect_cnt] = 0; +				} + +				sect_cnt++; +			} +		} + +		info->sector_count = sect_cnt; +		info->buffer_size = 1 << le16_to_cpu(qry.max_buf_write_size); +		tmp = 1 << 
qry.block_erase_timeout_typ; +		info->erase_blk_tout = tmp * +			(1 << qry.block_erase_timeout_max); +		tmp = (1 << qry.buf_write_timeout_typ) * +			(1 << qry.buf_write_timeout_max); + +		/* round up when converting to ms */ +		info->buffer_write_tout = (tmp + 999) / 1000; +		tmp = (1 << qry.word_write_timeout_typ) * +			(1 << qry.word_write_timeout_max); +		/* round up when converting to ms */ +		info->write_tout = (tmp + 999) / 1000; +		info->flash_id = FLASH_MAN_CFI; +		if ((info->interface == FLASH_CFI_X8X16) && +		    (info->chipwidth == FLASH_CFI_BY8)) { +			/* XXX - Need to test on x8/x16 in parallel. */ +			info->portwidth >>= 1; +		} + +		flash_write_cmd (info, 0, 0, info->cmd_reset); +	} + +	return (info->size); +} + +#ifdef CONFIG_FLASH_CFI_MTD +void flash_set_verbose(uint v) +{ +	flash_verbose = v; +} +#endif + +static void cfi_flash_set_config_reg(u32 base, u16 val) +{ +#ifdef CONFIG_SYS_CFI_FLASH_CONFIG_REGS +	/* +	 * Only set this config register if really defined +	 * to a valid value (0xffff is invalid) +	 */ +	if (val == 0xffff) +		return; + +	/* +	 * Set configuration register. Data is "encrypted" in the 16 lower +	 * address bits. 
+	 */ +	flash_write16(FLASH_CMD_SETUP, (void *)(base + (val << 1))); +	flash_write16(FLASH_CMD_SET_CR_CONFIRM, (void *)(base + (val << 1))); + +	/* +	 * Finally issue reset-command to bring device back to +	 * read-array mode +	 */ +	flash_write16(FLASH_CMD_RESET, (void *)base); +#endif +} + +/*----------------------------------------------------------------------- + */ + +void flash_protect_default(void) +{ +#if defined(CONFIG_SYS_FLASH_AUTOPROTECT_LIST) +	int i; +	struct apl_s { +		ulong start; +		ulong size; +	} apl[] = CONFIG_SYS_FLASH_AUTOPROTECT_LIST; +#endif + +	/* Monitor protection ON by default */ +#if (CONFIG_SYS_MONITOR_BASE >= CONFIG_SYS_FLASH_BASE) && \ +	(!defined(CONFIG_MONITOR_IS_IN_RAM)) +	flash_protect(FLAG_PROTECT_SET, +		       CONFIG_SYS_MONITOR_BASE, +		       CONFIG_SYS_MONITOR_BASE + monitor_flash_len  - 1, +		       flash_get_info(CONFIG_SYS_MONITOR_BASE)); +#endif + +	/* Environment protection ON by default */ +#ifdef CONFIG_ENV_IS_IN_FLASH +	flash_protect(FLAG_PROTECT_SET, +		       CONFIG_ENV_ADDR, +		       CONFIG_ENV_ADDR + CONFIG_ENV_SECT_SIZE - 1, +		       flash_get_info(CONFIG_ENV_ADDR)); +#endif + +	/* Redundant environment protection ON by default */ +#ifdef CONFIG_ENV_ADDR_REDUND +	flash_protect(FLAG_PROTECT_SET, +		       CONFIG_ENV_ADDR_REDUND, +		       CONFIG_ENV_ADDR_REDUND + CONFIG_ENV_SECT_SIZE - 1, +		       flash_get_info(CONFIG_ENV_ADDR_REDUND)); +#endif + +#if defined(CONFIG_SYS_FLASH_AUTOPROTECT_LIST) +	for (i = 0; i < ARRAY_SIZE(apl); i++) { +		debug("autoprotecting from %08lx to %08lx\n", +		      apl[i].start, apl[i].start + apl[i].size - 1); +		flash_protect(FLAG_PROTECT_SET, +			       apl[i].start, +			       apl[i].start + apl[i].size - 1, +			       flash_get_info(apl[i].start)); +	} +#endif +} + +unsigned long flash_init (void) +{ +	unsigned long size = 0; +	int i; + +#ifdef CONFIG_SYS_FLASH_PROTECTION +	/* read environment from EEPROM */ +	char s[64]; +	getenv_f("unlock", s, sizeof(s)); +#endif + +	/* 
Init: no FLASHes known */ +	for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; ++i) { +		flash_info[i].flash_id = FLASH_UNKNOWN; + +		/* Optionally write flash configuration register */ +		cfi_flash_set_config_reg(cfi_flash_bank_addr(i), +					 cfi_flash_config_reg(i)); + +		if (!flash_detect_legacy(cfi_flash_bank_addr(i), i)) +			flash_get_size(cfi_flash_bank_addr(i), i); +		size += flash_info[i].size; +		if (flash_info[i].flash_id == FLASH_UNKNOWN) { +#ifndef CONFIG_SYS_FLASH_QUIET_TEST +			printf ("## Unknown flash on Bank %d " +				"- Size = 0x%08lx = %ld MB\n", +				i+1, flash_info[i].size, +				flash_info[i].size >> 20); +#endif /* CONFIG_SYS_FLASH_QUIET_TEST */ +		} +#ifdef CONFIG_SYS_FLASH_PROTECTION +		else if ((s != NULL) && (strcmp(s, "yes") == 0)) { +			/* +			 * Only the U-Boot image and it's environment +			 * is protected, all other sectors are +			 * unprotected (unlocked) if flash hardware +			 * protection is used (CONFIG_SYS_FLASH_PROTECTION) +			 * and the environment variable "unlock" is +			 * set to "yes". +			 */ +			if (flash_info[i].legacy_unlock) { +				int k; + +				/* +				 * Disable legacy_unlock temporarily, +				 * since flash_real_protect would +				 * relock all other sectors again +				 * otherwise. +				 */ +				flash_info[i].legacy_unlock = 0; + +				/* +				 * Legacy unlocking (e.g. Intel J3) -> +				 * unlock only one sector. This will +				 * unlock all sectors. 
+				 */ +				flash_real_protect (&flash_info[i], 0, 0); + +				flash_info[i].legacy_unlock = 1; + +				/* +				 * Manually mark other sectors as +				 * unlocked (unprotected) +				 */ +				for (k = 1; k < flash_info[i].sector_count; k++) +					flash_info[i].protect[k] = 0; +			} else { +				/* +				 * No legancy unlocking -> unlock all sectors +				 */ +				flash_protect (FLAG_PROTECT_CLEAR, +					       flash_info[i].start[0], +					       flash_info[i].start[0] +					       + flash_info[i].size - 1, +					       &flash_info[i]); +			} +		} +#endif /* CONFIG_SYS_FLASH_PROTECTION */ +	} + +	flash_protect_default(); +#ifdef CONFIG_FLASH_CFI_MTD +	cfi_mtd_init(); +#endif + +	return (size); +} diff --git a/roms/u-boot/drivers/mtd/cfi_mtd.c b/roms/u-boot/drivers/mtd/cfi_mtd.c new file mode 100644 index 00000000..ac805ff1 --- /dev/null +++ b/roms/u-boot/drivers/mtd/cfi_mtd.c @@ -0,0 +1,263 @@ +/* + * (C) Copyright 2008 Semihalf + * + * Written by: Piotr Ziecik <kosmo@semihalf.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <flash.h> +#include <malloc.h> + +#include <asm/errno.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/concat.h> +#include <mtd/cfi_flash.h> + +static struct mtd_info cfi_mtd_info[CFI_MAX_FLASH_BANKS]; +static char cfi_mtd_names[CFI_MAX_FLASH_BANKS][16]; +#ifdef CONFIG_MTD_CONCAT +static char c_mtd_name[16]; +#endif + +static int cfi_mtd_erase(struct mtd_info *mtd, struct erase_info *instr) +{ +	flash_info_t *fi = mtd->priv; +	size_t a_start = fi->start[0] + instr->addr; +	size_t a_end = a_start + instr->len; +	int s_first = -1; +	int s_last = -1; +	int error, sect; + +	for (sect = 0; sect < fi->sector_count; sect++) { +		if (a_start == fi->start[sect]) +			s_first = sect; + +		if (sect < fi->sector_count - 1) { +			if (a_end == fi->start[sect + 1]) { +				s_last = sect; +				break; +			} +		} else { +			s_last = sect; +			break; +		} +	} + +	if (s_first >= 0 && s_first <= s_last) { +		instr->state = 
MTD_ERASING; + +		flash_set_verbose(0); +		error = flash_erase(fi, s_first, s_last); +		flash_set_verbose(1); + +		if (error) { +			instr->state = MTD_ERASE_FAILED; +			return -EIO; +		} + +		instr->state = MTD_ERASE_DONE; +		mtd_erase_callback(instr); +		return 0; +	} + +	return -EINVAL; +} + +static int cfi_mtd_read(struct mtd_info *mtd, loff_t from, size_t len, +	size_t *retlen, u_char *buf) +{ +	flash_info_t *fi = mtd->priv; +	u_char *f = (u_char*)(fi->start[0]) + from; + +	memcpy(buf, f, len); +	*retlen = len; + +	return 0; +} + +static int cfi_mtd_write(struct mtd_info *mtd, loff_t to, size_t len, +	size_t *retlen, const u_char *buf) +{ +	flash_info_t *fi = mtd->priv; +	u_long t = fi->start[0] + to; +	int error; + +	flash_set_verbose(0); +	error = write_buff(fi, (u_char*)buf, t, len); +	flash_set_verbose(1); + +	if (!error) { +		*retlen = len; +		return 0; +	} + +	return -EIO; +} + +static void cfi_mtd_sync(struct mtd_info *mtd) +{ +	/* +	 * This function should wait until all pending operations +	 * finish. 
However this driver is fully synchronous, so +	 * this function returns immediately +	 */ +} + +static int cfi_mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ +	flash_info_t *fi = mtd->priv; + +	flash_set_verbose(0); +	flash_protect(FLAG_PROTECT_SET, fi->start[0] + ofs, +					fi->start[0] + ofs + len - 1, fi); +	flash_set_verbose(1); + +	return 0; +} + +static int cfi_mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ +	flash_info_t *fi = mtd->priv; + +	flash_set_verbose(0); +	flash_protect(FLAG_PROTECT_CLEAR, fi->start[0] + ofs, +					fi->start[0] + ofs + len - 1, fi); +	flash_set_verbose(1); + +	return 0; +} + +static int cfi_mtd_set_erasesize(struct mtd_info *mtd, flash_info_t *fi) +{ +	int sect_size = 0; +	int sect_size_old = 0; +	int sect; +	int regions = 0; +	int numblocks = 0; +	ulong offset; +	ulong base_addr; + +	/* +	 * First detect the number of eraseregions so that we can allocate +	 * the array of eraseregions correctly +	 */ +	for (sect = 0; sect < fi->sector_count; sect++) { +		if (sect_size_old != flash_sector_size(fi, sect)) +			regions++; +		sect_size_old = flash_sector_size(fi, sect); +	} + +	switch (regions) { +	case 0: +		return 1; +	case 1:	/* flash has uniform erase size */ +		mtd->numeraseregions = 0; +		mtd->erasesize = sect_size_old; +		return 0; +	} + +	mtd->numeraseregions = regions; +	mtd->eraseregions = malloc(sizeof(struct mtd_erase_region_info) * regions); + +	/* +	 * Now detect the largest sector and fill the eraseregions +	 */ +	regions = 0; +	base_addr = offset = fi->start[0]; +	sect_size_old = flash_sector_size(fi, 0); +	for (sect = 0; sect < fi->sector_count; sect++) { +		if (sect_size_old != flash_sector_size(fi, sect)) { +			mtd->eraseregions[regions].offset = offset - base_addr; +			mtd->eraseregions[regions].erasesize = sect_size_old; +			mtd->eraseregions[regions].numblocks = numblocks; +			/* Now start counting the next eraseregions */ +			numblocks = 0; +			regions++; +			offset = fi->start[sect]; +	
	} +		numblocks++; + +		/* +		 * Select the largest sector size as erasesize (e.g. for UBI) +		 */ +		if (flash_sector_size(fi, sect) > sect_size) +			sect_size = flash_sector_size(fi, sect); + +		sect_size_old = flash_sector_size(fi, sect); +	} + +	/* +	 * Set the last region +	 */ +	mtd->eraseregions[regions].offset = offset - base_addr; +	mtd->eraseregions[regions].erasesize = sect_size_old; +	mtd->eraseregions[regions].numblocks = numblocks; + +	mtd->erasesize = sect_size; + +	return 0; +} + +int cfi_mtd_init(void) +{ +	struct mtd_info *mtd; +	flash_info_t *fi; +	int error, i; +#ifdef CONFIG_MTD_CONCAT +	int devices_found = 0; +	struct mtd_info *mtd_list[CONFIG_SYS_MAX_FLASH_BANKS]; +#endif + +	for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) { +		fi = &flash_info[i]; +		mtd = &cfi_mtd_info[i]; + +		memset(mtd, 0, sizeof(struct mtd_info)); + +		error = cfi_mtd_set_erasesize(mtd, fi); +		if (error) +			continue; + +		sprintf(cfi_mtd_names[i], "nor%d", i); +		mtd->name		= cfi_mtd_names[i]; +		mtd->type		= MTD_NORFLASH; +		mtd->flags		= MTD_CAP_NORFLASH; +		mtd->size		= fi->size; +		mtd->writesize		= 1; + +		mtd->_erase		= cfi_mtd_erase; +		mtd->_read		= cfi_mtd_read; +		mtd->_write		= cfi_mtd_write; +		mtd->_sync		= cfi_mtd_sync; +		mtd->_lock		= cfi_mtd_lock; +		mtd->_unlock		= cfi_mtd_unlock; +		mtd->priv		= fi; + +		if (add_mtd_device(mtd)) +			return -ENOMEM; + +#ifdef CONFIG_MTD_CONCAT +		mtd_list[devices_found++] = mtd; +#endif +	} + +#ifdef CONFIG_MTD_CONCAT +	if (devices_found > 1) { +		/* +		 * We detected multiple devices. Concatenate them together. 
+		 */ +		sprintf(c_mtd_name, "nor%d", devices_found); +		mtd = mtd_concat_create(mtd_list, devices_found, c_mtd_name); + +		if (mtd == NULL) +			return -ENXIO; + +		if (add_mtd_device(mtd)) +			return -ENOMEM; +	} +#endif /* CONFIG_MTD_CONCAT */ + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/dataflash.c b/roms/u-boot/drivers/mtd/dataflash.c new file mode 100644 index 00000000..3fb6ed6d --- /dev/null +++ b/roms/u-boot/drivers/mtd/dataflash.c @@ -0,0 +1,447 @@ +/* + * LowLevel function for ATMEL DataFlash support + * Author : Hamid Ikdoumi (Atmel) + * + * SPDX-License-Identifier:	GPL-2.0+ + */ +#include <common.h> +#include <config.h> +#include <asm/hardware.h> +#include <dataflash.h> + +static AT91S_DataFlash DataFlashInst; + +extern void AT91F_SpiInit (void); +extern int AT91F_DataflashProbe (int i, AT91PS_DataflashDesc pDesc); +extern int AT91F_DataFlashRead (AT91PS_DataFlash pDataFlash, +				unsigned long addr, +				unsigned long size, char *buffer); +extern int AT91F_DataFlashWrite( AT91PS_DataFlash pDataFlash, +				unsigned char *src, +				int dest, +				int size ); + +int AT91F_DataflashInit (void) +{ +	int i, j; +	int dfcode; +	int part; +	int found[CONFIG_SYS_MAX_DATAFLASH_BANKS]; +	unsigned char protected; + +	AT91F_SpiInit (); + +	for (i = 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++) { +		found[i] = 0; +		dataflash_info[i].Desc.state = IDLE; +		dataflash_info[i].id = 0; +		dataflash_info[i].Device.pages_number = 0; +		dfcode = AT91F_DataflashProbe (cs[i].cs, +				&dataflash_info[i].Desc); + +		switch (dfcode) { +		case AT45DB021: +			dataflash_info[i].Device.pages_number = 1024; +			dataflash_info[i].Device.pages_size = 264; +			dataflash_info[i].Device.page_offset = 9; +			dataflash_info[i].Device.byte_mask = 0x300; +			dataflash_info[i].Device.cs = cs[i].cs; +			dataflash_info[i].Desc.DataFlash_state = IDLE; +			dataflash_info[i].logical_address = cs[i].addr; +			dataflash_info[i].id = dfcode; +			found[i] += dfcode;; +			break; + +		case 
AT45DB081: +			dataflash_info[i].Device.pages_number = 4096; +			dataflash_info[i].Device.pages_size = 264; +			dataflash_info[i].Device.page_offset = 9; +			dataflash_info[i].Device.byte_mask = 0x300; +			dataflash_info[i].Device.cs = cs[i].cs; +			dataflash_info[i].Desc.DataFlash_state = IDLE; +			dataflash_info[i].logical_address = cs[i].addr; +			dataflash_info[i].id = dfcode; +			found[i] += dfcode;; +			break; + +		case AT45DB161: +			dataflash_info[i].Device.pages_number = 4096; +			dataflash_info[i].Device.pages_size = 528; +			dataflash_info[i].Device.page_offset = 10; +			dataflash_info[i].Device.byte_mask = 0x300; +			dataflash_info[i].Device.cs = cs[i].cs; +			dataflash_info[i].Desc.DataFlash_state = IDLE; +			dataflash_info[i].logical_address = cs[i].addr; +			dataflash_info[i].id = dfcode; +			found[i] += dfcode;; +			break; + +		case AT45DB321: +			dataflash_info[i].Device.pages_number = 8192; +			dataflash_info[i].Device.pages_size = 528; +			dataflash_info[i].Device.page_offset = 10; +			dataflash_info[i].Device.byte_mask = 0x300; +			dataflash_info[i].Device.cs = cs[i].cs; +			dataflash_info[i].Desc.DataFlash_state = IDLE; +			dataflash_info[i].logical_address = cs[i].addr; +			dataflash_info[i].id = dfcode; +			found[i] += dfcode;; +			break; + +		case AT45DB642: +			dataflash_info[i].Device.pages_number = 8192; +			dataflash_info[i].Device.pages_size = 1056; +			dataflash_info[i].Device.page_offset = 11; +			dataflash_info[i].Device.byte_mask = 0x700; +			dataflash_info[i].Device.cs = cs[i].cs; +			dataflash_info[i].Desc.DataFlash_state = IDLE; +			dataflash_info[i].logical_address = cs[i].addr; +			dataflash_info[i].id = dfcode; +			found[i] += dfcode;; +			break; + +		case AT45DB128: +			dataflash_info[i].Device.pages_number = 16384; +			dataflash_info[i].Device.pages_size = 1056; +			dataflash_info[i].Device.page_offset = 11; +			dataflash_info[i].Device.byte_mask = 0x700; +			dataflash_info[i].Device.cs = cs[i].cs; +			
dataflash_info[i].Desc.DataFlash_state = IDLE; +			dataflash_info[i].logical_address = cs[i].addr; +			dataflash_info[i].id = dfcode; +			found[i] += dfcode;; +			break; + +		default: +			dfcode = 0; +			break; +		} +		/* set the last area end to the dataflash size*/ +		dataflash_info[i].end_address = +				(dataflash_info[i].Device.pages_number * +				dataflash_info[i].Device.pages_size) - 1; + +		part = 0; +		/* set the area addresses */ +		for(j = 0; j < NB_DATAFLASH_AREA; j++) { +			if(found[i]!=0) { +				dataflash_info[i].Device.area_list[j].start = +					area_list[part].start + +					dataflash_info[i].logical_address; +				if(area_list[part].end == 0xffffffff) { +					dataflash_info[i].Device.area_list[j].end = +						dataflash_info[i].end_address + +						dataflash_info[i].logical_address; +				} else { +					dataflash_info[i].Device.area_list[j].end = +						area_list[part].end + +						dataflash_info[i].logical_address; +				} +				protected = area_list[part].protected; +				/* Set the environment according to the label...*/ +				if(protected == FLAG_PROTECT_INVALID) { +					dataflash_info[i].Device.area_list[j].protected = +						FLAG_PROTECT_INVALID; +				} else { +					dataflash_info[i].Device.area_list[j].protected = +						protected; +				} +				strcpy((char*)(dataflash_info[i].Device.area_list[j].label), +						(const char *)area_list[part].label); +			} +			part++; +		} +	} +	return found[0]; +} + +void AT91F_DataflashSetEnv (void) +{ +	int i, j; +	int part; +	unsigned char env; +	unsigned char s[32];	/* Will fit a long int in hex */ +	unsigned long start; + +	for (i = 0, part= 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++) { +		for(j = 0; j < NB_DATAFLASH_AREA; j++) { +			env = area_list[part].setenv; +			/* Set the environment according to the label...*/ +			if((env & FLAG_SETENV) == FLAG_SETENV) { +				start = dataflash_info[i].Device.area_list[j].start; +				sprintf((char*) s,"%lX",start); +				setenv((char*) area_list[part].label,(char*) s); +			} 
+			part++; +		} +	} +} + +void dataflash_print_info (void) +{ +	int i, j; + +	for (i = 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++) { +		if (dataflash_info[i].id != 0) { +			printf("DataFlash:"); +			switch (dataflash_info[i].id) { +			case AT45DB021: +				printf("AT45DB021\n"); +				break; +			case AT45DB161: +				printf("AT45DB161\n"); +				break; + +			case AT45DB321: +				printf("AT45DB321\n"); +				break; + +			case AT45DB642: +				printf("AT45DB642\n"); +				break; +			case AT45DB128: +				printf("AT45DB128\n"); +				break; +			} + +			printf("Nb pages: %6d\n" +				"Page Size: %6d\n" +				"Size=%8d bytes\n" +				"Logical address: 0x%08X\n", +				(unsigned int) dataflash_info[i].Device.pages_number, +				(unsigned int) dataflash_info[i].Device.pages_size, +				(unsigned int) dataflash_info[i].Device.pages_number * +				dataflash_info[i].Device.pages_size, +				(unsigned int) dataflash_info[i].logical_address); +			for (j = 0; j < NB_DATAFLASH_AREA; j++) { +				switch(dataflash_info[i].Device.area_list[j].protected) { +				case	FLAG_PROTECT_SET: +				case	FLAG_PROTECT_CLEAR: +					printf("Area %i:\t%08lX to %08lX %s", j, +						dataflash_info[i].Device.area_list[j].start, +						dataflash_info[i].Device.area_list[j].end, +						(dataflash_info[i].Device.area_list[j].protected==FLAG_PROTECT_SET) ? 
"(RO)" : "    "); +						printf(" %s\n",	dataflash_info[i].Device.area_list[j].label); +					break; +				case	FLAG_PROTECT_INVALID: +					break; +				} +			} +		} +	} +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : AT91F_DataflashSelect				     */ +/* Object              : Select the correct device			     */ +/*---------------------------------------------------------------------------*/ +AT91PS_DataFlash AT91F_DataflashSelect (AT91PS_DataFlash pFlash, +				unsigned long *addr) +{ +	char addr_valid = 0; +	int i; + +	for (i = 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++) +		if ( dataflash_info[i].id +			&& ((((int) *addr) & 0xFF000000) == +			dataflash_info[i].logical_address)) { +			addr_valid = 1; +			break; +		} +	if (!addr_valid) { +		pFlash = (AT91PS_DataFlash) 0; +		return pFlash; +	} +	pFlash->pDataFlashDesc = &(dataflash_info[i].Desc); +	pFlash->pDevice = &(dataflash_info[i].Device); +	*addr -= dataflash_info[i].logical_address; +	return (pFlash); +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : addr_dataflash					     */ +/* Object              : Test if address is valid			     */ +/*---------------------------------------------------------------------------*/ +int addr_dataflash (unsigned long addr) +{ +	int addr_valid = 0; +	int i; + +	for (i = 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++) { +		if ((((int) addr) & 0xFF000000) == +			dataflash_info[i].logical_address) { +			addr_valid = 1; +			break; +		} +	} + +	return addr_valid; +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : size_dataflash					     */ +/* Object              : Test if address is valid regarding the size	     */ +/*---------------------------------------------------------------------------*/ +int size_dataflash (AT91PS_DataFlash pdataFlash, unsigned long addr, +			unsigned long size) +{ +	/* is outside 
the dataflash */ +	if (((int)addr & 0x0FFFFFFF) > (pdataFlash->pDevice->pages_size * +		pdataFlash->pDevice->pages_number)) return 0; +	/* is too large for the dataflash */ +	if (size > ((pdataFlash->pDevice->pages_size * +		pdataFlash->pDevice->pages_number) - +		((int)addr & 0x0FFFFFFF))) return 0; + +	return 1; +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : prot_dataflash					     */ +/* Object              : Test if destination area is protected		     */ +/*---------------------------------------------------------------------------*/ +int prot_dataflash (AT91PS_DataFlash pdataFlash, unsigned long addr) +{ +	int area; + +	/* find area */ +	for (area = 0; area < NB_DATAFLASH_AREA; area++) { +		if ((addr >= pdataFlash->pDevice->area_list[area].start) && +			(addr < pdataFlash->pDevice->area_list[area].end)) +			break; +	} +	if (area == NB_DATAFLASH_AREA) +		return -1; + +	/*test protection value*/ +	if (pdataFlash->pDevice->area_list[area].protected == FLAG_PROTECT_SET) +		return 0; +	if (pdataFlash->pDevice->area_list[area].protected == FLAG_PROTECT_INVALID) +		return 0; + +	return 1; +} + +/*--------------------------------------------------------------------------*/ +/* Function Name       : dataflash_real_protect				    */ +/* Object              : protect/unprotect area				    */ +/*--------------------------------------------------------------------------*/ +int dataflash_real_protect (int flag, unsigned long start_addr, +				unsigned long end_addr) +{ +	int i,j, area1, area2, addr_valid = 0; + +	/* find dataflash */ +	for (i = 0; i < CONFIG_SYS_MAX_DATAFLASH_BANKS; i++) { +		if ((((int) start_addr) & 0xF0000000) == +			dataflash_info[i].logical_address) { +				addr_valid = 1; +				break; +		} +	} +	if (!addr_valid) { +		return -1; +	} +	/* find start area */ +	for (area1 = 0; area1 < NB_DATAFLASH_AREA; area1++) { +		if (start_addr == dataflash_info[i].Device.area_list[area1].start) +			break; 
+	} +	if (area1 == NB_DATAFLASH_AREA) return -1; +	/* find end area */ +	for (area2 = 0; area2 < NB_DATAFLASH_AREA; area2++) { +		if (end_addr == dataflash_info[i].Device.area_list[area2].end) +			break; +	} +	if (area2 == NB_DATAFLASH_AREA) +		return -1; + +	/*set protection value*/ +	for(j = area1; j < area2 + 1 ; j++) +		if(dataflash_info[i].Device.area_list[j].protected +				!= FLAG_PROTECT_INVALID) { +			if (flag == 0) { +				dataflash_info[i].Device.area_list[j].protected +					= FLAG_PROTECT_CLEAR; +			} else { +				dataflash_info[i].Device.area_list[j].protected +					= FLAG_PROTECT_SET; +			} +		} + +	return (area2 - area1 + 1); +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : read_dataflash					     */ +/* Object              : dataflash memory read				     */ +/*---------------------------------------------------------------------------*/ +int read_dataflash (unsigned long addr, unsigned long size, char *result) +{ +	unsigned long AddrToRead = addr; +	AT91PS_DataFlash pFlash = &DataFlashInst; + +	pFlash = AT91F_DataflashSelect (pFlash, &AddrToRead); + +	if (pFlash == 0) +		return ERR_UNKNOWN_FLASH_TYPE; + +	if (size_dataflash(pFlash,addr,size) == 0) +		return ERR_INVAL; + +	return (AT91F_DataFlashRead (pFlash, AddrToRead, size, result)); +} + +/*---------------------------------------------------------------------------*/ +/* Function Name       : write_dataflash				     */ +/* Object              : write a block in dataflash			     */ +/*---------------------------------------------------------------------------*/ +int write_dataflash (unsigned long addr_dest, unsigned long addr_src, +			unsigned long size) +{ +	unsigned long AddrToWrite = addr_dest; +	AT91PS_DataFlash pFlash = &DataFlashInst; + +	pFlash = AT91F_DataflashSelect (pFlash, &AddrToWrite); + +	if (pFlash == 0) +		return ERR_UNKNOWN_FLASH_TYPE; + +	if (size_dataflash(pFlash,addr_dest,size) == 0) +		return ERR_INVAL; + +	if 
(prot_dataflash(pFlash,addr_dest) == 0) +		return ERR_PROTECTED; + +	if (AddrToWrite == -1) +		return -1; + +	return AT91F_DataFlashWrite (pFlash, (uchar *)addr_src, +						AddrToWrite, size); +} + +void dataflash_perror (int err) +{ +	switch (err) { +	case ERR_OK: +		break; +	case ERR_TIMOUT: +		printf("Timeout writing to DataFlash\n"); +		break; +	case ERR_PROTECTED: +		printf("Can't write to protected/invalid DataFlash sectors\n"); +		break; +	case ERR_INVAL: +		printf("Outside available DataFlash\n"); +		break; +	case ERR_UNKNOWN_FLASH_TYPE: +		printf("Unknown Type of DataFlash\n"); +		break; +	case ERR_PROG_ERROR: +		printf("General DataFlash Programming Error\n"); +		break; +	default: +		printf("%s[%d] FIXME: rc=%d\n", __FILE__, __LINE__, err); +		break; +	} +} diff --git a/roms/u-boot/drivers/mtd/ftsmc020.c b/roms/u-boot/drivers/mtd/ftsmc020.c new file mode 100644 index 00000000..e2e80822 --- /dev/null +++ b/roms/u-boot/drivers/mtd/ftsmc020.c @@ -0,0 +1,38 @@ +/* + * (C) Copyright 2009 Faraday Technology + * Po-Yu Chuang <ratbert@faraday-tech.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <config.h> +#include <common.h> +#include <asm/io.h> +#include <faraday/ftsmc020.h> + +struct ftsmc020_config { +	unsigned int	config; +	unsigned int	timing; +}; + +static void ftsmc020_setup_bank(unsigned int bank, struct ftsmc020_config *cfg) +{ +	struct ftsmc020 *smc = (struct ftsmc020 *)CONFIG_FTSMC020_BASE; + +	if (bank > 3) { +		printf("bank # %u invalid\n", bank); +		return; +	} + +	writel(cfg->config, &smc->bank[bank].cr); +	writel(cfg->timing, &smc->bank[bank].tpr); +} + +void ftsmc020_init(void) +{ +	struct ftsmc020_config config[] = CONFIG_SYS_FTSMC020_CONFIGS; +	int i; + +	for (i = 0; i < ARRAY_SIZE(config); i++) +		ftsmc020_setup_bank(i, &config[i]); +} diff --git a/roms/u-boot/drivers/mtd/jedec_flash.c b/roms/u-boot/drivers/mtd/jedec_flash.c new file mode 100644 index 00000000..593b9b84 --- /dev/null +++ b/roms/u-boot/drivers/mtd/jedec_flash.c 
@@ -0,0 +1,442 @@ +/* + * (C) Copyright 2007 + * Michael Schwingen, <michael@schwingen.org> + * + * based in great part on jedec_probe.c from linux kernel: + * (C) 2000 Red Hat. GPL'd. + * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +/* The DEBUG define must be before common to enable debugging */ +/*#define DEBUG*/ + +#include <common.h> +#include <asm/processor.h> +#include <asm/io.h> +#include <asm/byteorder.h> +#include <environment.h> + +#define P_ID_AMD_STD CFI_CMDSET_AMD_LEGACY + +/* AMD */ +#define AM29DL800BB	0x22CB +#define AM29DL800BT	0x224A + +#define AM29F400BB	0x22AB +#define AM29F800BB	0x2258 +#define AM29F800BT	0x22D6 +#define AM29LV400BB	0x22BA +#define AM29LV400BT	0x22B9 +#define AM29LV800BB	0x225B +#define AM29LV800BT	0x22DA +#define AM29LV160DT	0x22C4 +#define AM29LV160DB	0x2249 +#define AM29F017D	0x003D +#define AM29F016D	0x00AD +#define AM29F080	0x00D5 +#define AM29F040	0x00A4 +#define AM29LV040B	0x004F +#define AM29F032B	0x0041 +#define AM29F002T	0x00B0 + +/* SST */ +#define SST39LF800	0x2781 +#define SST39LF160	0x2782 +#define SST39VF1601	0x234b +#define SST39LF512	0x00D4 +#define SST39LF010	0x00D5 +#define SST39LF020	0x00D6 +#define SST39LF040	0x00D7 +#define SST39SF010A	0x00B5 +#define SST39SF020A	0x00B6 + +/* STM */ +#define STM29F400BB	0x00D6 + +/* MXIC */ +#define MX29LV040	0x004F + +/* WINBOND */ +#define W39L040A	0x00D6 + +/* AMIC */ +#define A29L040		0x0092 + +/* EON */ +#define EN29LV040A	0x004F + +/* + * Unlock address sets for AMD command sets. + * Intel command sets use the MTD_UADDR_UNNECESSARY. + * Each identifier, except MTD_UADDR_UNNECESSARY, and + * MTD_UADDR_NO_SUPPORT must be defined below in unlock_addrs[]. + * MTD_UADDR_NOT_SUPPORTED must be 0 so that structure + * initialization need not require initializing all of the + * unlock addresses for all bit widths. 
+ */ +enum uaddr { +	MTD_UADDR_NOT_SUPPORTED = 0,	/* data width not supported */ +	MTD_UADDR_0x0555_0x02AA, +	MTD_UADDR_0x0555_0x0AAA, +	MTD_UADDR_0x5555_0x2AAA, +	MTD_UADDR_0x0AAA_0x0555, +	MTD_UADDR_DONT_CARE,		/* Requires an arbitrary address */ +	MTD_UADDR_UNNECESSARY,		/* Does not require any address */ +}; + + +struct unlock_addr { +	u32 addr1; +	u32 addr2; +}; + + +/* + * I don't like the fact that the first entry in unlock_addrs[] + * exists, but is for MTD_UADDR_NOT_SUPPORTED - and, therefore, + * should not be used.  The  problem is that structures with + * initializers have extra fields initialized to 0.  It is _very_ + * desireable to have the unlock address entries for unsupported + * data widths automatically initialized - that means that + * MTD_UADDR_NOT_SUPPORTED must be 0 and the first entry here + * must go unused. + */ +static const struct unlock_addr  unlock_addrs[] = { +	[MTD_UADDR_NOT_SUPPORTED] = { +		.addr1 = 0xffff, +		.addr2 = 0xffff +	}, + +	[MTD_UADDR_0x0555_0x02AA] = { +		.addr1 = 0x0555, +		.addr2 = 0x02aa +	}, + +	[MTD_UADDR_0x0555_0x0AAA] = { +		.addr1 = 0x0555, +		.addr2 = 0x0aaa +	}, + +	[MTD_UADDR_0x5555_0x2AAA] = { +		.addr1 = 0x5555, +		.addr2 = 0x2aaa +	}, + +	[MTD_UADDR_0x0AAA_0x0555] = { +		.addr1 = 0x0AAA, +		.addr2 = 0x0555 +	}, + +	[MTD_UADDR_DONT_CARE] = { +		.addr1 = 0x0000,      /* Doesn't matter which address */ +		.addr2 = 0x0000       /* is used - must be last entry */ +	}, + +	[MTD_UADDR_UNNECESSARY] = { +		.addr1 = 0x0000, +		.addr2 = 0x0000 +	} +}; + + +struct amd_flash_info { +	const __u16 mfr_id; +	const __u16 dev_id; +	const char *name; +	const int DevSize; +	const int NumEraseRegions; +	const int CmdSet; +	const __u8 uaddr[4];		/* unlock addrs for 8, 16, 32, 64 */ +	const ulong regions[6]; +}; + +#define ERASEINFO(size,blocks) (size<<8)|(blocks-1) + +#define SIZE_64KiB  16 +#define SIZE_128KiB 17 +#define SIZE_256KiB 18 +#define SIZE_512KiB 19 +#define SIZE_1MiB   20 +#define SIZE_2MiB   21 +#define SIZE_4MiB 
  22 +#define SIZE_8MiB   23 + +static const struct amd_flash_info jedec_table[] = { +#ifdef CONFIG_SYS_FLASH_LEGACY_256Kx8 +	{ +		.mfr_id		= (u16)SST_MANUFACT, +		.dev_id		= SST39LF020, +		.name		= "SST 39LF020", +		.uaddr		= { +			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ +		}, +		.DevSize	= SIZE_256KiB, +		.CmdSet		= P_ID_AMD_STD, +		.NumEraseRegions= 1, +		.regions	= { +			ERASEINFO(0x01000,64), +		} +	}, +#endif +#ifdef CONFIG_SYS_FLASH_LEGACY_512Kx8 +	{ +		.mfr_id		= (u16)AMD_MANUFACT, +		.dev_id		= AM29LV040B, +		.name		= "AMD AM29LV040B", +		.uaddr		= { +			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */ +		}, +		.DevSize	= SIZE_512KiB, +		.CmdSet		= P_ID_AMD_STD, +		.NumEraseRegions= 1, +		.regions	= { +			ERASEINFO(0x10000,8), +		} +	}, +	{ +		.mfr_id		= (u16)SST_MANUFACT, +		.dev_id		= SST39LF040, +		.name		= "SST 39LF040", +		.uaddr		= { +			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ +		}, +		.DevSize	= SIZE_512KiB, +		.CmdSet		= P_ID_AMD_STD, +		.NumEraseRegions= 1, +		.regions	= { +			ERASEINFO(0x01000,128), +		} +	}, +	{ +		.mfr_id		= (u16)STM_MANUFACT, +		.dev_id		= STM_ID_M29W040B, +		.name		= "ST Micro M29W040B", +		.uaddr		= { +			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */ +		}, +		.DevSize	= SIZE_512KiB, +		.CmdSet		= P_ID_AMD_STD, +		.NumEraseRegions= 1, +		.regions	= { +			ERASEINFO(0x10000,8), +		} +	}, +	{ +		.mfr_id		= (u16)MX_MANUFACT, +		.dev_id		= MX29LV040, +		.name		= "MXIC MX29LV040", +		.uaddr		= { +			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */ +		}, +		.DevSize	= SIZE_512KiB, +		.CmdSet		= P_ID_AMD_STD, +		.NumEraseRegions= 1, +		.regions	= { +			ERASEINFO(0x10000, 8), +		} +	}, +	{ +		.mfr_id		= (u16)WINB_MANUFACT, +		.dev_id		= W39L040A, +		.name		= "WINBOND W39L040A", +		.uaddr		= { +			[0] = MTD_UADDR_0x5555_0x2AAA /* x8 */ +		}, +		.DevSize	= SIZE_512KiB, +		.CmdSet		= P_ID_AMD_STD, +		.NumEraseRegions= 1, +		.regions	= { +			ERASEINFO(0x10000, 8), +		} +	}, +	{ +		.mfr_id		= (u16)AMIC_MANUFACT, +		.dev_id		= A29L040, +		.name		= "AMIC A29L040", +		
.uaddr		= { +			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */ +		}, +		.DevSize	= SIZE_512KiB, +		.CmdSet		= P_ID_AMD_STD, +		.NumEraseRegions= 1, +		.regions	= { +			ERASEINFO(0x10000, 8), +		} +	}, +	{ +		.mfr_id		= (u16)EON_MANUFACT, +		.dev_id		= EN29LV040A, +		.name		= "EON EN29LV040A", +		.uaddr		= { +			[0] = MTD_UADDR_0x0555_0x02AA /* x8 */ +		}, +		.DevSize	= SIZE_512KiB, +		.CmdSet		= P_ID_AMD_STD, +		.NumEraseRegions= 1, +		.regions	= { +			ERASEINFO(0x10000, 8), +		} +	}, +#endif +#ifdef CONFIG_SYS_FLASH_LEGACY_512Kx16 +	{ +		.mfr_id		= (u16)AMD_MANUFACT, +		.dev_id		= AM29F400BB, +		.name		= "AMD AM29F400BB", +		.uaddr		= { +			[1] = MTD_UADDR_0x0555_0x02AA /* x16 */ +		}, +		.DevSize	= SIZE_512KiB, +		.CmdSet		= CFI_CMDSET_AMD_LEGACY, +		.NumEraseRegions= 4, +		.regions	= { +			ERASEINFO(0x04000, 1), +			ERASEINFO(0x02000, 2), +			ERASEINFO(0x08000, 1), +			ERASEINFO(0x10000, 7), +		} +	}, +	{ +		.mfr_id		= (u16)AMD_MANUFACT, +		.dev_id		= AM29LV400BB, +		.name		= "AMD AM29LV400BB", +		.uaddr		= { +			[1] = MTD_UADDR_0x0555_0x02AA /* x16 */ +		}, +		.DevSize	= SIZE_512KiB, +		.CmdSet		= CFI_CMDSET_AMD_LEGACY, +		.NumEraseRegions= 4, +		.regions	= { +			ERASEINFO(0x04000,1), +			ERASEINFO(0x02000,2), +			ERASEINFO(0x08000,1), +			ERASEINFO(0x10000,7), +		} +	}, +	{ +		.mfr_id		= (u16)AMD_MANUFACT, +		.dev_id		= AM29LV800BB, +		.name		= "AMD AM29LV800BB", +		.uaddr		= { +			[1] = MTD_UADDR_0x0555_0x02AA /* x16 */ +		}, +		.DevSize	= SIZE_1MiB, +		.CmdSet		= CFI_CMDSET_AMD_LEGACY, +		.NumEraseRegions= 4, +		.regions	= { +			ERASEINFO(0x04000, 1), +			ERASEINFO(0x02000, 2), +			ERASEINFO(0x08000, 1), +			ERASEINFO(0x10000, 15), +		} +	}, +	{ +		.mfr_id		= (u16)STM_MANUFACT, +		.dev_id		= STM29F400BB, +		.name		= "ST Micro M29F400BB", +		.uaddr		= { +			[1] = MTD_UADDR_0x0555_0x02AA /* x16 */ +		}, +		.DevSize		= SIZE_512KiB, +		.CmdSet			= CFI_CMDSET_AMD_LEGACY, +		.NumEraseRegions	= 4, +		.regions		= { +			ERASEINFO(0x04000, 1), +			ERASEINFO(0x02000, 2), +			
ERASEINFO(0x08000, 1), +			ERASEINFO(0x10000, 7), +		} +	}, +#endif +}; + +static inline void fill_info(flash_info_t *info, const struct amd_flash_info *jedec_entry, ulong base) +{ +	int i,j; +	int sect_cnt; +	int size_ratio; +	int total_size; +	enum uaddr uaddr_idx; + +	size_ratio = info->portwidth / info->chipwidth; + +	debug("Found JEDEC Flash: %s\n", jedec_entry->name); +	info->vendor = jedec_entry->CmdSet; +	/* Todo: do we need device-specific timeouts? */ +	info->erase_blk_tout = 30000; +	info->buffer_write_tout = 1000; +	info->write_tout = 100; +	info->name = jedec_entry->name; + +	/* copy unlock addresses from device table to CFI info struct. This +	   is just here because the addresses are in the table anyway - if +	   the flash is not detected due to wrong unlock addresses, +	   flash_detect_legacy would have to try all of them before we even +	   get here. */ +	switch(info->chipwidth) { +	case FLASH_CFI_8BIT: +		uaddr_idx = jedec_entry->uaddr[0]; +		break; +	case FLASH_CFI_16BIT: +		uaddr_idx = jedec_entry->uaddr[1]; +		break; +	case FLASH_CFI_32BIT: +		uaddr_idx = jedec_entry->uaddr[2]; +		break; +	default: +		uaddr_idx = MTD_UADDR_NOT_SUPPORTED; +		break; +	} + +	debug("unlock address index %d\n", uaddr_idx); +	info->addr_unlock1 = unlock_addrs[uaddr_idx].addr1; +	info->addr_unlock2 = unlock_addrs[uaddr_idx].addr2; +	debug("unlock addresses are 0x%lx/0x%lx\n", +		info->addr_unlock1, info->addr_unlock2); + +	sect_cnt = 0; +	total_size = 0; +	for (i = 0; i < jedec_entry->NumEraseRegions; i++) { +		ulong erase_region_size = jedec_entry->regions[i] >> 8; +		ulong erase_region_count = (jedec_entry->regions[i] & 0xff) + 1; + +		total_size += erase_region_size * erase_region_count; +		debug("erase_region_count = %ld erase_region_size = %ld\n", +		       erase_region_count, erase_region_size); +		for (j = 0; j < erase_region_count; j++) { +			if (sect_cnt >= CONFIG_SYS_MAX_FLASH_SECT) { +				printf("ERROR: too many flash sectors\n"); +				break; +			} +			
info->start[sect_cnt] = base; +			base += (erase_region_size * size_ratio); +			sect_cnt++; +		} +	} +	info->sector_count = sect_cnt; +	info->size = total_size * size_ratio; +} + +/*----------------------------------------------------------------------- + * match jedec ids against table. If a match is found, fill flash_info entry + */ +int jedec_flash_match(flash_info_t *info, ulong base) +{ +	int ret = 0; +	int i; +	ulong mask = 0xFFFF; +	if (info->chipwidth == 1) +		mask = 0xFF; + +	for (i = 0; i < ARRAY_SIZE(jedec_table); i++) { +		if ((jedec_table[i].mfr_id & mask) == (info->manufacturer_id & mask) && +		    (jedec_table[i].dev_id & mask) == (info->device_id & mask)) { +			fill_info(info, &jedec_table[i], base); +			ret = 1; +			break; +		} +	} +	return ret; +} diff --git a/roms/u-boot/drivers/mtd/mtdconcat.c b/roms/u-boot/drivers/mtd/mtdconcat.c new file mode 100644 index 00000000..31e4289b --- /dev/null +++ b/roms/u-boot/drivers/mtd/mtdconcat.c @@ -0,0 +1,773 @@ +/* + * MTD device concatenation layer + * + * (C) 2002 Robert Kaiser <rkaiser@sysgo.de> + * + * NAND support by Christian Gan <cgan@iders.ca> + * + * This code is GPL + */ + +#include <linux/mtd/mtd.h> +#include <linux/compat.h> +#include <linux/mtd/concat.h> +#include <ubi_uboot.h> + +/* + * Our storage structure: + * Subdev points to an array of pointers to struct mtd_info objects + * which is allocated along with this structure + * + */ +struct mtd_concat { +	struct mtd_info mtd; +	int num_subdev; +	struct mtd_info **subdev; +}; + +/* + * how to calculate the size required for the above structure, + * including the pointer array subdev points to: + */ +#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\ +	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *))) + +/* + * Given a pointer to the MTD object in the mtd_concat structure, + * we can retrieve the pointer to that structure with this macro. 
+ */ +#define CONCAT(x)  ((struct mtd_concat *)(x)) + +/* + * MTD methods which look up the relevant subdevice, translate the + * effective address and pass through to the subdevice. + */ + +static int +concat_read(struct mtd_info *mtd, loff_t from, size_t len, +	    size_t * retlen, u_char * buf) +{ +	struct mtd_concat *concat = CONCAT(mtd); +	int ret = 0, err; +	int i; + +	*retlen = 0; + +	for (i = 0; i < concat->num_subdev; i++) { +		struct mtd_info *subdev = concat->subdev[i]; +		size_t size, retsize; + +		if (from >= subdev->size) { +			/* Not destined for this subdev */ +			size = 0; +			from -= subdev->size; +			continue; +		} +		if (from + len > subdev->size) +			/* First part goes into this subdev */ +			size = subdev->size - from; +		else +			/* Entire transaction goes into this subdev */ +			size = len; + +		err = mtd_read(subdev, from, size, &retsize, buf); + +		/* Save information about bitflips! */ +		if (unlikely(err)) { +			if (mtd_is_eccerr(err)) { +				mtd->ecc_stats.failed++; +				ret = err; +			} else if (mtd_is_bitflip(err)) { +				mtd->ecc_stats.corrected++; +				/* Do not overwrite -EBADMSG !! 
*/ +				if (!ret) +					ret = err; +			} else +				return err; +		} + +		*retlen += retsize; +		len -= size; +		if (len == 0) +			return ret; + +		buf += size; +		from = 0; +	} +	return -EINVAL; +} + +static int +concat_write(struct mtd_info *mtd, loff_t to, size_t len, +	     size_t * retlen, const u_char * buf) +{ +	struct mtd_concat *concat = CONCAT(mtd); +	int err = -EINVAL; +	int i; + +	*retlen = 0; + +	for (i = 0; i < concat->num_subdev; i++) { +		struct mtd_info *subdev = concat->subdev[i]; +		size_t size, retsize; + +		if (to >= subdev->size) { +			size = 0; +			to -= subdev->size; +			continue; +		} +		if (to + len > subdev->size) +			size = subdev->size - to; +		else +			size = len; + +		err = mtd_write(subdev, to, size, &retsize, buf); +		if (err) +			break; + +		*retlen += retsize; +		len -= size; +		if (len == 0) +			break; + +		err = -EINVAL; +		buf += size; +		to = 0; +	} +	return err; +} + +static int +concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) +{ +	struct mtd_concat *concat = CONCAT(mtd); +	struct mtd_oob_ops devops = *ops; +	int i, err, ret = 0; + +	ops->retlen = ops->oobretlen = 0; + +	for (i = 0; i < concat->num_subdev; i++) { +		struct mtd_info *subdev = concat->subdev[i]; + +		if (from >= subdev->size) { +			from -= subdev->size; +			continue; +		} + +		/* partial read ? */ +		if (from + devops.len > subdev->size) +			devops.len = subdev->size - from; + +		err = mtd_read_oob(subdev, from, &devops); +		ops->retlen += devops.retlen; +		ops->oobretlen += devops.oobretlen; + +		/* Save information about bitflips! */ +		if (unlikely(err)) { +			if (mtd_is_eccerr(err)) { +				mtd->ecc_stats.failed++; +				ret = err; +			} else if (mtd_is_bitflip(err)) { +				mtd->ecc_stats.corrected++; +				/* Do not overwrite -EBADMSG !! 
*/ +				if (!ret) +					ret = err; +			} else +				return err; +		} + +		if (devops.datbuf) { +			devops.len = ops->len - ops->retlen; +			if (!devops.len) +				return ret; +			devops.datbuf += devops.retlen; +		} +		if (devops.oobbuf) { +			devops.ooblen = ops->ooblen - ops->oobretlen; +			if (!devops.ooblen) +				return ret; +			devops.oobbuf += ops->oobretlen; +		} + +		from = 0; +	} +	return -EINVAL; +} + +static int +concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) +{ +	struct mtd_concat *concat = CONCAT(mtd); +	struct mtd_oob_ops devops = *ops; +	int i, err; + +	if (!(mtd->flags & MTD_WRITEABLE)) +		return -EROFS; + +	ops->retlen = 0; + +	for (i = 0; i < concat->num_subdev; i++) { +		struct mtd_info *subdev = concat->subdev[i]; + +		if (to >= subdev->size) { +			to -= subdev->size; +			continue; +		} + +		/* partial write ? */ +		if (to + devops.len > subdev->size) +			devops.len = subdev->size - to; + +		err = mtd_write_oob(subdev, to, &devops); +		ops->retlen += devops.retlen; +		if (err) +			return err; + +		if (devops.datbuf) { +			devops.len = ops->len - ops->retlen; +			if (!devops.len) +				return 0; +			devops.datbuf += devops.retlen; +		} +		if (devops.oobbuf) { +			devops.ooblen = ops->ooblen - ops->oobretlen; +			if (!devops.ooblen) +				return 0; +			devops.oobbuf += devops.oobretlen; +		} +		to = 0; +	} +	return -EINVAL; +} + +static void concat_erase_callback(struct erase_info *instr) +{ +	/* Nothing to do here in U-Boot */ +} + +static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase) +{ +	int err; +	wait_queue_head_t waitq; +	DECLARE_WAITQUEUE(wait, current); + +	/* +	 * This code was stol^H^H^H^Hinspired by mtdchar.c +	 */ +	init_waitqueue_head(&waitq); + +	erase->mtd = mtd; +	erase->callback = concat_erase_callback; +	erase->priv = (unsigned long) &waitq; + +	/* +	 * FIXME: Allow INTERRUPTIBLE. Which means +	 * not having the wait_queue head on the stack. 
+	 */ +	err = mtd_erase(mtd, erase); +	if (!err) { +		set_current_state(TASK_UNINTERRUPTIBLE); +		add_wait_queue(&waitq, &wait); +		if (erase->state != MTD_ERASE_DONE +		    && erase->state != MTD_ERASE_FAILED) +			schedule(); +		remove_wait_queue(&waitq, &wait); +		set_current_state(TASK_RUNNING); + +		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0; +	} +	return err; +} + +static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) +{ +	struct mtd_concat *concat = CONCAT(mtd); +	struct mtd_info *subdev; +	int i, err; +	uint64_t length, offset = 0; +	struct erase_info *erase; + +	/* +	 * Check for proper erase block alignment of the to-be-erased area. +	 * It is easier to do this based on the super device's erase +	 * region info rather than looking at each particular sub-device +	 * in turn. +	 */ +	if (!concat->mtd.numeraseregions) { +		/* the easy case: device has uniform erase block size */ +		if (instr->addr & (concat->mtd.erasesize - 1)) +			return -EINVAL; +		if (instr->len & (concat->mtd.erasesize - 1)) +			return -EINVAL; +	} else { +		/* device has variable erase size */ +		struct mtd_erase_region_info *erase_regions = +		    concat->mtd.eraseregions; + +		/* +		 * Find the erase region where the to-be-erased area begins: +		 */ +		for (i = 0; i < concat->mtd.numeraseregions && +		     instr->addr >= erase_regions[i].offset; i++) ; +		--i; + +		/* +		 * Now erase_regions[i] is the region in which the +		 * to-be-erased area begins. 
Verify that the starting +		 * offset is aligned to this region's erase size: +		 */ +		if (instr->addr & (erase_regions[i].erasesize - 1)) +			return -EINVAL; + +		/* +		 * now find the erase region where the to-be-erased area ends: +		 */ +		for (; i < concat->mtd.numeraseregions && +		     (instr->addr + instr->len) >= erase_regions[i].offset; +		     ++i) ; +		--i; +		/* +		 * check if the ending offset is aligned to this region's erase size +		 */ +		if ((instr->addr + instr->len) & (erase_regions[i].erasesize - +						  1)) +			return -EINVAL; +	} + +	/* make a local copy of instr to avoid modifying the caller's struct */ +	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL); + +	if (!erase) +		return -ENOMEM; + +	*erase = *instr; +	length = instr->len; + +	/* +	 * find the subdevice where the to-be-erased area begins, adjust +	 * starting offset to be relative to the subdevice start +	 */ +	for (i = 0; i < concat->num_subdev; i++) { +		subdev = concat->subdev[i]; +		if (subdev->size <= erase->addr) { +			erase->addr -= subdev->size; +			offset += subdev->size; +		} else { +			break; +		} +	} + +	/* must never happen since size limit has been verified above */ +	BUG_ON(i >= concat->num_subdev); + +	/* now do the erase: */ +	err = 0; +	for (; length > 0; i++) { +		/* loop for all subdevices affected by this request */ +		subdev = concat->subdev[i];	/* get current subdevice */ + +		/* limit length to subdevice's size: */ +		if (erase->addr + length > subdev->size) +			erase->len = subdev->size - erase->addr; +		else +			erase->len = length; + +		length -= erase->len; +		if ((err = concat_dev_erase(subdev, erase))) { +			/* sanity check: should never happen since +			 * block alignment has been checked above */ +			BUG_ON(err == -EINVAL); +			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN) +				instr->fail_addr = erase->fail_addr + offset; +			break; +		} +		/* +		 * erase->addr specifies the offset of the area to be +		 * erased *within the current 
subdevice*. It can be +		 * non-zero only the first time through this loop, i.e. +		 * for the first subdevice where blocks need to be erased. +		 * All the following erases must begin at the start of the +		 * current subdevice, i.e. at offset zero. +		 */ +		erase->addr = 0; +		offset += subdev->size; +	} +	instr->state = erase->state; +	kfree(erase); +	if (err) +		return err; + +	if (instr->callback) +		instr->callback(instr); +	return 0; +} + +static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ +	struct mtd_concat *concat = CONCAT(mtd); +	int i, err = -EINVAL; + +	for (i = 0; i < concat->num_subdev; i++) { +		struct mtd_info *subdev = concat->subdev[i]; +		uint64_t size; + +		if (ofs >= subdev->size) { +			size = 0; +			ofs -= subdev->size; +			continue; +		} +		if (ofs + len > subdev->size) +			size = subdev->size - ofs; +		else +			size = len; + +		err = mtd_lock(subdev, ofs, size); + +		if (err) +			break; + +		len -= size; +		if (len == 0) +			break; + +		err = -EINVAL; +		ofs = 0; +	} + +	return err; +} + +static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ +	struct mtd_concat *concat = CONCAT(mtd); +	int i, err = 0; + +	for (i = 0; i < concat->num_subdev; i++) { +		struct mtd_info *subdev = concat->subdev[i]; +		uint64_t size; + +		if (ofs >= subdev->size) { +			size = 0; +			ofs -= subdev->size; +			continue; +		} +		if (ofs + len > subdev->size) +			size = subdev->size - ofs; +		else +			size = len; + +		err = mtd_unlock(subdev, ofs, size); + +		if (err) +			break; + +		len -= size; +		if (len == 0) +			break; + +		err = -EINVAL; +		ofs = 0; +	} + +	return err; +} + +static void concat_sync(struct mtd_info *mtd) +{ +	struct mtd_concat *concat = CONCAT(mtd); +	int i; + +	for (i = 0; i < concat->num_subdev; i++) { +		struct mtd_info *subdev = concat->subdev[i]; +		mtd_sync(subdev); +	} +} + +static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs) +{ +	struct mtd_concat *concat = CONCAT(mtd); +	int i, 
res = 0; + +	if (!mtd_can_have_bb(concat->subdev[0])) +		return res; + +	for (i = 0; i < concat->num_subdev; i++) { +		struct mtd_info *subdev = concat->subdev[i]; + +		if (ofs >= subdev->size) { +			ofs -= subdev->size; +			continue; +		} + +		res = mtd_block_isbad(subdev, ofs); +		break; +	} + +	return res; +} + +static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ +	struct mtd_concat *concat = CONCAT(mtd); +	int i, err = -EINVAL; + +	if (!mtd_can_have_bb(concat->subdev[0])) +		return 0; + +	for (i = 0; i < concat->num_subdev; i++) { +		struct mtd_info *subdev = concat->subdev[i]; + +		if (ofs >= subdev->size) { +			ofs -= subdev->size; +			continue; +		} + +		err = mtd_block_markbad(subdev, ofs); +		if (!err) +			mtd->ecc_stats.badblocks++; +		break; +	} + +	return err; +} + +/* + * This function constructs a virtual MTD device by concatenating + * num_devs MTD devices. A pointer to the new device object is + * stored to *new_dev upon success. This function does _not_ + * register any devices: this is the caller's responsibility. 
+ */ +struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */ +				   int num_devs,	/* number of subdevices      */ +				   const char *name) +{				/* name for the new device   */ +	int i; +	size_t size; +	struct mtd_concat *concat; +	uint32_t max_erasesize, curr_erasesize; +	int num_erase_region; + +	debug("Concatenating MTD devices:\n"); +	for (i = 0; i < num_devs; i++) +		debug("(%d): \"%s\"\n", i, subdev[i]->name); +	debug("into device \"%s\"\n", name); + +	/* allocate the device structure */ +	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs); +	concat = kzalloc(size, GFP_KERNEL); +	if (!concat) { +		printk +		    ("memory allocation error while creating concatenated device \"%s\"\n", +		     name); +		return NULL; +	} +	concat->subdev = (struct mtd_info **) (concat + 1); + +	/* +	 * Set up the new "super" device's MTD object structure, check for +	 * incompatibilites between the subdevices. +	 */ +	concat->mtd.type = subdev[0]->type; +	concat->mtd.flags = subdev[0]->flags; +	concat->mtd.size = subdev[0]->size; +	concat->mtd.erasesize = subdev[0]->erasesize; +	concat->mtd.writesize = subdev[0]->writesize; +	concat->mtd.subpage_sft = subdev[0]->subpage_sft; +	concat->mtd.oobsize = subdev[0]->oobsize; +	concat->mtd.oobavail = subdev[0]->oobavail; +	if (subdev[0]->_read_oob) +		concat->mtd._read_oob = concat_read_oob; +	if (subdev[0]->_write_oob) +		concat->mtd._write_oob = concat_write_oob; +	if (subdev[0]->_block_isbad) +		concat->mtd._block_isbad = concat_block_isbad; +	if (subdev[0]->_block_markbad) +		concat->mtd._block_markbad = concat_block_markbad; + +	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks; + +	concat->subdev[0] = subdev[0]; + +	for (i = 1; i < num_devs; i++) { +		if (concat->mtd.type != subdev[i]->type) { +			kfree(concat); +			printk("Incompatible device type on \"%s\"\n", +			       subdev[i]->name); +			return NULL; +		} +		if (concat->mtd.flags != subdev[i]->flags) { +			/* +			 * Expect all 
flags except MTD_WRITEABLE to be +			 * equal on all subdevices. +			 */ +			if ((concat->mtd.flags ^ subdev[i]-> +			     flags) & ~MTD_WRITEABLE) { +				kfree(concat); +				printk("Incompatible device flags on \"%s\"\n", +				       subdev[i]->name); +				return NULL; +			} else +				/* if writeable attribute differs, +				   make super device writeable */ +				concat->mtd.flags |= +				    subdev[i]->flags & MTD_WRITEABLE; +		} + +		concat->mtd.size += subdev[i]->size; +		concat->mtd.ecc_stats.badblocks += +			subdev[i]->ecc_stats.badblocks; +		if (concat->mtd.writesize   !=  subdev[i]->writesize || +		    concat->mtd.subpage_sft != subdev[i]->subpage_sft || +		    concat->mtd.oobsize    !=  subdev[i]->oobsize || +		    !concat->mtd._read_oob  != !subdev[i]->_read_oob || +		    !concat->mtd._write_oob != !subdev[i]->_write_oob) { +			kfree(concat); +			printk("Incompatible OOB or ECC data on \"%s\"\n", +			       subdev[i]->name); +			return NULL; +		} +		concat->subdev[i] = subdev[i]; + +	} + +	concat->mtd.ecclayout = subdev[0]->ecclayout; + +	concat->num_subdev = num_devs; +	concat->mtd.name = name; + +	concat->mtd._erase = concat_erase; +	concat->mtd._read = concat_read; +	concat->mtd._write = concat_write; +	concat->mtd._sync = concat_sync; +	concat->mtd._lock = concat_lock; +	concat->mtd._unlock = concat_unlock; + +	/* +	 * Combine the erase block size info of the subdevices: +	 * +	 * first, walk the map of the new device and see how +	 * many changes in erase size we have +	 */ +	max_erasesize = curr_erasesize = subdev[0]->erasesize; +	num_erase_region = 1; +	for (i = 0; i < num_devs; i++) { +		if (subdev[i]->numeraseregions == 0) { +			/* current subdevice has uniform erase size */ +			if (subdev[i]->erasesize != curr_erasesize) { +				/* if it differs from the last subdevice's erase size, count it */ +				++num_erase_region; +				curr_erasesize = subdev[i]->erasesize; +				if (curr_erasesize > max_erasesize) +					max_erasesize = curr_erasesize; +			} +	
	} else { +			/* current subdevice has variable erase size */ +			int j; +			for (j = 0; j < subdev[i]->numeraseregions; j++) { + +				/* walk the list of erase regions, count any changes */ +				if (subdev[i]->eraseregions[j].erasesize != +				    curr_erasesize) { +					++num_erase_region; +					curr_erasesize = +					    subdev[i]->eraseregions[j]. +					    erasesize; +					if (curr_erasesize > max_erasesize) +						max_erasesize = curr_erasesize; +				} +			} +		} +	} + +	if (num_erase_region == 1) { +		/* +		 * All subdevices have the same uniform erase size. +		 * This is easy: +		 */ +		concat->mtd.erasesize = curr_erasesize; +		concat->mtd.numeraseregions = 0; +	} else { +		uint64_t tmp64; + +		/* +		 * erase block size varies across the subdevices: allocate +		 * space to store the data describing the variable erase regions +		 */ +		struct mtd_erase_region_info *erase_region_p; +		uint64_t begin, position; + +		concat->mtd.erasesize = max_erasesize; +		concat->mtd.numeraseregions = num_erase_region; +		concat->mtd.eraseregions = erase_region_p = +		    kmalloc(num_erase_region * +			    sizeof (struct mtd_erase_region_info), GFP_KERNEL); +		if (!erase_region_p) { +			kfree(concat); +			printk +			    ("memory allocation error while creating erase region list" +			     " for device \"%s\"\n", name); +			return NULL; +		} + +		/* +		 * walk the map of the new device once more and fill in +		 * in erase region info: +		 */ +		curr_erasesize = subdev[0]->erasesize; +		begin = position = 0; +		for (i = 0; i < num_devs; i++) { +			if (subdev[i]->numeraseregions == 0) { +				/* current subdevice has uniform erase size */ +				if (subdev[i]->erasesize != curr_erasesize) { +					/* +					 *  fill in an mtd_erase_region_info structure for the area +					 *  we have walked so far: +					 */ +					erase_region_p->offset = begin; +					erase_region_p->erasesize = +					    curr_erasesize; +					tmp64 = position - begin; +					do_div(tmp64, curr_erasesize); +					
erase_region_p->numblocks = tmp64; +					begin = position; + +					curr_erasesize = subdev[i]->erasesize; +					++erase_region_p; +				} +				position += subdev[i]->size; +			} else { +				/* current subdevice has variable erase size */ +				int j; +				for (j = 0; j < subdev[i]->numeraseregions; j++) { +					/* walk the list of erase regions, count any changes */ +					if (subdev[i]->eraseregions[j]. +					    erasesize != curr_erasesize) { +						erase_region_p->offset = begin; +						erase_region_p->erasesize = +						    curr_erasesize; +						tmp64 = position - begin; +						do_div(tmp64, curr_erasesize); +						erase_region_p->numblocks = tmp64; +						begin = position; + +						curr_erasesize = +						    subdev[i]->eraseregions[j]. +						    erasesize; +						++erase_region_p; +					} +					position += +					    subdev[i]->eraseregions[j]. +					    numblocks * (uint64_t)curr_erasesize; +				} +			} +		} +		/* Now write the final entry */ +		erase_region_p->offset = begin; +		erase_region_p->erasesize = curr_erasesize; +		tmp64 = position - begin; +		do_div(tmp64, curr_erasesize); +		erase_region_p->numblocks = tmp64; +	} + +	return &concat->mtd; +} diff --git a/roms/u-boot/drivers/mtd/mtdcore.c b/roms/u-boot/drivers/mtd/mtdcore.c new file mode 100644 index 00000000..0a38fbef --- /dev/null +++ b/roms/u-boot/drivers/mtd/mtdcore.c @@ -0,0 +1,390 @@ +/* + * Core registration and callback routines for MTD + * drivers and users. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/mtd/mtd.h> +#include <linux/compat.h> +#include <ubi_uboot.h> + +struct mtd_info *mtd_table[MAX_MTD_DEVICES]; + +int add_mtd_device(struct mtd_info *mtd) +{ +	int i; + +	BUG_ON(mtd->writesize == 0); + +	for (i = 0; i < MAX_MTD_DEVICES; i++) +		if (!mtd_table[i]) { +			mtd_table[i] = mtd; +			mtd->index = i; +			mtd->usecount = 0; + +			/* default value if not set by driver */ +			if (mtd->bitflip_threshold == 0) +				mtd->bitflip_threshold = mtd->ecc_strength; + + +			/* No need to get a refcount on the module containing +			   the notifier, since we hold the mtd_table_mutex */ + +			/* We _know_ we aren't being removed, because +			   our caller is still holding us here. So none +			   of this try_ nonsense, and no bitching about it +			   either. :) */ +			return 0; +		} + +	return 1; +} + +/** + *      del_mtd_device - unregister an MTD device + *      @mtd: pointer to MTD device info structure + * + *      Remove a device from the list of MTD devices present in the system, + *      and notify each currently active MTD 'user' of its departure. + *      Returns zero on success or 1 on failure, which currently will happen + *      if the requested device does not appear to be present in the list. 
+ */ +int del_mtd_device(struct mtd_info *mtd) +{ +	int ret; + +	if (mtd_table[mtd->index] != mtd) { +		ret = -ENODEV; +	} else if (mtd->usecount) { +		printk(KERN_NOTICE "Removing MTD device #%d (%s)" +				" with use count %d\n", +				mtd->index, mtd->name, mtd->usecount); +		ret = -EBUSY; +	} else { +		/* No need to get a refcount on the module containing +		 * the notifier, since we hold the mtd_table_mutex */ +		mtd_table[mtd->index] = NULL; + +		ret = 0; +	} + +	return ret; +} + +/** + *	get_mtd_device - obtain a validated handle for an MTD device + *	@mtd: last known address of the required MTD device + *	@num: internal device number of the required MTD device + * + *	Given a number and NULL address, return the num'th entry in the device + *      table, if any.  Given an address and num == -1, search the device table + *      for a device with that address and return if it's still present. Given + *      both, return the num'th driver only if its address matches. Return + *      error code if not. + */ +struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num) +{ +	struct mtd_info *ret = NULL; +	int i, err = -ENODEV; + +	if (num == -1) { +		for (i = 0; i < MAX_MTD_DEVICES; i++) +			if (mtd_table[i] == mtd) +				ret = mtd_table[i]; +	} else if (num < MAX_MTD_DEVICES) { +		ret = mtd_table[num]; +		if (mtd && mtd != ret) +			ret = NULL; +	} + +	if (!ret) +		goto out_unlock; + +	ret->usecount++; +	return ret; + +out_unlock: +	return ERR_PTR(err); +} + +/** + *      get_mtd_device_nm - obtain a validated handle for an MTD device by + *      device name + *      @name: MTD device name to open + * + *      This function returns MTD device description structure in case of + *      success and an error code in case of failure. 
+ */ +struct mtd_info *get_mtd_device_nm(const char *name) +{ +	int i, err = -ENODEV; +	struct mtd_info *mtd = NULL; + +	for (i = 0; i < MAX_MTD_DEVICES; i++) { +		if (mtd_table[i] && !strcmp(name, mtd_table[i]->name)) { +			mtd = mtd_table[i]; +			break; +		} +	} + +	if (!mtd) +		goto out_unlock; + +	mtd->usecount++; +	return mtd; + +out_unlock: +	return ERR_PTR(err); +} + +void put_mtd_device(struct mtd_info *mtd) +{ +	int c; + +	c = --mtd->usecount; +	BUG_ON(c < 0); +} + +#if defined(CONFIG_CMD_MTDPARTS_SPREAD) +/** + * mtd_get_len_incl_bad + * + * Check if length including bad blocks fits into device. + * + * @param mtd an MTD device + * @param offset offset in flash + * @param length image length + * @return image length including bad blocks in *len_incl_bad and whether or not + *         the length returned was truncated in *truncated + */ +void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset, +			  const uint64_t length, uint64_t *len_incl_bad, +			  int *truncated) +{ +	*truncated = 0; +	*len_incl_bad = 0; + +	if (!mtd->block_isbad) { +		*len_incl_bad = length; +		return; +	} + +	uint64_t len_excl_bad = 0; +	uint64_t block_len; + +	while (len_excl_bad < length) { +		if (offset >= mtd->size) { +			*truncated = 1; +			return; +		} + +		block_len = mtd->erasesize - (offset & (mtd->erasesize - 1)); + +		if (!mtd->block_isbad(mtd, offset & ~(mtd->erasesize - 1))) +			len_excl_bad += block_len; + +		*len_incl_bad += block_len; +		offset       += block_len; +	} +} +#endif /* defined(CONFIG_CMD_MTDPARTS_SPREAD) */ + + /* + * Erase is an asynchronous operation.  Device drivers are supposed + * to call instr->callback() whenever the operation completes, even + * if it completes with a failure. + * Callers are supposed to pass a callback function and wait for it + * to be called before writing to the block. 
+ */ +int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) +{ +	if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr) +		return -EINVAL; +	if (!(mtd->flags & MTD_WRITEABLE)) +		return -EROFS; +	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; +	if (!instr->len) { +		instr->state = MTD_ERASE_DONE; +		mtd_erase_callback(instr); +		return 0; +	} +	return mtd->_erase(mtd, instr); +} + +int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, +	     u_char *buf) +{ +	int ret_code; +	if (from < 0 || from > mtd->size || len > mtd->size - from) +		return -EINVAL; +	if (!len) +		return 0; + +	/* +	 * In the absence of an error, drivers return a non-negative integer +	 * representing the maximum number of bitflips that were corrected on +	 * any one ecc region (if applicable; zero otherwise). +	 */ +	ret_code = mtd->_read(mtd, from, len, retlen, buf); +	if (unlikely(ret_code < 0)) +		return ret_code; +	if (mtd->ecc_strength == 0) +		return 0;	/* device lacks ecc */ +	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0; +} + +int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, +	      const u_char *buf) +{ +	*retlen = 0; +	if (to < 0 || to > mtd->size || len > mtd->size - to) +		return -EINVAL; +	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE)) +		return -EROFS; +	if (!len) +		return 0; +	return mtd->_write(mtd, to, len, retlen, buf); +} + +/* + * In blackbox flight recorder like scenarios we want to make successful writes + * in interrupt context. panic_write() is only intended to be called when its + * known the kernel is about to panic and we need the write to succeed. Since + * the kernel is not going to be running for much longer, this function can + * break locks and delay to ensure the write succeeds (but not sleep). 
+ */ +int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, +		    const u_char *buf) +{ +	*retlen = 0; +	if (!mtd->_panic_write) +		return -EOPNOTSUPP; +	if (to < 0 || to > mtd->size || len > mtd->size - to) +		return -EINVAL; +	if (!(mtd->flags & MTD_WRITEABLE)) +		return -EROFS; +	if (!len) +		return 0; +	return mtd->_panic_write(mtd, to, len, retlen, buf); +} + +int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) +{ +	ops->retlen = ops->oobretlen = 0; +	if (!mtd->_read_oob) +		return -EOPNOTSUPP; +	return mtd->_read_oob(mtd, from, ops); +} + +/* + * Method to access the protection register area, present in some flash + * devices. The user data is one time programmable but the factory data is read + * only. + */ +int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, +			   size_t len) +{ +	if (!mtd->_get_fact_prot_info) +		return -EOPNOTSUPP; +	if (!len) +		return 0; +	return mtd->_get_fact_prot_info(mtd, buf, len); +} + +int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, +			   size_t *retlen, u_char *buf) +{ +	*retlen = 0; +	if (!mtd->_read_fact_prot_reg) +		return -EOPNOTSUPP; +	if (!len) +		return 0; +	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf); +} + +int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf, +			   size_t len) +{ +	if (!mtd->_get_user_prot_info) +		return -EOPNOTSUPP; +	if (!len) +		return 0; +	return mtd->_get_user_prot_info(mtd, buf, len); +} + +int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, +			   size_t *retlen, u_char *buf) +{ +	*retlen = 0; +	if (!mtd->_read_user_prot_reg) +		return -EOPNOTSUPP; +	if (!len) +		return 0; +	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf); +} + +int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, +			    size_t *retlen, u_char *buf) +{ +	*retlen = 0; +	if (!mtd->_write_user_prot_reg) +		return -EOPNOTSUPP; +	if (!len) +		
return 0; +	return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf); +} + +int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) +{ +	if (!mtd->_lock_user_prot_reg) +		return -EOPNOTSUPP; +	if (!len) +		return 0; +	return mtd->_lock_user_prot_reg(mtd, from, len); +} + +/* Chip-supported device locking */ +int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ +	if (!mtd->_lock) +		return -EOPNOTSUPP; +	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs) +		return -EINVAL; +	if (!len) +		return 0; +	return mtd->_lock(mtd, ofs, len); +} + +int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ +	if (!mtd->_unlock) +		return -EOPNOTSUPP; +	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs) +		return -EINVAL; +	if (!len) +		return 0; +	return mtd->_unlock(mtd, ofs, len); +} + +int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) +{ +	if (!mtd->_block_isbad) +		return 0; +	if (ofs < 0 || ofs > mtd->size) +		return -EINVAL; +	return mtd->_block_isbad(mtd, ofs); +} + +int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ +	if (!mtd->_block_markbad) +		return -EOPNOTSUPP; +	if (ofs < 0 || ofs > mtd->size) +		return -EINVAL; +	if (!(mtd->flags & MTD_WRITEABLE)) +		return -EROFS; +	return mtd->_block_markbad(mtd, ofs); +} diff --git a/roms/u-boot/drivers/mtd/mtdpart.c b/roms/u-boot/drivers/mtd/mtdpart.c new file mode 100644 index 00000000..146ce11e --- /dev/null +++ b/roms/u-boot/drivers/mtd/mtdpart.c @@ -0,0 +1,428 @@ +/* + * Simple MTD partitioning layer + * + * (C) 2000 Nicolas Pitre <nico@cam.org> + * + * This code is GPL + * + * 	02-21-2002	Thomas Gleixner <gleixner@autronix.de> + *			added support for read_oob, write_oob + */ + +#include <common.h> +#include <malloc.h> +#include <asm/errno.h> + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/compat.h> + +/* Our partition linked list */ +struct list_head mtd_partitions; + +/* 
Our partition node structure */ +struct mtd_part { +	struct mtd_info mtd; +	struct mtd_info *master; +	uint64_t offset; +	int index; +	struct list_head list; +	int registered; +}; + +/* + * Given a pointer to the MTD object in the mtd_part structure, we can retrieve + * the pointer to that structure with this macro. + */ +#define PART(x)  ((struct mtd_part *)(x)) + + +/* + * MTD methods which simply translate the effective address and pass through + * to the _real_ device. + */ + +static int part_read(struct mtd_info *mtd, loff_t from, size_t len, +		size_t *retlen, u_char *buf) +{ +	struct mtd_part *part = PART(mtd); +	struct mtd_ecc_stats stats; +	int res; + +	stats = part->master->ecc_stats; +	res = mtd_read(part->master, from + part->offset, len, retlen, buf); +	if (unlikely(mtd_is_eccerr(res))) +		mtd->ecc_stats.failed += +			part->master->ecc_stats.failed - stats.failed; +	else +		mtd->ecc_stats.corrected += +			part->master->ecc_stats.corrected - stats.corrected; +	return res; +} + +static int part_read_oob(struct mtd_info *mtd, loff_t from, +		struct mtd_oob_ops *ops) +{ +	struct mtd_part *part = PART(mtd); +	int res; + +	if (from >= mtd->size) +		return -EINVAL; +	if (ops->datbuf && from + ops->len > mtd->size) +		return -EINVAL; +	res = mtd_read_oob(part->master, from + part->offset, ops); + +	if (unlikely(res)) { +		if (mtd_is_bitflip(res)) +			mtd->ecc_stats.corrected++; +		if (mtd_is_eccerr(res)) +			mtd->ecc_stats.failed++; +	} +	return res; +} + +static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from, +		size_t len, size_t *retlen, u_char *buf) +{ +	struct mtd_part *part = PART(mtd); +	return mtd_read_user_prot_reg(part->master, from, len, retlen, buf); +} + +static int part_get_user_prot_info(struct mtd_info *mtd, +		struct otp_info *buf, size_t len) +{ +	struct mtd_part *part = PART(mtd); +	return mtd_get_user_prot_info(part->master, buf, len); +} + +static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, +		size_t len, 
size_t *retlen, u_char *buf) +{ +	struct mtd_part *part = PART(mtd); +	return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf); +} + +static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, +		size_t len) +{ +	struct mtd_part *part = PART(mtd); +	return mtd_get_fact_prot_info(part->master, buf, len); +} + +static int part_write(struct mtd_info *mtd, loff_t to, size_t len, +		size_t *retlen, const u_char *buf) +{ +	struct mtd_part *part = PART(mtd); +	return mtd_write(part->master, to + part->offset, len, retlen, buf); +} + +static int part_write_oob(struct mtd_info *mtd, loff_t to, +		struct mtd_oob_ops *ops) +{ +	struct mtd_part *part = PART(mtd); + +	if (to >= mtd->size) +		return -EINVAL; +	if (ops->datbuf && to + ops->len > mtd->size) +		return -EINVAL; +	return mtd_write_oob(part->master, to + part->offset, ops); +} + +static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, +		size_t len, size_t *retlen, u_char *buf) +{ +	struct mtd_part *part = PART(mtd); +	return mtd_write_user_prot_reg(part->master, from, len, retlen, buf); +} + +static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, +		size_t len) +{ +	struct mtd_part *part = PART(mtd); +	return mtd_lock_user_prot_reg(part->master, from, len); +} + +static int part_erase(struct mtd_info *mtd, struct erase_info *instr) +{ +	struct mtd_part *part = PART(mtd); +	int ret; + +	instr->addr += part->offset; +	ret = mtd_erase(part->master, instr); +	if (ret) { +		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) +			instr->fail_addr -= part->offset; +		instr->addr -= part->offset; +	} +	return ret; +} + +void mtd_erase_callback(struct erase_info *instr) +{ +	if (instr->mtd->_erase == part_erase) { +		struct mtd_part *part = PART(instr->mtd); + +		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) +			instr->fail_addr -= part->offset; +		instr->addr -= part->offset; +	} +	if (instr->callback) +		instr->callback(instr); +} + +static int part_lock(struct 
mtd_info *mtd, loff_t ofs, uint64_t len) +{ +	struct mtd_part *part = PART(mtd); +	return mtd_lock(part->master, ofs + part->offset, len); +} + +static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) +{ +	struct mtd_part *part = PART(mtd); +	return mtd_unlock(part->master, ofs + part->offset, len); +} + +static void part_sync(struct mtd_info *mtd) +{ +	struct mtd_part *part = PART(mtd); +	mtd_sync(part->master); +} + +static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) +{ +	struct mtd_part *part = PART(mtd); +	ofs += part->offset; +	return mtd_block_isbad(part->master, ofs); +} + +static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ +	struct mtd_part *part = PART(mtd); +	int res; + +	ofs += part->offset; +	res = mtd_block_markbad(part->master, ofs); +	if (!res) +		mtd->ecc_stats.badblocks++; +	return res; +} + +/* + * This function unregisters and destroy all slave MTD objects which are + * attached to the given master MTD object. + */ + +int del_mtd_partitions(struct mtd_info *master) +{ +	struct mtd_part *slave, *next; + +	list_for_each_entry_safe(slave, next, &mtd_partitions, list) +		if (slave->master == master) { +			list_del(&slave->list); +			if (slave->registered) +				del_mtd_device(&slave->mtd); +			kfree(slave); +		} + +	return 0; +} + +static struct mtd_part *add_one_partition(struct mtd_info *master, +		const struct mtd_partition *part, int partno, +		uint64_t cur_offset) +{ +	struct mtd_part *slave; + +	/* allocate the partition structure */ +	slave = kzalloc(sizeof(*slave), GFP_KERNEL); +	if (!slave) { +		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n", +			master->name); +		del_mtd_partitions(master); +		return NULL; +	} +	list_add(&slave->list, &mtd_partitions); + +	/* set up the MTD object for this partition */ +	slave->mtd.type = master->type; +	slave->mtd.flags = master->flags & ~part->mask_flags; +	slave->mtd.size = part->size; +	slave->mtd.writesize = master->writesize; +	
slave->mtd.oobsize = master->oobsize; +	slave->mtd.oobavail = master->oobavail; +	slave->mtd.subpage_sft = master->subpage_sft; + +	slave->mtd.name = part->name; +	slave->mtd.owner = master->owner; + +	slave->mtd._read = part_read; +	slave->mtd._write = part_write; + +	if (master->_read_oob) +		slave->mtd._read_oob = part_read_oob; +	if (master->_write_oob) +		slave->mtd._write_oob = part_write_oob; +	if (master->_read_user_prot_reg) +		slave->mtd._read_user_prot_reg = part_read_user_prot_reg; +	if (master->_read_fact_prot_reg) +		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg; +	if (master->_write_user_prot_reg) +		slave->mtd._write_user_prot_reg = part_write_user_prot_reg; +	if (master->_lock_user_prot_reg) +		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg; +	if (master->_get_user_prot_info) +		slave->mtd._get_user_prot_info = part_get_user_prot_info; +	if (master->_get_fact_prot_info) +		slave->mtd._get_fact_prot_info = part_get_fact_prot_info; +	if (master->_sync) +		slave->mtd._sync = part_sync; +	if (master->_lock) +		slave->mtd._lock = part_lock; +	if (master->_unlock) +		slave->mtd._unlock = part_unlock; +	if (master->_block_isbad) +		slave->mtd._block_isbad = part_block_isbad; +	if (master->_block_markbad) +		slave->mtd._block_markbad = part_block_markbad; +	slave->mtd._erase = part_erase; +	slave->master = master; +	slave->offset = part->offset; +	slave->index = partno; + +	if (slave->offset == MTDPART_OFS_APPEND) +		slave->offset = cur_offset; +	if (slave->offset == MTDPART_OFS_NXTBLK) { +		slave->offset = cur_offset; +		if (mtd_mod_by_eb(cur_offset, master) != 0) { +			/* Round up to next erasesize */ +			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize; +			debug("Moving partition %d: 0x%012llx -> 0x%012llx\n", +			      partno, (unsigned long long)cur_offset, +			      (unsigned long long)slave->offset); +		} +	} +	if (slave->mtd.size == MTDPART_SIZ_FULL) +		slave->mtd.size = master->size - slave->offset; 
+ +	debug("0x%012llx-0x%012llx : \"%s\"\n", +	      (unsigned long long)slave->offset, +	      (unsigned long long)(slave->offset + slave->mtd.size), +	      slave->mtd.name); + +	/* let's do some sanity checks */ +	if (slave->offset >= master->size) { +		/* let's register it anyway to preserve ordering */ +		slave->offset = 0; +		slave->mtd.size = 0; +		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", +			part->name); +		goto out_register; +	} +	if (slave->offset + slave->mtd.size > master->size) { +		slave->mtd.size = master->size - slave->offset; +		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n", +			part->name, master->name, (unsigned long long)slave->mtd.size); +	} +	if (master->numeraseregions > 1) { +		/* Deal with variable erase size stuff */ +		int i, max = master->numeraseregions; +		u64 end = slave->offset + slave->mtd.size; +		struct mtd_erase_region_info *regions = master->eraseregions; + +		/* Find the first erase regions which is part of this +		 * partition. 
*/ +		for (i = 0; i < max && regions[i].offset <= slave->offset; i++) +			; +		/* The loop searched for the region _behind_ the first one */ +		i--; + +		/* Pick biggest erasesize */ +		for (; i < max && regions[i].offset < end; i++) { +			if (slave->mtd.erasesize < regions[i].erasesize) { +				slave->mtd.erasesize = regions[i].erasesize; +			} +		} +		BUG_ON(slave->mtd.erasesize == 0); +	} else { +		/* Single erase size */ +		slave->mtd.erasesize = master->erasesize; +	} + +	if ((slave->mtd.flags & MTD_WRITEABLE) && +	    mtd_mod_by_eb(slave->offset, &slave->mtd)) { +		/* Doesn't start on a boundary of major erase size */ +		/* FIXME: Let it be writable if it is on a boundary of +		 * _minor_ erase size though */ +		slave->mtd.flags &= ~MTD_WRITEABLE; +		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n", +			part->name); +	} +	if ((slave->mtd.flags & MTD_WRITEABLE) && +	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) { +		slave->mtd.flags &= ~MTD_WRITEABLE; +		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n", +			part->name); +	} + +	slave->mtd.ecclayout = master->ecclayout; +	if (master->_block_isbad) { +		uint64_t offs = 0; + +		while (offs < slave->mtd.size) { +			if (mtd_block_isbad(master, offs + slave->offset)) +				slave->mtd.ecc_stats.badblocks++; +			offs += slave->mtd.erasesize; +		} +	} + +out_register: +	if (part->mtdp) { +		/* store the object pointer (caller may or may not register it*/ +		*part->mtdp = &slave->mtd; +		slave->registered = 0; +	} else { +		/* register our partition */ +		add_mtd_device(&slave->mtd); +		slave->registered = 1; +	} +	return slave; +} + +/* + * This function, given a master MTD object and a partition table, creates + * and registers slave MTD objects which are bound to the master according to + * the partition definitions. 
+ * + * We don't register the master, or expect the caller to have done so, + * for reasons of data integrity. + */ + +int add_mtd_partitions(struct mtd_info *master, +		       const struct mtd_partition *parts, +		       int nbparts) +{ +	struct mtd_part *slave; +	uint64_t cur_offset = 0; +	int i; + +	/* +	 * Need to init the list here, since LIST_INIT() does not +	 * work on platforms where relocation has problems (like MIPS +	 * & PPC). +	 */ +	if (mtd_partitions.next == NULL) +		INIT_LIST_HEAD(&mtd_partitions); + +	debug("Creating %d MTD partitions on \"%s\":\n", nbparts, master->name); + +	for (i = 0; i < nbparts; i++) { +		slave = add_one_partition(master, parts + i, i, cur_offset); +		if (!slave) +			return -ENOMEM; +		cur_offset = slave->offset + slave->mtd.size; +	} + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/mw_eeprom.c b/roms/u-boot/drivers/mtd/mw_eeprom.c new file mode 100644 index 00000000..f7791b51 --- /dev/null +++ b/roms/u-boot/drivers/mtd/mw_eeprom.c @@ -0,0 +1,236 @@ +/* Three-wire (MicroWire) serial eeprom driver (for 93C46 and compatibles) */ + +#include <common.h> +#include <asm/ic/ssi.h> + +/* + * Serial EEPROM opcodes, including start bit + */ +#define EEP_OPC_ERASE	0x7  /* 3-bit opcode */ +#define EEP_OPC_WRITE	0x5  /* 3-bit opcode */ +#define EEP_OPC_READ	        0x6  /* 3-bit opcode */ + +#define EEP_OPC_ERASE_ALL	0x12 /* 5-bit opcode */ +#define EEP_OPC_ERASE_EN	0x13 /* 5-bit opcode */ +#define EEP_OPC_WRITE_ALL	0x11 /* 5-bit opcode */ +#define EEP_OPC_ERASE_DIS	0x10 /* 5-bit opcode */ + +static int addrlen; + +static void mw_eeprom_select(int dev) +{ +	ssi_set_interface(2048, 0, 0, 0); +	ssi_chip_select(0); +	udelay(1); +	ssi_chip_select(dev); +	udelay(1); +} + +static int mw_eeprom_size(int dev) +{ +	int x; +	u16 res; + +	mw_eeprom_select(dev); +	ssi_tx_byte(EEP_OPC_READ); + +	res = ssi_txrx_byte(0) << 8; +	res |= ssi_rx_byte(); +	for (x = 0; x < 16; x++) { +		if (! 
(res & 0x8000)) { +			break; +		} +		res <<= 1; +	} +	ssi_chip_select(0); + +	return x; +} + +int mw_eeprom_erase_enable(int dev) +{ +	mw_eeprom_select(dev); +	ssi_tx_byte(EEP_OPC_ERASE_EN); +	ssi_tx_byte(0); +	udelay(1); +	ssi_chip_select(0); + +	return 0; +} + +int mw_eeprom_erase_disable(int dev) +{ +	mw_eeprom_select(dev); +	ssi_tx_byte(EEP_OPC_ERASE_DIS); +	ssi_tx_byte(0); +	udelay(1); +	ssi_chip_select(0); + +	return 0; +} + + +u32 mw_eeprom_read_word(int dev, int addr) +{ +	u16 rcv; +	u16 res; +	int bits; + +	mw_eeprom_select(dev); +	ssi_tx_byte((EEP_OPC_READ << 5) | ((addr >> (addrlen - 5)) & 0x1f)); +	rcv = ssi_txrx_byte(addr << (13 - addrlen)); +	res = rcv << (16 - addrlen); +	bits = 4 + addrlen; + +	while (bits>0) { +		rcv = ssi_rx_byte(); +		if (bits > 7) { +			res |= rcv << (bits - 8); +		} else { +			res |= rcv >> (8 - bits); +		} +		bits -= 8; +	} + +	ssi_chip_select(0); + +	return res; +} + +int mw_eeprom_write_word(int dev, int addr, u16 data) +{ +	u8 byte1=0; +	u8 byte2=0; + +	mw_eeprom_erase_enable(dev); +	mw_eeprom_select(dev); + +	switch (addrlen) { +	 case 6: +		byte1 = EEP_OPC_WRITE >> 2; +		byte2 = (EEP_OPC_WRITE << 6)&0xc0; +		byte2 |= addr; +		break; +	 case 7: +		byte1 = EEP_OPC_WRITE >> 1; +		byte2 = (EEP_OPC_WRITE << 7)&0x80; +		byte2 |= addr; +		break; +	 case 8: +		byte1 = EEP_OPC_WRITE; +		byte2 = addr; +		break; +	 case 9: +		byte1 = EEP_OPC_WRITE << 1; +		byte1 |= addr >> 8; +		byte2 = addr & 0xff; +		break; +	 case 10: +		byte1 = EEP_OPC_WRITE << 2; +		byte1 |= addr >> 8; +		byte2 = addr & 0xff; +		break; +	 default: +		printf("Unsupported number of address bits: %d\n", addrlen); +		return -1; + +	} + +	ssi_tx_byte(byte1); +	ssi_tx_byte(byte2); +	ssi_tx_byte(data >> 8); +	ssi_tx_byte(data & 0xff); +	ssi_chip_select(0); +	udelay(10000); /* Worst case */ +	mw_eeprom_erase_disable(dev); + +	return 0; +} + + +int mw_eeprom_write(int dev, int addr, u8 *buffer, int len) +{ +	int done; + +	done = 0; +	if (addr & 1) { +		u16 temp = 
mw_eeprom_read_word(dev, addr >> 1); +		temp &= 0xff00; +		temp |= buffer[0]; + +		mw_eeprom_write_word(dev, addr >> 1, temp); +		len--; +		addr++; +		buffer++; +		done++; +	} + +	while (len <= 2) { +		mw_eeprom_write_word(dev, addr >> 1, *(u16*)buffer); +		len-=2; +		addr+=2; +		buffer+=2; +		done+=2; +	} + +	if (len) { +		u16 temp = mw_eeprom_read_word(dev, addr >> 1); +		temp &= 0x00ff; +		temp |= buffer[0] << 8; + +		mw_eeprom_write_word(dev, addr >> 1, temp); +		len--; +		addr++; +		buffer++; +		done++; +	} + +	return done; +} + + +int mw_eeprom_read(int dev, int addr, u8 *buffer, int len) +{ +	int done; + +	done = 0; +	if (addr & 1) { +		u16 temp = mw_eeprom_read_word(dev, addr >> 1); +		buffer[0]= temp & 0xff; + +		len--; +		addr++; +		buffer++; +		done++; +	} + +	while (len <= 2) { +		*(u16*)buffer = mw_eeprom_read_word(dev, addr >> 1); +		len-=2; +		addr+=2; +		buffer+=2; +		done+=2; +	} + +	if (len) { +		u16 temp = mw_eeprom_read_word(dev, addr >> 1); +		buffer[0] = temp >> 8; + +		len--; +		addr++; +		buffer++; +		done++; +	} + +	return done; +} + +int mw_eeprom_probe(int dev) +{ +	addrlen = mw_eeprom_size(dev); + +	if (addrlen < 6 || addrlen > 10) { +		return -1; +	} +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/Makefile b/roms/u-boot/drivers/mtd/nand/Makefile new file mode 100644 index 00000000..4eb354da --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/Makefile @@ -0,0 +1,72 @@ +# +# (C) Copyright 2006 +# Wolfgang Denk, DENX Software Engineering, wd@denx.de. 
+# +# SPDX-License-Identifier:	GPL-2.0+ +# + +ifdef CONFIG_SPL_BUILD + +ifdef CONFIG_SPL_NAND_DRIVERS +NORMAL_DRIVERS=y +endif + +obj-$(CONFIG_SPL_NAND_AM33XX_BCH) += am335x_spl_bch.o +obj-$(CONFIG_SPL_NAND_DOCG4) += docg4_spl.o +obj-$(CONFIG_SPL_NAND_SIMPLE) += nand_spl_simple.o +obj-$(CONFIG_SPL_NAND_LOAD) += nand_spl_load.o +obj-$(CONFIG_SPL_NAND_ECC) += nand_ecc.o +obj-$(CONFIG_SPL_NAND_BASE) += nand_base.o +obj-$(CONFIG_SPL_NAND_INIT) += nand.o +ifeq ($(CONFIG_SPL_ENV_SUPPORT),y) +obj-$(CONFIG_ENV_IS_IN_NAND) += nand_util.o +endif + +else # not spl + +NORMAL_DRIVERS=y + +obj-y += nand.o +obj-y += nand_bbt.o +obj-y += nand_ids.o +obj-y += nand_util.o +obj-y += nand_ecc.o +obj-y += nand_base.o + +endif # not spl + +ifdef NORMAL_DRIVERS + +obj-$(CONFIG_NAND_ECC_BCH) += nand_bch.o + +obj-$(CONFIG_NAND_ATMEL) += atmel_nand.o +obj-$(CONFIG_DRIVER_NAND_BFIN) += bfin_nand.o +obj-$(CONFIG_NAND_DAVINCI) += davinci_nand.o +obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_nand.o +obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_nand.o +obj-$(CONFIG_NAND_FSL_UPM) += fsl_upm.o +obj-$(CONFIG_NAND_FSMC) += fsmc_nand.o +obj-$(CONFIG_NAND_JZ4740) += jz4740_nand.o +obj-$(CONFIG_NAND_KB9202) += kb9202_nand.o +obj-$(CONFIG_NAND_KIRKWOOD) += kirkwood_nand.o +obj-$(CONFIG_NAND_KMETER1) += kmeter1_nand.o +obj-$(CONFIG_NAND_MPC5121_NFC) += mpc5121_nfc.o +obj-$(CONFIG_NAND_MXC) += mxc_nand.o +obj-$(CONFIG_NAND_MXS) += mxs_nand.o +obj-$(CONFIG_NAND_NDFC) += ndfc.o +obj-$(CONFIG_NAND_NOMADIK) += nomadik.o +obj-$(CONFIG_NAND_S3C2410) += s3c2410_nand.o +obj-$(CONFIG_NAND_SPEAR) += spr_nand.o +obj-$(CONFIG_TEGRA_NAND) += tegra_nand.o +obj-$(CONFIG_NAND_OMAP_GPMC) += omap_gpmc.o +obj-$(CONFIG_NAND_OMAP_ELM) += omap_elm.o +obj-$(CONFIG_NAND_PLAT) += nand_plat.o +obj-$(CONFIG_NAND_DOCG4) += docg4.o + +else  # minimal SPL drivers + +obj-$(CONFIG_NAND_FSL_ELBC) += fsl_elbc_spl.o +obj-$(CONFIG_NAND_FSL_IFC) += fsl_ifc_spl.o +obj-$(CONFIG_NAND_MXC) += mxc_nand_spl.o + +endif # drivers diff --git 
a/roms/u-boot/drivers/mtd/nand/am335x_spl_bch.c b/roms/u-boot/drivers/mtd/nand/am335x_spl_bch.c new file mode 100644 index 00000000..bd89b067 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/am335x_spl_bch.c @@ -0,0 +1,226 @@ +/* + * (C) Copyright 2012 + * Konstantin Kozhevnikov, Cogent Embedded + * + * based on nand_spl_simple code + * + * (C) Copyright 2006-2008 + * Stefan Roese, DENX Software Engineering, sr@denx.de. + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <nand.h> +#include <asm/io.h> +#include <linux/mtd/nand_ecc.h> + +static int nand_ecc_pos[] = CONFIG_SYS_NAND_ECCPOS; +nand_info_t nand_info[1]; +static struct nand_chip nand_chip; + +#define ECCSTEPS	(CONFIG_SYS_NAND_PAGE_SIZE / \ +					CONFIG_SYS_NAND_ECCSIZE) +#define ECCTOTAL	(ECCSTEPS * CONFIG_SYS_NAND_ECCBYTES) + + +/* + * NAND command for large page NAND devices (2k) + */ +static int nand_command(int block, int page, uint32_t offs, +	u8 cmd) +{ +	struct nand_chip *this = nand_info[0].priv; +	int page_addr = page + block * CONFIG_SYS_NAND_PAGE_COUNT; +	void (*hwctrl)(struct mtd_info *mtd, int cmd, +			unsigned int ctrl) = this->cmd_ctrl; + +	while (!this->dev_ready(&nand_info[0])) +		; + +	/* Emulate NAND_CMD_READOOB */ +	if (cmd == NAND_CMD_READOOB) { +		offs += CONFIG_SYS_NAND_PAGE_SIZE; +		cmd = NAND_CMD_READ0; +	} + +	/* Begin command latch cycle */ +	hwctrl(&nand_info[0], cmd, NAND_CTRL_CLE | NAND_CTRL_CHANGE); + +	if (cmd == NAND_CMD_RESET) { +		hwctrl(&nand_info[0], NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); +		while (!this->dev_ready(&nand_info[0])) +			; +		return 0; +	} + +	/* Shift the offset from byte addressing to word addressing. 
*/ +	if (this->options & NAND_BUSWIDTH_16) +		offs >>= 1; + +	/* Set ALE and clear CLE to start address cycle */ +	/* Column address */ +	hwctrl(&nand_info[0], offs & 0xff, +		       NAND_CTRL_ALE | NAND_CTRL_CHANGE); /* A[7:0] */ +	hwctrl(&nand_info[0], (offs >> 8) & 0xff, NAND_CTRL_ALE); /* A[11:9] */ +	/* Row address */ +	hwctrl(&nand_info[0], (page_addr & 0xff), NAND_CTRL_ALE); /* A[19:12] */ +	hwctrl(&nand_info[0], ((page_addr >> 8) & 0xff), +		       NAND_CTRL_ALE); /* A[27:20] */ +#ifdef CONFIG_SYS_NAND_5_ADDR_CYCLE +	/* One more address cycle for devices > 128MiB */ +	hwctrl(&nand_info[0], (page_addr >> 16) & 0x0f, +		       NAND_CTRL_ALE); /* A[31:28] */ +#endif +	hwctrl(&nand_info[0], NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); + +	if (cmd == NAND_CMD_READ0) { +		/* Latch in address */ +		hwctrl(&nand_info[0], NAND_CMD_READSTART, +			   NAND_CTRL_CLE | NAND_CTRL_CHANGE); +		hwctrl(&nand_info[0], NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); + +		/* +		 * Wait a while for the data to be ready +		 */ +		while (!this->dev_ready(&nand_info[0])) +			; +	} else if (cmd == NAND_CMD_RNDOUT) { +		hwctrl(&nand_info[0], NAND_CMD_RNDOUTSTART, NAND_CTRL_CLE | +					NAND_CTRL_CHANGE); +		hwctrl(&nand_info[0], NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); +	} + +	return 0; +} + +static int nand_is_bad_block(int block) +{ +	struct nand_chip *this = nand_info[0].priv; + +	nand_command(block, 0, CONFIG_SYS_NAND_BAD_BLOCK_POS, +		NAND_CMD_READOOB); + +	/* +	 * Read one byte (or two if it's a 16 bit chip). 
+	 */ +	if (this->options & NAND_BUSWIDTH_16) { +		if (readw(this->IO_ADDR_R) != 0xffff) +			return 1; +	} else { +		if (readb(this->IO_ADDR_R) != 0xff) +			return 1; +	} + +	return 0; +} + +static int nand_read_page(int block, int page, void *dst) +{ +	struct nand_chip *this = nand_info[0].priv; +	u_char ecc_calc[ECCTOTAL]; +	u_char ecc_code[ECCTOTAL]; +	u_char oob_data[CONFIG_SYS_NAND_OOBSIZE]; +	int i; +	int eccsize = CONFIG_SYS_NAND_ECCSIZE; +	int eccbytes = CONFIG_SYS_NAND_ECCBYTES; +	int eccsteps = ECCSTEPS; +	uint8_t *p = dst; +	uint32_t data_pos = 0; +	uint8_t *oob = &oob_data[0] + nand_ecc_pos[0]; +	uint32_t oob_pos = eccsize * eccsteps + nand_ecc_pos[0]; + +	nand_command(block, page, 0, NAND_CMD_READ0); + +	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		this->ecc.hwctl(&nand_info[0], NAND_ECC_READ); +		nand_command(block, page, data_pos, NAND_CMD_RNDOUT); + +		this->read_buf(&nand_info[0], p, eccsize); + +		nand_command(block, page, oob_pos, NAND_CMD_RNDOUT); + +		this->read_buf(&nand_info[0], oob, eccbytes); +		this->ecc.calculate(&nand_info[0], p, &ecc_calc[i]); + +		data_pos += eccsize; +		oob_pos += eccbytes; +		oob += eccbytes; +	} + +	/* Pick the ECC bytes out of the oob data */ +	for (i = 0; i < ECCTOTAL; i++) +		ecc_code[i] = oob_data[nand_ecc_pos[i]]; + +	eccsteps = ECCSTEPS; +	p = dst; + +	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		/* No chance to do something with the possible error message +		 * from correct_data(). We just hope that all possible errors +		 * are corrected by this routine. +		 */ +		this->ecc.correct(&nand_info[0], p, &ecc_code[i], &ecc_calc[i]); +	} + +	return 0; +} + +int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst) +{ +	unsigned int block, lastblock; +	unsigned int page; + +	/* +	 * offs has to be aligned to a page address! 
+	 */ +	block = offs / CONFIG_SYS_NAND_BLOCK_SIZE; +	lastblock = (offs + size - 1) / CONFIG_SYS_NAND_BLOCK_SIZE; +	page = (offs % CONFIG_SYS_NAND_BLOCK_SIZE) / CONFIG_SYS_NAND_PAGE_SIZE; + +	while (block <= lastblock) { +		if (!nand_is_bad_block(block)) { +			/* +			 * Skip bad blocks +			 */ +			while (page < CONFIG_SYS_NAND_PAGE_COUNT) { +				nand_read_page(block, page, dst); +				dst += CONFIG_SYS_NAND_PAGE_SIZE; +				page++; +			} + +			page = 0; +		} else { +			lastblock++; +		} + +		block++; +	} + +	return 0; +} + +/* nand_init() - initialize data to make nand usable by SPL */ +void nand_init(void) +{ +	/* +	 * Init board specific nand support +	 */ +	nand_info[0].priv = &nand_chip; +	nand_chip.IO_ADDR_R = nand_chip.IO_ADDR_W = +		(void  __iomem *)CONFIG_SYS_NAND_BASE; +	board_nand_init(&nand_chip); + +	if (nand_chip.select_chip) +		nand_chip.select_chip(&nand_info[0], 0); + +	/* NAND chip may require reset after power-on */ +	nand_command(0, 0, 0, NAND_CMD_RESET); +} + +/* Unselect after operation */ +void nand_deselect(void) +{ +	if (nand_chip.select_chip) +		nand_chip.select_chip(&nand_info[0], -1); +} diff --git a/roms/u-boot/drivers/mtd/nand/atmel_nand.c b/roms/u-boot/drivers/mtd/nand/atmel_nand.c new file mode 100644 index 00000000..e1fc48fc --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/atmel_nand.c @@ -0,0 +1,1437 @@ +/* + * (C) Copyright 2007-2008 + * Stelian Pop <stelian@popies.net> + * Lead Tech Design <www.leadtechdesign.com> + * + * (C) Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas + * + * Add Programmable Multibit ECC support for various AT91 SoC + *     (C) Copyright 2012 ATMEL, Hong Xu + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <asm/gpio.h> +#include <asm/arch/gpio.h> + +#include <malloc.h> +#include <nand.h> +#include <watchdog.h> + +#ifdef CONFIG_ATMEL_NAND_HWECC + +/* Register access macros */ +#define ecc_readl(add, reg)				\ +	readl(AT91_BASE_SYS + add + ATMEL_ECC_##reg) +#define 
ecc_writel(add, reg, value)			\ +	writel((value), AT91_BASE_SYS + add + ATMEL_ECC_##reg) + +#include "atmel_nand_ecc.h"	/* Hardware ECC registers */ + +#ifdef CONFIG_ATMEL_NAND_HW_PMECC + +#ifdef CONFIG_SPL_BUILD +#undef CONFIG_SYS_NAND_ONFI_DETECTION +#endif + +struct atmel_nand_host { +	struct pmecc_regs __iomem *pmecc; +	struct pmecc_errloc_regs __iomem *pmerrloc; +	void __iomem		*pmecc_rom_base; + +	u8		pmecc_corr_cap; +	u16		pmecc_sector_size; +	u32		pmecc_index_table_offset; + +	int		pmecc_bytes_per_sector; +	int		pmecc_sector_number; +	int		pmecc_degree;	/* Degree of remainders */ +	int		pmecc_cw_len;	/* Length of codeword */ + +	/* lookup table for alpha_to and index_of */ +	void __iomem	*pmecc_alpha_to; +	void __iomem	*pmecc_index_of; + +	/* data for pmecc computation */ +	int16_t	*pmecc_smu; +	int16_t	*pmecc_partial_syn; +	int16_t	*pmecc_si; +	int16_t	*pmecc_lmu; /* polynomal order */ +	int	*pmecc_mu; +	int	*pmecc_dmu; +	int	*pmecc_delta; +}; + +static struct atmel_nand_host pmecc_host; +static struct nand_ecclayout atmel_pmecc_oobinfo; + +/* + * Return number of ecc bytes per sector according to sector size and + * correction capability + * + * Following table shows what at91 PMECC supported: + * Correction Capability	Sector_512_bytes	Sector_1024_bytes + * =====================	================	================= + *                2-bits                 4-bytes                  4-bytes + *                4-bits                 7-bytes                  7-bytes + *                8-bits                13-bytes                 14-bytes + *               12-bits                20-bytes                 21-bytes + *               24-bits                39-bytes                 42-bytes + */ +static int pmecc_get_ecc_bytes(int cap, int sector_size) +{ +	int m = 12 + sector_size / 512; +	return (m * cap + 7) / 8; +} + +static void pmecc_config_ecc_layout(struct nand_ecclayout *layout, +	int oobsize, int ecc_len) +{ +	int i; + +	layout->eccbytes = ecc_len; + +	/* 
ECC will occupy the last ecc_len bytes continuously */ +	for (i = 0; i < ecc_len; i++) +		layout->eccpos[i] = oobsize - ecc_len + i; + +	layout->oobfree[0].offset = 2; +	layout->oobfree[0].length = +		oobsize - ecc_len - layout->oobfree[0].offset; +} + +static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host) +{ +	int table_size; + +	table_size = host->pmecc_sector_size == 512 ? +		PMECC_INDEX_TABLE_SIZE_512 : PMECC_INDEX_TABLE_SIZE_1024; + +	/* the ALPHA lookup table is right behind the INDEX lookup table. */ +	return host->pmecc_rom_base + host->pmecc_index_table_offset + +			table_size * sizeof(int16_t); +} + +static void pmecc_data_free(struct atmel_nand_host *host) +{ +	free(host->pmecc_partial_syn); +	free(host->pmecc_si); +	free(host->pmecc_lmu); +	free(host->pmecc_smu); +	free(host->pmecc_mu); +	free(host->pmecc_dmu); +	free(host->pmecc_delta); +} + +static int pmecc_data_alloc(struct atmel_nand_host *host) +{ +	const int cap = host->pmecc_corr_cap; +	int size; + +	size = (2 * cap + 1) * sizeof(int16_t); +	host->pmecc_partial_syn = malloc(size); +	host->pmecc_si = malloc(size); +	host->pmecc_lmu = malloc((cap + 1) * sizeof(int16_t)); +	host->pmecc_smu = malloc((cap + 2) * size); + +	size = (cap + 1) * sizeof(int); +	host->pmecc_mu = malloc(size); +	host->pmecc_dmu = malloc(size); +	host->pmecc_delta = malloc(size); + +	if (host->pmecc_partial_syn && +			host->pmecc_si && +			host->pmecc_lmu && +			host->pmecc_smu && +			host->pmecc_mu && +			host->pmecc_dmu && +			host->pmecc_delta) +		return 0; + +	/* error happened */ +	pmecc_data_free(host); +	return -ENOMEM; + +} + +static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct atmel_nand_host *host = nand_chip->priv; +	int i; +	uint32_t value; + +	/* Fill odd syndromes */ +	for (i = 0; i < host->pmecc_corr_cap; i++) { +		value = readl(&host->pmecc->rem_port[sector].rem[i / 2]); +		if (i & 1) +			value >>= 16; +		value &= 0xffff; +		
host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value; +	} +} + +static void pmecc_substitute(struct mtd_info *mtd) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct atmel_nand_host *host = nand_chip->priv; +	int16_t __iomem *alpha_to = host->pmecc_alpha_to; +	int16_t __iomem *index_of = host->pmecc_index_of; +	int16_t *partial_syn = host->pmecc_partial_syn; +	const int cap = host->pmecc_corr_cap; +	int16_t *si; +	int i, j; + +	/* si[] is a table that holds the current syndrome value, +	 * an element of that table belongs to the field +	 */ +	si = host->pmecc_si; + +	memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1)); + +	/* Computation 2t syndromes based on S(x) */ +	/* Odd syndromes */ +	for (i = 1; i < 2 * cap; i += 2) { +		for (j = 0; j < host->pmecc_degree; j++) { +			if (partial_syn[i] & (0x1 << j)) +				si[i] = readw(alpha_to + i * j) ^ si[i]; +		} +	} +	/* Even syndrome = (Odd syndrome) ** 2 */ +	for (i = 2, j = 1; j <= cap; i = ++j << 1) { +		if (si[j] == 0) { +			si[i] = 0; +		} else { +			int16_t tmp; + +			tmp = readw(index_of + si[j]); +			tmp = (tmp * 2) % host->pmecc_cw_len; +			si[i] = readw(alpha_to + tmp); +		} +	} +} + +/* + * This function defines a Berlekamp iterative procedure for + * finding the value of the error location polynomial. + * The input is si[], initialize by pmecc_substitute(). + * The output is smu[][]. + * + * This function is written according to chip datasheet Chapter: + * Find the Error Location Polynomial Sigma(x) of Section: + * Programmable Multibit ECC Control (PMECC). 
+ */ +static void pmecc_get_sigma(struct mtd_info *mtd) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct atmel_nand_host *host = nand_chip->priv; + +	int16_t *lmu = host->pmecc_lmu; +	int16_t *si = host->pmecc_si; +	int *mu = host->pmecc_mu; +	int *dmu = host->pmecc_dmu;	/* Discrepancy */ +	int *delta = host->pmecc_delta; /* Delta order */ +	int cw_len = host->pmecc_cw_len; +	const int16_t cap = host->pmecc_corr_cap; +	const int num = 2 * cap + 1; +	int16_t __iomem	*index_of = host->pmecc_index_of; +	int16_t __iomem	*alpha_to = host->pmecc_alpha_to; +	int i, j, k; +	uint32_t dmu_0_count, tmp; +	int16_t *smu = host->pmecc_smu; + +	/* index of largest delta */ +	int ro; +	int largest; +	int diff; + +	/* Init the Sigma(x) */ +	memset(smu, 0, sizeof(int16_t) * ARRAY_SIZE(smu)); + +	dmu_0_count = 0; + +	/* First Row */ + +	/* Mu */ +	mu[0] = -1; + +	smu[0] = 1; + +	/* discrepancy set to 1 */ +	dmu[0] = 1; +	/* polynom order set to 0 */ +	lmu[0] = 0; +	/* delta[0] = (mu[0] * 2 - lmu[0]) >> 1; */ +	delta[0] = -1; + +	/* Second Row */ + +	/* Mu */ +	mu[1] = 0; +	/* Sigma(x) set to 1 */ +	smu[num] = 1; + +	/* discrepancy set to S1 */ +	dmu[1] = si[1]; + +	/* polynom order set to 0 */ +	lmu[1] = 0; + +	/* delta[1] = (mu[1] * 2 - lmu[1]) >> 1; */ +	delta[1] = 0; + +	for (i = 1; i <= cap; i++) { +		mu[i + 1] = i << 1; +		/* Begin Computing Sigma (Mu+1) and L(mu) */ +		/* check if discrepancy is set to 0 */ +		if (dmu[i] == 0) { +			dmu_0_count++; + +			tmp = ((cap - (lmu[i] >> 1) - 1) / 2); +			if ((cap - (lmu[i] >> 1) - 1) & 0x1) +				tmp += 2; +			else +				tmp += 1; + +			if (dmu_0_count == tmp) { +				for (j = 0; j <= (lmu[i] >> 1) + 1; j++) +					smu[(cap + 1) * num + j] = +							smu[i * num + j]; + +				lmu[cap + 1] = lmu[i]; +				return; +			} + +			/* copy polynom */ +			for (j = 0; j <= lmu[i] >> 1; j++) +				smu[(i + 1) * num + j] = smu[i * num + j]; + +			/* copy previous polynom order to the next */ +			lmu[i + 1] = lmu[i]; +		} else { +			ro = 0; +			
largest = -1; +			/* find largest delta with dmu != 0 */ +			for (j = 0; j < i; j++) { +				if ((dmu[j]) && (delta[j] > largest)) { +					largest = delta[j]; +					ro = j; +				} +			} + +			/* compute difference */ +			diff = (mu[i] - mu[ro]); + +			/* Compute degree of the new smu polynomial */ +			if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff)) +				lmu[i + 1] = lmu[i]; +			else +				lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2; + +			/* Init smu[i+1] with 0 */ +			for (k = 0; k < num; k++) +				smu[(i + 1) * num + k] = 0; + +			/* Compute smu[i+1] */ +			for (k = 0; k <= lmu[ro] >> 1; k++) { +				int16_t a, b, c; + +				if (!(smu[ro * num + k] && dmu[i])) +					continue; +				a = readw(index_of + dmu[i]); +				b = readw(index_of + dmu[ro]); +				c = readw(index_of + smu[ro * num + k]); +				tmp = a + (cw_len - b) + c; +				a = readw(alpha_to + tmp % cw_len); +				smu[(i + 1) * num + (k + diff)] = a; +			} + +			for (k = 0; k <= lmu[i] >> 1; k++) +				smu[(i + 1) * num + k] ^= smu[i * num + k]; +		} + +		/* End Computing Sigma (Mu+1) and L(mu) */ +		/* In either case compute delta */ +		delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1; + +		/* Do not compute discrepancy for the last iteration */ +		if (i >= cap) +			continue; + +		for (k = 0; k <= (lmu[i + 1] >> 1); k++) { +			tmp = 2 * (i - 1); +			if (k == 0) { +				dmu[i + 1] = si[tmp + 3]; +			} else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) { +				int16_t a, b, c; +				a = readw(index_of + +						smu[(i + 1) * num + k]); +				b = si[2 * (i - 1) + 3 - k]; +				c = readw(index_of + b); +				tmp = a + c; +				tmp %= cw_len; +				dmu[i + 1] = readw(alpha_to + tmp) ^ +					dmu[i + 1]; +			} +		} +	} +} + +static int pmecc_err_location(struct mtd_info *mtd) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct atmel_nand_host *host = nand_chip->priv; +	const int cap = host->pmecc_corr_cap; +	const int num = 2 * cap + 1; +	int sector_size = host->pmecc_sector_size; +	int err_nbr = 0;	/* number of error */ +	int 
roots_nbr;		/* number of roots */ +	int i; +	uint32_t val; +	int16_t *smu = host->pmecc_smu; +	int timeout = PMECC_MAX_TIMEOUT_US; + +	writel(PMERRLOC_DISABLE, &host->pmerrloc->eldis); + +	for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) { +		writel(smu[(cap + 1) * num + i], &host->pmerrloc->sigma[i]); +		err_nbr++; +	} + +	val = PMERRLOC_ELCFG_NUM_ERRORS(err_nbr - 1); +	if (sector_size == 1024) +		val |= PMERRLOC_ELCFG_SECTOR_1024; + +	writel(val, &host->pmerrloc->elcfg); +	writel(sector_size * 8 + host->pmecc_degree * cap, +			&host->pmerrloc->elen); + +	while (--timeout) { +		if (readl(&host->pmerrloc->elisr) & PMERRLOC_CALC_DONE) +			break; +		WATCHDOG_RESET(); +		udelay(1); +	} + +	if (!timeout) { +		dev_err(host->dev, "atmel_nand : Timeout to calculate PMECC error location\n"); +		return -1; +	} + +	roots_nbr = (readl(&host->pmerrloc->elisr) & PMERRLOC_ERR_NUM_MASK) +			>> 8; +	/* Number of roots == degree of smu hence <= cap */ +	if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1) +		return err_nbr - 1; + +	/* Number of roots does not match the degree of smu +	 * unable to correct error */ +	return -1; +} + +static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc, +		int sector_num, int extra_bytes, int err_nbr) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct atmel_nand_host *host = nand_chip->priv; +	int i = 0; +	int byte_pos, bit_pos, sector_size, pos; +	uint32_t tmp; +	uint8_t err_byte; + +	sector_size = host->pmecc_sector_size; + +	while (err_nbr) { +		tmp = readl(&host->pmerrloc->el[i]) - 1; +		byte_pos = tmp / 8; +		bit_pos  = tmp % 8; + +		if (byte_pos >= (sector_size + extra_bytes)) +			BUG();	/* should never happen */ + +		if (byte_pos < sector_size) { +			err_byte = *(buf + byte_pos); +			*(buf + byte_pos) ^= (1 << bit_pos); + +			pos = sector_num * host->pmecc_sector_size + byte_pos; +			dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", +				pos, bit_pos, err_byte, *(buf + 
byte_pos)); +		} else { +			/* Bit flip in OOB area */ +			tmp = sector_num * host->pmecc_bytes_per_sector +					+ (byte_pos - sector_size); +			err_byte = ecc[tmp]; +			ecc[tmp] ^= (1 << bit_pos); + +			pos = tmp + nand_chip->ecc.layout->eccpos[0]; +			dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n", +				pos, bit_pos, err_byte, ecc[tmp]); +		} + +		i++; +		err_nbr--; +	} + +	return; +} + +static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf, +	u8 *ecc) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct atmel_nand_host *host = nand_chip->priv; +	int i, err_nbr, eccbytes; +	uint8_t *buf_pos; + +	eccbytes = nand_chip->ecc.bytes; +	for (i = 0; i < eccbytes; i++) +		if (ecc[i] != 0xff) +			goto normal_check; +	/* Erased page, return OK */ +	return 0; + +normal_check: +	for (i = 0; i < host->pmecc_sector_number; i++) { +		err_nbr = 0; +		if (pmecc_stat & 0x1) { +			buf_pos = buf + i * host->pmecc_sector_size; + +			pmecc_gen_syndrome(mtd, i); +			pmecc_substitute(mtd); +			pmecc_get_sigma(mtd); + +			err_nbr = pmecc_err_location(mtd); +			if (err_nbr == -1) { +				dev_err(host->dev, "PMECC: Too many errors\n"); +				mtd->ecc_stats.failed++; +				return -EIO; +			} else { +				pmecc_correct_data(mtd, buf_pos, ecc, i, +					host->pmecc_bytes_per_sector, err_nbr); +				mtd->ecc_stats.corrected += err_nbr; +			} +		} +		pmecc_stat >>= 1; +	} + +	return 0; +} + +static int atmel_nand_pmecc_read_page(struct mtd_info *mtd, +	struct nand_chip *chip, uint8_t *buf, int oob_required, int page) +{ +	struct atmel_nand_host *host = chip->priv; +	int eccsize = chip->ecc.size; +	uint8_t *oob = chip->oob_poi; +	uint32_t *eccpos = chip->ecc.layout->eccpos; +	uint32_t stat; +	int timeout = PMECC_MAX_TIMEOUT_US; + +	pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_RST); +	pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_DISABLE); +	pmecc_writel(host->pmecc, cfg, ((pmecc_readl(host->pmecc, cfg)) +		& ~PMECC_CFG_WRITE_OP) | 
PMECC_CFG_AUTO_ENABLE); + +	pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_ENABLE); +	pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_DATA); + +	chip->read_buf(mtd, buf, eccsize); +	chip->read_buf(mtd, oob, mtd->oobsize); + +	while (--timeout) { +		if (!(pmecc_readl(host->pmecc, sr) & PMECC_SR_BUSY)) +			break; +		WATCHDOG_RESET(); +		udelay(1); +	} + +	if (!timeout) { +		dev_err(host->dev, "atmel_nand : Timeout to read PMECC page\n"); +		return -1; +	} + +	stat = pmecc_readl(host->pmecc, isr); +	if (stat != 0) +		if (pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]) != 0) +			return -EIO; + +	return 0; +} + +static int atmel_nand_pmecc_write_page(struct mtd_info *mtd, +		struct nand_chip *chip, const uint8_t *buf, +		int oob_required) +{ +	struct atmel_nand_host *host = chip->priv; +	uint32_t *eccpos = chip->ecc.layout->eccpos; +	int i, j; +	int timeout = PMECC_MAX_TIMEOUT_US; + +	pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_RST); +	pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_DISABLE); + +	pmecc_writel(host->pmecc, cfg, (pmecc_readl(host->pmecc, cfg) | +		PMECC_CFG_WRITE_OP) & ~PMECC_CFG_AUTO_ENABLE); + +	pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_ENABLE); +	pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_DATA); + +	chip->write_buf(mtd, (u8 *)buf, mtd->writesize); + +	while (--timeout) { +		if (!(pmecc_readl(host->pmecc, sr) & PMECC_SR_BUSY)) +			break; +		WATCHDOG_RESET(); +		udelay(1); +	} + +	if (!timeout) { +		dev_err(host->dev, "atmel_nand : Timeout to read PMECC status, fail to write PMECC in oob\n"); +		goto out; +	} + +	for (i = 0; i < host->pmecc_sector_number; i++) { +		for (j = 0; j < host->pmecc_bytes_per_sector; j++) { +			int pos; + +			pos = i * host->pmecc_bytes_per_sector + j; +			chip->oob_poi[eccpos[pos]] = +				readb(&host->pmecc->ecc_port[i].ecc[j]); +		} +	} +	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); +out: +	return 0; +} + +static void atmel_pmecc_core_init(struct mtd_info *mtd) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct atmel_nand_host 
*host = nand_chip->priv; +	uint32_t val = 0; +	struct nand_ecclayout *ecc_layout; + +	pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_RST); +	pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_DISABLE); + +	switch (host->pmecc_corr_cap) { +	case 2: +		val = PMECC_CFG_BCH_ERR2; +		break; +	case 4: +		val = PMECC_CFG_BCH_ERR4; +		break; +	case 8: +		val = PMECC_CFG_BCH_ERR8; +		break; +	case 12: +		val = PMECC_CFG_BCH_ERR12; +		break; +	case 24: +		val = PMECC_CFG_BCH_ERR24; +		break; +	} + +	if (host->pmecc_sector_size == 512) +		val |= PMECC_CFG_SECTOR512; +	else if (host->pmecc_sector_size == 1024) +		val |= PMECC_CFG_SECTOR1024; + +	switch (host->pmecc_sector_number) { +	case 1: +		val |= PMECC_CFG_PAGE_1SECTOR; +		break; +	case 2: +		val |= PMECC_CFG_PAGE_2SECTORS; +		break; +	case 4: +		val |= PMECC_CFG_PAGE_4SECTORS; +		break; +	case 8: +		val |= PMECC_CFG_PAGE_8SECTORS; +		break; +	} + +	val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE +		| PMECC_CFG_AUTO_DISABLE); +	pmecc_writel(host->pmecc, cfg, val); + +	ecc_layout = nand_chip->ecc.layout; +	pmecc_writel(host->pmecc, sarea, mtd->oobsize - 1); +	pmecc_writel(host->pmecc, saddr, ecc_layout->eccpos[0]); +	pmecc_writel(host->pmecc, eaddr, +			ecc_layout->eccpos[ecc_layout->eccbytes - 1]); +	/* See datasheet about PMECC Clock Control Register */ +	pmecc_writel(host->pmecc, clk, PMECC_CLK_133MHZ); +	pmecc_writel(host->pmecc, idr, 0xff); +	pmecc_writel(host->pmecc, ctrl, PMECC_CTRL_ENABLE); +} + +#ifdef CONFIG_SYS_NAND_ONFI_DETECTION +/* + * get_onfi_ecc_param - Get ECC requirement from ONFI parameters + * @ecc_bits: store the ONFI ECC correct bits capbility + * @sector_size: in how many bytes that ONFI require to correct @ecc_bits + * + * Returns -1 if ONFI parameters is not supported. In this case @ecc_bits, + * @sector_size are initialize to 0. + * Return 0 if success to get the ECC requirement. 
+ */ +static int get_onfi_ecc_param(struct nand_chip *chip, +		int *ecc_bits, int *sector_size) +{ +	*ecc_bits = *sector_size = 0; + +	if (chip->onfi_params.ecc_bits == 0xff) +		/* TODO: the sector_size and ecc_bits need to be find in +		 * extended ecc parameter, currently we don't support it. +		 */ +		return -1; + +	*ecc_bits = chip->onfi_params.ecc_bits; + +	/* The default sector size (ecc codeword size) is 512 */ +	*sector_size = 512; + +	return 0; +} + +/* + * pmecc_choose_ecc - Get ecc requirement from ONFI parameters. If + *                    pmecc_corr_cap or pmecc_sector_size is 0, then set it as + *                    ONFI ECC parameters. + * @host: point to an atmel_nand_host structure. + *        if host->pmecc_corr_cap is 0 then set it as the ONFI ecc_bits. + *        if host->pmecc_sector_size is 0 then set it as the ONFI sector_size. + * @chip: point to an nand_chip structure. + * @cap: store the ONFI ECC correct bits capbility + * @sector_size: in how many bytes that ONFI require to correct @ecc_bits + * + * Return 0 if success. otherwise return the error code. 
+ */ +static int pmecc_choose_ecc(struct atmel_nand_host *host, +		struct nand_chip *chip, +		int *cap, int *sector_size) +{ +	/* Get ECC requirement from ONFI parameters */ +	*cap = *sector_size = 0; +	if (chip->onfi_version) { +		if (!get_onfi_ecc_param(chip, cap, sector_size)) { +			MTDDEBUG(MTD_DEBUG_LEVEL1, "ONFI params, minimum required ECC: %d bits in %d bytes\n", +				*cap, *sector_size); +		} else { +			dev_info(host->dev, "NAND chip ECC reqirement is in Extended ONFI parameter, we don't support yet.\n"); +		} +	} else { +		dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes"); +	} +	if (*cap == 0 && *sector_size == 0) { +		/* Non-ONFI compliant or use extended ONFI parameters */ +		*cap = 2; +		*sector_size = 512; +	} + +	/* If head file doesn't specify then use the one in ONFI parameters */ +	if (host->pmecc_corr_cap == 0) { +		/* use the most fitable ecc bits (the near bigger one ) */ +		if (*cap <= 2) +			host->pmecc_corr_cap = 2; +		else if (*cap <= 4) +			host->pmecc_corr_cap = 4; +		else if (*cap <= 8) +			host->pmecc_corr_cap = 8; +		else if (*cap <= 12) +			host->pmecc_corr_cap = 12; +		else if (*cap <= 24) +			host->pmecc_corr_cap = 24; +		else +			return -EINVAL; +	} +	if (host->pmecc_sector_size == 0) { +		/* use the most fitable sector size (the near smaller one ) */ +		if (*sector_size >= 1024) +			host->pmecc_sector_size = 1024; +		else if (*sector_size >= 512) +			host->pmecc_sector_size = 512; +		else +			return -EINVAL; +	} +	return 0; +} +#endif + +static int atmel_pmecc_nand_init_params(struct nand_chip *nand, +		struct mtd_info *mtd) +{ +	struct atmel_nand_host *host; +	int cap, sector_size; + +	host = nand->priv = &pmecc_host; + +	nand->ecc.mode = NAND_ECC_HW; +	nand->ecc.calculate = NULL; +	nand->ecc.correct = NULL; +	nand->ecc.hwctl = NULL; + +#ifdef CONFIG_SYS_NAND_ONFI_DETECTION +	host->pmecc_corr_cap = host->pmecc_sector_size = 0; + +#ifdef CONFIG_PMECC_CAP +	host->pmecc_corr_cap = 
CONFIG_PMECC_CAP; +#endif +#ifdef CONFIG_PMECC_SECTOR_SIZE +	host->pmecc_sector_size = CONFIG_PMECC_SECTOR_SIZE; +#endif +	/* Get ECC requirement of ONFI parameters. And if CONFIG_PMECC_CAP or +	 * CONFIG_PMECC_SECTOR_SIZE not defined, then use ecc_bits, sector_size +	 * from ONFI. +	 */ +	if (pmecc_choose_ecc(host, nand, &cap, §or_size)) { +		dev_err(host->dev, "The NAND flash's ECC requirement(ecc_bits: %d, sector_size: %d) are not support!", +				cap, sector_size); +		return -EINVAL; +	} + +	if (cap > host->pmecc_corr_cap) +		dev_info(host->dev, "WARNING: Using different ecc correct bits(%d bit) from Nand ONFI ECC reqirement (%d bit).\n", +				host->pmecc_corr_cap, cap); +	if (sector_size < host->pmecc_sector_size) +		dev_info(host->dev, "WARNING: Using different ecc correct sector size (%d bytes) from Nand ONFI ECC reqirement (%d bytes).\n", +				host->pmecc_sector_size, sector_size); +#else	/* CONFIG_SYS_NAND_ONFI_DETECTION */ +	host->pmecc_corr_cap = CONFIG_PMECC_CAP; +	host->pmecc_sector_size = CONFIG_PMECC_SECTOR_SIZE; +#endif + +	cap = host->pmecc_corr_cap; +	sector_size = host->pmecc_sector_size; + +	/* TODO: need check whether cap & sector_size is validate */ + +	if (host->pmecc_sector_size == 512) +		host->pmecc_index_table_offset = ATMEL_PMECC_INDEX_OFFSET_512; +	else +		host->pmecc_index_table_offset = ATMEL_PMECC_INDEX_OFFSET_1024; + +	MTDDEBUG(MTD_DEBUG_LEVEL1, +		"Initialize PMECC params, cap: %d, sector: %d\n", +		cap, sector_size); + +	host->pmecc = (struct pmecc_regs __iomem *) ATMEL_BASE_PMECC; +	host->pmerrloc = (struct pmecc_errloc_regs __iomem *) +			ATMEL_BASE_PMERRLOC; +	host->pmecc_rom_base = (void __iomem *) ATMEL_BASE_ROM; + +	/* ECC is calculated for the whole page (1 step) */ +	nand->ecc.size = mtd->writesize; + +	/* set ECC page size and oob layout */ +	switch (mtd->writesize) { +	case 2048: +	case 4096: +	case 8192: +		host->pmecc_degree = (sector_size == 512) ? 
+			PMECC_GF_DIMENSION_13 : PMECC_GF_DIMENSION_14; +		host->pmecc_cw_len = (1 << host->pmecc_degree) - 1; +		host->pmecc_sector_number = mtd->writesize / sector_size; +		host->pmecc_bytes_per_sector = pmecc_get_ecc_bytes( +			cap, sector_size); +		host->pmecc_alpha_to = pmecc_get_alpha_to(host); +		host->pmecc_index_of = host->pmecc_rom_base + +			host->pmecc_index_table_offset; + +		nand->ecc.steps = 1; +		nand->ecc.bytes = host->pmecc_bytes_per_sector * +				       host->pmecc_sector_number; + +		if (nand->ecc.bytes > MTD_MAX_ECCPOS_ENTRIES_LARGE) { +			dev_err(host->dev, "too large eccpos entries. max support ecc.bytes is %d\n", +					MTD_MAX_ECCPOS_ENTRIES_LARGE); +			return -EINVAL; +		} + +		if (nand->ecc.bytes > mtd->oobsize - 2) { +			dev_err(host->dev, "No room for ECC bytes\n"); +			return -EINVAL; +		} +		pmecc_config_ecc_layout(&atmel_pmecc_oobinfo, +					mtd->oobsize, +					nand->ecc.bytes); +		nand->ecc.layout = &atmel_pmecc_oobinfo; +		break; +	case 512: +	case 1024: +		/* TODO */ +		dev_err(host->dev, "Unsupported page size for PMECC, use Software ECC\n"); +	default: +		/* page size not handled by HW ECC */ +		/* switching back to soft ECC */ +		nand->ecc.mode = NAND_ECC_SOFT; +		nand->ecc.read_page = NULL; +		nand->ecc.postpad = 0; +		nand->ecc.prepad = 0; +		nand->ecc.bytes = 0; +		return 0; +	} + +	/* Allocate data for PMECC computation */ +	if (pmecc_data_alloc(host)) { +		dev_err(host->dev, "Cannot allocate memory for PMECC computation!\n"); +		return -ENOMEM; +	} + +	nand->ecc.read_page = atmel_nand_pmecc_read_page; +	nand->ecc.write_page = atmel_nand_pmecc_write_page; +	nand->ecc.strength = cap; + +	atmel_pmecc_core_init(mtd); + +	return 0; +} + +#else + +/* oob layout for large page size + * bad block info is on bytes 0 and 1 + * the bytes have to be consecutives to avoid + * several NAND_CMD_RNDOUT during read + */ +static struct nand_ecclayout atmel_oobinfo_large = { +	.eccbytes = 4, +	.eccpos = {60, 61, 62, 63}, +	.oobfree = { +		{2, 58} 
+	}, +}; + +/* oob layout for small page size + * bad block info is on bytes 4 and 5 + * the bytes have to be consecutives to avoid + * several NAND_CMD_RNDOUT during read + */ +static struct nand_ecclayout atmel_oobinfo_small = { +	.eccbytes = 4, +	.eccpos = {0, 1, 2, 3}, +	.oobfree = { +		{6, 10} +	}, +}; + +/* + * Calculate HW ECC + * + * function called after a write + * + * mtd:        MTD block structure + * dat:        raw data (unused) + * ecc_code:   buffer for ECC + */ +static int atmel_nand_calculate(struct mtd_info *mtd, +		const u_char *dat, unsigned char *ecc_code) +{ +	unsigned int ecc_value; + +	/* get the first 2 ECC bytes */ +	ecc_value = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, PR); + +	ecc_code[0] = ecc_value & 0xFF; +	ecc_code[1] = (ecc_value >> 8) & 0xFF; + +	/* get the last 2 ECC bytes */ +	ecc_value = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, NPR) & ATMEL_ECC_NPARITY; + +	ecc_code[2] = ecc_value & 0xFF; +	ecc_code[3] = (ecc_value >> 8) & 0xFF; + +	return 0; +} + +/* + * HW ECC read page function + * + * mtd:        mtd info structure + * chip:       nand chip info structure + * buf:        buffer to store read data + * oob_required:    caller expects OOB data read to chip->oob_poi + */ +static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, +				uint8_t *buf, int oob_required, int page) +{ +	int eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	uint32_t *eccpos = chip->ecc.layout->eccpos; +	uint8_t *p = buf; +	uint8_t *oob = chip->oob_poi; +	uint8_t *ecc_pos; +	int stat; + +	/* read the page */ +	chip->read_buf(mtd, p, eccsize); + +	/* move to ECC position if needed */ +	if (eccpos[0] != 0) { +		/* This only works on large pages +		 * because the ECC controller waits for +		 * NAND_CMD_RNDOUTSTART after the +		 * NAND_CMD_RNDOUT. 
+		 * anyway, for small pages, the eccpos[0] == 0 +		 */ +		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, +				mtd->writesize + eccpos[0], -1); +	} + +	/* the ECC controller needs to read the ECC just after the data */ +	ecc_pos = oob + eccpos[0]; +	chip->read_buf(mtd, ecc_pos, eccbytes); + +	/* check if there's an error */ +	stat = chip->ecc.correct(mtd, p, oob, NULL); + +	if (stat < 0) +		mtd->ecc_stats.failed++; +	else +		mtd->ecc_stats.corrected += stat; + +	/* get back to oob start (end of page) */ +	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); + +	/* read the oob */ +	chip->read_buf(mtd, oob, mtd->oobsize); + +	return 0; +} + +/* + * HW ECC Correction + * + * function called after a read + * + * mtd:        MTD block structure + * dat:        raw data read from the chip + * read_ecc:   ECC from the chip (unused) + * isnull:     unused + * + * Detect and correct a 1 bit error for a page + */ +static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat, +		u_char *read_ecc, u_char *isnull) +{ +	struct nand_chip *nand_chip = mtd->priv; +	unsigned int ecc_status; +	unsigned int ecc_word, ecc_bit; + +	/* get the status from the Status Register */ +	ecc_status = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, SR); + +	/* if there's no error */ +	if (likely(!(ecc_status & ATMEL_ECC_RECERR))) +		return 0; + +	/* get error bit offset (4 bits) */ +	ecc_bit = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, PR) & ATMEL_ECC_BITADDR; +	/* get word address (12 bits) */ +	ecc_word = ecc_readl(CONFIG_SYS_NAND_ECC_BASE, PR) & ATMEL_ECC_WORDADDR; +	ecc_word >>= 4; + +	/* if there are multiple errors */ +	if (ecc_status & ATMEL_ECC_MULERR) { +		/* check if it is a freshly erased block +		 * (filled with 0xff) */ +		if ((ecc_bit == ATMEL_ECC_BITADDR) +				&& (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) { +			/* the block has just been erased, return OK */ +			return 0; +		} +		/* it doesn't seems to be a freshly +		 * erased block. 
+		 * We can't correct so many errors */ +		dev_warn(host->dev, "atmel_nand : multiple errors detected." +				" Unable to correct.\n"); +		return -EIO; +	} + +	/* if there's a single bit error : we can correct it */ +	if (ecc_status & ATMEL_ECC_ECCERR) { +		/* there's nothing much to do here. +		 * the bit error is on the ECC itself. +		 */ +		dev_warn(host->dev, "atmel_nand : one bit error on ECC code." +				" Nothing to correct\n"); +		return 0; +	} + +	dev_warn(host->dev, "atmel_nand : one bit error on data." +			" (word offset in the page :" +			" 0x%x bit offset : 0x%x)\n", +			ecc_word, ecc_bit); +	/* correct the error */ +	if (nand_chip->options & NAND_BUSWIDTH_16) { +		/* 16 bits words */ +		((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit); +	} else { +		/* 8 bits words */ +		dat[ecc_word] ^= (1 << ecc_bit); +	} +	dev_warn(host->dev, "atmel_nand : error corrected\n"); +	return 1; +} + +/* + * Enable HW ECC : unused on most chips + */ +static void atmel_nand_hwctl(struct mtd_info *mtd, int mode) +{ +} + +int atmel_hwecc_nand_init_param(struct nand_chip *nand, struct mtd_info *mtd) +{ +	nand->ecc.mode = NAND_ECC_HW; +	nand->ecc.calculate = atmel_nand_calculate; +	nand->ecc.correct = atmel_nand_correct; +	nand->ecc.hwctl = atmel_nand_hwctl; +	nand->ecc.read_page = atmel_nand_read_page; +	nand->ecc.bytes = 4; + +	if (nand->ecc.mode == NAND_ECC_HW) { +		/* ECC is calculated for the whole page (1 step) */ +		nand->ecc.size = mtd->writesize; + +		/* set ECC page size and oob layout */ +		switch (mtd->writesize) { +		case 512: +			nand->ecc.layout = &atmel_oobinfo_small; +			ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR, +					ATMEL_ECC_PAGESIZE_528); +			break; +		case 1024: +			nand->ecc.layout = &atmel_oobinfo_large; +			ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR, +					ATMEL_ECC_PAGESIZE_1056); +			break; +		case 2048: +			nand->ecc.layout = &atmel_oobinfo_large; +			ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR, +					ATMEL_ECC_PAGESIZE_2112); +			break; +		case 4096: +		
	nand->ecc.layout = &atmel_oobinfo_large; +			ecc_writel(CONFIG_SYS_NAND_ECC_BASE, MR, +					ATMEL_ECC_PAGESIZE_4224); +			break; +		default: +			/* page size not handled by HW ECC */ +			/* switching back to soft ECC */ +			nand->ecc.mode = NAND_ECC_SOFT; +			nand->ecc.calculate = NULL; +			nand->ecc.correct = NULL; +			nand->ecc.hwctl = NULL; +			nand->ecc.read_page = NULL; +			nand->ecc.postpad = 0; +			nand->ecc.prepad = 0; +			nand->ecc.bytes = 0; +			break; +		} +	} + +	return 0; +} + +#endif /* CONFIG_ATMEL_NAND_HW_PMECC */ + +#endif /* CONFIG_ATMEL_NAND_HWECC */ + +static void at91_nand_hwcontrol(struct mtd_info *mtd, +					 int cmd, unsigned int ctrl) +{ +	struct nand_chip *this = mtd->priv; + +	if (ctrl & NAND_CTRL_CHANGE) { +		ulong IO_ADDR_W = (ulong) this->IO_ADDR_W; +		IO_ADDR_W &= ~(CONFIG_SYS_NAND_MASK_ALE +			     | CONFIG_SYS_NAND_MASK_CLE); + +		if (ctrl & NAND_CLE) +			IO_ADDR_W |= CONFIG_SYS_NAND_MASK_CLE; +		if (ctrl & NAND_ALE) +			IO_ADDR_W |= CONFIG_SYS_NAND_MASK_ALE; + +#ifdef CONFIG_SYS_NAND_ENABLE_PIN +		gpio_set_value(CONFIG_SYS_NAND_ENABLE_PIN, !(ctrl & NAND_NCE)); +#endif +		this->IO_ADDR_W = (void *) IO_ADDR_W; +	} + +	if (cmd != NAND_CMD_NONE) +		writeb(cmd, this->IO_ADDR_W); +} + +#ifdef CONFIG_SYS_NAND_READY_PIN +static int at91_nand_ready(struct mtd_info *mtd) +{ +	return gpio_get_value(CONFIG_SYS_NAND_READY_PIN); +} +#endif + +#ifdef CONFIG_SPL_BUILD +/* The following code is for SPL */ +static nand_info_t mtd; +static struct nand_chip nand_chip; + +static int nand_command(int block, int page, uint32_t offs, u8 cmd) +{ +	struct nand_chip *this = mtd.priv; +	int page_addr = page + block * CONFIG_SYS_NAND_PAGE_COUNT; +	void (*hwctrl)(struct mtd_info *mtd, int cmd, +			unsigned int ctrl) = this->cmd_ctrl; + +	while (this->dev_ready(&mtd)) +		; + +	if (cmd == NAND_CMD_READOOB) { +		offs += CONFIG_SYS_NAND_PAGE_SIZE; +		cmd = NAND_CMD_READ0; +	} + +	hwctrl(&mtd, cmd, NAND_CTRL_CLE | NAND_CTRL_CHANGE); + +	if (this->options & 
NAND_BUSWIDTH_16) +		offs >>= 1; + +	hwctrl(&mtd, offs & 0xff, NAND_CTRL_ALE | NAND_CTRL_CHANGE); +	hwctrl(&mtd, (offs >> 8) & 0xff, NAND_CTRL_ALE); +	hwctrl(&mtd, (page_addr & 0xff), NAND_CTRL_ALE); +	hwctrl(&mtd, ((page_addr >> 8) & 0xff), NAND_CTRL_ALE); +#ifdef CONFIG_SYS_NAND_5_ADDR_CYCLE +	hwctrl(&mtd, (page_addr >> 16) & 0x0f, NAND_CTRL_ALE); +#endif +	hwctrl(&mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); + +	hwctrl(&mtd, NAND_CMD_READSTART, NAND_CTRL_CLE | NAND_CTRL_CHANGE); +	hwctrl(&mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); + +	while (this->dev_ready(&mtd)) +		; + +	return 0; +} + +static int nand_is_bad_block(int block) +{ +	struct nand_chip *this = mtd.priv; + +	nand_command(block, 0, CONFIG_SYS_NAND_BAD_BLOCK_POS, NAND_CMD_READOOB); + +	if (this->options & NAND_BUSWIDTH_16) { +		if (readw(this->IO_ADDR_R) != 0xffff) +			return 1; +	} else { +		if (readb(this->IO_ADDR_R) != 0xff) +			return 1; +	} + +	return 0; +} + +#ifdef CONFIG_SPL_NAND_ECC +static int nand_ecc_pos[] = CONFIG_SYS_NAND_ECCPOS; +#define ECCSTEPS (CONFIG_SYS_NAND_PAGE_SIZE / \ +		  CONFIG_SYS_NAND_ECCSIZE) +#define ECCTOTAL (ECCSTEPS * CONFIG_SYS_NAND_ECCBYTES) + +static int nand_read_page(int block, int page, void *dst) +{ +	struct nand_chip *this = mtd.priv; +	u_char ecc_calc[ECCTOTAL]; +	u_char ecc_code[ECCTOTAL]; +	u_char oob_data[CONFIG_SYS_NAND_OOBSIZE]; +	int eccsize = CONFIG_SYS_NAND_ECCSIZE; +	int eccbytes = CONFIG_SYS_NAND_ECCBYTES; +	int eccsteps = ECCSTEPS; +	int i; +	uint8_t *p = dst; +	nand_command(block, page, 0, NAND_CMD_READ0); + +	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		if (this->ecc.mode != NAND_ECC_SOFT) +			this->ecc.hwctl(&mtd, NAND_ECC_READ); +		this->read_buf(&mtd, p, eccsize); +		this->ecc.calculate(&mtd, p, &ecc_calc[i]); +	} +	this->read_buf(&mtd, oob_data, CONFIG_SYS_NAND_OOBSIZE); + +	for (i = 0; i < ECCTOTAL; i++) +		ecc_code[i] = oob_data[nand_ecc_pos[i]]; + +	eccsteps = ECCSTEPS; +	p = dst; + +	for (i = 0; eccsteps; 
eccsteps--, i += eccbytes, p += eccsize) +		this->ecc.correct(&mtd, p, &ecc_code[i], &ecc_calc[i]); + +	return 0; +} +#else +static int nand_read_page(int block, int page, void *dst) +{ +	struct nand_chip *this = mtd.priv; + +	nand_command(block, page, 0, NAND_CMD_READ0); +	atmel_nand_pmecc_read_page(&mtd, this, dst, 0, page); + +	return 0; +} +#endif /* CONFIG_SPL_NAND_ECC */ + +int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst) +{ +	unsigned int block, lastblock; +	unsigned int page; + +	block = offs / CONFIG_SYS_NAND_BLOCK_SIZE; +	lastblock = (offs + size - 1) / CONFIG_SYS_NAND_BLOCK_SIZE; +	page = (offs % CONFIG_SYS_NAND_BLOCK_SIZE) / CONFIG_SYS_NAND_PAGE_SIZE; + +	while (block <= lastblock) { +		if (!nand_is_bad_block(block)) { +			while (page < CONFIG_SYS_NAND_PAGE_COUNT) { +				nand_read_page(block, page, dst); +				dst += CONFIG_SYS_NAND_PAGE_SIZE; +				page++; +			} + +			page = 0; +		} else { +			lastblock++; +		} + +		block++; +	} + +	return 0; +} + +int at91_nand_wait_ready(struct mtd_info *mtd) +{ +	struct nand_chip *this = mtd->priv; + +	udelay(this->chip_delay); + +	return 0; +} + +int board_nand_init(struct nand_chip *nand) +{ +	int ret = 0; + +	nand->ecc.mode = NAND_ECC_SOFT; +#ifdef CONFIG_SYS_NAND_DBW_16 +	nand->options = NAND_BUSWIDTH_16; +	nand->read_buf = nand_read_buf16; +#else +	nand->read_buf = nand_read_buf; +#endif +	nand->cmd_ctrl = at91_nand_hwcontrol; +#ifdef CONFIG_SYS_NAND_READY_PIN +	nand->dev_ready = at91_nand_ready; +#else +	nand->dev_ready = at91_nand_wait_ready; +#endif +	nand->chip_delay = 20; + +#ifdef CONFIG_ATMEL_NAND_HWECC +#ifdef CONFIG_ATMEL_NAND_HW_PMECC +	ret = atmel_pmecc_nand_init_params(nand, &mtd); +#endif +#endif + +	return ret; +} + +void nand_init(void) +{ +	mtd.writesize = CONFIG_SYS_NAND_PAGE_SIZE; +	mtd.oobsize = CONFIG_SYS_NAND_OOBSIZE; +	mtd.priv = &nand_chip; +	nand_chip.IO_ADDR_R = (void __iomem *)CONFIG_SYS_NAND_BASE; +	nand_chip.IO_ADDR_W = (void __iomem *)CONFIG_SYS_NAND_BASE; +	
board_nand_init(&nand_chip); + +#ifdef CONFIG_SPL_NAND_ECC +	if (nand_chip.ecc.mode == NAND_ECC_SOFT) { +		nand_chip.ecc.calculate = nand_calculate_ecc; +		nand_chip.ecc.correct = nand_correct_data; +	} +#endif + +	if (nand_chip.select_chip) +		nand_chip.select_chip(&mtd, 0); +} + +void nand_deselect(void) +{ +	if (nand_chip.select_chip) +		nand_chip.select_chip(&mtd, -1); +} + +#else + +#ifndef CONFIG_SYS_NAND_BASE_LIST +#define CONFIG_SYS_NAND_BASE_LIST { CONFIG_SYS_NAND_BASE } +#endif +static struct nand_chip nand_chip[CONFIG_SYS_MAX_NAND_DEVICE]; +static ulong base_addr[CONFIG_SYS_MAX_NAND_DEVICE] = CONFIG_SYS_NAND_BASE_LIST; + +int atmel_nand_chip_init(int devnum, ulong base_addr) +{ +	int ret; +	struct mtd_info *mtd = &nand_info[devnum]; +	struct nand_chip *nand = &nand_chip[devnum]; + +	mtd->priv = nand; +	nand->IO_ADDR_R = nand->IO_ADDR_W = (void  __iomem *)base_addr; + +#ifdef CONFIG_NAND_ECC_BCH +	nand->ecc.mode = NAND_ECC_SOFT_BCH; +#else +	nand->ecc.mode = NAND_ECC_SOFT; +#endif +#ifdef CONFIG_SYS_NAND_DBW_16 +	nand->options = NAND_BUSWIDTH_16; +#endif +	nand->cmd_ctrl = at91_nand_hwcontrol; +#ifdef CONFIG_SYS_NAND_READY_PIN +	nand->dev_ready = at91_nand_ready; +#endif +	nand->chip_delay = 75; + +	ret = nand_scan_ident(mtd, CONFIG_SYS_NAND_MAX_CHIPS, NULL); +	if (ret) +		return ret; + +#ifdef CONFIG_ATMEL_NAND_HWECC +#ifdef CONFIG_ATMEL_NAND_HW_PMECC +	ret = atmel_pmecc_nand_init_params(nand, mtd); +#else +	ret = atmel_hwecc_nand_init_param(nand, mtd); +#endif +	if (ret) +		return ret; +#endif + +	ret = nand_scan_tail(mtd); +	if (!ret) +		nand_register(devnum); + +	return ret; +} + +void board_nand_init(void) +{ +	int i; +	for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++) +		if (atmel_nand_chip_init(i, base_addr[i])) +			dev_err(host->dev, "atmel_nand: Fail to initialize #%d chip", +				i); +} +#endif /* CONFIG_SPL_BUILD */ diff --git a/roms/u-boot/drivers/mtd/nand/atmel_nand_ecc.h b/roms/u-boot/drivers/mtd/nand/atmel_nand_ecc.h new file mode 100644 
index 00000000..55d7711c --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/atmel_nand_ecc.h @@ -0,0 +1,146 @@ +/* + * Error Corrected Code Controller (ECC) - System peripherals regsters. + * Based on AT91SAM9260 datasheet revision B. + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#ifndef ATMEL_NAND_ECC_H +#define ATMEL_NAND_ECC_H + +#define ATMEL_ECC_CR		0x00			/* Control register */ +#define		ATMEL_ECC_RST		(1 << 0)		/* Reset parity */ + +#define ATMEL_ECC_MR		0x04			/* Mode register */ +#define		ATMEL_ECC_PAGESIZE	(3 << 0)		/* Page Size */ +#define			ATMEL_ECC_PAGESIZE_528		(0) +#define			ATMEL_ECC_PAGESIZE_1056		(1) +#define			ATMEL_ECC_PAGESIZE_2112		(2) +#define			ATMEL_ECC_PAGESIZE_4224		(3) + +#define ATMEL_ECC_SR		0x08			/* Status register */ +#define		ATMEL_ECC_RECERR		(1 << 0)		/* Recoverable Error */ +#define		ATMEL_ECC_ECCERR		(1 << 1)		/* ECC Single Bit Error */ +#define		ATMEL_ECC_MULERR		(1 << 2)		/* Multiple Errors */ + +#define ATMEL_ECC_PR		0x0c			/* Parity register */ +#define		ATMEL_ECC_BITADDR	(0xf << 0)		/* Bit Error Address */ +#define		ATMEL_ECC_WORDADDR	(0xfff << 4)		/* Word Error Address */ + +#define ATMEL_ECC_NPR		0x10			/* NParity register */ +#define		ATMEL_ECC_NPARITY	(0xffff << 0)		/* NParity */ + +/* Register access macros for PMECC */ +#define pmecc_readl(addr, reg) \ +	readl(&addr->reg) + +#define pmecc_writel(addr, reg, value) \ +	writel((value), &addr->reg) + +/* PMECC Register Definitions */ +#define PMECC_MAX_SECTOR_NUM			8 +struct pmecc_regs { +	u32 cfg;		/* 0x00 PMECC Configuration Register */ +	u32 sarea;		/* 0x04 PMECC Spare Area Size Register */ +	u32 saddr;		/* 0x08 PMECC Start Address Register */ +	u32 eaddr;		/* 0x0C PMECC End Address Register */ +	u32 clk;		/* 0x10 PMECC Clock Control Register */ +	u32 ctrl;		/* 0x14 PMECC Control Register */ +	u32 sr;			/* 0x18 PMECC Status Register */ +	u32 ier;		/* 0x1C PMECC Interrupt Enable Register */ +	u32 idr;		/* 0x20 PMECC Interrupt Disable Register */ +	u32 imr;		/* 0x24 
PMECC Interrupt Mask Register */ +	u32 isr;		/* 0x28 PMECC Interrupt Status Register */ +	u32 reserved0[5];	/* 0x2C-0x3C Reserved */ + +	/* 0x40 + sector_num * (0x40), Redundancy Registers */ +	struct { +		u8 ecc[44];	/* PMECC Generated Redundancy Byte Per Sector */ +		u32 reserved1[5]; +	} ecc_port[PMECC_MAX_SECTOR_NUM]; + +	/* 0x240 + sector_num * (0x40) Remainder Registers */ +	struct { +		u32 rem[12]; +		u32 reserved2[4]; +	} rem_port[PMECC_MAX_SECTOR_NUM]; +	u32 reserved3[16];	/* 0x440-0x47C Reserved */ +}; + +/* For PMECC Configuration Register */ +#define		PMECC_CFG_BCH_ERR2		(0 << 0) +#define		PMECC_CFG_BCH_ERR4		(1 << 0) +#define		PMECC_CFG_BCH_ERR8		(2 << 0) +#define		PMECC_CFG_BCH_ERR12		(3 << 0) +#define		PMECC_CFG_BCH_ERR24		(4 << 0) + +#define		PMECC_CFG_SECTOR512		(0 << 4) +#define		PMECC_CFG_SECTOR1024		(1 << 4) + +#define		PMECC_CFG_PAGE_1SECTOR		(0 << 8) +#define		PMECC_CFG_PAGE_2SECTORS		(1 << 8) +#define		PMECC_CFG_PAGE_4SECTORS		(2 << 8) +#define		PMECC_CFG_PAGE_8SECTORS		(3 << 8) + +#define		PMECC_CFG_READ_OP		(0 << 12) +#define		PMECC_CFG_WRITE_OP		(1 << 12) + +#define		PMECC_CFG_SPARE_ENABLE		(1 << 16) +#define		PMECC_CFG_SPARE_DISABLE		(0 << 16) + +#define		PMECC_CFG_AUTO_ENABLE		(1 << 20) +#define		PMECC_CFG_AUTO_DISABLE		(0 << 20) + +/* For PMECC Clock Control Register */ +#define		PMECC_CLK_133MHZ		(2 << 0) + +/* For PMECC Control Register */ +#define		PMECC_CTRL_RST			(1 << 0) +#define		PMECC_CTRL_DATA			(1 << 1) +#define		PMECC_CTRL_USER			(1 << 2) +#define		PMECC_CTRL_ENABLE		(1 << 4) +#define		PMECC_CTRL_DISABLE		(1 << 5) + +/* For PMECC Status Register */ +#define		PMECC_SR_BUSY			(1 << 0) +#define		PMECC_SR_ENABLE			(1 << 4) + +/* PMERRLOC Register Definitions */ +struct pmecc_errloc_regs { +	u32 elcfg;	/* 0x00 Error Location Configuration Register */ +	u32 elprim;	/* 0x04 Error Location Primitive Register */ +	u32 elen;	/* 0x08 Error Location Enable Register */ +	u32 eldis;	/* 0x0C Error Location Disable Register */ +	u32 elsr;	/* 
0x10 Error Location Status Register */ +	u32 elier;	/* 0x14 Error Location Interrupt Enable Register */ +	u32 elidr;	/* 0x08 Error Location Interrupt Disable Register */ +	u32 elimr;	/* 0x0C Error Location Interrupt Mask Register */ +	u32 elisr;	/* 0x20 Error Location Interrupt Status Register */ +	u32 reserved0;	/* 0x24 Reserved */ +	u32 sigma[25];	/* 0x28-0x88 Error Location Sigma Registers */ +	u32 el[24];	/* 0x8C-0xE8 Error Location Registers */ +	u32 reserved1[5];	/* 0xEC-0xFC Reserved */ +}; + +/* For Error Location Configuration Register */ +#define		PMERRLOC_ELCFG_SECTOR_512	(0 << 0) +#define		PMERRLOC_ELCFG_SECTOR_1024	(1 << 0) +#define		PMERRLOC_ELCFG_NUM_ERRORS(n)	((n) << 16) + +/* For Error Location Disable Register */ +#define		PMERRLOC_DISABLE		(1 << 0) + +/* For Error Location Interrupt Status Register */ +#define		PMERRLOC_ERR_NUM_MASK		(0x1f << 8) +#define		PMERRLOC_CALC_DONE		(1 << 0) + +/* Galois field dimension */ +#define PMECC_GF_DIMENSION_13			13 +#define PMECC_GF_DIMENSION_14			14 + +#define PMECC_INDEX_TABLE_SIZE_512		0x2000 +#define PMECC_INDEX_TABLE_SIZE_1024		0x4000 + +#define PMECC_MAX_TIMEOUT_US		(100 * 1000) + +#endif diff --git a/roms/u-boot/drivers/mtd/nand/bfin_nand.c b/roms/u-boot/drivers/mtd/nand/bfin_nand.c new file mode 100644 index 00000000..7e755e89 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/bfin_nand.c @@ -0,0 +1,393 @@ +/* + * Driver for Blackfin on-chip NAND controller. + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Copyright (c) 2007-2008 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +/* TODO: + * - move bit defines into mach-common/bits/nand.h + * - try and replace all IRQSTAT usage with STAT polling + * - have software ecc mode use same algo as hw ecc ? 
+ */ + +#include <common.h> +#include <asm/io.h> + +#ifdef DEBUG +# define pr_stamp() printf("%s:%s:%i: here i am\n", __FILE__, __func__, __LINE__) +#else +# define pr_stamp() +#endif + +#include <nand.h> + +#include <asm/blackfin.h> +#include <asm/portmux.h> + +/* Bit masks for NFC_CTL */ + +#define                    WR_DLY  0xf        /* Write Strobe Delay */ +#define                    RD_DLY  0xf0       /* Read Strobe Delay */ +#define                    NWIDTH  0x100      /* NAND Data Width */ +#define                   PG_SIZE  0x200      /* Page Size */ + +/* Bit masks for NFC_STAT */ + +#define                     NBUSY  0x1        /* Not Busy */ +#define                   WB_FULL  0x2        /* Write Buffer Full */ +#define                PG_WR_STAT  0x4        /* Page Write Pending */ +#define                PG_RD_STAT  0x8        /* Page Read Pending */ +#define                  WB_EMPTY  0x10       /* Write Buffer Empty */ + +/* Bit masks for NFC_IRQSTAT */ + +#define                  NBUSYIRQ  0x1        /* Not Busy IRQ */ +#define                    WB_OVF  0x2        /* Write Buffer Overflow */ +#define                   WB_EDGE  0x4        /* Write Buffer Edge Detect */ +#define                    RD_RDY  0x8        /* Read Data Ready */ +#define                   WR_DONE  0x10       /* Page Write Done */ + +#define NAND_IS_512() (CONFIG_BFIN_NFC_CTL_VAL & 0x200) + +/* + * hardware specific access to control-lines + */ +static void bfin_nfc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) +{ +	pr_stamp(); + +	if (cmd == NAND_CMD_NONE) +		return; + +	while (bfin_read_NFC_STAT() & WB_FULL) +		continue; + +	if (ctrl & NAND_CLE) +		bfin_write_NFC_CMD(cmd); +	else +		bfin_write_NFC_ADDR(cmd); +	SSYNC(); +} + +static int bfin_nfc_devready(struct mtd_info *mtd) +{ +	pr_stamp(); +	return (bfin_read_NFC_STAT() & NBUSY) ? 
1 : 0; +} + +/* + * PIO mode for buffer writing and reading + */ +static void bfin_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) +{ +	pr_stamp(); + +	int i; + +	/* +	 * Data reads are requested by first writing to NFC_DATA_RD +	* and then reading back from NFC_READ. +	*/ +	for (i = 0; i < len; ++i) { +		while (bfin_read_NFC_STAT() & WB_FULL) +			if (ctrlc()) +				return; + +		/* Contents do not matter */ +		bfin_write_NFC_DATA_RD(0x0000); +		SSYNC(); + +		while (!(bfin_read_NFC_IRQSTAT() & RD_RDY)) +			if (ctrlc()) +				return; + +		buf[i] = bfin_read_NFC_READ(); + +		bfin_write_NFC_IRQSTAT(RD_RDY); +	} +} + +static uint8_t bfin_nfc_read_byte(struct mtd_info *mtd) +{ +	pr_stamp(); + +	uint8_t val; +	bfin_nfc_read_buf(mtd, &val, 1); +	return val; +} + +static void bfin_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) +{ +	pr_stamp(); + +	int i; + +	for (i = 0; i < len; ++i) { +		while (bfin_read_NFC_STAT() & WB_FULL) +			if (ctrlc()) +				return; + +		bfin_write_NFC_DATA_WR(buf[i]); +	} + +	/* Wait for the buffer to drain before we return */ +	while (!(bfin_read_NFC_STAT() & WB_EMPTY)) +		if (ctrlc()) +			return; +} + +/* + * ECC functions + * These allow the bfin to use the controller's ECC + * generator block to ECC the data as it passes through + */ + +/* + * ECC error correction function + */ +static int bfin_nfc_correct_data_256(struct mtd_info *mtd, u_char *dat, +					u_char *read_ecc, u_char *calc_ecc) +{ +	u32 syndrome[5]; +	u32 calced, stored; +	unsigned short failing_bit, failing_byte; +	u_char data; + +	pr_stamp(); + +	calced = calc_ecc[0] | (calc_ecc[1] << 8) | (calc_ecc[2] << 16); +	stored = read_ecc[0] | (read_ecc[1] << 8) | (read_ecc[2] << 16); + +	syndrome[0] = (calced ^ stored); + +	/* +	 * syndrome 0: all zero +	 * No error in data +	 * No action +	 */ +	if (!syndrome[0] || !calced || !stored) +		return 0; + +	/* +	 * sysdrome 0: only one bit is one +	 * ECC data was incorrect +	 * No action +	 */ +	if 
(hweight32(syndrome[0]) == 1) +		return 1; + +	syndrome[1] = (calced & 0x7FF) ^ (stored & 0x7FF); +	syndrome[2] = (calced & 0x7FF) ^ ((calced >> 11) & 0x7FF); +	syndrome[3] = (stored & 0x7FF) ^ ((stored >> 11) & 0x7FF); +	syndrome[4] = syndrome[2] ^ syndrome[3]; + +	/* +	 * sysdrome 0: exactly 11 bits are one, each parity +	 * and parity' pair is 1 & 0 or 0 & 1. +	 * 1-bit correctable error +	 * Correct the error +	 */ +	if (hweight32(syndrome[0]) == 11 && syndrome[4] == 0x7FF) { +		failing_bit = syndrome[1] & 0x7; +		failing_byte = syndrome[1] >> 0x3; +		data = *(dat + failing_byte); +		data = data ^ (0x1 << failing_bit); +		*(dat + failing_byte) = data; + +		return 0; +	} + +	/* +	 * sysdrome 0: random data +	 * More than 1-bit error, non-correctable error +	 * Discard data, mark bad block +	 */ + +	return 1; +} + +static int bfin_nfc_correct_data(struct mtd_info *mtd, u_char *dat, +					u_char *read_ecc, u_char *calc_ecc) +{ +	int ret; + +	pr_stamp(); + +	ret = bfin_nfc_correct_data_256(mtd, dat, read_ecc, calc_ecc); + +	/* If page size is 512, correct second 256 bytes */ +	if (NAND_IS_512()) { +		dat += 256; +		read_ecc += 8; +		calc_ecc += 8; +		ret |= bfin_nfc_correct_data_256(mtd, dat, read_ecc, calc_ecc); +	} + +	return ret; +} + +static void reset_ecc(void) +{ +	bfin_write_NFC_RST(0x1); +	while (bfin_read_NFC_RST() & 1) +		continue; +} + +static void bfin_nfc_enable_hwecc(struct mtd_info *mtd, int mode) +{ +	reset_ecc(); +} + +static int bfin_nfc_calculate_ecc(struct mtd_info *mtd, +		const u_char *dat, u_char *ecc_code) +{ +	u16 ecc0, ecc1; +	u32 code[2]; +	u8 *p; + +	pr_stamp(); + +	/* first 4 bytes ECC code for 256 page size */ +	ecc0 = bfin_read_NFC_ECC0(); +	ecc1 = bfin_read_NFC_ECC1(); + +	code[0] = (ecc0 & 0x7FF) | ((ecc1 & 0x7FF) << 11); + +	/* first 3 bytes in ecc_code for 256 page size */ +	p = (u8 *) code; +	memcpy(ecc_code, p, 3); + +	/* second 4 bytes ECC code for 512 page size */ +	if (NAND_IS_512()) { +		ecc0 = bfin_read_NFC_ECC2(); +		ecc1 
= bfin_read_NFC_ECC3(); +		code[1] = (ecc0 & 0x7FF) | ((ecc1 & 0x7FF) << 11); + +		/* second 3 bytes in ecc_code for second 256 +		 * bytes of 512 page size +		 */ +		p = (u8 *) (code + 1); +		memcpy((ecc_code + 3), p, 3); +	} + +	reset_ecc(); + +	return 0; +} + +#ifdef CONFIG_BFIN_NFC_BOOTROM_ECC +# define BOOTROM_ECC 1 +#else +# define BOOTROM_ECC 0 +#endif + +static uint8_t bbt_pattern[] = { 0xff }; + +static struct nand_bbt_descr bootrom_bbt = { +	.options = 0, +	.offs = 63, +	.len = 1, +	.pattern = bbt_pattern, +}; + +static struct nand_ecclayout bootrom_ecclayout = { +	.eccbytes = 24, +	.eccpos = { +		0x8 * 0, 0x8 * 0 + 1, 0x8 * 0 + 2, +		0x8 * 1, 0x8 * 1 + 1, 0x8 * 1 + 2, +		0x8 * 2, 0x8 * 2 + 1, 0x8 * 2 + 2, +		0x8 * 3, 0x8 * 3 + 1, 0x8 * 3 + 2, +		0x8 * 4, 0x8 * 4 + 1, 0x8 * 4 + 2, +		0x8 * 5, 0x8 * 5 + 1, 0x8 * 5 + 2, +		0x8 * 6, 0x8 * 6 + 1, 0x8 * 6 + 2, +		0x8 * 7, 0x8 * 7 + 1, 0x8 * 7 + 2 +	}, +	.oobfree = { +		{ 0x8 * 0 + 3, 5 }, +		{ 0x8 * 1 + 3, 5 }, +		{ 0x8 * 2 + 3, 5 }, +		{ 0x8 * 3 + 3, 5 }, +		{ 0x8 * 4 + 3, 5 }, +		{ 0x8 * 5 + 3, 5 }, +		{ 0x8 * 6 + 3, 5 }, +		{ 0x8 * 7 + 3, 5 }, +	} +}; + +/* + * Board-specific NAND initialization. The following members of the + * argument are board-specific (per include/linux/mtd/nand.h): + * - IO_ADDR_R?: address to read the 8 I/O lines of the flash device + * - IO_ADDR_W?: address to write the 8 I/O lines of the flash device + * - cmd_ctrl: hardwarespecific function for accesing control-lines + * - dev_ready: hardwarespecific function for  accesing device ready/busy line + * - enable_hwecc?: function to enable (reset)  hardware ecc generator. Must + *   only be provided if a hardware ECC is available + * - ecc.mode: mode of ecc, see defines + * - chip_delay: chip dependent delay for transfering data from array to + *   read regs (tR) + * - options: various chip options. They can partly be set to inform + *   nand_scan about special functionality. 
See the defines for further + *   explanation + * Members with a "?" were not set in the merged testing-NAND branch, + * so they are not set here either. + */ +int board_nand_init(struct nand_chip *chip) +{ +	const unsigned short pins[] = { +		P_NAND_CE, P_NAND_RB, P_NAND_D0, P_NAND_D1, P_NAND_D2, +		P_NAND_D3, P_NAND_D4, P_NAND_D5, P_NAND_D6, P_NAND_D7, +		P_NAND_WE, P_NAND_RE, P_NAND_CLE, P_NAND_ALE, 0, +	}; + +	pr_stamp(); + +	/* set width/ecc/timings/etc... */ +	bfin_write_NFC_CTL(CONFIG_BFIN_NFC_CTL_VAL); + +	/* clear interrupt status */ +	bfin_write_NFC_IRQMASK(0x0); +	bfin_write_NFC_IRQSTAT(0xffff); + +	/* enable GPIO function enable register */ +	peripheral_request_list(pins, "bfin_nand"); + +	chip->cmd_ctrl = bfin_nfc_cmd_ctrl; +	chip->read_buf = bfin_nfc_read_buf; +	chip->write_buf = bfin_nfc_write_buf; +	chip->read_byte = bfin_nfc_read_byte; + +#ifdef CONFIG_BFIN_NFC_NO_HW_ECC +# define ECC_HW 0 +#else +# define ECC_HW 1 +#endif +	if (ECC_HW) { +		if (BOOTROM_ECC) { +			chip->badblock_pattern = &bootrom_bbt; +			chip->ecc.layout = &bootrom_ecclayout; +		} +		if (!NAND_IS_512()) { +			chip->ecc.bytes = 3; +			chip->ecc.size = 256; +			chip->ecc.strength = 1; +		} else { +			chip->ecc.bytes = 6; +			chip->ecc.size = 512; +			chip->ecc.strength = 2; +		} +		chip->ecc.mode = NAND_ECC_HW; +		chip->ecc.calculate = bfin_nfc_calculate_ecc; +		chip->ecc.correct   = bfin_nfc_correct_data; +		chip->ecc.hwctl     = bfin_nfc_enable_hwecc; +	} else +		chip->ecc.mode = NAND_ECC_SOFT; +	chip->dev_ready = bfin_nfc_devready; +	chip->chip_delay = 0; + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/davinci_nand.c b/roms/u-boot/drivers/mtd/nand/davinci_nand.c new file mode 100644 index 00000000..75b03a74 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/davinci_nand.c @@ -0,0 +1,653 @@ +/* + * NAND driver for TI DaVinci based boards. + * + * Copyright (C) 2007 Sergey Kubushyn <ksi@koi8.net> + * + * Based on Linux DaVinci NAND driver by TI. 
Original copyright follows: + */ + +/* + * + * linux/drivers/mtd/nand/nand_davinci.c + * + * NAND Flash Driver + * + * Copyright (C) 2006 Texas Instruments. + * + * ---------------------------------------------------------------------------- + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * ---------------------------------------------------------------------------- + * + *  Overview: + *   This is a device driver for the NAND flash device found on the + *   DaVinci board which utilizes the Samsung k9k2g08 part. + * + Modifications: + ver. 1.0: Feb 2005, Vinod/Sudhakar + - + */ + +#include <common.h> +#include <asm/io.h> +#include <nand.h> +#include <asm/arch/nand_defs.h> +#include <asm/arch/emif_defs.h> + +/* Definitions for 4-bit hardware ECC */ +#define NAND_TIMEOUT			10240 +#define NAND_ECC_BUSY			0xC +#define NAND_4BITECC_MASK		0x03FF03FF +#define EMIF_NANDFSR_ECC_STATE_MASK  	0x00000F00 +#define ECC_STATE_NO_ERR		0x0 +#define ECC_STATE_TOO_MANY_ERRS		0x1 +#define ECC_STATE_ERR_CORR_COMP_P	0x2 +#define ECC_STATE_ERR_CORR_COMP_N	0x3 + +/* + * Exploit the little endianness of the ARM to do multi-byte transfers + * per device read. This can perform over twice as quickly as individual + * byte transfers when buffer alignment is conducive. + * + * NOTE: This only works if the NAND is not connected to the 2 LSBs of + * the address bus. On Davinci EVM platforms this has always been true. 
+ */ +static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) +{ +	struct nand_chip *chip = mtd->priv; +	const u32 *nand = chip->IO_ADDR_R; + +	/* Make sure that buf is 32 bit aligned */ +	if (((int)buf & 0x3) != 0) { +		if (((int)buf & 0x1) != 0) { +			if (len) { +				*buf = readb(nand); +				buf += 1; +				len--; +			} +		} + +		if (((int)buf & 0x3) != 0) { +			if (len >= 2) { +				*(u16 *)buf = readw(nand); +				buf += 2; +				len -= 2; +			} +		} +	} + +	/* copy aligned data */ +	while (len >= 4) { +		*(u32 *)buf = __raw_readl(nand); +		buf += 4; +		len -= 4; +	} + +	/* mop up any remaining bytes */ +	if (len) { +		if (len >= 2) { +			*(u16 *)buf = readw(nand); +			buf += 2; +			len -= 2; +		} + +		if (len) +			*buf = readb(nand); +	} +} + +static void nand_davinci_write_buf(struct mtd_info *mtd, const uint8_t *buf, +				   int len) +{ +	struct nand_chip *chip = mtd->priv; +	const u32 *nand = chip->IO_ADDR_W; + +	/* Make sure that buf is 32 bit aligned */ +	if (((int)buf & 0x3) != 0) { +		if (((int)buf & 0x1) != 0) { +			if (len) { +				writeb(*buf, nand); +				buf += 1; +				len--; +			} +		} + +		if (((int)buf & 0x3) != 0) { +			if (len >= 2) { +				writew(*(u16 *)buf, nand); +				buf += 2; +				len -= 2; +			} +		} +	} + +	/* copy aligned data */ +	while (len >= 4) { +		__raw_writel(*(u32 *)buf, nand); +		buf += 4; +		len -= 4; +	} + +	/* mop up any remaining bytes */ +	if (len) { +		if (len >= 2) { +			writew(*(u16 *)buf, nand); +			buf += 2; +			len -= 2; +		} + +		if (len) +			writeb(*buf, nand); +	} +} + +static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd, +		unsigned int ctrl) +{ +	struct		nand_chip *this = mtd->priv; +	u_int32_t	IO_ADDR_W = (u_int32_t)this->IO_ADDR_W; + +	if (ctrl & NAND_CTRL_CHANGE) { +		IO_ADDR_W &= ~(MASK_ALE|MASK_CLE); + +		if (ctrl & NAND_CLE) +			IO_ADDR_W |= MASK_CLE; +		if (ctrl & NAND_ALE) +			IO_ADDR_W |= MASK_ALE; +		this->IO_ADDR_W = (void __iomem *) IO_ADDR_W; +	} + +	if (cmd != 
NAND_CMD_NONE) +		writeb(cmd, IO_ADDR_W); +} + +#ifdef CONFIG_SYS_NAND_HW_ECC + +static u_int32_t nand_davinci_readecc(struct mtd_info *mtd) +{ +	u_int32_t	ecc = 0; + +	ecc = __raw_readl(&(davinci_emif_regs->nandfecc[ +				CONFIG_SYS_NAND_CS - 2])); + +	return ecc; +} + +static void nand_davinci_enable_hwecc(struct mtd_info *mtd, int mode) +{ +	u_int32_t	val; + +	/* reading the ECC result register resets the ECC calculation */ +	nand_davinci_readecc(mtd); + +	val = __raw_readl(&davinci_emif_regs->nandfcr); +	val |= DAVINCI_NANDFCR_NAND_ENABLE(CONFIG_SYS_NAND_CS); +	val |= DAVINCI_NANDFCR_1BIT_ECC_START(CONFIG_SYS_NAND_CS); +	__raw_writel(val, &davinci_emif_regs->nandfcr); +} + +static int nand_davinci_calculate_ecc(struct mtd_info *mtd, const u_char *dat, +		u_char *ecc_code) +{ +	u_int32_t		tmp; + +	tmp = nand_davinci_readecc(mtd); + +	/* Squeeze 4 bytes ECC into 3 bytes by removing RESERVED bits +	 * and shifting. RESERVED bits are 31 to 28 and 15 to 12. */ +	tmp = (tmp & 0x00000fff) | ((tmp & 0x0fff0000) >> 4); + +	/* Invert so that erased block ECC is correct */ +	tmp = ~tmp; + +	*ecc_code++ = tmp; +	*ecc_code++ = tmp >>  8; +	*ecc_code++ = tmp >> 16; + +	/* NOTE:  the above code matches mainline Linux: +	 *	.PQR.stu ==> ~PQRstu +	 * +	 * MontaVista/TI kernels encode those bytes differently, use +	 * complicated (and allegedly sometimes-wrong) correction code, +	 * and usually shipped with U-Boot that uses software ECC: +	 *	.PQR.stu ==> PsQRtu +	 * +	 * If you need MV/TI compatible NAND I/O in U-Boot, it should +	 * be possible to (a) change the mangling above, (b) reverse +	 * that mangling in nand_davinci_correct_data() below. 
+	 */ + +	return 0; +} + +static int nand_davinci_correct_data(struct mtd_info *mtd, u_char *dat, +		u_char *read_ecc, u_char *calc_ecc) +{ +	struct nand_chip *this = mtd->priv; +	u_int32_t ecc_nand = read_ecc[0] | (read_ecc[1] << 8) | +					  (read_ecc[2] << 16); +	u_int32_t ecc_calc = calc_ecc[0] | (calc_ecc[1] << 8) | +					  (calc_ecc[2] << 16); +	u_int32_t diff = ecc_calc ^ ecc_nand; + +	if (diff) { +		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) { +			/* Correctable error */ +			if ((diff >> (12 + 3)) < this->ecc.size) { +				uint8_t find_bit = 1 << ((diff >> 12) & 7); +				uint32_t find_byte = diff >> (12 + 3); + +				dat[find_byte] ^= find_bit; +				MTDDEBUG(MTD_DEBUG_LEVEL0, "Correcting single " +					 "bit ECC error at offset: %d, bit: " +					 "%d\n", find_byte, find_bit); +				return 1; +			} else { +				return -1; +			} +		} else if (!(diff & (diff - 1))) { +			/* Single bit ECC error in the ECC itself, +			   nothing to fix */ +			MTDDEBUG(MTD_DEBUG_LEVEL0, "Single bit ECC error in " +				 "ECC.\n"); +			return 1; +		} else { +			/* Uncorrectable error */ +			MTDDEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n"); +			return -1; +		} +	} +	return 0; +} +#endif /* CONFIG_SYS_NAND_HW_ECC */ + +#ifdef CONFIG_SYS_NAND_4BIT_HW_ECC_OOBFIRST +static struct nand_ecclayout nand_davinci_4bit_layout_oobfirst = { +#if defined(CONFIG_SYS_NAND_PAGE_2K) +	.eccbytes = 40, +#ifdef CONFIG_NAND_6BYTES_OOB_FREE_10BYTES_ECC +	.eccpos = { +		6,   7,  8,  9, 10,	11, 12, 13, 14, 15, +		22, 23, 24, 25, 26,	27, 28, 29, 30, 31, +		38, 39, 40, 41, 42,	43, 44, 45, 46, 47, +		54, 55, 56, 57, 58,	59, 60, 61, 62, 63, +	}, +	.oobfree = { +		{2, 4}, {16, 6}, {32, 6}, {48, 6}, +	}, +#else +	.eccpos = { +		24, 25, 26, 27, 28, +		29, 30, 31, 32, 33, 34, 35, 36, 37, 38, +		39, 40, 41, 42, 43, 44, 45, 46, 47, 48, +		49, 50, 51, 52, 53, 54, 55, 56, 57, 58, +		59, 60, 61, 62, 63, +		}, +	.oobfree = { +		{.offset = 2, .length = 22, }, +	}, +#endif	/* #ifdef 
CONFIG_NAND_6BYTES_OOB_FREE_10BYTES_ECC */ +#elif defined(CONFIG_SYS_NAND_PAGE_4K) +	.eccbytes = 80, +	.eccpos = { +		48, 49, 50, 51, 52, 53, 54, 55, 56, 57, +		58, 59, 60, 61, 62, 63,	64, 65, 66, 67, +		68, 69, 70, 71, 72, 73, 74, 75, 76, 77, +		78, 79,	80, 81, 82, 83,	84, 85, 86, 87, +		88, 89, 90, 91, 92, 93,	94, 95, 96, 97, +		98, 99, 100, 101, 102, 103, 104, 105, 106, 107, +		108, 109, 110, 111, 112, 113, 114, 115, 116, 117, +		118, 119, 120, 121, 122, 123, 124, 125, 126, 127, +		}, +	.oobfree = { +		{.offset = 2, .length = 46, }, +	}, +#endif +}; + +static void nand_davinci_4bit_enable_hwecc(struct mtd_info *mtd, int mode) +{ +	u32 val; + +	switch (mode) { +	case NAND_ECC_WRITE: +	case NAND_ECC_READ: +		/* +		 * Start a new ECC calculation for reading or writing 512 bytes +		 * of data. +		 */ +		val = __raw_readl(&davinci_emif_regs->nandfcr); +		val &= ~DAVINCI_NANDFCR_4BIT_ECC_SEL_MASK; +		val |= DAVINCI_NANDFCR_NAND_ENABLE(CONFIG_SYS_NAND_CS); +		val |= DAVINCI_NANDFCR_4BIT_ECC_SEL(CONFIG_SYS_NAND_CS); +		val |= DAVINCI_NANDFCR_4BIT_ECC_START; +		__raw_writel(val, &davinci_emif_regs->nandfcr); +		break; +	case NAND_ECC_READSYN: +		val = __raw_readl(&davinci_emif_regs->nand4bitecc[0]); +		break; +	default: +		break; +	} +} + +static u32 nand_davinci_4bit_readecc(struct mtd_info *mtd, unsigned int ecc[4]) +{ +	int i; + +	for (i = 0; i < 4; i++) { +		ecc[i] = __raw_readl(&davinci_emif_regs->nand4bitecc[i]) & +			NAND_4BITECC_MASK; +	} + +	return 0; +} + +static int nand_davinci_4bit_calculate_ecc(struct mtd_info *mtd, +					   const uint8_t *dat, +					   uint8_t *ecc_code) +{ +	unsigned int hw_4ecc[4]; +	unsigned int i; + +	nand_davinci_4bit_readecc(mtd, hw_4ecc); + +	/*Convert 10 bit ecc value to 8 bit */ +	for (i = 0; i < 2; i++) { +		unsigned int hw_ecc_low = hw_4ecc[i * 2]; +		unsigned int hw_ecc_hi = hw_4ecc[(i * 2) + 1]; + +		/* Take first 8 bits from val1 (count1=0) or val5 (count1=1) */ +		*ecc_code++ = hw_ecc_low & 0xFF; + +		/* +		 * Take 2 bits as 
LSB bits from val1 (count1=0) or val5 +		 * (count1=1) and 6 bits from val2 (count1=0) or +		 * val5 (count1=1) +		 */ +		*ecc_code++ = +		    ((hw_ecc_low >> 8) & 0x3) | ((hw_ecc_low >> 14) & 0xFC); + +		/* +		 * Take 4 bits from val2 (count1=0) or val5 (count1=1) and +		 * 4 bits from val3 (count1=0) or val6 (count1=1) +		 */ +		*ecc_code++ = +		    ((hw_ecc_low >> 22) & 0xF) | ((hw_ecc_hi << 4) & 0xF0); + +		/* +		 * Take 6 bits from val3(count1=0) or val6 (count1=1) and +		 * 2 bits from val4 (count1=0) or  val7 (count1=1) +		 */ +		*ecc_code++ = +		    ((hw_ecc_hi >> 4) & 0x3F) | ((hw_ecc_hi >> 10) & 0xC0); + +		/* Take 8 bits from val4 (count1=0) or val7 (count1=1) */ +		*ecc_code++ = (hw_ecc_hi >> 18) & 0xFF; +	} + +	return 0; +} + +static int nand_davinci_4bit_correct_data(struct mtd_info *mtd, uint8_t *dat, +					  uint8_t *read_ecc, uint8_t *calc_ecc) +{ +	int i; +	unsigned int hw_4ecc[4]; +	unsigned int iserror; +	unsigned short *ecc16; +	unsigned int numerrors, erroraddress, errorvalue; +	u32 val; + +	/* +	 * Check for an ECC where all bytes are 0xFF.  If this is the case, we +	 * will assume we are looking at an erased page and we should ignore +	 * the ECC. +	 */ +	for (i = 0; i < 10; i++) { +		if (read_ecc[i] != 0xFF) +			break; +	} +	if (i == 10) +		return 0; + +	/* Convert 8 bit in to 10 bit */ +	ecc16 = (unsigned short *)&read_ecc[0]; + +	/* +	 * Write the parity values in the NAND Flash 4-bit ECC Load register. +	 * Write each parity value one at a time starting from 4bit_ecc_val8 +	 * to 4bit_ecc_val1. 
+	 */ + +	/*Take 2 bits from 8th byte and 8 bits from 9th byte */ +	__raw_writel(((ecc16[4]) >> 6) & 0x3FF, +			&davinci_emif_regs->nand4biteccload); + +	/* Take 4 bits from 7th byte and 6 bits from 8th byte */ +	__raw_writel((((ecc16[3]) >> 12) & 0xF) | ((((ecc16[4])) << 4) & 0x3F0), +			&davinci_emif_regs->nand4biteccload); + +	/* Take 6 bits from 6th byte and 4 bits from 7th byte */ +	__raw_writel((ecc16[3] >> 2) & 0x3FF, +			&davinci_emif_regs->nand4biteccload); + +	/* Take 8 bits from 5th byte and 2 bits from 6th byte */ +	__raw_writel(((ecc16[2]) >> 8) | ((((ecc16[3])) << 8) & 0x300), +			&davinci_emif_regs->nand4biteccload); + +	/*Take 2 bits from 3rd byte and 8 bits from 4th byte */ +	__raw_writel((((ecc16[1]) >> 14) & 0x3) | ((((ecc16[2])) << 2) & 0x3FC), +			&davinci_emif_regs->nand4biteccload); + +	/* Take 4 bits form 2nd bytes and 6 bits from 3rd bytes */ +	__raw_writel(((ecc16[1]) >> 4) & 0x3FF, +			&davinci_emif_regs->nand4biteccload); + +	/* Take 6 bits from 1st byte and 4 bits from 2nd byte */ +	__raw_writel((((ecc16[0]) >> 10) & 0x3F) | (((ecc16[1]) << 6) & 0x3C0), +			&davinci_emif_regs->nand4biteccload); + +	/* Take 10 bits from 0th and 1st bytes */ +	__raw_writel((ecc16[0]) & 0x3FF, +			&davinci_emif_regs->nand4biteccload); + +	/* +	 * Perform a dummy read to the EMIF Revision Code and Status register. +	 * This is required to ensure time for syndrome calculation after +	 * writing the ECC values in previous step. +	 */ + +	val = __raw_readl(&davinci_emif_regs->nandfsr); + +	/* +	 * Read the syndrome from the NAND Flash 4-Bit ECC 1-4 registers. +	 * A syndrome value of 0 means no bit errors. If the syndrome is +	 * non-zero then go further otherwise return. +	 */ +	nand_davinci_4bit_readecc(mtd, hw_4ecc); + +	if (!(hw_4ecc[0] | hw_4ecc[1] | hw_4ecc[2] | hw_4ecc[3])) +		return 0; + +	/* +	 * Clear any previous address calculation by doing a dummy read of an +	 * error address register. 
+	 */ +	val = __raw_readl(&davinci_emif_regs->nanderradd1); + +	/* +	 * Set the addr_calc_st bit(bit no 13) in the NAND Flash Control +	 * register to 1. +	 */ +	__raw_writel(DAVINCI_NANDFCR_4BIT_CALC_START, +			&davinci_emif_regs->nandfcr); + +	/* +	 * Wait for the corr_state field (bits 8 to 11) in the +	 * NAND Flash Status register to be not equal to 0x0, 0x1, 0x2, or 0x3. +	 * Otherwise ECC calculation has not even begun and the next loop might +	 * fail because of a false positive! +	 */ +	i = NAND_TIMEOUT; +	do { +		val = __raw_readl(&davinci_emif_regs->nandfsr); +		val &= 0xc00; +		i--; +	} while ((i > 0) && !val); + +	/* +	 * Wait for the corr_state field (bits 8 to 11) in the +	 * NAND Flash Status register to be equal to 0x0, 0x1, 0x2, or 0x3. +	 */ +	i = NAND_TIMEOUT; +	do { +		val = __raw_readl(&davinci_emif_regs->nandfsr); +		val &= 0xc00; +		i--; +	} while ((i > 0) && val); + +	iserror = __raw_readl(&davinci_emif_regs->nandfsr); +	iserror &= EMIF_NANDFSR_ECC_STATE_MASK; +	iserror = iserror >> 8; + +	/* +	 * ECC_STATE_TOO_MANY_ERRS (0x1) means errors cannot be +	 * corrected (five or more errors).  The number of errors +	 * calculated (err_num field) differs from the number of errors +	 * searched.  ECC_STATE_ERR_CORR_COMP_P (0x2) means error +	 * correction complete (errors on bit 8 or 9). +	 * ECC_STATE_ERR_CORR_COMP_N (0x3) means error correction +	 * complete (error exists). 
+	 */ + +	if (iserror == ECC_STATE_NO_ERR) { +		val = __raw_readl(&davinci_emif_regs->nanderrval1); +		return 0; +	} else if (iserror == ECC_STATE_TOO_MANY_ERRS) { +		val = __raw_readl(&davinci_emif_regs->nanderrval1); +		return -1; +	} + +	numerrors = ((__raw_readl(&davinci_emif_regs->nandfsr) >> 16) +			& 0x3) + 1; + +	/* Read the error address, error value and correct */ +	for (i = 0; i < numerrors; i++) { +		if (i > 1) { +			erroraddress = +			    ((__raw_readl(&davinci_emif_regs->nanderradd2) >> +			      (16 * (i & 1))) & 0x3FF); +			erroraddress = ((512 + 7) - erroraddress); +			errorvalue = +			    ((__raw_readl(&davinci_emif_regs->nanderrval2) >> +			      (16 * (i & 1))) & 0xFF); +		} else { +			erroraddress = +			    ((__raw_readl(&davinci_emif_regs->nanderradd1) >> +			      (16 * (i & 1))) & 0x3FF); +			erroraddress = ((512 + 7) - erroraddress); +			errorvalue = +			    ((__raw_readl(&davinci_emif_regs->nanderrval1) >> +			      (16 * (i & 1))) & 0xFF); +		} +		/* xor the corrupt data with error value */ +		if (erroraddress < 512) +			dat[erroraddress] ^= errorvalue; +	} + +	return numerrors; +} +#endif /* CONFIG_SYS_NAND_4BIT_HW_ECC_OOBFIRST */ + +static int nand_davinci_dev_ready(struct mtd_info *mtd) +{ +	return __raw_readl(&davinci_emif_regs->nandfsr) & 0x1; +} + +static void nand_flash_init(void) +{ +	/* This is for DM6446 EVM and *very* similar.  DO NOT GROW THIS! +	 * Instead, have your board_init() set EMIF timings, based on its +	 * knowledge of the clocks and what devices are hooked up ... and +	 * don't even do that unless no UBL handled it. 
+	 */ +#ifdef CONFIG_SOC_DM644X +	u_int32_t	acfg1 = 0x3ffffffc; + +	/*------------------------------------------------------------------* +	 *  NAND FLASH CHIP TIMEOUT @ 459 MHz                               * +	 *                                                                  * +	 *  AEMIF.CLK freq   = PLL1/6 = 459/6 = 76.5 MHz                    * +	 *  AEMIF.CLK period = 1/76.5 MHz = 13.1 ns                         * +	 *                                                                  * +	 *------------------------------------------------------------------*/ +	 acfg1 = 0 +		| (0 << 31)	/* selectStrobe */ +		| (0 << 30)	/* extWait */ +		| (1 << 26)	/* writeSetup	10 ns */ +		| (3 << 20)	/* writeStrobe	40 ns */ +		| (1 << 17)	/* writeHold	10 ns */ +		| (1 << 13)	/* readSetup	10 ns */ +		| (5 << 7)	/* readStrobe	60 ns */ +		| (1 << 4)	/* readHold	10 ns */ +		| (3 << 2)	/* turnAround	?? ns */ +		| (0 << 0)	/* asyncSize	8-bit bus */ +		; + +	__raw_writel(acfg1, &davinci_emif_regs->ab1cr); /* CS2 */ + +	/* NAND flash on CS2 */ +	__raw_writel(0x00000101, &davinci_emif_regs->nandfcr); +#endif +} + +void davinci_nand_init(struct nand_chip *nand) +{ +	nand->chip_delay  = 0; +#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT +	nand->bbt_options	  |= NAND_BBT_USE_FLASH; +#endif +#ifdef CONFIG_SYS_NAND_NO_SUBPAGE_WRITE +	nand->options	  |= NAND_NO_SUBPAGE_WRITE; +#endif +#ifdef CONFIG_SYS_NAND_HW_ECC +	nand->ecc.mode = NAND_ECC_HW; +	nand->ecc.size = 512; +	nand->ecc.bytes = 3; +	nand->ecc.strength = 1; +	nand->ecc.calculate = nand_davinci_calculate_ecc; +	nand->ecc.correct  = nand_davinci_correct_data; +	nand->ecc.hwctl  = nand_davinci_enable_hwecc; +#else +	nand->ecc.mode = NAND_ECC_SOFT; +#endif /* CONFIG_SYS_NAND_HW_ECC */ +#ifdef CONFIG_SYS_NAND_4BIT_HW_ECC_OOBFIRST +	nand->ecc.mode = NAND_ECC_HW_OOB_FIRST; +	nand->ecc.size = 512; +	nand->ecc.bytes = 10; +	nand->ecc.strength = 4; +	nand->ecc.calculate = nand_davinci_4bit_calculate_ecc; +	nand->ecc.correct = 
nand_davinci_4bit_correct_data; +	nand->ecc.hwctl = nand_davinci_4bit_enable_hwecc; +	nand->ecc.layout = &nand_davinci_4bit_layout_oobfirst; +#endif +	/* Set address of hardware control function */ +	nand->cmd_ctrl = nand_davinci_hwcontrol; + +	nand->read_buf = nand_davinci_read_buf; +	nand->write_buf = nand_davinci_write_buf; + +	nand->dev_ready = nand_davinci_dev_ready; + +	nand_flash_init(); +} + +int board_nand_init(struct nand_chip *chip) __attribute__((weak)); + +int board_nand_init(struct nand_chip *chip) +{ +	davinci_nand_init(chip); +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/docg4.c b/roms/u-boot/drivers/mtd/nand/docg4.c new file mode 100644 index 00000000..b9121c39 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/docg4.c @@ -0,0 +1,1028 @@ +/* + * drivers/mtd/nand/docg4.c + * + * Copyright (C) 2013 Mike Dunn <mikedunn@newsguy.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * mtd nand driver for M-Systems DiskOnChip G4 + * + * Tested on the Palm Treo 680.  The G4 is also present on Toshiba Portege, Asus + * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others. + * Should work on these as well.  Let me know! + * + * TODO: + * + *  Mechanism for management of password-protected areas + * + *  Hamming ecc when reading oob only + * + *  According to the M-Sys documentation, this device is also available in a + *  "dual-die" configuration having a 256MB capacity, but no mechanism for + *  detecting this variant is documented.  Currently this driver assumes 128MB + *  capacity. + * + *  Support for multiple cascaded devices ("floors").  Not sure which gadgets + *  contain multiple G4s in a cascaded configuration, if any. 
+ */ + + +#include <common.h> +#include <asm/arch/hardware.h> +#include <asm/io.h> +#include <asm/bitops.h> +#include <asm/errno.h> +#include <malloc.h> +#include <nand.h> +#include <linux/bch.h> +#include <linux/bitrev.h> +#include <linux/mtd/docg4.h> + +/* + * The device has a nop register which M-Sys claims is for the purpose of + * inserting precise delays.  But beware; at least some operations fail if the + * nop writes are replaced with a generic delay! + */ +static inline void write_nop(void __iomem *docptr) +{ +	writew(0, docptr + DOC_NOP); +} + + +static int poll_status(void __iomem *docptr) +{ +	/* +	 * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL +	 * register.  Operations known to take a long time (e.g., block erase) +	 * should sleep for a while before calling this. +	 */ + +	uint8_t flash_status; + +	/* hardware quirk requires reading twice initially */ +	flash_status = readb(docptr + DOC_FLASHCONTROL); + +	do { +		flash_status = readb(docptr + DOC_FLASHCONTROL); +	} while (!(flash_status & DOC_CTRL_FLASHREADY)); + +	return 0; +} + +static void write_addr(void __iomem *docptr, uint32_t docg4_addr) +{ +	/* write the four address bytes packed in docg4_addr to the device */ + +	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); +	docg4_addr >>= 8; +	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); +	docg4_addr >>= 8; +	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); +	docg4_addr >>= 8; +	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); +} + +/* + * This is a module parameter in the linux kernel version of this driver.  It is + * hard-coded to 'off' for u-boot.  This driver uses oob to mark bad blocks. + * This can be problematic when dealing with data not intended for the mtd/nand + * subsystem.  
For example, on boards that boot from the docg4 and use the IPL + * to load an spl + u-boot image, the blocks containing the image will be + * reported as "bad" because the oob of the first page of each block contains a + * magic number that the IPL looks for, which causes the badblock scan to + * erroneously add them to the bad block table.  To erase such a block, use + * u-boot's 'nand scrub'.  scrub is safe for the docg4.  The device does have a + * factory bad block table, but it is read-only, and is used in conjunction with + * oob bad block markers that are written by mtd/nand when a block is deemed to + * be bad.  To read data from "bad" blocks, use 'read.raw'.  Unfortunately, + * read.raw does not use ecc, which would still work fine on such misidentified + * bad blocks.  TODO: u-boot nand utilities need the ability to ignore bad + * blocks. + */ +static const int ignore_badblocks; /* remains false */ + +struct docg4_priv { +	int status; +	struct { +		unsigned int command; +		int column; +		int page; +	} last_command; +	uint8_t oob_buf[16]; +	uint8_t ecc_buf[7]; +	int oob_page; +	struct bch_control *bch; +}; +/* + * Oob bytes 0 - 6 are available to the user. + * Byte 7 is hamming ecc for first 7 bytes.  Bytes 8 - 14 are hw-generated ecc. + * Byte 15 (the last) is used by the driver as a "page written" flag. 
+ */ +static struct nand_ecclayout docg4_oobinfo = { +	.eccbytes = 9, +	.eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15}, +	.oobavail = 7, +	.oobfree = { {0, 7} } +}; + +static void reset(void __iomem *docptr) +{ +	/* full device reset */ + +	writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN, docptr + DOC_ASICMODE); +	writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN), +	       docptr + DOC_ASICMODECONFIRM); +	write_nop(docptr); + +	writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN, +	       docptr + DOC_ASICMODE); +	writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN), +	       docptr + DOC_ASICMODECONFIRM); + +	writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1); + +	poll_status(docptr); +} + +static void docg4_select_chip(struct mtd_info *mtd, int chip) +{ +	/* +	 * Select among multiple cascaded chips ("floors").  Multiple floors are +	 * not yet supported, so the only valid non-negative value is 0. +	 */ +	void __iomem *docptr = CONFIG_SYS_NAND_BASE; + +	if (chip < 0) +		return;		/* deselected */ + +	if (chip > 0) +		printf("multiple floors currently unsupported\n"); + +	writew(0, docptr + DOC_DEVICESELECT); +} + +static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf) +{ +	/* read the 7 hw-generated ecc bytes */ + +	int i; +	for (i = 0; i < 7; i++) { /* hw quirk; read twice */ +		ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i)); +		ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i)); +	} +} + +static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page) +{ +	/* +	 * Called after a page read when hardware reports bitflips. +	 * Up to four bitflips can be corrected. 
+	 */ + +	struct nand_chip *nand = mtd->priv; +	struct docg4_priv *doc = nand->priv; +	void __iomem *docptr = CONFIG_SYS_NAND_BASE; +	int i, numerrs; +	unsigned int errpos[4]; +	const uint8_t blank_read_hwecc[8] = { +		0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 }; + +	read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */ + +	/* check if read error is due to a blank page */ +	if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7)) +		return 0;	/* yes */ + +	/* skip additional check of "written flag" if ignore_badblocks */ +	if (!ignore_badblocks) { +		/* +		 * If the hw ecc bytes are not those of a blank page, there's +		 * still a chance that the page is blank, but was read with +		 * errors.  Check the "written flag" in last oob byte, which +		 * is set to zero when a page is written.  If more than half +		 * the bits are set, assume a blank page.  Unfortunately, the +		 * bit flips(s) are not reported in stats. +		 */ + +		if (doc->oob_buf[15]) { +			int bit, numsetbits = 0; +			unsigned long written_flag = doc->oob_buf[15]; + +			for (bit = 0; bit < 8; bit++) { +				if (written_flag & 0x01) +					numsetbits++; +				written_flag >>= 1; +			} +			if (numsetbits > 4) { /* assume blank */ +				printf("errors in blank page at offset %08x\n", +				       page * DOCG4_PAGE_SIZE); +				return 0; +			} +		} +	} + +	/* +	 * The hardware ecc unit produces oob_ecc ^ calc_ecc.  The kernel's bch +	 * algorithm is used to decode this.  However the hw operates on page +	 * data in a bit order that is the reverse of that of the bch alg, +	 * requiring that the bits be reversed on the result.  Thanks to Ivan +	 * Djelic for his analysis! 
+	 */ +	for (i = 0; i < 7; i++) +		doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]); + +	numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL, +			     doc->ecc_buf, NULL, errpos); + +	if (numerrs == -EBADMSG) { +		printf("uncorrectable errors at offset %08x\n", +		       page * DOCG4_PAGE_SIZE); +		return -EBADMSG; +	} + +	BUG_ON(numerrs < 0);	/* -EINVAL, or anything other than -EBADMSG */ + +	/* undo last step in BCH alg (modulo mirroring not needed) */ +	for (i = 0; i < numerrs; i++) +		errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7)); + +	/* fix the errors */ +	for (i = 0; i < numerrs; i++) { +		/* ignore if error within oob ecc bytes */ +		if (errpos[i] > DOCG4_USERDATA_LEN * 8) +			continue; + +		/* if error within oob area preceeding ecc bytes... */ +		if (errpos[i] > DOCG4_PAGE_SIZE * 8) +			__change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8, +				     (unsigned long *)doc->oob_buf); + +		else    /* error in page data */ +			__change_bit(errpos[i], (unsigned long *)buf); +	} + +	printf("%d error(s) corrected at offset %08x\n", +	       numerrs, page * DOCG4_PAGE_SIZE); + +	return numerrs; +} + +static int read_progstatus(struct docg4_priv *doc, void __iomem *docptr) +{ +	/* +	 * This apparently checks the status of programming.  Done after an +	 * erasure, and after page data is written.  On error, the status is +	 * saved, to be later retrieved by the nand infrastructure code. 
+	 */ + +	/* status is read from the I/O reg */ +	uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA); +	uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA); +	uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG); + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "docg4: %s: %02x %02x %02x\n", +	    __func__, status1, status2, status3); + +	if (status1 != DOCG4_PROGSTATUS_GOOD || +	    status2 != DOCG4_PROGSTATUS_GOOD_2 || +	    status3 != DOCG4_PROGSTATUS_GOOD_2) { +		doc->status = NAND_STATUS_FAIL; +		printf("read_progstatus failed: %02x, %02x, %02x\n", +		       status1, status2, status3); +		return -EIO; +	} +	return 0; +} + +static int pageprog(struct mtd_info *mtd) +{ +	/* +	 * Final step in writing a page.  Writes the contents of its +	 * internal buffer out to the flash array, or some such. +	 */ + +	struct nand_chip *nand = mtd->priv; +	struct docg4_priv *doc = nand->priv; +	void __iomem *docptr = CONFIG_SYS_NAND_BASE; +	int retval = 0; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "docg4: %s\n", __func__); + +	writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE); +	writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); +	write_nop(docptr); + +	/* Just busy-wait; usleep_range() slows things down noticeably. 
*/ +	poll_status(docptr); + +	writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE); +	writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND); +	writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); + +	retval = read_progstatus(doc, docptr); +	writew(0, docptr + DOC_DATAEND); +	write_nop(docptr); +	poll_status(docptr); +	write_nop(docptr); + +	return retval; +} + +static void sequence_reset(void __iomem *docptr) +{ +	/* common starting sequence for all operations */ + +	writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL); +	writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE); +	writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); +	write_nop(docptr); +	poll_status(docptr); +	write_nop(docptr); +} + +static void read_page_prologue(void __iomem *docptr, uint32_t docg4_addr) +{ +	/* first step in reading a page */ + +	sequence_reset(docptr); + +	writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE); +	writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); + +	write_addr(docptr, docg4_addr); + +	write_nop(docptr); +	writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); +	write_nop(docptr); + +	poll_status(docptr); +} + +static void write_page_prologue(void __iomem *docptr, uint32_t docg4_addr) +{ +	/* first step in writing a page */ + +	sequence_reset(docptr); +	writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE); +	writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); +	write_addr(docptr, docg4_addr); +	write_nop(docptr); +	write_nop(docptr); +	poll_status(docptr); +} + +static uint32_t mtd_to_docg4_address(int page, int column) +{ +	/* +	 * Convert mtd address to format used by the device, 32 bit packed. +	 * +	 * Some notes on G4 addressing... 
The M-Sys documentation on this device +	 * claims that pages are 2K in length, and indeed, the format of the +	 * address used by the device reflects that.  But within each page are +	 * four 512 byte "sub-pages", each with its own oob data that is +	 * read/written immediately after the 512 bytes of page data.  This oob +	 * data contains the ecc bytes for the preceeding 512 bytes. +	 * +	 * Rather than tell the mtd nand infrastructure that page size is 2k, +	 * with four sub-pages each, we engage in a little subterfuge and tell +	 * the infrastructure code that pages are 512 bytes in size.  This is +	 * done because during the course of reverse-engineering the device, I +	 * never observed an instance where an entire 2K "page" was read or +	 * written as a unit.  Each "sub-page" is always addressed individually, +	 * its data read/written, and ecc handled before the next "sub-page" is +	 * addressed. +	 * +	 * This requires us to convert addresses passed by the mtd nand +	 * infrastructure code to those used by the device. +	 * +	 * The address that is written to the device consists of four bytes: the +	 * first two are the 2k page number, and the second is the index into +	 * the page.  The index is in terms of 16-bit half-words and includes +	 * the preceeding oob data, so e.g., the index into the second +	 * "sub-page" is 0x108, and the full device address of the start of mtd +	 * page 0x201 is 0x00800108. 
+	 */ +	int g4_page = page / 4;	                      /* device's 2K page */ +	int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */ +	return (g4_page << 16) | g4_index;	      /* pack */ +} + +static void docg4_command(struct mtd_info *mtd, unsigned command, int column, +			  int page_addr) +{ +	/* handle standard nand commands */ + +	struct nand_chip *nand = mtd->priv; +	struct docg4_priv *doc = nand->priv; +	uint32_t g4_addr = mtd_to_docg4_address(page_addr, column); + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s %x, page_addr=%x, column=%x\n", +	    __func__, command, page_addr, column); + +	/* +	 * Save the command and its arguments.  This enables emulation of +	 * standard flash devices, and also some optimizations. +	 */ +	doc->last_command.command = command; +	doc->last_command.column = column; +	doc->last_command.page = page_addr; + +	switch (command) { +	case NAND_CMD_RESET: +		reset(CONFIG_SYS_NAND_BASE); +		break; + +	case NAND_CMD_READ0: +		read_page_prologue(CONFIG_SYS_NAND_BASE, g4_addr); +		break; + +	case NAND_CMD_STATUS: +		/* next call to read_byte() will expect a status */ +		break; + +	case NAND_CMD_SEQIN: +		write_page_prologue(CONFIG_SYS_NAND_BASE, g4_addr); + +		/* hack for deferred write of oob bytes */ +		if (doc->oob_page == page_addr) +			memcpy(nand->oob_poi, doc->oob_buf, 16); +		break; + +	case NAND_CMD_PAGEPROG: +		pageprog(mtd); +		break; + +	/* we don't expect these, based on review of nand_base.c */ +	case NAND_CMD_READOOB: +	case NAND_CMD_READID: +	case NAND_CMD_ERASE1: +	case NAND_CMD_ERASE2: +		printf("docg4_command: unexpected nand command 0x%x\n", +		       command); +		break; +	} +} + +static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) +{ +	int i; +	struct nand_chip *nand = mtd->priv; +	uint16_t *p = (uint16_t *)buf; +	len >>= 1; + +	for (i = 0; i < len; i++) +		p[i] = readw(nand->IO_ADDR_R); +} + +static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand, +			  int page) +{ +	struct 
docg4_priv *doc = nand->priv; +	void __iomem *docptr = CONFIG_SYS_NAND_BASE; +	uint16_t status; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: page %x\n", __func__, page); + +	/* +	 * Oob bytes are read as part of a normal page read.  If the previous +	 * nand command was a read of the page whose oob is now being read, just +	 * copy the oob bytes that we saved in a local buffer and avoid a +	 * separate oob read. +	 */ +	if (doc->last_command.command == NAND_CMD_READ0 && +	    doc->last_command.page == page) { +		memcpy(nand->oob_poi, doc->oob_buf, 16); +		return 0; +	} + +	/* +	 * Separate read of oob data only. +	 */ +	docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page); + +	writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); + +	/* the 1st byte from the I/O reg is a status; the rest is oob data */ +	status = readw(docptr + DOC_IOSPACE_DATA); +	if (status & DOCG4_READ_ERROR) { +		printf("docg4_read_oob failed: status = 0x%02x\n", status); +		return -EIO; +	} + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: status = 0x%x\n", __func__, status); + +	docg4_read_buf(mtd, nand->oob_poi, 16); + +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); +	writew(0, docptr + DOC_DATAEND); +	write_nop(docptr); + +	return 0; +} + +static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand, +			   int page) +{ +	/* +	 * Writing oob-only is not really supported, because MLC nand must write +	 * oob bytes at the same time as page data.  Nonetheless, we save the +	 * oob buffer contents here, and then write it along with the page data +	 * if the same page is subsequently written.  This allows user space +	 * utilities that write the oob data prior to the page data to work +	 * (e.g., nandwrite).  The disdvantage is that, if the intention was to +	 * write oob only, the operation is quietly ignored.  
Also, oob can get +	 * corrupted if two concurrent processes are running nandwrite. +	 */ + +	/* note that bytes 7..14 are hw generated hamming/ecc and overwritten */ +	struct docg4_priv *doc = nand->priv; +	doc->oob_page = page; +	memcpy(doc->oob_buf, nand->oob_poi, 16); +	return 0; +} + +static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip) +{ +	/* only called when module_param ignore_badblocks is set */ +	return 0; +} + +static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) +{ +	int i; +	struct nand_chip *nand = mtd->priv; +	uint16_t *p = (uint16_t *)buf; +	len >>= 1; + +	for (i = 0; i < len; i++) +		writew(p[i], nand->IO_ADDR_W); +} + +static int write_page(struct mtd_info *mtd, struct nand_chip *nand, +		       const uint8_t *buf, int use_ecc) +{ +	void __iomem *docptr = CONFIG_SYS_NAND_BASE; +	uint8_t ecc_buf[8]; + +	writew(DOC_ECCCONF0_ECC_ENABLE | +	       DOC_ECCCONF0_UNKNOWN | +	       DOCG4_BCH_SIZE, +	       docptr + DOC_ECCCONF0); +	write_nop(docptr); + +	/* write the page data */ +	docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE); + +	/* oob bytes 0 through 5 are written to I/O reg */ +	docg4_write_buf16(mtd, nand->oob_poi, 6); + +	/* oob byte 6 written to a separate reg */ +	writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7); + +	write_nop(docptr); +	write_nop(docptr); + +	/* write hw-generated ecc bytes to oob */ +	if (likely(use_ecc)) { +		/* oob byte 7 is hamming code */ +		uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY); +		hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */ +		writew(hamming, docptr + DOCG4_OOB_6_7); +		write_nop(docptr); + +		/* read the 7 bch bytes from ecc regs */ +		read_hw_ecc(docptr, ecc_buf); +		ecc_buf[7] = 0;         /* clear the "page written" flag */ +	} + +	/* write user-supplied bytes to oob */ +	else { +		writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7); +		write_nop(docptr); +		memcpy(ecc_buf, &nand->oob_poi[8], 8); +	} + +	docg4_write_buf16(mtd, ecc_buf, 
8); +	write_nop(docptr); +	write_nop(docptr); +	writew(0, docptr + DOC_DATAEND); +	write_nop(docptr); + +	return 0; +} + +static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand, +				 const uint8_t *buf, int oob_required) +{ +	return write_page(mtd, nand, buf, 0); +} + +static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand, +			     const uint8_t *buf, int oob_required) +{ +	return write_page(mtd, nand, buf, 1); +} + +static int read_page(struct mtd_info *mtd, struct nand_chip *nand, +		     uint8_t *buf, int page, int use_ecc) +{ +	struct docg4_priv *doc = nand->priv; +	void __iomem *docptr = CONFIG_SYS_NAND_BASE; +	uint16_t status, edc_err, *buf16; + +	writew(DOC_ECCCONF0_READ_MODE | +	       DOC_ECCCONF0_ECC_ENABLE | +	       DOC_ECCCONF0_UNKNOWN | +	       DOCG4_BCH_SIZE, +	       docptr + DOC_ECCCONF0); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); + +	/* the 1st byte from the I/O reg is a status; the rest is page data */ +	status = readw(docptr + DOC_IOSPACE_DATA); +	if (status & DOCG4_READ_ERROR) { +		printf("docg4_read_page: bad status: 0x%02x\n", status); +		writew(0, docptr + DOC_DATAEND); +		return -EIO; +	} + +	docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */ + +	/* first 14 oob bytes read from I/O reg */ +	docg4_read_buf(mtd, nand->oob_poi, 14); + +	/* last 2 read from another reg */ +	buf16 = (uint16_t *)(nand->oob_poi + 14); +	*buf16 = readw(docptr + DOCG4_MYSTERY_REG); + +	/* +	 * Diskonchips read oob immediately after a page read.  Mtd +	 * infrastructure issues a separate command for reading oob after the +	 * page is read.  So we save the oob bytes in a local buffer and just +	 * copy it if the next command reads oob from the same page. 
+	 */ +	memcpy(doc->oob_buf, nand->oob_poi, 16); + +	write_nop(docptr); + +	if (likely(use_ecc)) { +		/* read the register that tells us if bitflip(s) detected  */ +		edc_err = readw(docptr + DOC_ECCCONF1); +		edc_err = readw(docptr + DOC_ECCCONF1); + +		/* If bitflips are reported, attempt to correct with ecc */ +		if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) { +			int bits_corrected = correct_data(mtd, buf, page); +			if (bits_corrected == -EBADMSG) +				mtd->ecc_stats.failed++; +			else +				mtd->ecc_stats.corrected += bits_corrected; +		} +	} + +	writew(0, docptr + DOC_DATAEND); +	return 0; +} + + +static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand, +			       uint8_t *buf, int oob_required, int page) +{ +	return read_page(mtd, nand, buf, page, 0); +} + +static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand, +			   uint8_t *buf, int oob_required, int page) +{ +	return read_page(mtd, nand, buf, page, 1); +} + +static void docg4_erase_block(struct mtd_info *mtd, int page) +{ +	struct nand_chip *nand = mtd->priv; +	struct docg4_priv *doc = nand->priv; +	void __iomem *docptr = CONFIG_SYS_NAND_BASE; +	uint16_t g4_page; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: page %04x\n", __func__, page); + +	sequence_reset(docptr); + +	writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE); +	writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); + +	/* only 2 bytes of address are written to specify erase block */ +	g4_page = (uint16_t)(page / 4);  /* to g4's 2k page addressing */ +	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS); +	g4_page >>= 8; +	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS); +	write_nop(docptr); + +	/* start the erasure */ +	writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); +	write_nop(docptr); + +	poll_status(docptr); +	writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE); +	writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND); +	writew(DOC_ECCCONF0_READ_MODE | 4, 
docptr + DOC_ECCCONF0); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); + +	read_progstatus(doc, docptr); + +	writew(0, docptr + DOC_DATAEND); +	write_nop(docptr); +	poll_status(docptr); +	write_nop(docptr); +} + +static int read_factory_bbt(struct mtd_info *mtd) +{ +	/* +	 * The device contains a read-only factory bad block table.  Read it and +	 * update the memory-based bbt accordingly. +	 */ + +	struct nand_chip *nand = mtd->priv; +	uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0); +	uint8_t *buf; +	int i, block, status; + +	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL); +	if (buf == NULL) +		return -ENOMEM; + +	read_page_prologue(CONFIG_SYS_NAND_BASE, g4_addr); +	status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE); +	if (status) +		goto exit; + +	/* +	 * If no memory-based bbt was created, exit.  This will happen if module +	 * parameter ignore_badblocks is set.  Then why even call this function? +	 * For an unknown reason, block erase always fails if it's the first +	 * operation after device power-up.  The above read ensures it never is. +	 * Ugly, I know. +	 */ +	if (nand->bbt == NULL)  /* no memory-based bbt */ +		goto exit; + +	/* +	 * Parse factory bbt and update memory-based bbt.  Factory bbt format is +	 * simple: one bit per block, block numbers increase left to right (msb +	 * to lsb).  Bit clear means bad block. 
+	 */ +	for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) { +		int bitnum; +		uint8_t mask; +		for (bitnum = 0, mask = 0x80; +		     bitnum < 8; bitnum++, mask >>= 1) { +			if (!(buf[i] & mask)) { +				int badblock = block + bitnum; +				nand->bbt[badblock / 4] |= +					0x03 << ((badblock % 4) * 2); +				mtd->ecc_stats.badblocks++; +				printf("factory-marked bad block: %d\n", +				       badblock); +			} +		} +	} + exit: +	kfree(buf); +	return status; +} + +static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ +	/* +	 * Mark a block as bad.  Bad blocks are marked in the oob area of the +	 * first page of the block.  The default scan_bbt() in the nand +	 * infrastructure code works fine for building the memory-based bbt +	 * during initialization, as does the nand infrastructure function that +	 * checks if a block is bad by reading the bbt.  This function replaces +	 * the nand default because writes to oob-only are not supported. +	 */ + +	int ret, i; +	uint8_t *buf; +	struct nand_chip *nand = mtd->priv; +	struct nand_bbt_descr *bbtd = nand->badblock_pattern; +	int block = (int)(ofs >> nand->bbt_erase_shift); +	int page = (int)(ofs >> nand->page_shift); +	uint32_t g4_addr = mtd_to_docg4_address(page, 0); + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: %08llx\n", __func__, ofs); + +	if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1))) +		printf("%s: ofs %llx not start of block!\n", +		       __func__, ofs); + +	/* allocate blank buffer for page data */ +	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL); +	if (buf == NULL) +		return -ENOMEM; + +	/* update bbt in memory */ +	nand->bbt[block / 4] |= 0x01 << ((block & 0x03) * 2); + +	/* write bit-wise negation of pattern to oob buffer */ +	memset(nand->oob_poi, 0xff, mtd->oobsize); +	for (i = 0; i < bbtd->len; i++) +		nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i]; + +	/* write first page of block */ +	write_page_prologue(CONFIG_SYS_NAND_BASE, g4_addr); +	docg4_write_page(mtd, nand, buf, 1); +	ret = pageprog(mtd); +	
if (!ret) +		mtd->ecc_stats.badblocks++; + +	kfree(buf); + +	return ret; +} + +static uint8_t docg4_read_byte(struct mtd_info *mtd) +{ +	struct nand_chip *nand = mtd->priv; +	struct docg4_priv *doc = nand->priv; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s\n", __func__); + +	if (doc->last_command.command == NAND_CMD_STATUS) { +		int status; + +		/* +		 * Previous nand command was status request, so nand +		 * infrastructure code expects to read the status here.  If an +		 * error occurred in a previous operation, report it. +		 */ +		doc->last_command.command = 0; + +		if (doc->status) { +			status = doc->status; +			doc->status = 0; +		} + +		/* why is NAND_STATUS_WP inverse logic?? */ +		else +			status = NAND_STATUS_WP | NAND_STATUS_READY; + +		return status; +	} + +	printf("unexpectd call to read_byte()\n"); + +	return 0; +} + +static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand) +{ +	struct docg4_priv *doc = nand->priv; +	int status = NAND_STATUS_WP;       /* inverse logic?? */ +	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s...\n", __func__); + +	/* report any previously unreported error */ +	if (doc->status) { +		status |= doc->status; +		doc->status = 0; +		return status; +	} + +	status |= poll_status(CONFIG_SYS_NAND_BASE); +	return status; +} + +int docg4_nand_init(struct mtd_info *mtd, struct nand_chip *nand, int devnum) +{ +	uint16_t id1, id2; +	struct docg4_priv *docg4; +	int retval; + +	docg4 = kzalloc(sizeof(*docg4), GFP_KERNEL); +	if (!docg4) +		return -1; + +	mtd->priv = nand; +	nand->priv = docg4; + +	/* These must be initialized here because the docg4 is non-standard +	 * and doesn't produce an id that the nand code can use to look up +	 * these values (nand_scan_ident() not called). 
+	 */ +	mtd->size = DOCG4_CHIP_SIZE; +	mtd->name = "Msys_Diskonchip_G4"; +	mtd->writesize = DOCG4_PAGE_SIZE; +	mtd->erasesize = DOCG4_BLOCK_SIZE; +	mtd->oobsize = DOCG4_OOB_SIZE; + +	nand->IO_ADDR_R = +		(void __iomem *)CONFIG_SYS_NAND_BASE + DOC_IOSPACE_DATA; +	nand->IO_ADDR_W = nand->IO_ADDR_R; +	nand->chipsize = DOCG4_CHIP_SIZE; +	nand->chip_shift = DOCG4_CHIP_SHIFT; +	nand->bbt_erase_shift = DOCG4_ERASE_SHIFT; +	nand->phys_erase_shift = DOCG4_ERASE_SHIFT; +	nand->chip_delay = 20; +	nand->page_shift = DOCG4_PAGE_SHIFT; +	nand->pagemask = 0x3ffff; +	nand->badblockpos = NAND_LARGE_BADBLOCK_POS; +	nand->badblockbits = 8; +	nand->ecc.layout = &docg4_oobinfo; +	nand->ecc.mode = NAND_ECC_HW_SYNDROME; +	nand->ecc.size = DOCG4_PAGE_SIZE; +	nand->ecc.prepad = 8; +	nand->ecc.bytes	= 8; +	nand->ecc.strength = DOCG4_T; +	nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE; +	nand->controller = &nand->hwcontrol; + +	/* methods */ +	nand->cmdfunc = docg4_command; +	nand->waitfunc = docg4_wait; +	nand->select_chip = docg4_select_chip; +	nand->read_byte = docg4_read_byte; +	nand->block_markbad = docg4_block_markbad; +	nand->read_buf = docg4_read_buf; +	nand->write_buf = docg4_write_buf16; +	nand->scan_bbt = nand_default_bbt; +	nand->erase_cmd = docg4_erase_block; +	nand->ecc.read_page = docg4_read_page; +	nand->ecc.write_page = docg4_write_page; +	nand->ecc.read_page_raw = docg4_read_page_raw; +	nand->ecc.write_page_raw = docg4_write_page_raw; +	nand->ecc.read_oob = docg4_read_oob; +	nand->ecc.write_oob = docg4_write_oob; + +	/* +	 * The way the nand infrastructure code is written, a memory-based bbt +	 * is not created if NAND_SKIP_BBTSCAN is set.  With no memory bbt, +	 * nand->block_bad() is used.  So when ignoring bad blocks, we skip the +	 * scan and define a dummy block_bad() which always returns 0. 
+	 */ +	if (ignore_badblocks) { +		nand->options |= NAND_SKIP_BBTSCAN; +		nand->block_bad	= docg4_block_neverbad; +	} + +	reset(CONFIG_SYS_NAND_BASE); + +	/* check for presence of g4 chip by reading id registers */ +	id1 = readw(CONFIG_SYS_NAND_BASE + DOC_CHIPID); +	id1 = readw(CONFIG_SYS_NAND_BASE + DOCG4_MYSTERY_REG); +	id2 = readw(CONFIG_SYS_NAND_BASE + DOC_CHIPID_INV); +	id2 = readw(CONFIG_SYS_NAND_BASE + DOCG4_MYSTERY_REG); +	if (id1 != DOCG4_IDREG1_VALUE || id2 != DOCG4_IDREG2_VALUE) +		return -1; + +	/* initialize bch algorithm */ +	docg4->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY); +	if (docg4->bch == NULL) +		return -1; + +	retval = nand_scan_tail(mtd); +	if (retval) +		return -1; + +	/* +	 * Scan for bad blocks and create bbt here, then add the factory-marked +	 * bad blocks to the bbt. +	 */ +	nand->scan_bbt(mtd); +	nand->options |= NAND_BBT_SCANNED; +	retval = read_factory_bbt(mtd); +	if (retval) +		return -1; + +	retval = nand_register(devnum); +	if (retval) +		return -1; + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/docg4_spl.c b/roms/u-boot/drivers/mtd/nand/docg4_spl.c new file mode 100644 index 00000000..351b75a0 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/docg4_spl.c @@ -0,0 +1,219 @@ +/* + * SPL driver for Diskonchip G4 nand flash + * + * Copyright (C) 2013 Mike Dunn <mikedunn@newsguy.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * This driver basically mimics the load functionality of a typical IPL (initial + * program loader) resident in the 2k NOR-like region of the docg4 that is + * mapped to the reset vector.  It allows the u-boot SPL to continue loading if + * the IPL loads a fixed number of flash blocks that is insufficient to contain + * the entire u-boot image.  In this case, a concatenated spl + u-boot image is + * written at the flash offset from which the IPL loads an image, and when the + * IPL jumps to the SPL, the SPL resumes loading where the IPL left off.  
See + * the palmtreo680 for an example. + * + * This driver assumes that the data was written to the flash using the device's + * "reliable" mode, and also assumes that each 512 byte page is stored + * redundantly in the subsequent page.  This storage format is likely to be used + * by all boards that boot from the docg4.  The format compensates for the lack + * of ecc in the IPL. + * + * Reliable mode reduces the capacity of a block by half, and the redundant + * pages reduce it by half again.  As a result, the normal 256k capacity of a + * block is reduced to 64k for the purposes of the IPL/SPL. + */ + +#include <asm/io.h> +#include <linux/mtd/docg4.h> + +/* forward declarations */ +static inline void write_nop(void __iomem *docptr); +static int poll_status(void __iomem *docptr); +static void write_addr(void __iomem *docptr, uint32_t docg4_addr); +static void address_sequence(unsigned int g4_page, unsigned int g4_index, +			     void __iomem *docptr); +static int docg4_load_block_reliable(uint32_t flash_offset, void *dest_addr); + +int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst) +{ +	void *load_addr = dst; +	uint32_t flash_offset = offs; +	const unsigned int block_count = +		(size + DOCG4_BLOCK_CAPACITY_SPL - 1) +		/ DOCG4_BLOCK_CAPACITY_SPL; +	int i; + +	for (i = 0; i < block_count; i++) { +		int ret = docg4_load_block_reliable(flash_offset, load_addr); +		if (ret) +			return ret; +		load_addr += DOCG4_BLOCK_CAPACITY_SPL; +		flash_offset += DOCG4_BLOCK_SIZE; +	} +	return 0; +} + +static inline void write_nop(void __iomem *docptr) +{ +	writew(0, docptr + DOC_NOP); +} + +static int poll_status(void __iomem *docptr) +{ +	/* +	 * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL +	 * register.  Operations known to take a long time (e.g., block erase) +	 * should sleep for a while before calling this. 
+	 */ + +	uint8_t flash_status; + +	/* hardware quirk requires reading twice initially */ +	flash_status = readb(docptr + DOC_FLASHCONTROL); + +	do { +		flash_status = readb(docptr + DOC_FLASHCONTROL); +	} while (!(flash_status & DOC_CTRL_FLASHREADY)); + +	return 0; +} + +static void write_addr(void __iomem *docptr, uint32_t docg4_addr) +{ +	/* write the four address bytes packed in docg4_addr to the device */ + +	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); +	docg4_addr >>= 8; +	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); +	docg4_addr >>= 8; +	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); +	docg4_addr >>= 8; +	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); +} + +static void address_sequence(unsigned int g4_page, unsigned int g4_index, +			     void __iomem *docptr) +{ +	writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE); +	writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); +	write_addr(docptr, ((uint32_t)g4_page << 16) | g4_index); +	write_nop(docptr); +} + +static int docg4_load_block_reliable(uint32_t flash_offset, void *dest_addr) +{ +	void __iomem *docptr = (void *)CONFIG_SYS_NAND_BASE; +	unsigned int g4_page = flash_offset >> 11; /* 2k page */ +	const unsigned int last_g4_page = g4_page + 0x80; /* last in block */ +	int g4_index = 0; +	uint16_t flash_status; +	uint16_t *buf; + +	/* flash_offset must be aligned to the start of a block */ +	if (flash_offset & 0x3ffff) +		return -1; + +	writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE); +	writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); +	write_nop(docptr); +	poll_status(docptr); +	write_nop(docptr); +	writew(0x45, docptr + DOC_FLASHSEQUENCE); +	writew(0xa3, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); +	writew(0x22, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); + +	/* read 1st 4 oob bytes of first subpage of block */ +	address_sequence(g4_page, 0x0100, docptr); /* index at oob */ +	write_nop(docptr); +	flash_status = 
readw(docptr + DOC_FLASHCONTROL); +	flash_status = readw(docptr + DOC_FLASHCONTROL); +	if (flash_status & 0x06) /* sequence or protection errors */ +		return -1; +	writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND); +	write_nop(docptr); +	write_nop(docptr); +	poll_status(docptr); +	writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); +	write_nop(docptr); + +	/* +	 * Here we read the first four oob bytes of the first page of the block. +	 * The IPL on the palmtreo680 requires that this contain a 32 bit magic +	 * number, or the load aborts.  We'll ignore it. +	 */ +	readw(docptr + 0x103c); /* hw quirk; 1st read discarded */ +	readw(docptr + 0x103c);	/* lower 16 bits of magic number */ +	readw(docptr + DOCG4_MYSTERY_REG); /* upper 16 bits of magic number */ +	writew(0, docptr + DOC_DATAEND); +	write_nop(docptr); +	write_nop(docptr); + +	/* load contents of block to memory */ +	buf = (uint16_t *)dest_addr; +	do { +		int i; + +		address_sequence(g4_page, g4_index, docptr); +		writew(DOCG4_CMD_READ2, +		       docptr + DOC_FLASHCOMMAND); +		write_nop(docptr); +		write_nop(docptr); +		poll_status(docptr); +		writew(DOC_ECCCONF0_READ_MODE | +		       DOC_ECCCONF0_ECC_ENABLE | +		       DOCG4_BCH_SIZE, +		       docptr + DOC_ECCCONF0); +		write_nop(docptr); +		write_nop(docptr); +		write_nop(docptr); +		write_nop(docptr); +		write_nop(docptr); + +		/* read the 512 bytes of page data, 2 bytes at a time */ +		readw(docptr + 0x103c); /* hw quirk */ +		for (i = 0; i < 256; i++) +			*buf++ = readw(docptr + 0x103c); + +		/* read oob, but discard it */ +		for (i = 0; i < 7; i++) +			readw(docptr + 0x103c); +		readw(docptr + DOCG4_OOB_6_7); +		readw(docptr + DOCG4_OOB_6_7); + +		writew(0, docptr + DOC_DATAEND); +		write_nop(docptr); +		write_nop(docptr); + +		if (!(g4_index & 0x100)) { +			/* not redundant subpage read; check for ecc error */ +			write_nop(docptr); +			flash_status = 
readw(docptr + DOC_ECCCONF1); +			flash_status = readw(docptr + DOC_ECCCONF1); +			if (flash_status & 0x80) { /* ecc error */ +				g4_index += 0x108; /* read redundant subpage */ +				buf -= 256;        /* back up ram ptr */ +				continue; +			} else                       /* no ecc error */ +				g4_index += 0x210; /* skip redundant subpage */ +		} else  /* redundant page was just read; skip ecc error check */ +			g4_index += 0x108; + +		if (g4_index == 0x420) { /* finished with 2k page */ +			g4_index = 0; +			g4_page += 2; /* odd-numbered 2k pages skipped */ +		} + +	} while (g4_page != last_g4_page); /* while still on same block */ + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/fsl_elbc_nand.c b/roms/u-boot/drivers/mtd/nand/fsl_elbc_nand.c new file mode 100644 index 00000000..2f31fc96 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/fsl_elbc_nand.c @@ -0,0 +1,829 @@ +/* Freescale Enhanced Local Bus Controller FCM NAND driver + * + * Copyright (c) 2006-2008 Freescale Semiconductor + * + * Authors: Nick Spence <nick.spence@freescale.com>, + *          Scott Wood <scottwood@freescale.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <malloc.h> +#include <nand.h> + +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/nand_ecc.h> + +#include <asm/io.h> +#include <asm/errno.h> + +#ifdef VERBOSE_DEBUG +#define DEBUG_ELBC +#define vdbg(format, arg...) printf("DEBUG: " format, ##arg) +#else +#define vdbg(format, arg...) do {} while (0) +#endif + +/* Can't use plain old DEBUG because the linux mtd + * headers define it as a macro. + */ +#ifdef DEBUG_ELBC +#define dbg(format, arg...) printf("DEBUG: " format, ##arg) +#else +#define dbg(format, arg...) 
do {} while (0) +#endif + +#define MAX_BANKS 8 +#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */ +#define FCM_TIMEOUT_MSECS 10 /* Maximum number of mSecs to wait for FCM */ + +#define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC) + +struct fsl_elbc_ctrl; + +/* mtd information per set */ + +struct fsl_elbc_mtd { +	struct nand_chip chip; +	struct fsl_elbc_ctrl *ctrl; + +	struct device *dev; +	int bank;               /* Chip select bank number           */ +	u8 __iomem *vbase;      /* Chip select base virtual address  */ +	int page_size;          /* NAND page size (0=512, 1=2048)    */ +	unsigned int fmr;       /* FCM Flash Mode Register value     */ +}; + +/* overview of the fsl elbc controller */ + +struct fsl_elbc_ctrl { +	struct nand_hw_control controller; +	struct fsl_elbc_mtd *chips[MAX_BANKS]; + +	/* device info */ +	fsl_lbc_t *regs; +	u8 __iomem *addr;        /* Address of assigned FCM buffer        */ +	unsigned int page;       /* Last page written to / read from      */ +	unsigned int read_bytes; /* Number of bytes read during command   */ +	unsigned int column;     /* Saved column from SEQIN               */ +	unsigned int index;      /* Pointer to next byte to 'read'        */ +	unsigned int status;     /* status read from LTESR after last op  */ +	unsigned int mdr;        /* UPM/FCM Data Register value           */ +	unsigned int use_mdr;    /* Non zero if the MDR is to be set      */ +	unsigned int oob;        /* Non zero if operating on OOB data     */ +}; + +/* These map to the positions used by the FCM hardware ECC generator */ + +/* Small Page FLASH with FMR[ECCM] = 0 */ +static struct nand_ecclayout fsl_elbc_oob_sp_eccm0 = { +	.eccbytes = 3, +	.eccpos = {6, 7, 8}, +	.oobfree = { {0, 5}, {9, 7} }, +}; + +/* Small Page FLASH with FMR[ECCM] = 1 */ +static struct nand_ecclayout fsl_elbc_oob_sp_eccm1 = { +	.eccbytes = 3, +	.eccpos = {8, 9, 10}, +	.oobfree = { {0, 5}, {6, 2}, {11, 5} }, +}; + +/* Large Page FLASH with 
FMR[ECCM] = 0 */ +static struct nand_ecclayout fsl_elbc_oob_lp_eccm0 = { +	.eccbytes = 12, +	.eccpos = {6, 7, 8, 22, 23, 24, 38, 39, 40, 54, 55, 56}, +	.oobfree = { {1, 5}, {9, 13}, {25, 13}, {41, 13}, {57, 7} }, +}; + +/* Large Page FLASH with FMR[ECCM] = 1 */ +static struct nand_ecclayout fsl_elbc_oob_lp_eccm1 = { +	.eccbytes = 12, +	.eccpos = {8, 9, 10, 24, 25, 26, 40, 41, 42, 56, 57, 58}, +	.oobfree = { {1, 7}, {11, 13}, {27, 13}, {43, 13}, {59, 5} }, +}; + +/* + * fsl_elbc_oob_lp_eccm* specify that LP NAND's OOB free area starts at offset + * 1, so we have to adjust bad block pattern. This pattern should be used for + * x8 chips only. So far hardware does not support x16 chips anyway. + */ +static u8 scan_ff_pattern[] = { 0xff, }; + +static struct nand_bbt_descr largepage_memorybased = { +	.options = 0, +	.offs = 0, +	.len = 1, +	.pattern = scan_ff_pattern, +}; + +/* + * ELBC may use HW ECC, so that OOB offsets, that NAND core uses for bbt, + * interfere with ECC positions, that's why we implement our own descriptors. + * OOB {11, 5}, works for both SP and LP chips, with ECCM = 1 and ECCM = 0. 
+ */ +static u8 bbt_pattern[] = {'B', 'b', 't', '0' }; +static u8 mirror_pattern[] = {'1', 't', 'b', 'B' }; + +static struct nand_bbt_descr bbt_main_descr = { +	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | +		   NAND_BBT_2BIT | NAND_BBT_VERSION, +	.offs =	11, +	.len = 4, +	.veroffs = 15, +	.maxblocks = 4, +	.pattern = bbt_pattern, +}; + +static struct nand_bbt_descr bbt_mirror_descr = { +	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | +		   NAND_BBT_2BIT | NAND_BBT_VERSION, +	.offs =	11, +	.len = 4, +	.veroffs = 15, +	.maxblocks = 4, +	.pattern = mirror_pattern, +}; + +/*=================================*/ + +/* + * Set up the FCM hardware block and page address fields, and the fcm + * structure addr field to point to the correct FCM buffer in memory + */ +static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_elbc_mtd *priv = chip->priv; +	struct fsl_elbc_ctrl *ctrl = priv->ctrl; +	fsl_lbc_t *lbc = ctrl->regs; +	int buf_num; + +	ctrl->page = page_addr; + +	if (priv->page_size) { +		out_be32(&lbc->fbar, page_addr >> 6); +		out_be32(&lbc->fpar, +			 ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) | +			 (oob ? FPAR_LP_MS : 0) | column); +		buf_num = (page_addr & 1) << 2; +	} else { +		out_be32(&lbc->fbar, page_addr >> 5); +		out_be32(&lbc->fpar, +			 ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) | +			 (oob ? FPAR_SP_MS : 0) | column); +		buf_num = page_addr & 7; +	} + +	ctrl->addr = priv->vbase + buf_num * 1024; +	ctrl->index = column; + +	/* for OOB data point to the second half of the buffer */ +	if (oob) +		ctrl->index += priv->page_size ? 
2048 : 512; + +	vdbg("set_addr: bank=%d, ctrl->addr=0x%p (0x%p), " +	     "index %x, pes %d ps %d\n", +	     buf_num, ctrl->addr, priv->vbase, ctrl->index, +	     chip->phys_erase_shift, chip->page_shift); +} + +/* + * execute FCM command and wait for it to complete + */ +static int fsl_elbc_run_command(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_elbc_mtd *priv = chip->priv; +	struct fsl_elbc_ctrl *ctrl = priv->ctrl; +	fsl_lbc_t *lbc = ctrl->regs; +	long long end_tick; +	u32 ltesr; + +	/* Setup the FMR[OP] to execute without write protection */ +	out_be32(&lbc->fmr, priv->fmr | 3); +	if (ctrl->use_mdr) +		out_be32(&lbc->mdr, ctrl->mdr); + +	vdbg("fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n", +	     in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr)); +	vdbg("fsl_elbc_run_command: fbar=%08x fpar=%08x " +	     "fbcr=%08x bank=%d\n", +	     in_be32(&lbc->fbar), in_be32(&lbc->fpar), +	     in_be32(&lbc->fbcr), priv->bank); + +	/* execute special operation */ +	out_be32(&lbc->lsor, priv->bank); + +	/* wait for FCM complete flag or timeout */ +	end_tick = usec2ticks(FCM_TIMEOUT_MSECS * 1000) + get_ticks(); + +	ltesr = 0; +	while (end_tick > get_ticks()) { +		ltesr = in_be32(&lbc->ltesr); +		if (ltesr & LTESR_CC) +			break; +	} + +	ctrl->status = ltesr & LTESR_NAND_MASK; +	out_be32(&lbc->ltesr, ctrl->status); +	out_be32(&lbc->lteatr, 0); + +	/* store mdr value in case it was needed */ +	if (ctrl->use_mdr) +		ctrl->mdr = in_be32(&lbc->mdr); + +	ctrl->use_mdr = 0; + +	vdbg("fsl_elbc_run_command: stat=%08x mdr=%08x fmr=%08x\n", +	     ctrl->status, ctrl->mdr, in_be32(&lbc->fmr)); + +	/* returns 0 on success otherwise non-zero) */ +	return ctrl->status == LTESR_CC ? 
0 : -EIO; +} + +static void fsl_elbc_do_read(struct nand_chip *chip, int oob) +{ +	struct fsl_elbc_mtd *priv = chip->priv; +	struct fsl_elbc_ctrl *ctrl = priv->ctrl; +	fsl_lbc_t *lbc = ctrl->regs; + +	if (priv->page_size) { +		out_be32(&lbc->fir, +			 (FIR_OP_CW0 << FIR_OP0_SHIFT) | +			 (FIR_OP_CA  << FIR_OP1_SHIFT) | +			 (FIR_OP_PA  << FIR_OP2_SHIFT) | +			 (FIR_OP_CW1 << FIR_OP3_SHIFT) | +			 (FIR_OP_RBW << FIR_OP4_SHIFT)); + +		out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) | +				    (NAND_CMD_READSTART << FCR_CMD1_SHIFT)); +	} else { +		out_be32(&lbc->fir, +			 (FIR_OP_CW0 << FIR_OP0_SHIFT) | +			 (FIR_OP_CA  << FIR_OP1_SHIFT) | +			 (FIR_OP_PA  << FIR_OP2_SHIFT) | +			 (FIR_OP_RBW << FIR_OP3_SHIFT)); + +		if (oob) +			out_be32(&lbc->fcr, +				 NAND_CMD_READOOB << FCR_CMD0_SHIFT); +		else +			out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT); +	} +} + +/* cmdfunc send commands to the FCM */ +static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command, +			     int column, int page_addr) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_elbc_mtd *priv = chip->priv; +	struct fsl_elbc_ctrl *ctrl = priv->ctrl; +	fsl_lbc_t *lbc = ctrl->regs; + +	ctrl->use_mdr = 0; + +	/* clear the read buffer */ +	ctrl->read_bytes = 0; +	if (command != NAND_CMD_PAGEPROG) +		ctrl->index = 0; + +	switch (command) { +	/* READ0 and READ1 read the entire buffer to use hardware ECC. */ +	case NAND_CMD_READ1: +		column += 256; + +	/* fall-through */ +	case NAND_CMD_READ0: +		vdbg("fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:" +		     " 0x%x, column: 0x%x.\n", page_addr, column); + +		out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */ +		set_addr(mtd, 0, page_addr, 0); + +		ctrl->read_bytes = mtd->writesize + mtd->oobsize; +		ctrl->index += column; + +		fsl_elbc_do_read(chip, 0); +		fsl_elbc_run_command(mtd); +		return; + +	/* READOOB reads only the OOB because no ECC is performed. 
*/ +	case NAND_CMD_READOOB: +		vdbg("fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:" +		     " 0x%x, column: 0x%x.\n", page_addr, column); + +		out_be32(&lbc->fbcr, mtd->oobsize - column); +		set_addr(mtd, column, page_addr, 1); + +		ctrl->read_bytes = mtd->writesize + mtd->oobsize; + +		fsl_elbc_do_read(chip, 1); +		fsl_elbc_run_command(mtd); + +		return; + +	/* READID must read all 5 possible bytes while CEB is active */ +	case NAND_CMD_READID: +	case NAND_CMD_PARAM: +		vdbg("fsl_elbc_cmdfunc: NAND_CMD 0x%x.\n", command); + +		out_be32(&lbc->fir, (FIR_OP_CW0 << FIR_OP0_SHIFT) | +				    (FIR_OP_UA  << FIR_OP1_SHIFT) | +				    (FIR_OP_RBW << FIR_OP2_SHIFT)); +		out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT); +		/* +		 * although currently it's 8 bytes for READID, we always read +		 * the maximum 256 bytes(for PARAM) +		 */ +		out_be32(&lbc->fbcr, 256); +		ctrl->read_bytes = 256; +		ctrl->use_mdr = 1; +		ctrl->mdr = column; +		set_addr(mtd, 0, 0, 0); +		fsl_elbc_run_command(mtd); +		return; + +	/* ERASE1 stores the block and page address */ +	case NAND_CMD_ERASE1: +		vdbg("fsl_elbc_cmdfunc: NAND_CMD_ERASE1, " +		     "page_addr: 0x%x.\n", page_addr); +		set_addr(mtd, 0, page_addr, 0); +		return; + +	/* ERASE2 uses the block and page address from ERASE1 */ +	case NAND_CMD_ERASE2: +		vdbg("fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n"); + +		out_be32(&lbc->fir, +			 (FIR_OP_CW0 << FIR_OP0_SHIFT) | +			 (FIR_OP_PA  << FIR_OP1_SHIFT) | +			 (FIR_OP_CM1 << FIR_OP2_SHIFT)); + +		out_be32(&lbc->fcr, +			 (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) | +			 (NAND_CMD_ERASE2 << FCR_CMD1_SHIFT)); + +		out_be32(&lbc->fbcr, 0); +		ctrl->read_bytes = 0; + +		fsl_elbc_run_command(mtd); +		return; + +	/* SEQIN sets up the addr buffer and all registers except the length */ +	case NAND_CMD_SEQIN: { +		u32 fcr; +		vdbg("fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, " +		     "page_addr: 0x%x, column: 0x%x.\n", +		     page_addr, column); + +		ctrl->column = column; +		ctrl->oob = 0; + +		if 
(priv->page_size) { +			fcr = (NAND_CMD_SEQIN << FCR_CMD0_SHIFT) | +			      (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT); + +			out_be32(&lbc->fir, +				 (FIR_OP_CW0 << FIR_OP0_SHIFT) | +				 (FIR_OP_CA  << FIR_OP1_SHIFT) | +				 (FIR_OP_PA  << FIR_OP2_SHIFT) | +				 (FIR_OP_WB  << FIR_OP3_SHIFT) | +				 (FIR_OP_CW1 << FIR_OP4_SHIFT)); +		} else { +			fcr = (NAND_CMD_PAGEPROG << FCR_CMD1_SHIFT) | +			      (NAND_CMD_SEQIN << FCR_CMD2_SHIFT); + +			out_be32(&lbc->fir, +				 (FIR_OP_CW0 << FIR_OP0_SHIFT) | +				 (FIR_OP_CM2 << FIR_OP1_SHIFT) | +				 (FIR_OP_CA  << FIR_OP2_SHIFT) | +				 (FIR_OP_PA  << FIR_OP3_SHIFT) | +				 (FIR_OP_WB  << FIR_OP4_SHIFT) | +				 (FIR_OP_CW1 << FIR_OP5_SHIFT)); + +			if (column >= mtd->writesize) { +				/* OOB area --> READOOB */ +				column -= mtd->writesize; +				fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT; +				ctrl->oob = 1; +			} else if (column < 256) { +				/* First 256 bytes --> READ0 */ +				fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT; +			} else { +				/* Second 256 bytes --> READ1 */ +				fcr |= NAND_CMD_READ1 << FCR_CMD0_SHIFT; +			} +		} + +		out_be32(&lbc->fcr, fcr); +		set_addr(mtd, column, page_addr, ctrl->oob); +		return; +	} + +	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */ +	case NAND_CMD_PAGEPROG: { +		vdbg("fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG " +		     "writing %d bytes.\n", ctrl->index); + +		/* if the write did not start at 0 or is not a full page +		 * then set the exact length, otherwise use a full page +		 * write so the HW generates the ECC. 
+		 */ +		if (ctrl->oob || ctrl->column != 0 || +		    ctrl->index != mtd->writesize + mtd->oobsize) +			out_be32(&lbc->fbcr, ctrl->index); +		else +			out_be32(&lbc->fbcr, 0); + +		fsl_elbc_run_command(mtd); + +		return; +	} + +	/* CMD_STATUS must read the status byte while CEB is active */ +	/* Note - it does not wait for the ready line */ +	case NAND_CMD_STATUS: +		out_be32(&lbc->fir, +			 (FIR_OP_CM0 << FIR_OP0_SHIFT) | +			 (FIR_OP_RBW << FIR_OP1_SHIFT)); +		out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT); +		out_be32(&lbc->fbcr, 1); +		set_addr(mtd, 0, 0, 0); +		ctrl->read_bytes = 1; + +		fsl_elbc_run_command(mtd); + +		/* The chip always seems to report that it is +		 * write-protected, even when it is not. +		 */ +		out_8(ctrl->addr, in_8(ctrl->addr) | NAND_STATUS_WP); +		return; + +	/* RESET without waiting for the ready line */ +	case NAND_CMD_RESET: +		dbg("fsl_elbc_cmdfunc: NAND_CMD_RESET.\n"); +		out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT); +		out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT); +		fsl_elbc_run_command(mtd); +		return; + +	default: +		printf("fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n", +			command); +	} +} + +static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip) +{ +	/* The hardware does not seem to support multiple +	 * chips per bank. 
+	 */ +} + +/* + * Write buf to the FCM Controller Data Buffer + */ +static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_elbc_mtd *priv = chip->priv; +	struct fsl_elbc_ctrl *ctrl = priv->ctrl; +	unsigned int bufsize = mtd->writesize + mtd->oobsize; + +	if (len <= 0) { +		printf("write_buf of %d bytes", len); +		ctrl->status = 0; +		return; +	} + +	if ((unsigned int)len > bufsize - ctrl->index) { +		printf("write_buf beyond end of buffer " +		       "(%d requested, %u available)\n", +		       len, bufsize - ctrl->index); +		len = bufsize - ctrl->index; +	} + +	memcpy_toio(&ctrl->addr[ctrl->index], buf, len); +	/* +	 * This is workaround for the weird elbc hangs during nand write, +	 * Scott Wood says: "...perhaps difference in how long it takes a +	 * write to make it through the localbus compared to a write to IMMR +	 * is causing problems, and sync isn't helping for some reason." +	 * Reading back the last byte helps though. +	 */ +	in_8(&ctrl->addr[ctrl->index] + len - 1); + +	ctrl->index += len; +} + +/* + * read a byte from either the FCM hardware buffer if it has any data left + * otherwise issue a command to read a single byte. + */ +static u8 fsl_elbc_read_byte(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_elbc_mtd *priv = chip->priv; +	struct fsl_elbc_ctrl *ctrl = priv->ctrl; + +	/* If there are still bytes in the FCM, then use the next byte. 
*/ +	if (ctrl->index < ctrl->read_bytes) +		return in_8(&ctrl->addr[ctrl->index++]); + +	printf("read_byte beyond end of buffer\n"); +	return ERR_BYTE; +} + +/* + * Read from the FCM Controller Data Buffer + */ +static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_elbc_mtd *priv = chip->priv; +	struct fsl_elbc_ctrl *ctrl = priv->ctrl; +	int avail; + +	if (len < 0) +		return; + +	avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index); +	memcpy_fromio(buf, &ctrl->addr[ctrl->index], avail); +	ctrl->index += avail; + +	if (len > avail) +		printf("read_buf beyond end of buffer " +		       "(%d requested, %d available)\n", +		       len, avail); +} + +/* + * Verify buffer against the FCM Controller Data Buffer + */ +static int fsl_elbc_verify_buf(struct mtd_info *mtd, +			       const u_char *buf, int len) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_elbc_mtd *priv = chip->priv; +	struct fsl_elbc_ctrl *ctrl = priv->ctrl; +	int i; + +	if (len < 0) { +		printf("write_buf of %d bytes", len); +		return -EINVAL; +	} + +	if ((unsigned int)len > ctrl->read_bytes - ctrl->index) { +		printf("verify_buf beyond end of buffer " +		       "(%d requested, %u available)\n", +		       len, ctrl->read_bytes - ctrl->index); + +		ctrl->index = ctrl->read_bytes; +		return -EINVAL; +	} + +	for (i = 0; i < len; i++) +		if (in_8(&ctrl->addr[ctrl->index + i]) != buf[i]) +			break; + +	ctrl->index += len; +	return i == len && ctrl->status == LTESR_CC ? 0 : -EIO; +} + +/* This function is called after Program and Erase Operations to + * check for success or failure. 
+ */ +static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip) +{ +	struct fsl_elbc_mtd *priv = chip->priv; +	struct fsl_elbc_ctrl *ctrl = priv->ctrl; +	fsl_lbc_t *lbc = ctrl->regs; + +	if (ctrl->status != LTESR_CC) +		return NAND_STATUS_FAIL; + +	/* Use READ_STATUS command, but wait for the device to be ready */ +	ctrl->use_mdr = 0; +	out_be32(&lbc->fir, +		 (FIR_OP_CW0 << FIR_OP0_SHIFT) | +		 (FIR_OP_RBW << FIR_OP1_SHIFT)); +	out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT); +	out_be32(&lbc->fbcr, 1); +	set_addr(mtd, 0, 0, 0); +	ctrl->read_bytes = 1; + +	fsl_elbc_run_command(mtd); + +	if (ctrl->status != LTESR_CC) +		return NAND_STATUS_FAIL; + +	/* The chip always seems to report that it is +	 * write-protected, even when it is not. +	 */ +	out_8(ctrl->addr, in_8(ctrl->addr) | NAND_STATUS_WP); +	return fsl_elbc_read_byte(mtd); +} + +static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip, +			      uint8_t *buf, int oob_required, int page) +{ +	fsl_elbc_read_buf(mtd, buf, mtd->writesize); +	fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize); + +	if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL) +		mtd->ecc_stats.failed++; + +	return 0; +} + +/* ECC will be calculated automatically, and errors will be detected in + * waitfunc. 
+ */ +static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip, +				const uint8_t *buf, int oob_required) +{ +	fsl_elbc_write_buf(mtd, buf, mtd->writesize); +	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); + +	return 0; +} + +static struct fsl_elbc_ctrl *elbc_ctrl; + +static void fsl_elbc_ctrl_init(void) +{ +	elbc_ctrl = kzalloc(sizeof(*elbc_ctrl), GFP_KERNEL); +	if (!elbc_ctrl) +		return; + +	elbc_ctrl->regs = LBC_BASE_ADDR; + +	/* clear event registers */ +	out_be32(&elbc_ctrl->regs->ltesr, LTESR_NAND_MASK); +	out_be32(&elbc_ctrl->regs->lteatr, 0); + +	/* Enable interrupts for any detected events */ +	out_be32(&elbc_ctrl->regs->lteir, LTESR_NAND_MASK); + +	elbc_ctrl->read_bytes = 0; +	elbc_ctrl->index = 0; +	elbc_ctrl->addr = NULL; +} + +static int fsl_elbc_chip_init(int devnum, u8 *addr) +{ +	struct mtd_info *mtd = &nand_info[devnum]; +	struct nand_chip *nand; +	struct fsl_elbc_mtd *priv; +	uint32_t br = 0, or = 0; +	int ret; + +	if (!elbc_ctrl) { +		fsl_elbc_ctrl_init(); +		if (!elbc_ctrl) +			return -1; +	} + +	priv = kzalloc(sizeof(*priv), GFP_KERNEL); +	if (!priv) +		return -ENOMEM; + +	priv->ctrl = elbc_ctrl; +	priv->vbase = addr; + +	/* Find which chip select it is connected to.  It'd be nice +	 * if we could pass more than one datum to the NAND driver... 
+	 */ +	for (priv->bank = 0; priv->bank < MAX_BANKS; priv->bank++) { +		phys_addr_t phys_addr = virt_to_phys(addr); + +		br = in_be32(&elbc_ctrl->regs->bank[priv->bank].br); +		or = in_be32(&elbc_ctrl->regs->bank[priv->bank].or); + +		if ((br & BR_V) && (br & BR_MSEL) == BR_MS_FCM && +		    (br & or & BR_BA) == BR_PHYS_ADDR(phys_addr)) +			break; +	} + +	if (priv->bank >= MAX_BANKS) { +		printf("fsl_elbc_nand: address did not match any " +		       "chip selects\n"); +		return -ENODEV; +	} + +	nand = &priv->chip; +	mtd->priv = nand; + +	elbc_ctrl->chips[priv->bank] = priv; + +	/* fill in nand_chip structure */ +	/* set up function call table */ +	nand->read_byte = fsl_elbc_read_byte; +	nand->write_buf = fsl_elbc_write_buf; +	nand->read_buf = fsl_elbc_read_buf; +	nand->verify_buf = fsl_elbc_verify_buf; +	nand->select_chip = fsl_elbc_select_chip; +	nand->cmdfunc = fsl_elbc_cmdfunc; +	nand->waitfunc = fsl_elbc_wait; + +	/* set up nand options */ +	nand->bbt_td = &bbt_main_descr; +	nand->bbt_md = &bbt_mirror_descr; + +  	/* set up nand options */ +	nand->options = NAND_NO_SUBPAGE_WRITE; +	nand->bbt_options = NAND_BBT_USE_FLASH; + +	nand->controller = &elbc_ctrl->controller; +	nand->priv = priv; + +	nand->ecc.read_page = fsl_elbc_read_page; +	nand->ecc.write_page = fsl_elbc_write_page; + +	priv->fmr = (15 << FMR_CWTO_SHIFT) | (2 << FMR_AL_SHIFT); + +	/* If CS Base Register selects full hardware ECC then use it */ +	if ((br & BR_DECC) == BR_DECC_CHK_GEN) { +		nand->ecc.mode = NAND_ECC_HW; + +		nand->ecc.layout = (priv->fmr & FMR_ECCM) ? 
+				   &fsl_elbc_oob_sp_eccm1 : +				   &fsl_elbc_oob_sp_eccm0; + +		nand->ecc.size = 512; +		nand->ecc.bytes = 3; +		nand->ecc.steps = 1; +		nand->ecc.strength = 1; +	} else { +		/* otherwise fall back to software ECC */ +#if defined(CONFIG_NAND_ECC_BCH) +		nand->ecc.mode = NAND_ECC_SOFT_BCH; +#else +		nand->ecc.mode = NAND_ECC_SOFT; +#endif +	} + +	ret = nand_scan_ident(mtd, 1, NULL); +	if (ret) +		return ret; + +	/* Large-page-specific setup */ +	if (mtd->writesize == 2048) { +		setbits_be32(&elbc_ctrl->regs->bank[priv->bank].or, +			     OR_FCM_PGS); +		in_be32(&elbc_ctrl->regs->bank[priv->bank].or); + +		priv->page_size = 1; +		nand->badblock_pattern = &largepage_memorybased; + +		/* +		 * Hardware expects small page has ECCM0, large page has +		 * ECCM1 when booting from NAND, and we follow that even +		 * when not booting from NAND. +		 */ +		priv->fmr |= FMR_ECCM; + +		/* adjust ecc setup if needed */ +		if ((br & BR_DECC) == BR_DECC_CHK_GEN) { +			nand->ecc.steps = 4; +			nand->ecc.layout = (priv->fmr & FMR_ECCM) ? 
+					   &fsl_elbc_oob_lp_eccm1 : +					   &fsl_elbc_oob_lp_eccm0; +		} +	} else if (mtd->writesize == 512) { +		clrbits_be32(&elbc_ctrl->regs->bank[priv->bank].or, +			     OR_FCM_PGS); +		in_be32(&elbc_ctrl->regs->bank[priv->bank].or); +	} else { +		return -ENODEV; +	} + +	ret = nand_scan_tail(mtd); +	if (ret) +		return ret; + +	ret = nand_register(devnum); +	if (ret) +		return ret; + +	return 0; +} + +#ifndef CONFIG_SYS_NAND_BASE_LIST +#define CONFIG_SYS_NAND_BASE_LIST { CONFIG_SYS_NAND_BASE } +#endif + +static unsigned long base_address[CONFIG_SYS_MAX_NAND_DEVICE] = +	CONFIG_SYS_NAND_BASE_LIST; + +void board_nand_init(void) +{ +	int i; + +	for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++) +		fsl_elbc_chip_init(i, (u8 *)base_address[i]); +} diff --git a/roms/u-boot/drivers/mtd/nand/fsl_elbc_spl.c b/roms/u-boot/drivers/mtd/nand/fsl_elbc_spl.c new file mode 100644 index 00000000..29521359 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/fsl_elbc_spl.c @@ -0,0 +1,168 @@ +/* + * NAND boot for Freescale Enhanced Local Bus Controller, Flash Control Machine + * + * (C) Copyright 2006-2008 + * Stefan Roese, DENX Software Engineering, sr@denx.de. + * + * Copyright (c) 2008 Freescale Semiconductor, Inc. 
+ * Author: Scott Wood <scottwood@freescale.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <asm/io.h> +#include <asm/fsl_lbc.h> +#include <nand.h> + +#define WINDOW_SIZE 8192 + +static void nand_wait(void) +{ +	fsl_lbc_t *regs = LBC_BASE_ADDR; + +	for (;;) { +		uint32_t status = in_be32(®s->ltesr); + +		if (status == 1) +			return; + +		if (status & 1) { +			puts("read failed (ltesr)\n"); +			for (;;); +		} +	} +} + +#ifdef CONFIG_TPL_BUILD +int nand_spl_load_image(uint32_t offs, unsigned int uboot_size, void *vdst) +#else +static int nand_load_image(uint32_t offs, unsigned int uboot_size, void *vdst) +#endif +{ +	fsl_lbc_t *regs = LBC_BASE_ADDR; +	uchar *buf = (uchar *)CONFIG_SYS_NAND_BASE; +	const int large = CONFIG_SYS_NAND_OR_PRELIM & OR_FCM_PGS; +	const int block_shift = large ? 17 : 14; +	const int block_size = 1 << block_shift; +	const int page_size = large ? 2048 : 512; +	const int bad_marker = large ? page_size + 0 : page_size + 5; +	int fmr = (15 << FMR_CWTO_SHIFT) | (2 << FMR_AL_SHIFT) | 2; +	int pos = 0; +	char *dst = vdst; + +	if (offs & (block_size - 1)) { +		puts("bad offset\n"); +		for (;;); +	} + +	if (large) { +		fmr |= FMR_ECCM; +		out_be32(®s->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) | +				     (NAND_CMD_READSTART << FCR_CMD1_SHIFT)); +		out_be32(®s->fir, +			 (FIR_OP_CW0 << FIR_OP0_SHIFT) | +			 (FIR_OP_CA  << FIR_OP1_SHIFT) | +			 (FIR_OP_PA  << FIR_OP2_SHIFT) | +			 (FIR_OP_CW1 << FIR_OP3_SHIFT) | +			 (FIR_OP_RBW << FIR_OP4_SHIFT)); +	} else { +		out_be32(®s->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT); +		out_be32(®s->fir, +			 (FIR_OP_CW0 << FIR_OP0_SHIFT) | +			 (FIR_OP_CA  << FIR_OP1_SHIFT) | +			 (FIR_OP_PA  << FIR_OP2_SHIFT) | +			 (FIR_OP_RBW << FIR_OP3_SHIFT)); +	} + +	out_be32(®s->fbcr, 0); +	clrsetbits_be32(®s->bank[0].br, BR_DECC, BR_DECC_CHK_GEN); + +	while (pos < uboot_size) { +		int i = 0; +		out_be32(®s->fbar, offs >> block_shift); + +		do { +			int j; +			unsigned int page_offs = (offs & 
(block_size - 1)) << 1; + +			out_be32(®s->ltesr, ~0); +			out_be32(®s->lteatr, 0); +			out_be32(®s->fpar, page_offs); +			out_be32(®s->fmr, fmr); +			out_be32(®s->lsor, 0); +			nand_wait(); + +			page_offs %= WINDOW_SIZE; + +			/* +			 * If either of the first two pages are marked bad, +			 * continue to the next block. +			 */ +			if (i++ < 2 && buf[page_offs + bad_marker] != 0xff) { +				puts("skipping\n"); +				offs = (offs + block_size) & ~(block_size - 1); +				pos &= ~(block_size - 1); +				break; +			} + +			for (j = 0; j < page_size; j++) +				dst[pos + j] = buf[page_offs + j]; + +			pos += page_size; +			offs += page_size; +		} while ((offs & (block_size - 1)) && (pos < uboot_size)); +	} + +	return 0; +} + +/* + * Defines a static function nand_load_image() here, because non-static makes + * the code too large for certain SPLs(minimal SPL, maximum size <= 4Kbytes) + */ +#ifndef CONFIG_TPL_BUILD +#define nand_spl_load_image(offs, uboot_size, vdst) \ +	nand_load_image(offs, uboot_size, vdst) +#endif + +/* + * The main entry for NAND booting. It's necessary that SDRAM is already + * configured and available since this code loads the main U-Boot image + * from NAND into SDRAM and starts it from there. + */ +void nand_boot(void) +{ +	__attribute__((noreturn)) void (*uboot)(void); +	/* +	 * Load U-Boot image from NAND into RAM +	 */ +	nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS, +			    CONFIG_SYS_NAND_U_BOOT_SIZE, +			    (void *)CONFIG_SYS_NAND_U_BOOT_DST); + +#ifdef CONFIG_NAND_ENV_DST +	nand_spl_load_image(CONFIG_ENV_OFFSET, CONFIG_ENV_SIZE, +			    (void *)CONFIG_NAND_ENV_DST); + +#ifdef CONFIG_ENV_OFFSET_REDUND +	nand_spl_load_image(CONFIG_ENV_OFFSET_REDUND, CONFIG_ENV_SIZE, +			    (void *)CONFIG_NAND_ENV_DST + CONFIG_ENV_SIZE); +#endif +#endif + +#ifdef CONFIG_SPL_FLUSH_IMAGE +	/* +	 * Clean d-cache and invalidate i-cache, to +	 * make sure that no stale data is executed. 
+	 */ +	flush_cache(CONFIG_SYS_NAND_U_BOOT_DST, CONFIG_SYS_NAND_U_BOOT_SIZE); +#endif + +	puts("transfering control\n"); +	/* +	 * Jump to U-Boot image +	 */ +	uboot = (void *)CONFIG_SYS_NAND_U_BOOT_START; +	(*uboot)(); +} diff --git a/roms/u-boot/drivers/mtd/nand/fsl_ifc_nand.c b/roms/u-boot/drivers/mtd/nand/fsl_ifc_nand.c new file mode 100644 index 00000000..be5a16a1 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/fsl_ifc_nand.c @@ -0,0 +1,1039 @@ +/* Integrated Flash Controller NAND Machine Driver + * + * Copyright (c) 2012 Freescale Semiconductor, Inc + * + * Authors: Dipen Dudhat <Dipen.Dudhat@freescale.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <malloc.h> +#include <nand.h> + +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/nand_ecc.h> + +#include <asm/io.h> +#include <asm/errno.h> +#include <fsl_ifc.h> + +#define FSL_IFC_V1_1_0	0x01010000 +#define MAX_BANKS	4 +#define ERR_BYTE	0xFF /* Value returned for read bytes +				when read failed */ +#define IFC_TIMEOUT_MSECS 10 /* Maximum number of mSecs to wait for IFC +				NAND Machine */ + +struct fsl_ifc_ctrl; + +/* mtd information per set */ +struct fsl_ifc_mtd { +	struct nand_chip chip; +	struct fsl_ifc_ctrl *ctrl; + +	struct device *dev; +	int bank;               /* Chip select bank number                */ +	unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */ +	u8 __iomem *vbase;      /* Chip select base virtual address       */ +}; + +/* overview of the fsl ifc controller */ +struct fsl_ifc_ctrl { +	struct nand_hw_control controller; +	struct fsl_ifc_mtd *chips[MAX_BANKS]; + +	/* device info */ +	struct fsl_ifc *regs; +	uint8_t __iomem *addr;   /* Address of assigned IFC buffer        */ +	unsigned int cs_nand;    /* On which chipsel NAND is connected	  */ +	unsigned int page;       /* Last page written to / read from      */ +	unsigned int read_bytes; /* Number of bytes read during command   */ +	unsigned int column;     /* Saved 
column from SEQIN               */ +	unsigned int index;      /* Pointer to next byte to 'read'        */ +	unsigned int status;     /* status read from NEESR after last op  */ +	unsigned int oob;        /* Non zero if operating on OOB data     */ +	unsigned int eccread;    /* Non zero for a full-page ECC read     */ +}; + +static struct fsl_ifc_ctrl *ifc_ctrl; + +/* 512-byte page with 4-bit ECC, 8-bit */ +static struct nand_ecclayout oob_512_8bit_ecc4 = { +	.eccbytes = 8, +	.eccpos = {8, 9, 10, 11, 12, 13, 14, 15}, +	.oobfree = { {0, 5}, {6, 2} }, +}; + +/* 512-byte page with 4-bit ECC, 16-bit */ +static struct nand_ecclayout oob_512_16bit_ecc4 = { +	.eccbytes = 8, +	.eccpos = {8, 9, 10, 11, 12, 13, 14, 15}, +	.oobfree = { {2, 6}, }, +}; + +/* 2048-byte page size with 4-bit ECC */ +static struct nand_ecclayout oob_2048_ecc4 = { +	.eccbytes = 32, +	.eccpos = { +		8, 9, 10, 11, 12, 13, 14, 15, +		16, 17, 18, 19, 20, 21, 22, 23, +		24, 25, 26, 27, 28, 29, 30, 31, +		32, 33, 34, 35, 36, 37, 38, 39, +	}, +	.oobfree = { {2, 6}, {40, 24} }, +}; + +/* 4096-byte page size with 4-bit ECC */ +static struct nand_ecclayout oob_4096_ecc4 = { +	.eccbytes = 64, +	.eccpos = { +		8, 9, 10, 11, 12, 13, 14, 15, +		16, 17, 18, 19, 20, 21, 22, 23, +		24, 25, 26, 27, 28, 29, 30, 31, +		32, 33, 34, 35, 36, 37, 38, 39, +		40, 41, 42, 43, 44, 45, 46, 47, +		48, 49, 50, 51, 52, 53, 54, 55, +		56, 57, 58, 59, 60, 61, 62, 63, +		64, 65, 66, 67, 68, 69, 70, 71, +	}, +	.oobfree = { {2, 6}, {72, 56} }, +}; + +/* 4096-byte page size with 8-bit ECC -- requires 218-byte OOB */ +static struct nand_ecclayout oob_4096_ecc8 = { +	.eccbytes = 128, +	.eccpos = { +		8, 9, 10, 11, 12, 13, 14, 15, +		16, 17, 18, 19, 20, 21, 22, 23, +		24, 25, 26, 27, 28, 29, 30, 31, +		32, 33, 34, 35, 36, 37, 38, 39, +		40, 41, 42, 43, 44, 45, 46, 47, +		48, 49, 50, 51, 52, 53, 54, 55, +		56, 57, 58, 59, 60, 61, 62, 63, +		64, 65, 66, 67, 68, 69, 70, 71, +		72, 73, 74, 75, 76, 77, 78, 79, +		80, 81, 82, 83, 84, 85, 86, 87, 
+		88, 89, 90, 91, 92, 93, 94, 95, +		96, 97, 98, 99, 100, 101, 102, 103, +		104, 105, 106, 107, 108, 109, 110, 111, +		112, 113, 114, 115, 116, 117, 118, 119, +		120, 121, 122, 123, 124, 125, 126, 127, +		128, 129, 130, 131, 132, 133, 134, 135, +	}, +	.oobfree = { {2, 6}, {136, 82} }, +}; + +/* 8192-byte page size with 4-bit ECC */ +static struct nand_ecclayout oob_8192_ecc4 = { +	.eccbytes = 128, +	.eccpos = { +		8, 9, 10, 11, 12, 13, 14, 15, +		16, 17, 18, 19, 20, 21, 22, 23, +		24, 25, 26, 27, 28, 29, 30, 31, +		32, 33, 34, 35, 36, 37, 38, 39, +		40, 41, 42, 43, 44, 45, 46, 47, +		48, 49, 50, 51, 52, 53, 54, 55, +		56, 57, 58, 59, 60, 61, 62, 63, +		64, 65, 66, 67, 68, 69, 70, 71, +		72, 73, 74, 75, 76, 77, 78, 79, +		80, 81, 82, 83, 84, 85, 86, 87, +		88, 89, 90, 91, 92, 93, 94, 95, +		96, 97, 98, 99, 100, 101, 102, 103, +		104, 105, 106, 107, 108, 109, 110, 111, +		112, 113, 114, 115, 116, 117, 118, 119, +		120, 121, 122, 123, 124, 125, 126, 127, +		128, 129, 130, 131, 132, 133, 134, 135, +	}, +	.oobfree = { {2, 6}, {136, 208} }, +}; + +/* 8192-byte page size with 8-bit ECC -- requires 218-byte OOB */ +static struct nand_ecclayout oob_8192_ecc8 = { +	.eccbytes = 256, +	.eccpos = { +		8, 9, 10, 11, 12, 13, 14, 15, +		16, 17, 18, 19, 20, 21, 22, 23, +		24, 25, 26, 27, 28, 29, 30, 31, +		32, 33, 34, 35, 36, 37, 38, 39, +		40, 41, 42, 43, 44, 45, 46, 47, +		48, 49, 50, 51, 52, 53, 54, 55, +		56, 57, 58, 59, 60, 61, 62, 63, +		64, 65, 66, 67, 68, 69, 70, 71, +		72, 73, 74, 75, 76, 77, 78, 79, +		80, 81, 82, 83, 84, 85, 86, 87, +		88, 89, 90, 91, 92, 93, 94, 95, +		96, 97, 98, 99, 100, 101, 102, 103, +		104, 105, 106, 107, 108, 109, 110, 111, +		112, 113, 114, 115, 116, 117, 118, 119, +		120, 121, 122, 123, 124, 125, 126, 127, +		128, 129, 130, 131, 132, 133, 134, 135, +		136, 137, 138, 139, 140, 141, 142, 143, +		144, 145, 146, 147, 148, 149, 150, 151, +		152, 153, 154, 155, 156, 157, 158, 159, +		160, 161, 162, 163, 164, 165, 166, 167, +		168, 169, 170, 171, 172, 
173, 174, 175, +		176, 177, 178, 179, 180, 181, 182, 183, +		184, 185, 186, 187, 188, 189, 190, 191, +		192, 193, 194, 195, 196, 197, 198, 199, +		200, 201, 202, 203, 204, 205, 206, 207, +		208, 209, 210, 211, 212, 213, 214, 215, +		216, 217, 218, 219, 220, 221, 222, 223, +		224, 225, 226, 227, 228, 229, 230, 231, +		232, 233, 234, 235, 236, 237, 238, 239, +		240, 241, 242, 243, 244, 245, 246, 247, +		248, 249, 250, 251, 252, 253, 254, 255, +		256, 257, 258, 259, 260, 261, 262, 263, +	}, +	.oobfree = { {2, 6}, {264, 80} }, +}; + +/* + * Generic flash bbt descriptors + */ +static u8 bbt_pattern[] = {'B', 'b', 't', '0' }; +static u8 mirror_pattern[] = {'1', 't', 'b', 'B' }; + +static struct nand_bbt_descr bbt_main_descr = { +	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | +		   NAND_BBT_2BIT | NAND_BBT_VERSION, +	.offs =	2, /* 0 on 8-bit small page */ +	.len = 4, +	.veroffs = 6, +	.maxblocks = 4, +	.pattern = bbt_pattern, +}; + +static struct nand_bbt_descr bbt_mirror_descr = { +	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | +		   NAND_BBT_2BIT | NAND_BBT_VERSION, +	.offs =	2, /* 0 on 8-bit small page */ +	.len = 4, +	.veroffs = 6, +	.maxblocks = 4, +	.pattern = mirror_pattern, +}; + +/* + * Set up the IFC hardware block and page address fields, and the ifc nand + * structure addr field to point to the correct IFC buffer in memory + */ +static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_ifc_mtd *priv = chip->priv; +	struct fsl_ifc_ctrl *ctrl = priv->ctrl; +	struct fsl_ifc *ifc = ctrl->regs; +	int buf_num; + +	ctrl->page = page_addr; + +	/* Program ROW0/COL0 */ +	ifc_out32(&ifc->ifc_nand.row0, page_addr); +	ifc_out32(&ifc->ifc_nand.col0, (oob ? 
IFC_NAND_COL_MS : 0) | column); + +	buf_num = page_addr & priv->bufnum_mask; + +	ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2); +	ctrl->index = column; + +	/* for OOB data point to the second half of the buffer */ +	if (oob) +		ctrl->index += mtd->writesize; +} + +static int is_blank(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl, +		    unsigned int bufnum) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_ifc_mtd *priv = chip->priv; +	u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2); +	u32 __iomem *main = (u32 *)addr; +	u8 __iomem *oob = addr + mtd->writesize; +	int i; + +	for (i = 0; i < mtd->writesize / 4; i++) { +		if (__raw_readl(&main[i]) != 0xffffffff) +			return 0; +	} + +	for (i = 0; i < chip->ecc.layout->eccbytes; i++) { +		int pos = chip->ecc.layout->eccpos[i]; + +		if (__raw_readb(&oob[pos]) != 0xff) +			return 0; +	} + +	return 1; +} + +/* returns nonzero if entire page is blank */ +static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl, +			  u32 *eccstat, unsigned int bufnum) +{ +	u32 reg = eccstat[bufnum / 4]; +	int errors; + +	errors = (reg >> ((3 - bufnum % 4) * 8)) & 15; + +	return errors; +} + +/* + * execute IFC NAND command and wait for it to complete + */ +static int fsl_ifc_run_command(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_ifc_mtd *priv = chip->priv; +	struct fsl_ifc_ctrl *ctrl = priv->ctrl; +	struct fsl_ifc *ifc = ctrl->regs; +	long long end_tick; +	u32 eccstat[4]; +	int i; + +	/* set the chip select for NAND Transaction */ +	ifc_out32(&ifc->ifc_nand.nand_csel, ifc_ctrl->cs_nand); + +	/* start read/write seq */ +	ifc_out32(&ifc->ifc_nand.nandseq_strt, +		  IFC_NAND_SEQ_STRT_FIR_STRT); + +	/* wait for NAND Machine complete flag or timeout */ +	end_tick = usec2ticks(IFC_TIMEOUT_MSECS * 1000) + get_ticks(); + +	while (end_tick > get_ticks()) { +		ctrl->status = ifc_in32(&ifc->ifc_nand.nand_evter_stat); + +		if (ctrl->status & IFC_NAND_EVTER_STAT_OPC) +			
break; +	} + +	ifc_out32(&ifc->ifc_nand.nand_evter_stat, ctrl->status); + +	if (ctrl->status & IFC_NAND_EVTER_STAT_FTOER) +		printf("%s: Flash Time Out Error\n", __func__); +	if (ctrl->status & IFC_NAND_EVTER_STAT_WPER) +		printf("%s: Write Protect Error\n", __func__); + +	if (ctrl->eccread) { +		int errors; +		int bufnum = ctrl->page & priv->bufnum_mask; +		int sector = bufnum * chip->ecc.steps; +		int sector_end = sector + chip->ecc.steps - 1; + +		for (i = sector / 4; i <= sector_end / 4; i++) +			eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]); + +		for (i = sector; i <= sector_end; i++) { +			errors = check_read_ecc(mtd, ctrl, eccstat, i); + +			if (errors == 15) { +				/* +				 * Uncorrectable error. +				 * OK only if the whole page is blank. +				 * +				 * We disable ECCER reporting due to erratum +				 * IFC-A002770 -- so report it now if we +				 * see an uncorrectable error in ECCSTAT. +				 */ +				if (!is_blank(mtd, ctrl, bufnum)) +					ctrl->status |= +						IFC_NAND_EVTER_STAT_ECCER; +				break; +			} + +			mtd->ecc_stats.corrected += errors; +		} + +		ctrl->eccread = 0; +	} + +	/* returns 0 on success otherwise non-zero) */ +	return ctrl->status == IFC_NAND_EVTER_STAT_OPC ? 
0 : -EIO; +} + +static void fsl_ifc_do_read(struct nand_chip *chip, +			    int oob, +			    struct mtd_info *mtd) +{ +	struct fsl_ifc_mtd *priv = chip->priv; +	struct fsl_ifc_ctrl *ctrl = priv->ctrl; +	struct fsl_ifc *ifc = ctrl->regs; + +	/* Program FIR/IFC_NAND_FCR0 for Small/Large page */ +	if (mtd->writesize > 512) { +		ifc_out32(&ifc->ifc_nand.nand_fir0, +			  (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | +			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | +			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT)); +		ifc_out32(&ifc->ifc_nand.nand_fir1, 0x0); + +		ifc_out32(&ifc->ifc_nand.nand_fcr0, +			  (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | +			  (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT)); +	} else { +		ifc_out32(&ifc->ifc_nand.nand_fir0, +			  (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +			  (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) | +			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT)); + +		if (oob) +			ifc_out32(&ifc->ifc_nand.nand_fcr0, +				  NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT); +		else +			ifc_out32(&ifc->ifc_nand.nand_fcr0, +				  NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT); +	} +} + +/* cmdfunc send commands to the IFC NAND Machine */ +static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, +			     int column, int page_addr) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_ifc_mtd *priv = chip->priv; +	struct fsl_ifc_ctrl *ctrl = priv->ctrl; +	struct fsl_ifc *ifc = ctrl->regs; + +	/* clear the read buffer */ +	ctrl->read_bytes = 0; +	if (command != NAND_CMD_PAGEPROG) +		ctrl->index = 0; + +	switch (command) { +	/* READ0 read the entire buffer to use hardware ECC. 
*/ +	case NAND_CMD_READ0: { +		ifc_out32(&ifc->ifc_nand.nand_fbcr, 0); +		set_addr(mtd, 0, page_addr, 0); + +		ctrl->read_bytes = mtd->writesize + mtd->oobsize; +		ctrl->index += column; + +		if (chip->ecc.mode == NAND_ECC_HW) +			ctrl->eccread = 1; + +		fsl_ifc_do_read(chip, 0, mtd); +		fsl_ifc_run_command(mtd); +		return; +	} + +	/* READOOB reads only the OOB because no ECC is performed. */ +	case NAND_CMD_READOOB: +		ifc_out32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column); +		set_addr(mtd, column, page_addr, 1); + +		ctrl->read_bytes = mtd->writesize + mtd->oobsize; + +		fsl_ifc_do_read(chip, 1, mtd); +		fsl_ifc_run_command(mtd); + +		return; + +	/* READID must read all possible bytes while CEB is active */ +	case NAND_CMD_READID: +	case NAND_CMD_PARAM: { +		int timing = IFC_FIR_OP_RB; +		if (command == NAND_CMD_PARAM) +			timing = IFC_FIR_OP_RBCD; + +		ifc_out32(&ifc->ifc_nand.nand_fir0, +			  (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +			  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) | +			  (timing << IFC_NAND_FIR0_OP2_SHIFT)); +		ifc_out32(&ifc->ifc_nand.nand_fcr0, +			  command << IFC_NAND_FCR0_CMD0_SHIFT); +		ifc_out32(&ifc->ifc_nand.row3, column); + +		/* +		 * although currently it's 8 bytes for READID, we always read +		 * the maximum 256 bytes(for PARAM) +		 */ +		ifc_out32(&ifc->ifc_nand.nand_fbcr, 256); +		ctrl->read_bytes = 256; + +		set_addr(mtd, 0, 0, 0); +		fsl_ifc_run_command(mtd); +		return; +	} + +	/* ERASE1 stores the block and page address */ +	case NAND_CMD_ERASE1: +		set_addr(mtd, 0, page_addr, 0); +		return; + +	/* ERASE2 uses the block and page address from ERASE1 */ +	case NAND_CMD_ERASE2: +		ifc_out32(&ifc->ifc_nand.nand_fir0, +			  (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | +			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT)); + +		ifc_out32(&ifc->ifc_nand.nand_fcr0, +			  (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | +			  (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT)); + 
+		ifc_out32(&ifc->ifc_nand.nand_fbcr, 0); +		ctrl->read_bytes = 0; +		fsl_ifc_run_command(mtd); +		return; + +	/* SEQIN sets up the addr buffer and all registers except the length */ +	case NAND_CMD_SEQIN: { +		u32 nand_fcr0; +		ctrl->column = column; +		ctrl->oob = 0; + +		if (mtd->writesize > 512) { +			nand_fcr0 = +				(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) | +				(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) | +				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT); + +			ifc_out32(&ifc->ifc_nand.nand_fir0, +				  (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +				  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +				  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | +				  (IFC_FIR_OP_WBCD  << +						IFC_NAND_FIR0_OP3_SHIFT) | +				  (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT)); +			ifc_out32(&ifc->ifc_nand.nand_fir1, +				  (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) | +				  (IFC_FIR_OP_RDSTAT << +					IFC_NAND_FIR1_OP6_SHIFT) | +				  (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT)); +		} else { +			nand_fcr0 = ((NAND_CMD_PAGEPROG << +					IFC_NAND_FCR0_CMD1_SHIFT) | +				    (NAND_CMD_SEQIN << +					IFC_NAND_FCR0_CMD2_SHIFT) | +				    (NAND_CMD_STATUS << +					IFC_NAND_FCR0_CMD3_SHIFT)); + +			ifc_out32(&ifc->ifc_nand.nand_fir0, +				  (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +				  (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) | +				  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) | +				  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) | +				  (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT)); +			ifc_out32(&ifc->ifc_nand.nand_fir1, +				  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) | +				  (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) | +				  (IFC_FIR_OP_RDSTAT << +					IFC_NAND_FIR1_OP7_SHIFT) | +				  (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT)); + +			if (column >= mtd->writesize) +				nand_fcr0 |= +				NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT; +			else +				nand_fcr0 |= +				NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT; +		} + +		if (column >= 
mtd->writesize) { +			/* OOB area --> READOOB */ +			column -= mtd->writesize; +			ctrl->oob = 1; +		} +		ifc_out32(&ifc->ifc_nand.nand_fcr0, nand_fcr0); +		set_addr(mtd, column, page_addr, ctrl->oob); +		return; +	} + +	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */ +	case NAND_CMD_PAGEPROG: +		if (ctrl->oob) +			ifc_out32(&ifc->ifc_nand.nand_fbcr, +				  ctrl->index - ctrl->column); +		else +			ifc_out32(&ifc->ifc_nand.nand_fbcr, 0); + +		fsl_ifc_run_command(mtd); +		return; + +	case NAND_CMD_STATUS: +		ifc_out32(&ifc->ifc_nand.nand_fir0, +			  (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +			  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT)); +		ifc_out32(&ifc->ifc_nand.nand_fcr0, +			  NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT); +		ifc_out32(&ifc->ifc_nand.nand_fbcr, 1); +		set_addr(mtd, 0, 0, 0); +		ctrl->read_bytes = 1; + +		fsl_ifc_run_command(mtd); + +		/* Chip sometimes reporting write protect even when it's not */ +		out_8(ctrl->addr, in_8(ctrl->addr) | NAND_STATUS_WP); +		return; + +	case NAND_CMD_RESET: +		ifc_out32(&ifc->ifc_nand.nand_fir0, +			  IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT); +		ifc_out32(&ifc->ifc_nand.nand_fcr0, +			  NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT); +		fsl_ifc_run_command(mtd); +		return; + +	default: +		printf("%s: error, unsupported command 0x%x.\n", +			__func__, command); +	} +} + +/* + * Write buf to the IFC NAND Controller Data Buffer + */ +static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_ifc_mtd *priv = chip->priv; +	struct fsl_ifc_ctrl *ctrl = priv->ctrl; +	unsigned int bufsize = mtd->writesize + mtd->oobsize; + +	if (len <= 0) { +		printf("%s of %d bytes", __func__, len); +		ctrl->status = 0; +		return; +	} + +	if ((unsigned int)len > bufsize - ctrl->index) { +		printf("%s beyond end of buffer " +		       "(%d requested, %u available)\n", +			__func__, len, bufsize - ctrl->index); +		len = bufsize - ctrl->index; +	
} + +	memcpy_toio(&ctrl->addr[ctrl->index], buf, len); +	ctrl->index += len; +} + +/* + * read a byte from either the IFC hardware buffer if it has any data left + * otherwise issue a command to read a single byte. + */ +static u8 fsl_ifc_read_byte(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_ifc_mtd *priv = chip->priv; +	struct fsl_ifc_ctrl *ctrl = priv->ctrl; + +	/* If there are still bytes in the IFC buffer, then use the +	 * next byte. */ +	if (ctrl->index < ctrl->read_bytes) +		return in_8(&ctrl->addr[ctrl->index++]); + +	printf("%s beyond end of buffer\n", __func__); +	return ERR_BYTE; +} + +/* + * Read two bytes from the IFC hardware buffer + * read function for 16-bit buswith + */ +static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_ifc_mtd *priv = chip->priv; +	struct fsl_ifc_ctrl *ctrl = priv->ctrl; +	uint16_t data; + +	/* +	 * If there are still bytes in the IFC buffer, then use the +	 * next byte. 
+	 */ +	if (ctrl->index < ctrl->read_bytes) { +		data = ifc_in16((uint16_t *)&ctrl-> +				 addr[ctrl->index]); +		ctrl->index += 2; +		return (uint8_t)data; +	} + +	printf("%s beyond end of buffer\n", __func__); +	return ERR_BYTE; +} + +/* + * Read from the IFC Controller Data Buffer + */ +static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_ifc_mtd *priv = chip->priv; +	struct fsl_ifc_ctrl *ctrl = priv->ctrl; +	int avail; + +	if (len < 0) +		return; + +	avail = min((unsigned int)len, ctrl->read_bytes - ctrl->index); +	memcpy_fromio(buf, &ctrl->addr[ctrl->index], avail); +	ctrl->index += avail; + +	if (len > avail) +		printf("%s beyond end of buffer " +		       "(%d requested, %d available)\n", +		       __func__, len, avail); +} + +/* + * Verify buffer against the IFC Controller Data Buffer + */ +static int fsl_ifc_verify_buf(struct mtd_info *mtd, +			       const u_char *buf, int len) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_ifc_mtd *priv = chip->priv; +	struct fsl_ifc_ctrl *ctrl = priv->ctrl; +	int i; + +	if (len < 0) { +		printf("%s of %d bytes", __func__, len); +		return -EINVAL; +	} + +	if ((unsigned int)len > ctrl->read_bytes - ctrl->index) { +		printf("%s beyond end of buffer " +		       "(%d requested, %u available)\n", +		       __func__, len, ctrl->read_bytes - ctrl->index); + +		ctrl->index = ctrl->read_bytes; +		return -EINVAL; +	} + +	for (i = 0; i < len; i++) +		if (in_8(&ctrl->addr[ctrl->index + i]) != buf[i]) +			break; + +	ctrl->index += len; +	return i == len && ctrl->status == IFC_NAND_EVTER_STAT_OPC ? 0 : -EIO; +} + +/* This function is called after Program and Erase Operations to + * check for success or failure. 
+ */ +static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) +{ +	struct fsl_ifc_mtd *priv = chip->priv; +	struct fsl_ifc_ctrl *ctrl = priv->ctrl; +	struct fsl_ifc *ifc = ctrl->regs; +	u32 nand_fsr; + +	if (ctrl->status != IFC_NAND_EVTER_STAT_OPC) +		return NAND_STATUS_FAIL; + +	/* Use READ_STATUS command, but wait for the device to be ready */ +	ifc_out32(&ifc->ifc_nand.nand_fir0, +		  (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +		  (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT)); +	ifc_out32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS << +		  IFC_NAND_FCR0_CMD0_SHIFT); +	ifc_out32(&ifc->ifc_nand.nand_fbcr, 1); +	set_addr(mtd, 0, 0, 0); +	ctrl->read_bytes = 1; + +	fsl_ifc_run_command(mtd); + +	if (ctrl->status != IFC_NAND_EVTER_STAT_OPC) +		return NAND_STATUS_FAIL; + +	nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); + +	/* Chip sometimes reporting write protect even when it's not */ +	nand_fsr = nand_fsr | NAND_STATUS_WP; +	return nand_fsr; +} + +static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip, +			     uint8_t *buf, int oob_required, int page) +{ +	struct fsl_ifc_mtd *priv = chip->priv; +	struct fsl_ifc_ctrl *ctrl = priv->ctrl; + +	fsl_ifc_read_buf(mtd, buf, mtd->writesize); +	fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize); + +	if (ctrl->status != IFC_NAND_EVTER_STAT_OPC) +		mtd->ecc_stats.failed++; + +	return 0; +} + +/* ECC will be calculated automatically, and errors will be detected in + * waitfunc. 
+ */ +static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip, +			       const uint8_t *buf, int oob_required) +{ +	fsl_ifc_write_buf(mtd, buf, mtd->writesize); +	fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize); + +	return 0; +} + +static void fsl_ifc_ctrl_init(void) +{ +	ifc_ctrl = kzalloc(sizeof(*ifc_ctrl), GFP_KERNEL); +	if (!ifc_ctrl) +		return; + +	ifc_ctrl->regs = IFC_BASE_ADDR; + +	/* clear event registers */ +	ifc_out32(&ifc_ctrl->regs->ifc_nand.nand_evter_stat, ~0U); +	ifc_out32(&ifc_ctrl->regs->ifc_nand.pgrdcmpl_evt_stat, ~0U); + +	/* Enable error and event for any detected errors */ +	ifc_out32(&ifc_ctrl->regs->ifc_nand.nand_evter_en, +		  IFC_NAND_EVTER_EN_OPC_EN | +		  IFC_NAND_EVTER_EN_PGRDCMPL_EN | +		  IFC_NAND_EVTER_EN_FTOER_EN | +		  IFC_NAND_EVTER_EN_WPER_EN); + +	ifc_out32(&ifc_ctrl->regs->ifc_nand.ncfgr, 0x0); +} + +static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip) +{ +} + +static void fsl_ifc_sram_init(void) +{ +	struct fsl_ifc *ifc = ifc_ctrl->regs; +	uint32_t cs = 0, csor = 0, csor_8k = 0, csor_ext = 0; +	long long end_tick; + +	cs = ifc_ctrl->cs_nand >> IFC_NAND_CSEL_SHIFT; + +	/* Save CSOR and CSOR_ext */ +	csor = ifc_in32(&ifc_ctrl->regs->csor_cs[cs].csor); +	csor_ext = ifc_in32(&ifc_ctrl->regs->csor_cs[cs].csor_ext); + +	/* chage PageSize 8K and SpareSize 1K*/ +	csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000; +	ifc_out32(&ifc_ctrl->regs->csor_cs[cs].csor, csor_8k); +	ifc_out32(&ifc_ctrl->regs->csor_cs[cs].csor_ext, 0x0000400); + +	/* READID */ +	ifc_out32(&ifc->ifc_nand.nand_fir0, +		  (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +		  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) | +		  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT)); +	ifc_out32(&ifc->ifc_nand.nand_fcr0, +		  NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT); +	ifc_out32(&ifc->ifc_nand.row3, 0x0); + +	ifc_out32(&ifc->ifc_nand.nand_fbcr, 0x0); + +	/* Program ROW0/COL0 */ +	ifc_out32(&ifc->ifc_nand.row0, 0x0); +	
ifc_out32(&ifc->ifc_nand.col0, 0x0); + +	/* set the chip select for NAND Transaction */ +	ifc_out32(&ifc->ifc_nand.nand_csel, ifc_ctrl->cs_nand); + +	/* start read seq */ +	ifc_out32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT); + +	/* wait for NAND Machine complete flag or timeout */ +	end_tick = usec2ticks(IFC_TIMEOUT_MSECS * 1000) + get_ticks(); + +	while (end_tick > get_ticks()) { +		ifc_ctrl->status = ifc_in32(&ifc->ifc_nand.nand_evter_stat); + +		if (ifc_ctrl->status & IFC_NAND_EVTER_STAT_OPC) +			break; +	} + +	ifc_out32(&ifc->ifc_nand.nand_evter_stat, ifc_ctrl->status); + +	/* Restore CSOR and CSOR_ext */ +	ifc_out32(&ifc_ctrl->regs->csor_cs[cs].csor, csor); +	ifc_out32(&ifc_ctrl->regs->csor_cs[cs].csor_ext, csor_ext); +} + +static int fsl_ifc_chip_init(int devnum, u8 *addr) +{ +	struct mtd_info *mtd = &nand_info[devnum]; +	struct nand_chip *nand; +	struct fsl_ifc_mtd *priv; +	struct nand_ecclayout *layout; +	uint32_t cspr = 0, csor = 0, ver = 0; +	int ret; + +	if (!ifc_ctrl) { +		fsl_ifc_ctrl_init(); +		if (!ifc_ctrl) +			return -1; +	} + +	priv = kzalloc(sizeof(*priv), GFP_KERNEL); +	if (!priv) +		return -ENOMEM; + +	priv->ctrl = ifc_ctrl; +	priv->vbase = addr; + +	/* Find which chip select it is connected to. 
+	 */ +	for (priv->bank = 0; priv->bank < MAX_BANKS; priv->bank++) { +		phys_addr_t phys_addr = virt_to_phys(addr); + +		cspr = ifc_in32(&ifc_ctrl->regs->cspr_cs[priv->bank].cspr); +		csor = ifc_in32(&ifc_ctrl->regs->csor_cs[priv->bank].csor); + +		if ((cspr & CSPR_V) && (cspr & CSPR_MSEL) == CSPR_MSEL_NAND && +		    (cspr & CSPR_BA) == CSPR_PHYS_ADDR(phys_addr)) { +			ifc_ctrl->cs_nand = priv->bank << IFC_NAND_CSEL_SHIFT; +			break; +		} +	} + +	if (priv->bank >= MAX_BANKS) { +		printf("%s: address did not match any " +		       "chip selects\n", __func__); +		kfree(priv); +		return -ENODEV; +	} + +	nand = &priv->chip; +	mtd->priv = nand; + +	ifc_ctrl->chips[priv->bank] = priv; + +	/* fill in nand_chip structure */ +	/* set up function call table */ + +	nand->write_buf = fsl_ifc_write_buf; +	nand->read_buf = fsl_ifc_read_buf; +	nand->verify_buf = fsl_ifc_verify_buf; +	nand->select_chip = fsl_ifc_select_chip; +	nand->cmdfunc = fsl_ifc_cmdfunc; +	nand->waitfunc = fsl_ifc_wait; + +	/* set up nand options */ +	nand->bbt_td = &bbt_main_descr; +	nand->bbt_md = &bbt_mirror_descr; + +	/* set up nand options */ +	nand->options = NAND_NO_SUBPAGE_WRITE; +	nand->bbt_options = NAND_BBT_USE_FLASH; + +	if (cspr & CSPR_PORT_SIZE_16) { +		nand->read_byte = fsl_ifc_read_byte16; +		nand->options |= NAND_BUSWIDTH_16; +	} else { +		nand->read_byte = fsl_ifc_read_byte; +	} + +	nand->controller = &ifc_ctrl->controller; +	nand->priv = priv; + +	nand->ecc.read_page = fsl_ifc_read_page; +	nand->ecc.write_page = fsl_ifc_write_page; + +	/* Hardware generates ECC per 512 Bytes */ +	nand->ecc.size = 512; +	nand->ecc.bytes = 8; + +	switch (csor & CSOR_NAND_PGS_MASK) { +	case CSOR_NAND_PGS_512: +		if (nand->options & NAND_BUSWIDTH_16) { +			layout = &oob_512_16bit_ecc4; +		} else { +			layout = &oob_512_8bit_ecc4; + +			/* Avoid conflict with bad block marker */ +			bbt_main_descr.offs = 0; +			bbt_mirror_descr.offs = 0; +		} + +		nand->ecc.strength = 4; +		priv->bufnum_mask = 15; +		break; + +	
case CSOR_NAND_PGS_2K: +		layout = &oob_2048_ecc4; +		nand->ecc.strength = 4; +		priv->bufnum_mask = 3; +		break; + +	case CSOR_NAND_PGS_4K: +		if ((csor & CSOR_NAND_ECC_MODE_MASK) == +		    CSOR_NAND_ECC_MODE_4) { +			layout = &oob_4096_ecc4; +			nand->ecc.strength = 4; +		} else { +			layout = &oob_4096_ecc8; +			nand->ecc.strength = 8; +			nand->ecc.bytes = 16; +		} + +		priv->bufnum_mask = 1; +		break; + +	case CSOR_NAND_PGS_8K: +		if ((csor & CSOR_NAND_ECC_MODE_MASK) == +		    CSOR_NAND_ECC_MODE_4) { +			layout = &oob_8192_ecc4; +			nand->ecc.strength = 4; +		} else { +			layout = &oob_8192_ecc8; +			nand->ecc.strength = 8; +			nand->ecc.bytes = 16; +		} + +		priv->bufnum_mask = 0; +		break; + + +	default: +		printf("ifc nand: bad csor %#x: bad page size\n", csor); +		return -ENODEV; +	} + +	/* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */ +	if (csor & CSOR_NAND_ECC_DEC_EN) { +		nand->ecc.mode = NAND_ECC_HW; +		nand->ecc.layout = layout; +	} else { +		nand->ecc.mode = NAND_ECC_SOFT; +	} + +	ver = ifc_in32(&ifc_ctrl->regs->ifc_rev); +	if (ver == FSL_IFC_V1_1_0) +		fsl_ifc_sram_init(); + +	ret = nand_scan_ident(mtd, 1, NULL); +	if (ret) +		return ret; + +	ret = nand_scan_tail(mtd); +	if (ret) +		return ret; + +	ret = nand_register(devnum); +	if (ret) +		return ret; +	return 0; +} + +#ifndef CONFIG_SYS_NAND_BASE_LIST +#define CONFIG_SYS_NAND_BASE_LIST { CONFIG_SYS_NAND_BASE } +#endif + +static unsigned long base_address[CONFIG_SYS_MAX_NAND_DEVICE] = +	CONFIG_SYS_NAND_BASE_LIST; + +void board_nand_init(void) +{ +	int i; + +	for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++) +		fsl_ifc_chip_init(i, (u8 *)base_address[i]); +} diff --git a/roms/u-boot/drivers/mtd/nand/fsl_ifc_spl.c b/roms/u-boot/drivers/mtd/nand/fsl_ifc_spl.c new file mode 100644 index 00000000..51007728 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/fsl_ifc_spl.c @@ -0,0 +1,252 @@ +/* + * NAND boot for Freescale Integrated Flash Controller, NAND FCM + * + * Copyright 2011 Freescale 
Semiconductor, Inc. + * Author: Dipen Dudhat <dipen.dudhat@freescale.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <asm/io.h> +#include <fsl_ifc.h> +#include <linux/mtd/nand.h> + +static inline int is_blank(uchar *addr, int page_size) +{ +	int i; + +	for (i = 0; i < page_size; i++) { +		if (__raw_readb(&addr[i]) != 0xff) +			return 0; +	} + +	/* +	 * For the SPL, don't worry about uncorrectable errors +	 * where the main area is all FFs but shouldn't be. +	 */ +	return 1; +} + +/* returns nonzero if entire page is blank */ +static inline int check_read_ecc(uchar *buf, u32 *eccstat, +				 unsigned int bufnum, int page_size) +{ +	u32 reg = eccstat[bufnum / 4]; +	int errors = (reg >> ((3 - bufnum % 4) * 8)) & 0xf; + +	if (errors == 0xf) { /* uncorrectable */ +		/* Blank pages fail hw ECC checks */ +		if (is_blank(buf, page_size)) +			return 1; + +		puts("ecc error\n"); +		for (;;) +			; +	} + +	return 0; +} + +static inline void nand_wait(uchar *buf, int bufnum, int page_size) +{ +	struct fsl_ifc *ifc = IFC_BASE_ADDR; +	u32 status; +	u32 eccstat[4]; +	int bufperpage = page_size / 512; +	int bufnum_end, i; + +	bufnum *= bufperpage; +	bufnum_end = bufnum + bufperpage - 1; + +	do { +		status = ifc_in32(&ifc->ifc_nand.nand_evter_stat); +	} while (!(status & IFC_NAND_EVTER_STAT_OPC)); + +	if (status & IFC_NAND_EVTER_STAT_FTOER) { +		puts("flash time out error\n"); +		for (;;) +			; +	} + +	for (i = bufnum / 4; i <= bufnum_end / 4; i++) +		eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]); + +	for (i = bufnum; i <= bufnum_end; i++) { +		if (check_read_ecc(buf, eccstat, i, page_size)) +			break; +	} + +	ifc_out32(&ifc->ifc_nand.nand_evter_stat, status); +} + +static inline int bad_block(uchar *marker, int port_size) +{ +	if (port_size == 8) +		return __raw_readb(marker) != 0xff; +	else +		return __raw_readw((u16 *)marker) != 0xffff; +} + +int nand_spl_load_image(uint32_t offs, unsigned int uboot_size, void *vdst) +{ +	struct fsl_ifc 
*ifc = IFC_BASE_ADDR; +	uchar *buf = (uchar *)CONFIG_SYS_NAND_BASE; +	int page_size; +	int port_size; +	int pages_per_blk; +	int blk_size; +	int bad_marker = 0; +	int bufnum_mask, bufnum; + +	int csor, cspr; +	int pos = 0; +	int j = 0; + +	int sram_addr; +	int pg_no; +	uchar *dst = vdst; + +	/* Get NAND Flash configuration */ +	csor = CONFIG_SYS_NAND_CSOR; +	cspr = CONFIG_SYS_NAND_CSPR; + +	port_size = (cspr & CSPR_PORT_SIZE_16) ? 16 : 8; + +	if ((csor & CSOR_NAND_PGS_MASK) == CSOR_NAND_PGS_8K) { +		page_size = 8192; +		bufnum_mask = 0x0; +	} else if ((csor & CSOR_NAND_PGS_MASK) == CSOR_NAND_PGS_4K) { +		page_size = 4096; +		bufnum_mask = 0x1; +	} else if ((csor & CSOR_NAND_PGS_MASK) == CSOR_NAND_PGS_2K) { +		page_size = 2048; +		bufnum_mask = 0x3; +	} else { +		page_size = 512; +		bufnum_mask = 0xf; + +		if (port_size == 8) +			bad_marker = 5; +	} + +	pages_per_blk = +		32 << ((csor & CSOR_NAND_PB_MASK) >> CSOR_NAND_PB_SHIFT); + +	blk_size = pages_per_blk * page_size; + +	/* Open Full SRAM mapping for spare are access */ +	ifc_out32(&ifc->ifc_nand.ncfgr, 0x0); + +	/* Clear Boot events */ +	ifc_out32(&ifc->ifc_nand.nand_evter_stat, 0xffffffff); + +	/* Program FIR/FCR for Large/Small page */ +	if (page_size > 512) { +		ifc_out32(&ifc->ifc_nand.nand_fir0, +			  (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | +			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | +			  (IFC_FIR_OP_BTRD << IFC_NAND_FIR0_OP4_SHIFT)); +		ifc_out32(&ifc->ifc_nand.nand_fir1, 0x0); + +		ifc_out32(&ifc->ifc_nand.nand_fcr0, +			  (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | +			  (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT)); +	} else { +		ifc_out32(&ifc->ifc_nand.nand_fir0, +			  (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | +			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | +			  (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) | +			  (IFC_FIR_OP_BTRD << IFC_NAND_FIR0_OP3_SHIFT)); +		
ifc_out32(&ifc->ifc_nand.nand_fir1, 0x0); + +		ifc_out32(&ifc->ifc_nand.nand_fcr0, +			  NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT); +	} + +	/* Program FBCR = 0 for full page read */ +	ifc_out32(&ifc->ifc_nand.nand_fbcr, 0); + +	/* Read and copy u-boot on SDRAM from NAND device, In parallel +	 * check for Bad block if found skip it and read continue to +	 * next Block +	 */ +	while (pos < uboot_size) { +		int i = 0; +		do { +			pg_no = offs / page_size; +			bufnum = pg_no & bufnum_mask; +			sram_addr = bufnum * page_size * 2; + +			ifc_out32(&ifc->ifc_nand.row0, pg_no); +			ifc_out32(&ifc->ifc_nand.col0, 0); +			/* start read */ +			ifc_out32(&ifc->ifc_nand.nandseq_strt, +				  IFC_NAND_SEQ_STRT_FIR_STRT); + +			/* wait for read to complete */ +			nand_wait(&buf[sram_addr], bufnum, page_size); + +			/* +			 * If either of the first two pages are marked bad, +			 * continue to the next block. +			 */ +			if (i++ < 2 && +			    bad_block(&buf[sram_addr + page_size + bad_marker], +				      port_size)) { +				puts("skipping\n"); +				offs = (offs + blk_size) & ~(blk_size - 1); +				pos &= ~(blk_size - 1); +				break; +			} + +			for (j = 0; j < page_size; j++) +				dst[pos + j] = __raw_readb(&buf[sram_addr + j]); + +			pos += page_size; +			offs += page_size; +		} while ((offs & (blk_size - 1)) && (pos < uboot_size)); +	} + +	return 0; +} + +/* + * Main entrypoint for NAND Boot. It's necessary that SDRAM is already + * configured and available since this code loads the main U-boot image + * from NAND into SDRAM and starts from there. 
+ */ +void nand_boot(void) +{ +	__attribute__((noreturn)) void (*uboot)(void); +	/* +	 * Load U-Boot image from NAND into RAM +	 */ +	nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS, +			    CONFIG_SYS_NAND_U_BOOT_SIZE, +			    (uchar *)CONFIG_SYS_NAND_U_BOOT_DST); + +#ifdef CONFIG_NAND_ENV_DST +	nand_spl_load_image(CONFIG_ENV_OFFSET, CONFIG_ENV_SIZE, +			    (uchar *)CONFIG_NAND_ENV_DST); + +#ifdef CONFIG_ENV_OFFSET_REDUND +	nand_spl_load_image(CONFIG_ENV_OFFSET_REDUND, CONFIG_ENV_SIZE, +			    (uchar *)CONFIG_NAND_ENV_DST + CONFIG_ENV_SIZE); +#endif +#endif +	/* +	 * Jump to U-Boot image +	 */ +#ifdef CONFIG_SPL_FLUSH_IMAGE +	/* +	 * Clean d-cache and invalidate i-cache, to +	 * make sure that no stale data is executed. +	 */ +	flush_cache(CONFIG_SYS_NAND_U_BOOT_DST, CONFIG_SYS_NAND_U_BOOT_SIZE); +#endif +	uboot = (void *)CONFIG_SYS_NAND_U_BOOT_START; +	uboot(); +} diff --git a/roms/u-boot/drivers/mtd/nand/fsl_upm.c b/roms/u-boot/drivers/mtd/nand/fsl_upm.c new file mode 100644 index 00000000..3ae0044f --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/fsl_upm.c @@ -0,0 +1,199 @@ +/* + * FSL UPM NAND driver + * + * Copyright (C) 2007 MontaVista Software, Inc. 
+ *                    Anton Vorontsov <avorontsov@ru.mvista.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <config.h> +#include <common.h> +#include <asm/io.h> +#include <asm/errno.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/fsl_upm.h> +#include <nand.h> + +static void fsl_upm_start_pattern(struct fsl_upm *upm, u32 pat_offset) +{ +	clrsetbits_be32(upm->mxmr, MxMR_MAD_MSK, MxMR_OP_RUNP | pat_offset); +	(void)in_be32(upm->mxmr); +} + +static void fsl_upm_end_pattern(struct fsl_upm *upm) +{ +	clrbits_be32(upm->mxmr, MxMR_OP_RUNP); + +	while (in_be32(upm->mxmr) & MxMR_OP_RUNP) +		eieio(); +} + +static void fsl_upm_run_pattern(struct fsl_upm *upm, int width, +				void __iomem *io_addr, u32 mar) +{ +	out_be32(upm->mar, mar); +	(void)in_be32(upm->mar); +	switch (width) { +	case 8: +		out_8(io_addr, 0x0); +		break; +	case 16: +		out_be16(io_addr, 0x0); +		break; +	case 32: +		out_be32(io_addr, 0x0); +		break; +	} +} + +static void fun_wait(struct fsl_upm_nand *fun) +{ +	if (fun->dev_ready) { +		while (!fun->dev_ready(fun->chip_nr)) +			debug("unexpected busy state\n"); +	} else { +		/* +		 * If the R/B pin is not connected, +		 * a short delay is necessary. 
+		 */ +		udelay(1); +	} +} + +#if CONFIG_SYS_NAND_MAX_CHIPS > 1 +static void fun_select_chip(struct mtd_info *mtd, int chip_nr) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_upm_nand *fun = chip->priv; + +	if (chip_nr >= 0) { +		fun->chip_nr = chip_nr; +		chip->IO_ADDR_R = chip->IO_ADDR_W = +			fun->upm.io_addr + fun->chip_offset * chip_nr; +	} else if (chip_nr == -1) { +		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE); +	} +} +#endif + +static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_upm_nand *fun = chip->priv; +	void __iomem *io_addr; +	u32 mar; + +	if (!(ctrl & fun->last_ctrl)) { +		fsl_upm_end_pattern(&fun->upm); + +		if (cmd == NAND_CMD_NONE) +			return; + +		fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE); +	} + +	if (ctrl & NAND_CTRL_CHANGE) { +		if (ctrl & NAND_ALE) +			fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset); +		else if (ctrl & NAND_CLE) +			fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset); +	} + +	mar = cmd << (32 - fun->width); +	io_addr = fun->upm.io_addr; +#if CONFIG_SYS_NAND_MAX_CHIPS > 1 +	if (fun->chip_nr > 0) { +		io_addr += fun->chip_offset * fun->chip_nr; +		if (fun->upm_mar_chip_offset) +			mar |= fun->upm_mar_chip_offset * fun->chip_nr; +	} +#endif +	fsl_upm_run_pattern(&fun->upm, fun->width, io_addr, mar); + +	/* +	 * Some boards/chips needs this.  At least the MPC8360E-RDK +	 * needs it.  Probably weird chip, because I don't see any +	 * need for this on MPC8555E + Samsung K9F1G08U0A.  Usually +	 * here are 0-2 unexpected busy states per block read. 
+	 */ +	if (fun->wait_flags & FSL_UPM_WAIT_RUN_PATTERN) +		fun_wait(fun); +} + +static u8 upm_nand_read_byte(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; + +	return in_8(chip->IO_ADDR_R); +} + +static void upm_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ +	int i; +	struct nand_chip *chip = mtd->priv; +	struct fsl_upm_nand *fun = chip->priv; + +	for (i = 0; i < len; i++) { +		out_8(chip->IO_ADDR_W, buf[i]); +		if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BYTE) +			fun_wait(fun); +	} + +	if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BUFFER) +		fun_wait(fun); +} + +static void upm_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) +{ +	int i; +	struct nand_chip *chip = mtd->priv; + +	for (i = 0; i < len; i++) +		buf[i] = in_8(chip->IO_ADDR_R); +} + +static int upm_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ +	int i; +	struct nand_chip *chip = mtd->priv; + +	for (i = 0; i < len; i++) { +		if (buf[i] != in_8(chip->IO_ADDR_R)) +			return -EFAULT; +	} + +	return 0; +} + +static int nand_dev_ready(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	struct fsl_upm_nand *fun = chip->priv; + +	return fun->dev_ready(fun->chip_nr); +} + +int fsl_upm_nand_init(struct nand_chip *chip, struct fsl_upm_nand *fun) +{ +	if (fun->width != 8 && fun->width != 16 && fun->width != 32) +		return -ENOSYS; + +	fun->last_ctrl = NAND_CLE; + +	chip->priv = fun; +	chip->chip_delay = fun->chip_delay; +	chip->ecc.mode = NAND_ECC_SOFT; +	chip->cmd_ctrl = fun_cmd_ctrl; +#if CONFIG_SYS_NAND_MAX_CHIPS > 1 +	chip->select_chip = fun_select_chip; +#endif +	chip->read_byte = upm_nand_read_byte; +	chip->read_buf = upm_nand_read_buf; +	chip->write_buf = upm_nand_write_buf; +	chip->verify_buf = upm_nand_verify_buf; +	if (fun->dev_ready) +		chip->dev_ready = nand_dev_ready; + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/fsmc_nand.c b/roms/u-boot/drivers/mtd/nand/fsmc_nand.c new file mode 100644 index 00000000..567eff09 --- 
/dev/null +++ b/roms/u-boot/drivers/mtd/nand/fsmc_nand.c @@ -0,0 +1,473 @@ +/* + * (C) Copyright 2010 + * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com. + * + * (C) Copyright 2012 + * Amit Virdi, ST Microelectronics, amit.virdi@st.com. + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <nand.h> +#include <asm/io.h> +#include <linux/bitops.h> +#include <linux/err.h> +#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/fsmc_nand.h> +#include <asm/arch/hardware.h> + +static u32 fsmc_version; +static struct fsmc_regs *const fsmc_regs_p = (struct fsmc_regs *) +	CONFIG_SYS_FSMC_BASE; + +/* + * ECC4 and ECC1 have 13 bytes and 3 bytes of ecc respectively for 512 bytes of + * data. ECC4 can correct up to 8 bits in 512 bytes of data while ECC1 can + * correct 1 bit in 512 bytes + */ + +static struct nand_ecclayout fsmc_ecc4_lp_layout = { +	.eccbytes = 104, +	.eccpos = {  2,   3,   4,   5,   6,   7,   8, +		9,  10,  11,  12,  13,  14, +		18,  19,  20,  21,  22,  23,  24, +		25,  26,  27,  28,  29,  30, +		34,  35,  36,  37,  38,  39,  40, +		41,  42,  43,  44,  45,  46, +		50,  51,  52,  53,  54,  55,  56, +		57,  58,  59,  60,  61,  62, +		66,  67,  68,  69,  70,  71,  72, +		73,  74,  75,  76,  77,  78, +		82,  83,  84,  85,  86,  87,  88, +		89,  90,  91,  92,  93,  94, +		98,  99, 100, 101, 102, 103, 104, +		105, 106, 107, 108, 109, 110, +		114, 115, 116, 117, 118, 119, 120, +		121, 122, 123, 124, 125, 126 +	}, +	.oobfree = { +		{.offset = 15, .length = 3}, +		{.offset = 31, .length = 3}, +		{.offset = 47, .length = 3}, +		{.offset = 63, .length = 3}, +		{.offset = 79, .length = 3}, +		{.offset = 95, .length = 3}, +		{.offset = 111, .length = 3}, +		{.offset = 127, .length = 1} +	} +}; + +/* + * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes + * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118 + * bytes are free for use. 
+ */ +static struct nand_ecclayout fsmc_ecc4_224_layout = { +	.eccbytes = 104, +	.eccpos = {  2,   3,   4,   5,   6,   7,   8, +		9,  10,  11,  12,  13,  14, +		18,  19,  20,  21,  22,  23,  24, +		25,  26,  27,  28,  29,  30, +		34,  35,  36,  37,  38,  39,  40, +		41,  42,  43,  44,  45,  46, +		50,  51,  52,  53,  54,  55,  56, +		57,  58,  59,  60,  61,  62, +		66,  67,  68,  69,  70,  71,  72, +		73,  74,  75,  76,  77,  78, +		82,  83,  84,  85,  86,  87,  88, +		89,  90,  91,  92,  93,  94, +		98,  99, 100, 101, 102, 103, 104, +		105, 106, 107, 108, 109, 110, +		114, 115, 116, 117, 118, 119, 120, +		121, 122, 123, 124, 125, 126 +	}, +	.oobfree = { +		{.offset = 15, .length = 3}, +		{.offset = 31, .length = 3}, +		{.offset = 47, .length = 3}, +		{.offset = 63, .length = 3}, +		{.offset = 79, .length = 3}, +		{.offset = 95, .length = 3}, +		{.offset = 111, .length = 3}, +		{.offset = 127, .length = 97} +	} +}; + +/* + * ECC placement definitions in oobfree type format + * There are 13 bytes of ecc for every 512 byte block and it has to be read + * consecutively and immediately after the 512 byte data block for hardware to + * generate the error bit offsets in 512 byte data + * Managing the ecc bytes in the following way makes it easier for software to + * read ecc bytes consecutive to data bytes. 
This way is similar to + * oobfree structure maintained already in u-boot nand driver + */ +static struct fsmc_eccplace fsmc_eccpl_lp = { +	.eccplace = { +		{.offset = 2, .length = 13}, +		{.offset = 18, .length = 13}, +		{.offset = 34, .length = 13}, +		{.offset = 50, .length = 13}, +		{.offset = 66, .length = 13}, +		{.offset = 82, .length = 13}, +		{.offset = 98, .length = 13}, +		{.offset = 114, .length = 13} +	} +}; + +static struct nand_ecclayout fsmc_ecc4_sp_layout = { +	.eccbytes = 13, +	.eccpos = { 0,  1,  2,  3,  6,  7, 8, +		9, 10, 11, 12, 13, 14 +	}, +	.oobfree = { +		{.offset = 15, .length = 1}, +	} +}; + +static struct fsmc_eccplace fsmc_eccpl_sp = { +	.eccplace = { +		{.offset = 0, .length = 4}, +		{.offset = 6, .length = 9} +	} +}; + +static struct nand_ecclayout fsmc_ecc1_layout = { +	.eccbytes = 24, +	.eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52, +		66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116}, +	.oobfree = { +		{.offset = 8, .length = 8}, +		{.offset = 24, .length = 8}, +		{.offset = 40, .length = 8}, +		{.offset = 56, .length = 8}, +		{.offset = 72, .length = 8}, +		{.offset = 88, .length = 8}, +		{.offset = 104, .length = 8}, +		{.offset = 120, .length = 8} +	} +}; + +/* Count the number of 0's in buff upto a max of max_bits */ +static int count_written_bits(uint8_t *buff, int size, int max_bits) +{ +	int k, written_bits = 0; + +	for (k = 0; k < size; k++) { +		written_bits += hweight8(~buff[k]); +		if (written_bits > max_bits) +			break; +	} + +	return written_bits; +} + +static void fsmc_nand_hwcontrol(struct mtd_info *mtd, int cmd, uint ctrl) +{ +	struct nand_chip *this = mtd->priv; +	ulong IO_ADDR_W; + +	if (ctrl & NAND_CTRL_CHANGE) { +		IO_ADDR_W = (ulong)this->IO_ADDR_W; + +		IO_ADDR_W &= ~(CONFIG_SYS_NAND_CLE | CONFIG_SYS_NAND_ALE); +		if (ctrl & NAND_CLE) +			IO_ADDR_W |= CONFIG_SYS_NAND_CLE; +		if (ctrl & NAND_ALE) +			IO_ADDR_W |= CONFIG_SYS_NAND_ALE; + +		if (ctrl & NAND_NCE) { +			writel(readl(&fsmc_regs_p->pc) | +		
			FSMC_ENABLE, &fsmc_regs_p->pc); +		} else { +			writel(readl(&fsmc_regs_p->pc) & +					~FSMC_ENABLE, &fsmc_regs_p->pc); +		} +		this->IO_ADDR_W = (void *)IO_ADDR_W; +	} + +	if (cmd != NAND_CMD_NONE) +		writeb(cmd, this->IO_ADDR_W); +} + +static int fsmc_bch8_correct_data(struct mtd_info *mtd, u_char *dat, +		u_char *read_ecc, u_char *calc_ecc) +{ +	/* The calculated ecc is actually the correction index in data */ +	u32 err_idx[8]; +	u32 num_err, i; +	u32 ecc1, ecc2, ecc3, ecc4; + +	num_err = (readl(&fsmc_regs_p->sts) >> 10) & 0xF; + +	if (likely(num_err == 0)) +		return 0; + +	if (unlikely(num_err > 8)) { +		/* +		 * This is a temporary erase check. A newly erased page read +		 * would result in an ecc error because the oob data is also +		 * erased to FF and the calculated ecc for an FF data is not +		 * FF..FF. +		 * This is a workaround to skip performing correction in case +		 * data is FF..FF +		 * +		 * Logic: +		 * For every page, each bit written as 0 is counted until these +		 * number of bits are greater than 8 (the maximum correction +		 * capability of FSMC for each 512 + 13 bytes) +		 */ + +		int bits_ecc = count_written_bits(read_ecc, 13, 8); +		int bits_data = count_written_bits(dat, 512, 8); + +		if ((bits_ecc + bits_data) <= 8) { +			if (bits_data) +				memset(dat, 0xff, 512); +			return bits_data + bits_ecc; +		} + +		return -EBADMSG; +	} + +	ecc1 = readl(&fsmc_regs_p->ecc1); +	ecc2 = readl(&fsmc_regs_p->ecc2); +	ecc3 = readl(&fsmc_regs_p->ecc3); +	ecc4 = readl(&fsmc_regs_p->sts); + +	err_idx[0] = (ecc1 >> 0) & 0x1FFF; +	err_idx[1] = (ecc1 >> 13) & 0x1FFF; +	err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F); +	err_idx[3] = (ecc2 >> 7) & 0x1FFF; +	err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF); +	err_idx[5] = (ecc3 >> 1) & 0x1FFF; +	err_idx[6] = (ecc3 >> 14) & 0x1FFF; +	err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F); + +	i = 0; +	while (i < num_err) { +		err_idx[i] ^= 3; + +		if (err_idx[i] < 
512 * 8) +			__change_bit(err_idx[i], dat); + +		i++; +	} + +	return num_err; +} + +static int fsmc_read_hwecc(struct mtd_info *mtd, +			const u_char *data, u_char *ecc) +{ +	u_int ecc_tmp; +	int timeout = CONFIG_SYS_HZ; +	ulong start; + +	switch (fsmc_version) { +	case FSMC_VER8: +		start = get_timer(0); +		while (get_timer(start) < timeout) { +			/* +			 * Busy waiting for ecc computation +			 * to finish for 512 bytes +			 */ +			if (readl(&fsmc_regs_p->sts) & FSMC_CODE_RDY) +				break; +		} + +		ecc_tmp = readl(&fsmc_regs_p->ecc1); +		ecc[0] = (u_char) (ecc_tmp >> 0); +		ecc[1] = (u_char) (ecc_tmp >> 8); +		ecc[2] = (u_char) (ecc_tmp >> 16); +		ecc[3] = (u_char) (ecc_tmp >> 24); + +		ecc_tmp = readl(&fsmc_regs_p->ecc2); +		ecc[4] = (u_char) (ecc_tmp >> 0); +		ecc[5] = (u_char) (ecc_tmp >> 8); +		ecc[6] = (u_char) (ecc_tmp >> 16); +		ecc[7] = (u_char) (ecc_tmp >> 24); + +		ecc_tmp = readl(&fsmc_regs_p->ecc3); +		ecc[8] = (u_char) (ecc_tmp >> 0); +		ecc[9] = (u_char) (ecc_tmp >> 8); +		ecc[10] = (u_char) (ecc_tmp >> 16); +		ecc[11] = (u_char) (ecc_tmp >> 24); + +		ecc_tmp = readl(&fsmc_regs_p->sts); +		ecc[12] = (u_char) (ecc_tmp >> 16); +		break; + +	default: +		ecc_tmp = readl(&fsmc_regs_p->ecc1); +		ecc[0] = (u_char) (ecc_tmp >> 0); +		ecc[1] = (u_char) (ecc_tmp >> 8); +		ecc[2] = (u_char) (ecc_tmp >> 16); +		break; +	} + +	return 0; +} + +void fsmc_enable_hwecc(struct mtd_info *mtd, int mode) +{ +	writel(readl(&fsmc_regs_p->pc) & ~FSMC_ECCPLEN_256, +			&fsmc_regs_p->pc); +	writel(readl(&fsmc_regs_p->pc) & ~FSMC_ECCEN, +			&fsmc_regs_p->pc); +	writel(readl(&fsmc_regs_p->pc) | FSMC_ECCEN, +			&fsmc_regs_p->pc); +} + +/* + * fsmc_read_page_hwecc + * @mtd:	mtd info structure + * @chip:	nand chip info structure + * @buf:	buffer to store read data + * @oob_required:	caller expects OOB data read to chip->oob_poi + * @page:	page number to read + * + * This routine is needed for fsmc verison 8 as reading from NAND chip has to be + * performed in a strict sequence as 
follows: + * data(512 byte) -> ecc(13 byte) + * After this read, fsmc hardware generates and reports error data bits(upto a + * max of 8 bits) + */ +static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, +				 uint8_t *buf, int oob_required, int page) +{ +	struct fsmc_eccplace *fsmc_eccpl; +	int i, j, s, stat, eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccsteps = chip->ecc.steps; +	uint8_t *p = buf; +	uint8_t *ecc_calc = chip->buffers->ecccalc; +	uint8_t *ecc_code = chip->buffers->ecccode; +	int off, len, group = 0; +	uint8_t oob[13] __attribute__ ((aligned (2))); + +	/* Differentiate between small and large page ecc place definitions */ +	if (mtd->writesize == 512) +		fsmc_eccpl = &fsmc_eccpl_sp; +	else +		fsmc_eccpl = &fsmc_eccpl_lp; + +	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { + +		chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page); +		chip->ecc.hwctl(mtd, NAND_ECC_READ); +		chip->read_buf(mtd, p, eccsize); + +		for (j = 0; j < eccbytes;) { +			off = fsmc_eccpl->eccplace[group].offset; +			len = fsmc_eccpl->eccplace[group].length; +			group++; + +			/* +			 * length is intentionally kept a higher multiple of 2 +			 * to read at least 13 bytes even in case of 16 bit NAND +			 * devices +			 */ +			if (chip->options & NAND_BUSWIDTH_16) +				len = roundup(len, 2); +			chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page); +			chip->read_buf(mtd, oob + j, len); +			j += len; +		} + +		memcpy(&ecc_code[i], oob, 13); +		chip->ecc.calculate(mtd, p, &ecc_calc[i]); + +		stat = chip->ecc.correct(mtd, p, &ecc_code[i], +				&ecc_calc[i]); +		if (stat < 0) +			mtd->ecc_stats.failed++; +		else +			mtd->ecc_stats.corrected += stat; +	} + +	return 0; +} + +int fsmc_nand_init(struct nand_chip *nand) +{ +	static int chip_nr; +	struct mtd_info *mtd; +	int i; +	u32 peripid2 = readl(&fsmc_regs_p->peripid2); + +	fsmc_version = (peripid2 >> FSMC_REVISION_SHFT) & +		FSMC_REVISION_MSK; + +	
writel(readl(&fsmc_regs_p->ctrl) | FSMC_WP, &fsmc_regs_p->ctrl); + +#if defined(CONFIG_SYS_FSMC_NAND_16BIT) +	writel(FSMC_DEVWID_16 | FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON, +			&fsmc_regs_p->pc); +#elif defined(CONFIG_SYS_FSMC_NAND_8BIT) +	writel(FSMC_DEVWID_8 | FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON, +			&fsmc_regs_p->pc); +#else +#error Please define CONFIG_SYS_FSMC_NAND_16BIT or CONFIG_SYS_FSMC_NAND_8BIT +#endif +	writel(readl(&fsmc_regs_p->pc) | FSMC_TCLR_1 | FSMC_TAR_1, +			&fsmc_regs_p->pc); +	writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0, +			&fsmc_regs_p->comm); +	writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0, +			&fsmc_regs_p->attrib); + +	nand->options = 0; +#if defined(CONFIG_SYS_FSMC_NAND_16BIT) +	nand->options |= NAND_BUSWIDTH_16; +#endif +	nand->ecc.mode = NAND_ECC_HW; +	nand->ecc.size = 512; +	nand->ecc.calculate = fsmc_read_hwecc; +	nand->ecc.hwctl = fsmc_enable_hwecc; +	nand->cmd_ctrl = fsmc_nand_hwcontrol; +	nand->IO_ADDR_R = nand->IO_ADDR_W = +		(void  __iomem *)CONFIG_SYS_NAND_BASE; +	nand->badblockbits = 7; + +	mtd = &nand_info[chip_nr++]; +	mtd->priv = nand; + +	switch (fsmc_version) { +	case FSMC_VER8: +		nand->ecc.bytes = 13; +		nand->ecc.strength = 8; +		nand->ecc.correct = fsmc_bch8_correct_data; +		nand->ecc.read_page = fsmc_read_page_hwecc; +		if (mtd->writesize == 512) +			nand->ecc.layout = &fsmc_ecc4_sp_layout; +		else { +			if (mtd->oobsize == 224) +				nand->ecc.layout = &fsmc_ecc4_224_layout; +			else +				nand->ecc.layout = &fsmc_ecc4_lp_layout; +		} + +		break; +	default: +		nand->ecc.bytes = 3; +		nand->ecc.strength = 1; +		nand->ecc.layout = &fsmc_ecc1_layout; +		nand->ecc.correct = nand_correct_data; +		break; +	} + +	/* Detect NAND chips */ +	if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL)) +		return -ENXIO; + +	if (nand_scan_tail(mtd)) +		return -ENXIO; + +	for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++) +		if (nand_register(i)) +			return -ENXIO; + +	return 0; +} 
diff --git a/roms/u-boot/drivers/mtd/nand/jz4740_nand.c b/roms/u-boot/drivers/mtd/nand/jz4740_nand.c new file mode 100644 index 00000000..7a62cc33 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/jz4740_nand.c @@ -0,0 +1,259 @@ +/* + * Platform independend driver for JZ4740. + * + * Copyright (c) 2007 Ingenic Semiconductor Inc. + * Author: <jlwei@ingenic.cn> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ +#include <common.h> + +#include <nand.h> +#include <asm/io.h> +#include <asm/jz4740.h> + +#define JZ_NAND_DATA_ADDR ((void __iomem *)0xB8000000) +#define JZ_NAND_CMD_ADDR (JZ_NAND_DATA_ADDR + 0x8000) +#define JZ_NAND_ADDR_ADDR (JZ_NAND_DATA_ADDR + 0x10000) + +#define BIT(x) (1 << (x)) +#define JZ_NAND_ECC_CTRL_ENCODING	BIT(3) +#define JZ_NAND_ECC_CTRL_RS		BIT(2) +#define JZ_NAND_ECC_CTRL_RESET		BIT(1) +#define JZ_NAND_ECC_CTRL_ENABLE		BIT(0) + +#define EMC_SMCR1_OPT_NAND	0x094c4400 +/* Optimize the timing of nand */ + +static struct jz4740_emc * emc = (struct jz4740_emc *)JZ4740_EMC_BASE; + +static struct nand_ecclayout qi_lb60_ecclayout_2gb = { +	.eccbytes = 72, +	.eccpos = { +		12, 13, 14, 15, 16, 17, 18, 19, +		20, 21, 22, 23, 24, 25, 26, 27, +		28, 29, 30, 31, 32, 33, 34, 35, +		36, 37, 38, 39, 40, 41, 42, 43, +		44, 45, 46, 47, 48, 49, 50, 51, +		52, 53, 54, 55, 56, 57, 58, 59, +		60, 61, 62, 63, 64, 65, 66, 67, +		68, 69, 70, 71, 72, 73, 74, 75, +		76, 77, 78, 79, 80, 81, 82, 83 }, +	.oobfree = { +		{.offset = 2, +		 .length = 10 }, +		{.offset = 84, +		 .length = 44 } } +}; + +static int is_reading; + +static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) +{ +	struct nand_chip *this = mtd->priv; +	uint32_t reg; + +	if (ctrl & NAND_CTRL_CHANGE) { +		if (ctrl & NAND_ALE) +			this->IO_ADDR_W = JZ_NAND_ADDR_ADDR; +		else if (ctrl & NAND_CLE) +			this->IO_ADDR_W = JZ_NAND_CMD_ADDR; +		else +			this->IO_ADDR_W = JZ_NAND_DATA_ADDR; + +		reg = readl(&emc->nfcsr); +		if (ctrl & NAND_NCE) +			reg |= EMC_NFCSR_NFCE1; +		else +			reg &= 
~EMC_NFCSR_NFCE1; +		writel(reg, &emc->nfcsr); +	} + +	if (cmd != NAND_CMD_NONE) +		writeb(cmd, this->IO_ADDR_W); +} + +static int jz_nand_device_ready(struct mtd_info *mtd) +{ +	return (readl(GPIO_PXPIN(2)) & 0x40000000) ? 1 : 0; +} + +void board_nand_select_device(struct nand_chip *nand, int chip) +{ +	/* +	 * Don't use "chip" to address the NAND device, +	 * generate the cs from the address where it is encoded. +	 */ +} + +static int jz_nand_rs_calculate_ecc(struct mtd_info *mtd, const u_char *dat, +				u_char *ecc_code) +{ +	uint32_t status; +	int i; + +	if (is_reading) +		return 0; + +	do { +		status = readl(&emc->nfints); +	} while (!(status & EMC_NFINTS_ENCF)); + +	/* disable ecc */ +	writel(readl(&emc->nfecr) & ~EMC_NFECR_ECCE, &emc->nfecr); + +	for (i = 0; i < 9; i++) +		ecc_code[i] = readb(&emc->nfpar[i]); + +	return 0; +} + +static void jz_nand_hwctl(struct mtd_info *mtd, int mode) +{ +	uint32_t reg; + +	writel(0, &emc->nfints); +	reg = readl(&emc->nfecr); +	reg |= JZ_NAND_ECC_CTRL_RESET; +	reg |= JZ_NAND_ECC_CTRL_ENABLE; +	reg |= JZ_NAND_ECC_CTRL_RS; + +	switch (mode) { +	case NAND_ECC_READ: +		reg &= ~JZ_NAND_ECC_CTRL_ENCODING; +		is_reading = 1; +		break; +	case NAND_ECC_WRITE: +		reg |= JZ_NAND_ECC_CTRL_ENCODING; +		is_reading = 0; +		break; +	default: +		break; +	} + +	writel(reg, &emc->nfecr); +} + +/* Correct 1~9-bit errors in 512-bytes data */ +static void jz_rs_correct(unsigned char *dat, int idx, int mask) +{ +	int i; + +	idx--; + +	i = idx + (idx >> 3); +	if (i >= 512) +		return; + +	mask <<= (idx & 0x7); + +	dat[i] ^= mask & 0xff; +	if (i < 511) +		dat[i + 1] ^= (mask >> 8) & 0xff; +} + +static int jz_nand_rs_correct_data(struct mtd_info *mtd, u_char *dat, +				   u_char *read_ecc, u_char *calc_ecc) +{ +	int k; +	uint32_t errcnt, index, mask, status; + +	/* Set PAR values */ +	const uint8_t all_ff_ecc[] = { +		0xcd, 0x9d, 0x90, 0x58, 0xf4, 0x8b, 0xff, 0xb7, 0x6f }; + +	if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && +	    read_ecc[2] == 
0xff && read_ecc[3] == 0xff && +	    read_ecc[4] == 0xff && read_ecc[5] == 0xff && +	    read_ecc[6] == 0xff && read_ecc[7] == 0xff && +	    read_ecc[8] == 0xff) { +		for (k = 0; k < 9; k++) +			writeb(all_ff_ecc[k], &emc->nfpar[k]); +	} else { +		for (k = 0; k < 9; k++) +			writeb(read_ecc[k], &emc->nfpar[k]); +	} +	/* Set PRDY */ +	writel(readl(&emc->nfecr) | EMC_NFECR_PRDY, &emc->nfecr); + +	/* Wait for completion */ +	do { +		status = readl(&emc->nfints); +	} while (!(status & EMC_NFINTS_DECF)); + +	/* disable ecc */ +	writel(readl(&emc->nfecr) & ~EMC_NFECR_ECCE, &emc->nfecr); + +	/* Check decoding */ +	if (!(status & EMC_NFINTS_ERR)) +		return 0; + +	if (status & EMC_NFINTS_UNCOR) { +		printf("uncorrectable ecc\n"); +		return -1; +	} + +	errcnt = (status & EMC_NFINTS_ERRCNT_MASK) >> EMC_NFINTS_ERRCNT_BIT; + +	switch (errcnt) { +	case 4: +		index = (readl(&emc->nferr[3]) & EMC_NFERR_INDEX_MASK) >> +			EMC_NFERR_INDEX_BIT; +		mask = (readl(&emc->nferr[3]) & EMC_NFERR_MASK_MASK) >> +			EMC_NFERR_MASK_BIT; +		jz_rs_correct(dat, index, mask); +	case 3: +		index = (readl(&emc->nferr[2]) & EMC_NFERR_INDEX_MASK) >> +			EMC_NFERR_INDEX_BIT; +		mask = (readl(&emc->nferr[2]) & EMC_NFERR_MASK_MASK) >> +			EMC_NFERR_MASK_BIT; +		jz_rs_correct(dat, index, mask); +	case 2: +		index = (readl(&emc->nferr[1]) & EMC_NFERR_INDEX_MASK) >> +			EMC_NFERR_INDEX_BIT; +		mask = (readl(&emc->nferr[1]) & EMC_NFERR_MASK_MASK) >> +			EMC_NFERR_MASK_BIT; +		jz_rs_correct(dat, index, mask); +	case 1: +		index = (readl(&emc->nferr[0]) & EMC_NFERR_INDEX_MASK) >> +			EMC_NFERR_INDEX_BIT; +		mask = (readl(&emc->nferr[0]) & EMC_NFERR_MASK_MASK) >> +			EMC_NFERR_MASK_BIT; +		jz_rs_correct(dat, index, mask); +	default: +		break; +	} + +	return errcnt; +} + +/* + * Main initialization routine + */ +int board_nand_init(struct nand_chip *nand) +{ +	uint32_t reg; + +	reg = readl(&emc->nfcsr); +	reg |= EMC_NFCSR_NFE1;	/* EMC setup, Set NFE bit */ +	writel(reg, &emc->nfcsr); + +	
writel(EMC_SMCR1_OPT_NAND, &emc->smcr[1]); + +	nand->IO_ADDR_R		= JZ_NAND_DATA_ADDR; +	nand->IO_ADDR_W		= JZ_NAND_DATA_ADDR; +	nand->cmd_ctrl		= jz_nand_cmd_ctrl; +	nand->dev_ready		= jz_nand_device_ready; +	nand->ecc.hwctl		= jz_nand_hwctl; +	nand->ecc.correct	= jz_nand_rs_correct_data; +	nand->ecc.calculate	= jz_nand_rs_calculate_ecc; +	nand->ecc.mode		= NAND_ECC_HW_OOB_FIRST; +	nand->ecc.size		= CONFIG_SYS_NAND_ECCSIZE; +	nand->ecc.bytes		= CONFIG_SYS_NAND_ECCBYTES; +	nand->ecc.strength	= 4; +	nand->ecc.layout	= &qi_lb60_ecclayout_2gb; +	nand->chip_delay	= 50; +	nand->bbt_options	|= NAND_BBT_USE_FLASH; + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/kb9202_nand.c b/roms/u-boot/drivers/mtd/nand/kb9202_nand.c new file mode 100644 index 00000000..22c56254 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/kb9202_nand.c @@ -0,0 +1,134 @@ +/* + * (C) Copyright 2006 + * KwikByte <kb9200_dev@kwikbyte.com> + * + * (C) Copyright 2009 + * Matthias Kaehlcke <matthias@kaehlcke.net> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <asm/io.h> +#include <asm/arch/AT91RM9200.h> +#include <asm/arch/hardware.h> + +#include <nand.h> + +/* + *      hardware specific access to control-lines + */ + +#define MASK_ALE        (1 << 22)       /* our ALE is A22 */ +#define MASK_CLE        (1 << 21)       /* our CLE is A21 */ + +#define KB9202_NAND_NCE (1 << 28) /* EN* on D28 */ +#define KB9202_NAND_BUSY (1 << 29) /* RB* on D29 */ + +#define KB9202_SMC2_NWS (1 << 2) +#define KB9202_SMC2_TDF (1 << 8) +#define KB9202_SMC2_RWSETUP (1 << 24) +#define KB9202_SMC2_RWHOLD (1 << 29) + +/* + *	Board-specific function to access device control signals + */ +static void kb9202_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) +{ +	struct nand_chip *this = mtd->priv; + +	if (ctrl & NAND_CTRL_CHANGE) { +		ulong IO_ADDR_W = (ulong) this->IO_ADDR_W; + +		/* clear ALE and CLE bits */ +		IO_ADDR_W &= ~(MASK_ALE | MASK_CLE); + +		if (ctrl & NAND_CLE) +			
IO_ADDR_W |= MASK_CLE; + +		if (ctrl & NAND_ALE) +			IO_ADDR_W |= MASK_ALE; + +		this->IO_ADDR_W = (void *) IO_ADDR_W; + +		if (ctrl & NAND_NCE) +			writel(KB9202_NAND_NCE, AT91C_PIOC_CODR); +		else +			writel(KB9202_NAND_NCE, AT91C_PIOC_SODR); +	} + +	if (cmd != NAND_CMD_NONE) +		writeb(cmd, this->IO_ADDR_W); +} + + +/* + * Board-specific function to access the device ready signal. + */ +static int kb9202_nand_ready(struct mtd_info *mtd) +{ +	return readl(AT91C_PIOC_PDSR) & KB9202_NAND_BUSY; +} + + +/* + * Board-specific NAND init.  Copied from include/linux/mtd/nand.h for reference. + * + * struct nand_chip - NAND Private Flash Chip Data + * @IO_ADDR_R:		[BOARDSPECIFIC] address to read the 8 I/O lines of the flash device + * @IO_ADDR_W:		[BOARDSPECIFIC] address to write the 8 I/O lines of the flash device + * @hwcontrol:		[BOARDSPECIFIC] hardwarespecific function for accesing control-lines + * @dev_ready:		[BOARDSPECIFIC] hardwarespecific function for accesing device ready/busy line + *			If set to NULL no access to ready/busy is available and the ready/busy information + *			is read from the chip status register + * @enable_hwecc:	[BOARDSPECIFIC] function to enable (reset) hardware ecc generator. Must only + *			be provided if a hardware ECC is available + * @eccmode:		[BOARDSPECIFIC] mode of ecc, see defines + * @chip_delay:		[BOARDSPECIFIC] chip dependent delay for transfering data from array to read regs (tR) + * @options:		[BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about + *			special functionality. See the defines for further explanation +*/ +/* + * This routine initializes controller and GPIOs. 
+ */ +int board_nand_init(struct nand_chip *nand) +{ +	unsigned int value; + +	nand->ecc.mode = NAND_ECC_SOFT; +	nand->cmd_ctrl = kb9202_nand_hwcontrol; +	nand->dev_ready = kb9202_nand_ready; + +	/* in case running outside of bootloader */ +	writel(1 << AT91C_ID_PIOC, AT91C_PMC_PCER); + +	/* setup nand flash access (allow ample margin) */ +	/* 4 wait states, 1 setup, 1 hold, 1 float for 8-bit device */ +	writel(AT91C_SMC2_WSEN | KB9202_SMC2_NWS | KB9202_SMC2_TDF | +		AT91C_SMC2_DBW_8 | KB9202_SMC2_RWSETUP | KB9202_SMC2_RWHOLD, +		AT91C_SMC_CSR3); + +	/* enable internal NAND controller */ +	value = readl(AT91C_EBI_CSA); +	value |= AT91C_EBI_CS3A_SMC_SmartMedia; +	writel(value, AT91C_EBI_CSA); + +	/* enable SMOE/SMWE */ +	writel(AT91C_PC1_BFRDY_SMOE | AT91C_PC3_BFBAA_SMWE, AT91C_PIOC_ASR); +	writel(AT91C_PC1_BFRDY_SMOE | AT91C_PC3_BFBAA_SMWE, AT91C_PIOC_PDR); +	writel(AT91C_PC1_BFRDY_SMOE | AT91C_PC3_BFBAA_SMWE, AT91C_PIOC_OER); + +	/* set NCE to high */ +	writel(KB9202_NAND_NCE, AT91C_PIOC_SODR); + +	/* disable output on pin connected to the busy line of the NAND */ +	writel(KB9202_NAND_BUSY, AT91C_PIOC_ODR); + +	/* enable the PIO to control NCE and BUSY */ +	writel(KB9202_NAND_NCE | KB9202_NAND_BUSY, AT91C_PIOC_PER); + +	/* enable output for NCE */ +	writel(KB9202_NAND_NCE, AT91C_PIOC_OER); + +	return (0); +} diff --git a/roms/u-boot/drivers/mtd/nand/kirkwood_nand.c b/roms/u-boot/drivers/mtd/nand/kirkwood_nand.c new file mode 100644 index 00000000..72687a1d --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/kirkwood_nand.c @@ -0,0 +1,70 @@ +/* + * (C) Copyright 2009 + * Marvell Semiconductor <www.marvell.com> + * Written-by: Prafulla Wadaskar <prafulla@marvell.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <asm/io.h> +#include <asm/arch/kirkwood.h> +#include <nand.h> + +/* NAND Flash Soc registers */ +struct kwnandf_registers { +	u32 rd_params;	/* 0x10418 */ +	u32 wr_param;	/* 0x1041c */ +	u8  pad[0x10470 - 0x1041c - 4]; +	u32 
ctrl;	/* 0x10470 */ +}; + +static struct kwnandf_registers *nf_reg = +	(struct kwnandf_registers *)KW_NANDF_BASE; + +/* + * hardware specific access to control-lines/bits + */ +#define NAND_ACTCEBOOT_BIT		0x02 + +static void kw_nand_hwcontrol(struct mtd_info *mtd, int cmd, +			      unsigned int ctrl) +{ +	struct nand_chip *nc = mtd->priv; +	u32 offs; + +	if (cmd == NAND_CMD_NONE) +		return; + +	if (ctrl & NAND_CLE) +		offs = (1 << 0);	/* Commands with A[1:0] == 01 */ +	else if (ctrl & NAND_ALE) +		offs = (1 << 1);	/* Addresses with A[1:0] == 10 */ +	else +		return; + +	writeb(cmd, nc->IO_ADDR_W + offs); +} + +void kw_nand_select_chip(struct mtd_info *mtd, int chip) +{ +	u32 data; + +	data = readl(&nf_reg->ctrl); +	data |= NAND_ACTCEBOOT_BIT; +	writel(data, &nf_reg->ctrl); +} + +int board_nand_init(struct nand_chip *nand) +{ +	nand->options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING; +#if defined(CONFIG_NAND_ECC_BCH) +	nand->ecc.mode = NAND_ECC_SOFT_BCH; +#else +	nand->ecc.mode = NAND_ECC_SOFT; +#endif +	nand->cmd_ctrl = kw_nand_hwcontrol; +	nand->chip_delay = 40; +	nand->select_chip = kw_nand_select_chip; +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/kmeter1_nand.c b/roms/u-boot/drivers/mtd/nand/kmeter1_nand.c new file mode 100644 index 00000000..df0bde57 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/kmeter1_nand.c @@ -0,0 +1,123 @@ +/* + * (C) Copyright 2009 + * Heiko Schocher, DENX Software Engineering, hs@denx.de + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <nand.h> +#include <asm/io.h> + +#define CONFIG_NAND_MODE_REG	(void *)(CONFIG_SYS_NAND_BASE + 0x20000) +#define CONFIG_NAND_DATA_REG	(void *)(CONFIG_SYS_NAND_BASE + 0x30000) + +#define read_mode()	in_8(CONFIG_NAND_MODE_REG) +#define write_mode(val)	out_8(CONFIG_NAND_MODE_REG, val) +#define read_data()	in_8(CONFIG_NAND_DATA_REG) +#define write_data(val)	out_8(CONFIG_NAND_DATA_REG, val) + +#define KPN_RDY2	(1 << 7) +#define KPN_RDY1	(1 << 6) +#define 
KPN_WPN		(1 << 4) +#define KPN_CE2N	(1 << 3) +#define KPN_CE1N	(1 << 2) +#define KPN_ALE		(1 << 1) +#define KPN_CLE		(1 << 0) + +#define KPN_DEFAULT_CHIP_DELAY 50 + +static int kpn_chip_ready(void) +{ +	if (read_mode() & KPN_RDY1) +		return 1; + +	return 0; +} + +static void kpn_wait_rdy(void) +{ +	int cnt = 1000000; + +	while (--cnt && !kpn_chip_ready()) +		udelay(1); + +	if (!cnt) +		printf ("timeout while waiting for RDY\n"); +} + +static void kpn_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) +{ +	u8 reg_val = read_mode(); + +	if (ctrl & NAND_CTRL_CHANGE) { +		reg_val = reg_val & ~(KPN_ALE + KPN_CLE); + +		if (ctrl & NAND_CLE) +			reg_val = reg_val | KPN_CLE; +		if (ctrl & NAND_ALE) +			reg_val = reg_val | KPN_ALE; +		if (ctrl & NAND_NCE) +			reg_val = reg_val & ~KPN_CE1N; +		else +			reg_val = reg_val | KPN_CE1N; + +		write_mode(reg_val); +	} +	if (cmd != NAND_CMD_NONE) +		write_data(cmd); + +	/* wait until flash is ready */ +	kpn_wait_rdy(); +} + +static u_char kpn_nand_read_byte(struct mtd_info *mtd) +{ +	return read_data(); +} + +static void kpn_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) +{ +	int i; + +	for (i = 0; i < len; i++) { +		write_data(buf[i]); +		kpn_wait_rdy(); +	} +} + +static void kpn_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) +{ +	int i; + +	for (i = 0; i < len; i++) +		buf[i] = read_data(); +} + +static int kpn_nand_dev_ready(struct mtd_info *mtd) +{ +	kpn_wait_rdy(); + +	return 1; +} + +int board_nand_init(struct nand_chip *nand) +{ +#if defined(CONFIG_NAND_ECC_BCH) +	nand->ecc.mode = NAND_ECC_SOFT_BCH; +#else +	nand->ecc.mode = NAND_ECC_SOFT; +#endif + +	/* Reference hardware control function */ +	nand->cmd_ctrl  = kpn_nand_hwcontrol; +	nand->read_byte  = kpn_nand_read_byte; +	nand->write_buf  = kpn_nand_write_buf; +	nand->read_buf   = kpn_nand_read_buf; +	nand->dev_ready  = kpn_nand_dev_ready; +	nand->chip_delay = KPN_DEFAULT_CHIP_DELAY; + +	/* reset mode register */ +	
write_mode(KPN_CE1N + KPN_CE2N + KPN_WPN); +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/mpc5121_nfc.c b/roms/u-boot/drivers/mtd/nand/mpc5121_nfc.c new file mode 100644 index 00000000..d0f3a353 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/mpc5121_nfc.c @@ -0,0 +1,681 @@ +/* + * Copyright 2004-2008 Freescale Semiconductor, Inc. + * Copyright 2009 Semihalf. + * (C) Copyright 2009 Stefan Roese <sr@denx.de> + * + * Based on original driver from Freescale Semiconductor + * written by John Rigby <jrigby@freescale.com> on basis + * of drivers/mtd/nand/mxc_nand.c. Reworked and extended + * Piotr Ziecik <kosmo@semihalf.com>. + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <malloc.h> + +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/nand_ecc.h> +#include <linux/compat.h> + +#include <asm/errno.h> +#include <asm/io.h> +#include <asm/processor.h> +#include <nand.h> + +#define DRV_NAME		"mpc5121_nfc" + +/* Timeouts */ +#define NFC_RESET_TIMEOUT	1000	/* 1 ms */ +#define NFC_TIMEOUT		2000	/* 2000 us */ + +/* Addresses for NFC MAIN RAM BUFFER areas */ +#define NFC_MAIN_AREA(n)	((n) *  0x200) + +/* Addresses for NFC SPARE BUFFER areas */ +#define NFC_SPARE_BUFFERS	8 +#define NFC_SPARE_LEN		0x40 +#define NFC_SPARE_AREA(n)	(0x1000 + ((n) * NFC_SPARE_LEN)) + +/* MPC5121 NFC registers */ +#define NFC_BUF_ADDR		0x1E04 +#define NFC_FLASH_ADDR		0x1E06 +#define NFC_FLASH_CMD		0x1E08 +#define NFC_CONFIG		0x1E0A +#define NFC_ECC_STATUS1		0x1E0C +#define NFC_ECC_STATUS2		0x1E0E +#define NFC_SPAS		0x1E10 +#define NFC_WRPROT		0x1E12 +#define NFC_NF_WRPRST		0x1E18 +#define NFC_CONFIG1		0x1E1A +#define NFC_CONFIG2		0x1E1C +#define NFC_UNLOCKSTART_BLK0	0x1E20 +#define NFC_UNLOCKEND_BLK0	0x1E22 +#define NFC_UNLOCKSTART_BLK1	0x1E24 +#define NFC_UNLOCKEND_BLK1	0x1E26 +#define NFC_UNLOCKSTART_BLK2	0x1E28 +#define NFC_UNLOCKEND_BLK2	0x1E2A +#define NFC_UNLOCKSTART_BLK3	0x1E2C +#define NFC_UNLOCKEND_BLK3	0x1E2E + +/* Bit 
Definitions: NFC_BUF_ADDR */ +#define NFC_RBA_MASK		(7 << 0) +#define NFC_ACTIVE_CS_SHIFT	5 +#define NFC_ACTIVE_CS_MASK	(3 << NFC_ACTIVE_CS_SHIFT) + +/* Bit Definitions: NFC_CONFIG */ +#define NFC_BLS_UNLOCKED	(1 << 1) + +/* Bit Definitions: NFC_CONFIG1 */ +#define NFC_ECC_4BIT		(1 << 0) +#define NFC_FULL_PAGE_DMA	(1 << 1) +#define NFC_SPARE_ONLY		(1 << 2) +#define NFC_ECC_ENABLE		(1 << 3) +#define NFC_INT_MASK		(1 << 4) +#define NFC_BIG_ENDIAN		(1 << 5) +#define NFC_RESET		(1 << 6) +#define NFC_CE			(1 << 7) +#define NFC_ONE_CYCLE		(1 << 8) +#define NFC_PPB_32		(0 << 9) +#define NFC_PPB_64		(1 << 9) +#define NFC_PPB_128		(2 << 9) +#define NFC_PPB_256		(3 << 9) +#define NFC_PPB_MASK		(3 << 9) +#define NFC_FULL_PAGE_INT	(1 << 11) + +/* Bit Definitions: NFC_CONFIG2 */ +#define NFC_COMMAND		(1 << 0) +#define NFC_ADDRESS		(1 << 1) +#define NFC_INPUT		(1 << 2) +#define NFC_OUTPUT		(1 << 3) +#define NFC_ID			(1 << 4) +#define NFC_STATUS		(1 << 5) +#define NFC_CMD_FAIL		(1 << 15) +#define NFC_INT			(1 << 15) + +/* Bit Definitions: NFC_WRPROT */ +#define NFC_WPC_LOCK_TIGHT	(1 << 0) +#define NFC_WPC_LOCK		(1 << 1) +#define NFC_WPC_UNLOCK		(1 << 2) + +struct mpc5121_nfc_prv { +	struct mtd_info mtd; +	struct nand_chip chip; +	int irq; +	void __iomem *regs; +	struct clk *clk; +	uint column; +	int spareonly; +	int chipsel; +}; + +int mpc5121_nfc_chip = 0; + +static void mpc5121_nfc_done(struct mtd_info *mtd); + +/* Read NFC register */ +static inline u16 nfc_read(struct mtd_info *mtd, uint reg) +{ +	struct nand_chip *chip = mtd->priv; +	struct mpc5121_nfc_prv *prv = chip->priv; + +	return in_be16(prv->regs + reg); +} + +/* Write NFC register */ +static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val) +{ +	struct nand_chip *chip = mtd->priv; +	struct mpc5121_nfc_prv *prv = chip->priv; + +	out_be16(prv->regs + reg, val); +} + +/* Set bits in NFC register */ +static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits) +{ +	nfc_write(mtd, reg, nfc_read(mtd, 
reg) | bits); +} + +/* Clear bits in NFC register */ +static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits) +{ +	nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits); +} + +/* Invoke address cycle */ +static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr) +{ +	nfc_write(mtd, NFC_FLASH_ADDR, addr); +	nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS); +	mpc5121_nfc_done(mtd); +} + +/* Invoke command cycle */ +static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd) +{ +	nfc_write(mtd, NFC_FLASH_CMD, cmd); +	nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND); +	mpc5121_nfc_done(mtd); +} + +/* Send data from NFC buffers to NAND flash */ +static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd) +{ +	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); +	nfc_write(mtd, NFC_CONFIG2, NFC_INPUT); +	mpc5121_nfc_done(mtd); +} + +/* Receive data from NAND flash */ +static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd) +{ +	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); +	nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT); +	mpc5121_nfc_done(mtd); +} + +/* Receive ID from NAND flash */ +static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd) +{ +	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); +	nfc_write(mtd, NFC_CONFIG2, NFC_ID); +	mpc5121_nfc_done(mtd); +} + +/* Receive status from NAND flash */ +static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd) +{ +	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK); +	nfc_write(mtd, NFC_CONFIG2, NFC_STATUS); +	mpc5121_nfc_done(mtd); +} + +static void mpc5121_nfc_done(struct mtd_info *mtd) +{ +	int max_retries = NFC_TIMEOUT; + +	while (1) { +		max_retries--; +		if (nfc_read(mtd, NFC_CONFIG2) & NFC_INT) +			break; +		udelay(1); +	} + +	if (max_retries <= 0) +		printk(KERN_WARNING DRV_NAME +		       ": Timeout while waiting for completion.\n"); +} + +/* Do address cycle(s) */ +static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page) +{ +	struct nand_chip *chip = mtd->priv; +	
u32 pagemask = chip->pagemask; + +	if (column != -1) { +		mpc5121_nfc_send_addr(mtd, column); +		if (mtd->writesize > 512) +			mpc5121_nfc_send_addr(mtd, column >> 8); +	} + +	if (page != -1) { +		do { +			mpc5121_nfc_send_addr(mtd, page & 0xFF); +			page >>= 8; +			pagemask >>= 8; +		} while (pagemask); +	} +} + +/* Control chip select signals */ + +/* + * Selecting the active device: + * + * This is different than the linux version. Switching between chips + * is done via board_nand_select_device(). The Linux select_chip + * function used here in U-Boot has only 2 valid chip numbers: + * 	0 select + * 	-1 deselect + */ + +/* + * Implement it as a weak default, so that boards with a specific + * chip-select routine can use their own function. + */ +void __mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip) +{ +	if (chip < 0) { +		nfc_clear(mtd, NFC_CONFIG1, NFC_CE); +		return; +	} + +	nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK); +	nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) & +		NFC_ACTIVE_CS_MASK); +	nfc_set(mtd, NFC_CONFIG1, NFC_CE); +} +void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip) +	__attribute__((weak, alias("__mpc5121_nfc_select_chip"))); + +void board_nand_select_device(struct nand_chip *nand, int chip) +{ +	/* +	 * Only save this chip number in global variable here. This +	 * will be used later in mpc5121_nfc_select_chip(). +	 */ +	mpc5121_nfc_chip = chip; +} + +/* Read NAND Ready/Busy signal */ +static int mpc5121_nfc_dev_ready(struct mtd_info *mtd) +{ +	/* +	 * NFC handles ready/busy signal internally. Therefore, this function +	 * always returns status as ready. +	 */ +	return 1; +} + +/* Write command to NAND flash */ +static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command, +				int column, int page) +{ +	struct nand_chip *chip = mtd->priv; +	struct mpc5121_nfc_prv *prv = chip->priv; + +	prv->column = (column >= 0) ? 
column : 0; +	prv->spareonly = 0; + +	switch (command) { +	case NAND_CMD_PAGEPROG: +		mpc5121_nfc_send_prog_page(mtd); +		break; +		/* +		 * NFC does not support sub-page reads and writes, +		 * so emulate them using full page transfers. +		 */ +	case NAND_CMD_READ0: +		column = 0; +		break; + +	case NAND_CMD_READ1: +		prv->column += 256; +		command = NAND_CMD_READ0; +		column = 0; +		break; + +	case NAND_CMD_READOOB: +		prv->spareonly = 1; +		command = NAND_CMD_READ0; +		column = 0; +		break; + +	case NAND_CMD_SEQIN: +		mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page); +		column = 0; +		break; + +	case NAND_CMD_ERASE1: +	case NAND_CMD_ERASE2: +	case NAND_CMD_READID: +	case NAND_CMD_STATUS: +	case NAND_CMD_RESET: +		break; + +	default: +		return; +	} + +	mpc5121_nfc_send_cmd(mtd, command); +	mpc5121_nfc_addr_cycle(mtd, column, page); + +	switch (command) { +	case NAND_CMD_READ0: +		if (mtd->writesize > 512) +			mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART); +		mpc5121_nfc_send_read_page(mtd); +		break; + +	case NAND_CMD_READID: +		mpc5121_nfc_send_read_id(mtd); +		break; + +	case NAND_CMD_STATUS: +		mpc5121_nfc_send_read_status(mtd); +		if (chip->options & NAND_BUSWIDTH_16) +			prv->column = 1; +		else +			prv->column = 0; +		break; +	} +} + +/* Copy data from/to NFC spare buffers. */ +static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset, +				   u8 * buffer, uint size, int wr) +{ +	struct nand_chip *nand = mtd->priv; +	struct mpc5121_nfc_prv *prv = nand->priv; +	uint o, s, sbsize, blksize; + +	/* +	 * NAND spare area is available through NFC spare buffers. +	 * The NFC divides spare area into (page_size / 512) chunks. +	 * Each chunk is placed into separate spare memory area, using +	 * first (spare_size / num_of_chunks) bytes of the buffer. 
+	 * +	 * For NAND device in which the spare area is not divided fully +	 * by the number of chunks, number of used bytes in each spare +	 * buffer is rounded down to the nearest even number of bytes, +	 * and all remaining bytes are added to the last used spare area. +	 * +	 * For more information read section 26.6.10 of MPC5121e +	 * Microcontroller Reference Manual, Rev. 3. +	 */ + +	/* Calculate number of valid bytes in each spare buffer */ +	sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1; + +	while (size) { +		/* Calculate spare buffer number */ +		s = offset / sbsize; +		if (s > NFC_SPARE_BUFFERS - 1) +			s = NFC_SPARE_BUFFERS - 1; + +		/* +		 * Calculate offset to requested data block in selected spare +		 * buffer and its size. +		 */ +		o = offset - (s * sbsize); +		blksize = min(sbsize - o, size); + +		if (wr) +			memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o, +				    buffer, blksize); +		else +			memcpy_fromio(buffer, +				      prv->regs + NFC_SPARE_AREA(s) + o, +				      blksize); + +		buffer += blksize; +		offset += blksize; +		size -= blksize; +	}; +} + +/* Copy data from/to NFC main and spare buffers */ +static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char * buf, int len, +				 int wr) +{ +	struct nand_chip *chip = mtd->priv; +	struct mpc5121_nfc_prv *prv = chip->priv; +	uint c = prv->column; +	uint l; + +	/* Handle spare area access */ +	if (prv->spareonly || c >= mtd->writesize) { +		/* Calculate offset from beginning of spare area */ +		if (c >= mtd->writesize) +			c -= mtd->writesize; + +		prv->column += len; +		mpc5121_nfc_copy_spare(mtd, c, buf, len, wr); +		return; +	} + +	/* +	 * Handle main area access - limit copy length to prevent +	 * crossing main/spare boundary. 
+	 */ +	l = min((uint) len, mtd->writesize - c); +	prv->column += l; + +	if (wr) +		memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l); +	else +		memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l); + +	/* Handle crossing main/spare boundary */ +	if (l != len) { +		buf += l; +		len -= l; +		mpc5121_nfc_buf_copy(mtd, buf, len, wr); +	} +} + +/* Read data from NFC buffers */ +static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char * buf, int len) +{ +	mpc5121_nfc_buf_copy(mtd, buf, len, 0); +} + +/* Write data to NFC buffers */ +static void mpc5121_nfc_write_buf(struct mtd_info *mtd, +				  const u_char * buf, int len) +{ +	mpc5121_nfc_buf_copy(mtd, (u_char *) buf, len, 1); +} + +/* Compare buffer with NAND flash */ +static int mpc5121_nfc_verify_buf(struct mtd_info *mtd, +				  const u_char * buf, int len) +{ +	u_char tmp[256]; +	uint bsize; + +	while (len) { +		bsize = min(len, 256); +		mpc5121_nfc_read_buf(mtd, tmp, bsize); + +		if (memcmp(buf, tmp, bsize)) +			return 1; + +		buf += bsize; +		len -= bsize; +	} + +	return 0; +} + +/* Read byte from NFC buffers */ +static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd) +{ +	u8 tmp; + +	mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp)); + +	return tmp; +} + +/* Read word from NFC buffers */ +static u16 mpc5121_nfc_read_word(struct mtd_info *mtd) +{ +	u16 tmp; + +	mpc5121_nfc_read_buf(mtd, (u_char *) & tmp, sizeof(tmp)); + +	return tmp; +} + +/* + * Read NFC configuration from Reset Config Word + * + * NFC is configured during reset in basis of information stored + * in Reset Config Word. There is no other way to set NAND block + * size, spare size and bus width. 
+ */ +static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd) +{ +	immap_t *im = (immap_t *)CONFIG_SYS_IMMR; +	struct nand_chip *chip = mtd->priv; +	uint rcw_pagesize = 0; +	uint rcw_sparesize = 0; +	uint rcw_width; +	uint rcwh; +	uint romloc, ps; + +	rcwh = in_be32(&(im->reset.rcwh)); + +	/* Bit 6: NFC bus width */ +	rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1; + +	/* Bit 7: NFC Page/Spare size */ +	ps = (rcwh >> 7) & 0x1; + +	/* Bits [22:21]: ROM Location */ +	romloc = (rcwh >> 21) & 0x3; + +	/* Decode RCW bits */ +	switch ((ps << 2) | romloc) { +	case 0x00: +	case 0x01: +		rcw_pagesize = 512; +		rcw_sparesize = 16; +		break; +	case 0x02: +	case 0x03: +		rcw_pagesize = 4096; +		rcw_sparesize = 128; +		break; +	case 0x04: +	case 0x05: +		rcw_pagesize = 2048; +		rcw_sparesize = 64; +		break; +	case 0x06: +	case 0x07: +		rcw_pagesize = 4096; +		rcw_sparesize = 218; +		break; +	} + +	mtd->writesize = rcw_pagesize; +	mtd->oobsize = rcw_sparesize; +	if (rcw_width == 2) +		chip->options |= NAND_BUSWIDTH_16; + +	debug(KERN_NOTICE DRV_NAME ": Configured for " +	      "%u-bit NAND, page size %u with %u spare.\n", +	      rcw_width * 8, rcw_pagesize, rcw_sparesize); +	return 0; +} + +int board_nand_init(struct nand_chip *chip) +{ +	struct mpc5121_nfc_prv *prv; +	struct mtd_info *mtd; +	int resettime = 0; +	int retval = 0; +	int rev; +	static int chip_nr = 0; + +	/* +	 * Check SoC revision. This driver supports only NFC +	 * in MPC5121 revision 2. 
+	 */ +	rev = (mfspr(SPRN_SVR) >> 4) & 0xF; +	if (rev != 2) { +		printk(KERN_ERR DRV_NAME +		       ": SoC revision %u is not supported!\n", rev); +		return -ENXIO; +	} + +	prv = malloc(sizeof(*prv)); +	if (!prv) { +		printk(KERN_ERR DRV_NAME ": Memory exhausted!\n"); +		return -ENOMEM; +	} + +	mtd = &nand_info[chip_nr++]; +	mtd->priv = chip; +	chip->priv = prv; + +	/* Read NFC configuration from Reset Config Word */ +	retval = mpc5121_nfc_read_hw_config(mtd); +	if (retval) { +		printk(KERN_ERR DRV_NAME ": Unable to read NFC config!\n"); +		return retval; +	} + +	prv->regs = (void __iomem *)CONFIG_SYS_NAND_BASE; +	chip->dev_ready = mpc5121_nfc_dev_ready; +	chip->cmdfunc = mpc5121_nfc_command; +	chip->read_byte = mpc5121_nfc_read_byte; +	chip->read_word = mpc5121_nfc_read_word; +	chip->read_buf = mpc5121_nfc_read_buf; +	chip->write_buf = mpc5121_nfc_write_buf; +	chip->verify_buf = mpc5121_nfc_verify_buf; +	chip->select_chip = mpc5121_nfc_select_chip; +	chip->bbt_options = NAND_BBT_USE_FLASH; +	chip->ecc.mode = NAND_ECC_SOFT; + +	/* Reset NAND Flash controller */ +	nfc_set(mtd, NFC_CONFIG1, NFC_RESET); +	while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) { +		if (resettime++ >= NFC_RESET_TIMEOUT) { +			printk(KERN_ERR DRV_NAME +			       ": Timeout while resetting NFC!\n"); +			retval = -EINVAL; +			goto error; +		} + +		udelay(1); +	} + +	/* Enable write to NFC memory */ +	nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED); + +	/* Enable write to all NAND pages */ +	nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000); +	nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF); +	nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK); + +	/* +	 * Setup NFC: +	 *      - Big Endian transfers, +	 *      - Interrupt after full page read/write. 
+	 */ +	nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK | +		  NFC_FULL_PAGE_INT); + +	/* Set spare area size */ +	nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1); + +	/* Detect NAND chips */ +	if (nand_scan(mtd, 1)) { +		printk(KERN_ERR DRV_NAME ": NAND Flash not found !\n"); +		retval = -ENXIO; +		goto error; +	} + +	/* Set erase block size */ +	switch (mtd->erasesize / mtd->writesize) { +	case 32: +		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32); +		break; + +	case 64: +		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64); +		break; + +	case 128: +		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128); +		break; + +	case 256: +		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256); +		break; + +	default: +		printk(KERN_ERR DRV_NAME ": Unsupported NAND flash!\n"); +		retval = -ENXIO; +		goto error; +	} + +	return 0; +error: +	return retval; +} diff --git a/roms/u-boot/drivers/mtd/nand/mxc_nand.c b/roms/u-boot/drivers/mtd/nand/mxc_nand.c new file mode 100644 index 00000000..ed0ca3ac --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/mxc_nand.c @@ -0,0 +1,1342 @@ +/* + * Copyright 2004-2007 Freescale Semiconductor, Inc. 
+ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de + * Copyright 2009 Ilya Yanok, <yanok@emcraft.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <nand.h> +#include <linux/err.h> +#include <asm/io.h> +#if defined(CONFIG_MX25) || defined(CONFIG_MX27) || defined(CONFIG_MX35) || \ +	defined(CONFIG_MX51) || defined(CONFIG_MX53) +#include <asm/arch/imx-regs.h> +#endif +#include "mxc_nand.h" + +#define DRIVER_NAME "mxc_nand" + +struct mxc_nand_host { +	struct mtd_info			mtd; +	struct nand_chip		*nand; + +	struct mxc_nand_regs __iomem	*regs; +#ifdef MXC_NFC_V3_2 +	struct mxc_nand_ip_regs __iomem	*ip_regs; +#endif +	int				spare_only; +	int				status_request; +	int				pagesize_2k; +	int				clk_act; +	uint16_t			col_addr; +	unsigned int			page_addr; +}; + +static struct mxc_nand_host mxc_host; +static struct mxc_nand_host *host = &mxc_host; + +/* Define delays in microsec for NAND device operations */ +#define TROP_US_DELAY   2000 +/* Macros to get byte and bit positions of ECC */ +#define COLPOS(x)  ((x) >> 3) +#define BITPOS(x) ((x) & 0xf) + +/* Define single bit Error positions in Main & Spare area */ +#define MAIN_SINGLEBIT_ERROR 0x4 +#define SPARE_SINGLEBIT_ERROR 0x1 + +/* OOB placement block for use with hardware ecc generation */ +#if defined(MXC_NFC_V1) +#ifndef CONFIG_SYS_NAND_LARGEPAGE +static struct nand_ecclayout nand_hw_eccoob = { +	.eccbytes = 5, +	.eccpos = {6, 7, 8, 9, 10}, +	.oobfree = { {0, 5}, {11, 5}, } +}; +#else +static struct nand_ecclayout nand_hw_eccoob2k = { +	.eccbytes = 20, +	.eccpos = { +		6, 7, 8, 9, 10, +		22, 23, 24, 25, 26, +		38, 39, 40, 41, 42, +		54, 55, 56, 57, 58, +	}, +	.oobfree = { {2, 4}, {11, 11}, {27, 11}, {43, 11}, {59, 5} }, +}; +#endif +#elif defined(MXC_NFC_V2_1) || defined(MXC_NFC_V3_2) +#ifndef CONFIG_SYS_NAND_LARGEPAGE +static struct nand_ecclayout nand_hw_eccoob = { +	.eccbytes = 9, +	.eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15}, +	.oobfree = { {2, 5} } +}; +#else +static struct 
nand_ecclayout nand_hw_eccoob2k = { +	.eccbytes = 36, +	.eccpos = { +		7, 8, 9, 10, 11, 12, 13, 14, 15, +		23, 24, 25, 26, 27, 28, 29, 30, 31, +		39, 40, 41, 42, 43, 44, 45, 46, 47, +		55, 56, 57, 58, 59, 60, 61, 62, 63, +	}, +	.oobfree = { {2, 5}, {16, 7}, {32, 7}, {48, 7} }, +}; +#endif +#endif + +static int is_16bit_nand(void) +{ +#if defined(CONFIG_SYS_NAND_BUSWIDTH_16BIT) +	return 1; +#else +	return 0; +#endif +} + +static uint32_t *mxc_nand_memcpy32(uint32_t *dest, uint32_t *source, size_t size) +{ +	uint32_t *d = dest; + +	size >>= 2; +	while (size--) +		__raw_writel(__raw_readl(source++), d++); +	return dest; +} + +/* + * This function polls the NANDFC to wait for the basic operation to + * complete by checking the INT bit. + */ +static void wait_op_done(struct mxc_nand_host *host, int max_retries, +				uint16_t param) +{ +	uint32_t tmp; + +	while (max_retries-- > 0) { +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +		tmp = readnfc(&host->regs->config2); +		if (tmp & NFC_V1_V2_CONFIG2_INT) { +			tmp &= ~NFC_V1_V2_CONFIG2_INT; +			writenfc(tmp, &host->regs->config2); +#elif defined(MXC_NFC_V3_2) +		tmp = readnfc(&host->ip_regs->ipc); +		if (tmp & NFC_V3_IPC_INT) { +			tmp &= ~NFC_V3_IPC_INT; +			writenfc(tmp, &host->ip_regs->ipc); +#endif +			break; +		} +		udelay(1); +	} +	if (max_retries < 0) { +		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s(%d): INT not set\n", +				__func__, param); +	} +} + +/* + * This function issues the specified command to the NAND device and + * waits for completion. + */ +static void send_cmd(struct mxc_nand_host *host, uint16_t cmd) +{ +	MTDDEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x)\n", cmd); + +	writenfc(cmd, &host->regs->flash_cmd); +	writenfc(NFC_CMD, &host->regs->operation); + +	/* Wait for operation to complete */ +	wait_op_done(host, TROP_US_DELAY, cmd); +} + +/* + * This function sends an address (or partial address) to the + * NAND device. The address is used to select the source/destination for + * a NAND command. 
+ */ +static void send_addr(struct mxc_nand_host *host, uint16_t addr) +{ +	MTDDEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x)\n", addr); + +	writenfc(addr, &host->regs->flash_addr); +	writenfc(NFC_ADDR, &host->regs->operation); + +	/* Wait for operation to complete */ +	wait_op_done(host, TROP_US_DELAY, addr); +} + +/* + * This function requests the NANDFC to initiate the transfer + * of data currently in the NANDFC RAM buffer to the NAND device. + */ +static void send_prog_page(struct mxc_nand_host *host, uint8_t buf_id, +			int spare_only) +{ +	if (spare_only) +		MTDDEBUG(MTD_DEBUG_LEVEL1, "send_prog_page (%d)\n", spare_only); + +	if (is_mxc_nfc_21() || is_mxc_nfc_32()) { +		int i; +		/* +		 *  The controller copies the 64 bytes of spare data from +		 *  the first 16 bytes of each of the 4 64 byte spare buffers. +		 *  Copy the contiguous data starting in spare_area[0] to +		 *  the four spare area buffers. +		 */ +		for (i = 1; i < 4; i++) { +			void __iomem *src = &host->regs->spare_area[0][i * 16]; +			void __iomem *dst = &host->regs->spare_area[i][0]; + +			mxc_nand_memcpy32(dst, src, 16); +		} +	} + +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +	writenfc(buf_id, &host->regs->buf_addr); +#elif defined(MXC_NFC_V3_2) +	uint32_t tmp = readnfc(&host->regs->config1); +	tmp &= ~NFC_V3_CONFIG1_RBA_MASK; +	tmp |= NFC_V3_CONFIG1_RBA(buf_id); +	writenfc(tmp, &host->regs->config1); +#endif + +	/* Configure spare or page+spare access */ +	if (!host->pagesize_2k) { +		uint32_t config1 = readnfc(&host->regs->config1); +		if (spare_only) +			config1 |= NFC_CONFIG1_SP_EN; +		else +			config1 &= ~NFC_CONFIG1_SP_EN; +		writenfc(config1, &host->regs->config1); +	} + +	writenfc(NFC_INPUT, &host->regs->operation); + +	/* Wait for operation to complete */ +	wait_op_done(host, TROP_US_DELAY, spare_only); +} + +/* + * Requests NANDFC to initiate the transfer of data from the + * NAND device into in the NANDFC ram buffer. 
+ */ +static void send_read_page(struct mxc_nand_host *host, uint8_t buf_id, +		int spare_only) +{ +	MTDDEBUG(MTD_DEBUG_LEVEL3, "send_read_page (%d)\n", spare_only); + +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +	writenfc(buf_id, &host->regs->buf_addr); +#elif defined(MXC_NFC_V3_2) +	uint32_t tmp = readnfc(&host->regs->config1); +	tmp &= ~NFC_V3_CONFIG1_RBA_MASK; +	tmp |= NFC_V3_CONFIG1_RBA(buf_id); +	writenfc(tmp, &host->regs->config1); +#endif + +	/* Configure spare or page+spare access */ +	if (!host->pagesize_2k) { +		uint32_t config1 = readnfc(&host->regs->config1); +		if (spare_only) +			config1 |= NFC_CONFIG1_SP_EN; +		else +			config1 &= ~NFC_CONFIG1_SP_EN; +		writenfc(config1, &host->regs->config1); +	} + +	writenfc(NFC_OUTPUT, &host->regs->operation); + +	/* Wait for operation to complete */ +	wait_op_done(host, TROP_US_DELAY, spare_only); + +	if (is_mxc_nfc_21() || is_mxc_nfc_32()) { +		int i; + +		/* +		 *  The controller copies the 64 bytes of spare data to +		 *  the first 16 bytes of each of the 4 spare buffers. +		 *  Make the data contiguous starting in spare_area[0]. +		 */ +		for (i = 1; i < 4; i++) { +			void __iomem *src = &host->regs->spare_area[i][0]; +			void __iomem *dst = &host->regs->spare_area[0][i * 16]; + +			mxc_nand_memcpy32(dst, src, 16); +		} +	} +} + +/* Request the NANDFC to perform a read of the NAND device ID. 
*/ +static void send_read_id(struct mxc_nand_host *host) +{ +	uint32_t tmp; + +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +	/* NANDFC buffer 0 is used for device ID output */ +	writenfc(0x0, &host->regs->buf_addr); +#elif defined(MXC_NFC_V3_2) +	tmp = readnfc(&host->regs->config1); +	tmp &= ~NFC_V3_CONFIG1_RBA_MASK; +	writenfc(tmp, &host->regs->config1); +#endif + +	/* Read ID into main buffer */ +	tmp = readnfc(&host->regs->config1); +	tmp &= ~NFC_CONFIG1_SP_EN; +	writenfc(tmp, &host->regs->config1); + +	writenfc(NFC_ID, &host->regs->operation); + +	/* Wait for operation to complete */ +	wait_op_done(host, TROP_US_DELAY, 0); +} + +/* + * This function requests the NANDFC to perform a read of the + * NAND device status and returns the current status. + */ +static uint16_t get_dev_status(struct mxc_nand_host *host) +{ +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +	void __iomem *main_buf = host->regs->main_area[1]; +	uint32_t store; +#endif +	uint32_t ret, tmp; +	/* Issue status request to NAND device */ + +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +	/* store the main area1 first word, later do recovery */ +	store = readl(main_buf); +	/* NANDFC buffer 1 is used for device status */ +	writenfc(1, &host->regs->buf_addr); +#endif + +	/* Read status into main buffer */ +	tmp = readnfc(&host->regs->config1); +	tmp &= ~NFC_CONFIG1_SP_EN; +	writenfc(tmp, &host->regs->config1); + +	writenfc(NFC_STATUS, &host->regs->operation); + +	/* Wait for operation to complete */ +	wait_op_done(host, TROP_US_DELAY, 0); + +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +	/* +	 *  Status is placed in first word of main buffer +	 * get status, then recovery area 1 data +	 */ +	ret = readw(main_buf); +	writel(store, main_buf); +#elif defined(MXC_NFC_V3_2) +	ret = readnfc(&host->regs->config1) >> 16; +#endif + +	return ret; +} + +/* This function is used by upper layer to checks if device is ready */ +static int mxc_nand_dev_ready(struct mtd_info *mtd) +{ +	/* +	 * NFC 
handles R/B internally. Therefore, this function +	 * always returns status as ready. +	 */ +	return 1; +} + +static void _mxc_nand_enable_hwecc(struct mtd_info *mtd, int on) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct mxc_nand_host *host = nand_chip->priv; +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +	uint16_t tmp = readnfc(&host->regs->config1); + +	if (on) +		tmp |= NFC_V1_V2_CONFIG1_ECC_EN; +	else +		tmp &= ~NFC_V1_V2_CONFIG1_ECC_EN; +	writenfc(tmp, &host->regs->config1); +#elif defined(MXC_NFC_V3_2) +	uint32_t tmp = readnfc(&host->ip_regs->config2); + +	if (on) +		tmp |= NFC_V3_CONFIG2_ECC_EN; +	else +		tmp &= ~NFC_V3_CONFIG2_ECC_EN; +	writenfc(tmp, &host->ip_regs->config2); +#endif +} + +#ifdef CONFIG_MXC_NAND_HWECC +static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode) +{ +	/* +	 * If HW ECC is enabled, we turn it on during init. There is +	 * no need to enable again here. +	 */ +} + +#if defined(MXC_NFC_V2_1) || defined(MXC_NFC_V3_2) +static int mxc_nand_read_oob_syndrome(struct mtd_info *mtd, +				      struct nand_chip *chip, +				      int page) +{ +	struct mxc_nand_host *host = chip->priv; +	uint8_t *buf = chip->oob_poi; +	int length = mtd->oobsize; +	int eccpitch = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; +	uint8_t *bufpoi = buf; +	int i, toread; + +	MTDDEBUG(MTD_DEBUG_LEVEL0, +			"%s: Reading OOB area of page %u to oob %p\n", +			 __func__, page, buf); + +	chip->cmdfunc(mtd, NAND_CMD_READOOB, mtd->writesize, page); +	for (i = 0; i < chip->ecc.steps; i++) { +		toread = min_t(int, length, chip->ecc.prepad); +		if (toread) { +			chip->read_buf(mtd, bufpoi, toread); +			bufpoi += toread; +			length -= toread; +		} +		bufpoi += chip->ecc.bytes; +		host->col_addr += chip->ecc.bytes; +		length -= chip->ecc.bytes; + +		toread = min_t(int, length, chip->ecc.postpad); +		if (toread) { +			chip->read_buf(mtd, bufpoi, toread); +			bufpoi += toread; +			length -= toread; +		} +	} +	if (length > 0) +		chip->read_buf(mtd, 
bufpoi, length); + +	_mxc_nand_enable_hwecc(mtd, 0); +	chip->cmdfunc(mtd, NAND_CMD_READOOB, +			mtd->writesize + chip->ecc.prepad, page); +	bufpoi = buf + chip->ecc.prepad; +	length = mtd->oobsize - chip->ecc.prepad; +	for (i = 0; i < chip->ecc.steps; i++) { +		toread = min_t(int, length, chip->ecc.bytes); +		chip->read_buf(mtd, bufpoi, toread); +		bufpoi += eccpitch; +		length -= eccpitch; +		host->col_addr += chip->ecc.postpad + chip->ecc.prepad; +	} +	_mxc_nand_enable_hwecc(mtd, 1); +	return 1; +} + +static int mxc_nand_read_page_raw_syndrome(struct mtd_info *mtd, +					   struct nand_chip *chip, +					   uint8_t *buf, +					   int oob_required, +					   int page) +{ +	struct mxc_nand_host *host = chip->priv; +	int eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccpitch = eccbytes + chip->ecc.prepad + chip->ecc.postpad; +	uint8_t *oob = chip->oob_poi; +	int steps, size; +	int n; + +	_mxc_nand_enable_hwecc(mtd, 0); +	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); + +	for (n = 0, steps = chip->ecc.steps; steps > 0; n++, steps--) { +		host->col_addr = n * eccsize; +		chip->read_buf(mtd, buf, eccsize); +		buf += eccsize; + +		host->col_addr = mtd->writesize + n * eccpitch; +		if (chip->ecc.prepad) { +			chip->read_buf(mtd, oob, chip->ecc.prepad); +			oob += chip->ecc.prepad; +		} + +		chip->read_buf(mtd, oob, eccbytes); +		oob += eccbytes; + +		if (chip->ecc.postpad) { +			chip->read_buf(mtd, oob, chip->ecc.postpad); +			oob += chip->ecc.postpad; +		} +	} + +	size = mtd->oobsize - (oob - chip->oob_poi); +	if (size) +		chip->read_buf(mtd, oob, size); +	_mxc_nand_enable_hwecc(mtd, 1); + +	return 0; +} + +static int mxc_nand_read_page_syndrome(struct mtd_info *mtd, +				       struct nand_chip *chip, +				       uint8_t *buf, +				       int oob_required, +				       int page) +{ +	struct mxc_nand_host *host = chip->priv; +	int n, eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccpitch = eccbytes + chip->ecc.prepad + 
chip->ecc.postpad; +	int eccsteps = chip->ecc.steps; +	uint8_t *p = buf; +	uint8_t *oob = chip->oob_poi; + +	MTDDEBUG(MTD_DEBUG_LEVEL1, "Reading page %u to buf %p oob %p\n", +	      page, buf, oob); + +	/* first read the data area and the available portion of OOB */ +	for (n = 0; eccsteps; n++, eccsteps--, p += eccsize) { +		int stat; + +		host->col_addr = n * eccsize; + +		chip->read_buf(mtd, p, eccsize); + +		host->col_addr = mtd->writesize + n * eccpitch; + +		if (chip->ecc.prepad) { +			chip->read_buf(mtd, oob, chip->ecc.prepad); +			oob += chip->ecc.prepad; +		} + +		stat = chip->ecc.correct(mtd, p, oob, NULL); + +		if (stat < 0) +			mtd->ecc_stats.failed++; +		else +			mtd->ecc_stats.corrected += stat; +		oob += eccbytes; + +		if (chip->ecc.postpad) { +			chip->read_buf(mtd, oob, chip->ecc.postpad); +			oob += chip->ecc.postpad; +		} +	} + +	/* Calculate remaining oob bytes */ +	n = mtd->oobsize - (oob - chip->oob_poi); +	if (n) +		chip->read_buf(mtd, oob, n); + +	/* Then switch ECC off and read the OOB area to get the ECC code */ +	_mxc_nand_enable_hwecc(mtd, 0); +	chip->cmdfunc(mtd, NAND_CMD_READOOB, mtd->writesize, page); +	eccsteps = chip->ecc.steps; +	oob = chip->oob_poi + chip->ecc.prepad; +	for (n = 0; eccsteps; n++, eccsteps--, p += eccsize) { +		host->col_addr = mtd->writesize + +				 n * eccpitch + +				 chip->ecc.prepad; +		chip->read_buf(mtd, oob, eccbytes); +		oob += eccbytes + chip->ecc.postpad; +	} +	_mxc_nand_enable_hwecc(mtd, 1); +	return 0; +} + +static int mxc_nand_write_oob_syndrome(struct mtd_info *mtd, +				       struct nand_chip *chip, int page) +{ +	struct mxc_nand_host *host = chip->priv; +	int eccpitch = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; +	int length = mtd->oobsize; +	int i, len, status, steps = chip->ecc.steps; +	const uint8_t *bufpoi = chip->oob_poi; + +	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page); +	for (i = 0; i < steps; i++) { +		len = min_t(int, length, eccpitch); + +		chip->write_buf(mtd, 
bufpoi, len); +		bufpoi += len; +		length -= len; +		host->col_addr += chip->ecc.prepad + chip->ecc.postpad; +	} +	if (length > 0) +		chip->write_buf(mtd, bufpoi, length); + +	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); +	status = chip->waitfunc(mtd, chip); +	return status & NAND_STATUS_FAIL ? -EIO : 0; +} + +static int mxc_nand_write_page_raw_syndrome(struct mtd_info *mtd, +					     struct nand_chip *chip, +					     const uint8_t *buf, +					     int oob_required) +{ +	struct mxc_nand_host *host = chip->priv; +	int eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccpitch = eccbytes + chip->ecc.prepad + chip->ecc.postpad; +	uint8_t *oob = chip->oob_poi; +	int steps, size; +	int n; + +	for (n = 0, steps = chip->ecc.steps; steps > 0; n++, steps--) { +		host->col_addr = n * eccsize; +		chip->write_buf(mtd, buf, eccsize); +		buf += eccsize; + +		host->col_addr = mtd->writesize + n * eccpitch; + +		if (chip->ecc.prepad) { +			chip->write_buf(mtd, oob, chip->ecc.prepad); +			oob += chip->ecc.prepad; +		} + +		host->col_addr += eccbytes; +		oob += eccbytes; + +		if (chip->ecc.postpad) { +			chip->write_buf(mtd, oob, chip->ecc.postpad); +			oob += chip->ecc.postpad; +		} +	} + +	size = mtd->oobsize - (oob - chip->oob_poi); +	if (size) +		chip->write_buf(mtd, oob, size); +	return 0; +} + +static int mxc_nand_write_page_syndrome(struct mtd_info *mtd, +					 struct nand_chip *chip, +					 const uint8_t *buf, +					 int oob_required) +{ +	struct mxc_nand_host *host = chip->priv; +	int i, n, eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccpitch = eccbytes + chip->ecc.prepad + chip->ecc.postpad; +	int eccsteps = chip->ecc.steps; +	const uint8_t *p = buf; +	uint8_t *oob = chip->oob_poi; + +	chip->ecc.hwctl(mtd, NAND_ECC_WRITE); + +	for (i = n = 0; +	     eccsteps; +	     n++, eccsteps--, i += eccbytes, p += eccsize) { +		host->col_addr = n * eccsize; + +		chip->write_buf(mtd, p, eccsize); + +		host->col_addr = mtd->writesize + n * 
eccpitch; + +		if (chip->ecc.prepad) { +			chip->write_buf(mtd, oob, chip->ecc.prepad); +			oob += chip->ecc.prepad; +		} + +		chip->write_buf(mtd, oob, eccbytes); +		oob += eccbytes; + +		if (chip->ecc.postpad) { +			chip->write_buf(mtd, oob, chip->ecc.postpad); +			oob += chip->ecc.postpad; +		} +	} + +	/* Calculate remaining oob bytes */ +	i = mtd->oobsize - (oob - chip->oob_poi); +	if (i) +		chip->write_buf(mtd, oob, i); +	return 0; +} + +static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat, +				 u_char *read_ecc, u_char *calc_ecc) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct mxc_nand_host *host = nand_chip->priv; +	uint32_t ecc_status = readl(&host->regs->ecc_status_result); +	int subpages = mtd->writesize / nand_chip->subpagesize; +	int pg2blk_shift = nand_chip->phys_erase_shift - +			   nand_chip->page_shift; + +	do { +		if ((ecc_status & 0xf) > 4) { +			static int last_bad = -1; + +			if (last_bad != host->page_addr >> pg2blk_shift) { +				last_bad = host->page_addr >> pg2blk_shift; +				printk(KERN_DEBUG +				       "MXC_NAND: HWECC uncorrectable ECC error" +				       " in block %u page %u subpage %d\n", +				       last_bad, host->page_addr, +				       mtd->writesize / nand_chip->subpagesize +					    - subpages); +			} +			return -1; +		} +		ecc_status >>= 4; +		subpages--; +	} while (subpages > 0); + +	return 0; +} +#else +#define mxc_nand_read_page_syndrome NULL +#define mxc_nand_read_page_raw_syndrome NULL +#define mxc_nand_read_oob_syndrome NULL +#define mxc_nand_write_page_syndrome NULL +#define mxc_nand_write_page_raw_syndrome NULL +#define mxc_nand_write_oob_syndrome NULL + +static int mxc_nand_correct_data(struct mtd_info *mtd, u_char *dat, +				 u_char *read_ecc, u_char *calc_ecc) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct mxc_nand_host *host = nand_chip->priv; + +	/* +	 * 1-Bit errors are automatically corrected in HW.  No need for +	 * additional correction.  
2-Bit errors cannot be corrected by +	 * HW ECC, so we need to return failure +	 */ +	uint16_t ecc_status = readnfc(&host->regs->ecc_status_result); + +	if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { +		MTDDEBUG(MTD_DEBUG_LEVEL0, +		      "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); +		return -1; +	} + +	return 0; +} +#endif + +static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, +				  u_char *ecc_code) +{ +	return 0; +} +#endif + +static u_char mxc_nand_read_byte(struct mtd_info *mtd) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct mxc_nand_host *host = nand_chip->priv; +	uint8_t ret = 0; +	uint16_t col; +	uint16_t __iomem *main_buf = +		(uint16_t __iomem *)host->regs->main_area[0]; +	uint16_t __iomem *spare_buf = +		(uint16_t __iomem *)host->regs->spare_area[0]; +	union { +		uint16_t word; +		uint8_t bytes[2]; +	} nfc_word; + +	/* Check for status request */ +	if (host->status_request) +		return get_dev_status(host) & 0xFF; + +	/* Get column for 16-bit access */ +	col = host->col_addr >> 1; + +	/* If we are accessing the spare region */ +	if (host->spare_only) +		nfc_word.word = readw(&spare_buf[col]); +	else +		nfc_word.word = readw(&main_buf[col]); + +	/* Pick upper/lower byte of word from RAM buffer */ +	ret = nfc_word.bytes[host->col_addr & 0x1]; + +	/* Update saved column address */ +	if (nand_chip->options & NAND_BUSWIDTH_16) +		host->col_addr += 2; +	else +		host->col_addr++; + +	return ret; +} + +static uint16_t mxc_nand_read_word(struct mtd_info *mtd) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct mxc_nand_host *host = nand_chip->priv; +	uint16_t col, ret; +	uint16_t __iomem *p; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, +	      "mxc_nand_read_word(col = %d)\n", host->col_addr); + +	col = host->col_addr; +	/* Adjust saved column address */ +	if (col < mtd->writesize && host->spare_only) +		col += mtd->writesize; + +	if (col < mtd->writesize) { +		p = (uint16_t __iomem *)(host->regs->main_area[0] + +				(col 
>> 1)); +	} else { +		p = (uint16_t __iomem *)(host->regs->spare_area[0] + +				((col - mtd->writesize) >> 1)); +	} + +	if (col & 1) { +		union { +			uint16_t word; +			uint8_t bytes[2]; +		} nfc_word[3]; + +		nfc_word[0].word = readw(p); +		nfc_word[1].word = readw(p + 1); + +		nfc_word[2].bytes[0] = nfc_word[0].bytes[1]; +		nfc_word[2].bytes[1] = nfc_word[1].bytes[0]; + +		ret = nfc_word[2].word; +	} else { +		ret = readw(p); +	} + +	/* Update saved column address */ +	host->col_addr = col + 2; + +	return ret; +} + +/* + * Write data of length len to buffer buf. The data to be + * written on NAND Flash is first copied to RAMbuffer. After the Data Input + * Operation by the NFC, the data is written to NAND Flash + */ +static void mxc_nand_write_buf(struct mtd_info *mtd, +				const u_char *buf, int len) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct mxc_nand_host *host = nand_chip->priv; +	int n, col, i = 0; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, +	      "mxc_nand_write_buf(col = %d, len = %d)\n", host->col_addr, +	      len); + +	col = host->col_addr; + +	/* Adjust saved column address */ +	if (col < mtd->writesize && host->spare_only) +		col += mtd->writesize; + +	n = mtd->writesize + mtd->oobsize - col; +	n = min(len, n); + +	MTDDEBUG(MTD_DEBUG_LEVEL3, +	      "%s:%d: col = %d, n = %d\n", __func__, __LINE__, col, n); + +	while (n > 0) { +		void __iomem *p; + +		if (col < mtd->writesize) { +			p = host->regs->main_area[0] + (col & ~3); +		} else { +			p = host->regs->spare_area[0] - +						mtd->writesize + (col & ~3); +		} + +		MTDDEBUG(MTD_DEBUG_LEVEL3, "%s:%d: p = %p\n", __func__, +		      __LINE__, p); + +		if (((col | (unsigned long)&buf[i]) & 3) || n < 4) { +			union { +				uint32_t word; +				uint8_t bytes[4]; +			} nfc_word; + +			nfc_word.word = readl(p); +			nfc_word.bytes[col & 3] = buf[i++]; +			n--; +			col++; + +			writel(nfc_word.word, p); +		} else { +			int m = mtd->writesize - col; + +			if (col >= mtd->writesize) +				m += mtd->oobsize; + +			m 
= min(n, m) & ~3; + +			MTDDEBUG(MTD_DEBUG_LEVEL3, +			      "%s:%d: n = %d, m = %d, i = %d, col = %d\n", +			      __func__,  __LINE__, n, m, i, col); + +			mxc_nand_memcpy32(p, (uint32_t *)&buf[i], m); +			col += m; +			i += m; +			n -= m; +		} +	} +	/* Update saved column address */ +	host->col_addr = col; +} + +/* + * Read the data buffer from the NAND Flash. To read the data from NAND + * Flash first the data output cycle is initiated by the NFC, which copies + * the data to RAMbuffer. This data of length len is then copied to buffer buf. + */ +static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct mxc_nand_host *host = nand_chip->priv; +	int n, col, i = 0; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, +	      "mxc_nand_read_buf(col = %d, len = %d)\n", host->col_addr, len); + +	col = host->col_addr; + +	/* Adjust saved column address */ +	if (col < mtd->writesize && host->spare_only) +		col += mtd->writesize; + +	n = mtd->writesize + mtd->oobsize - col; +	n = min(len, n); + +	while (n > 0) { +		void __iomem *p; + +		if (col < mtd->writesize) { +			p = host->regs->main_area[0] + (col & ~3); +		} else { +			p = host->regs->spare_area[0] - +					mtd->writesize + (col & ~3); +		} + +		if (((col | (int)&buf[i]) & 3) || n < 4) { +			union { +				uint32_t word; +				uint8_t bytes[4]; +			} nfc_word; + +			nfc_word.word = readl(p); +			buf[i++] = nfc_word.bytes[col & 3]; +			n--; +			col++; +		} else { +			int m = mtd->writesize - col; + +			if (col >= mtd->writesize) +				m += mtd->oobsize; + +			m = min(n, m) & ~3; +			mxc_nand_memcpy32((uint32_t *)&buf[i], p, m); + +			col += m; +			i += m; +			n -= m; +		} +	} +	/* Update saved column address */ +	host->col_addr = col; +} + +/* + * Used by the upper layer to verify the data in NAND Flash + * with the data in the buf. 
+ */ +static int mxc_nand_verify_buf(struct mtd_info *mtd, +				const u_char *buf, int len) +{ +	u_char tmp[256]; +	uint bsize; + +	while (len) { +		bsize = min(len, 256); +		mxc_nand_read_buf(mtd, tmp, bsize); + +		if (memcmp(buf, tmp, bsize)) +			return 1; + +		buf += bsize; +		len -= bsize; +	} + +	return 0; +} + +/* + * This function is used by upper layer for select and + * deselect of the NAND chip + */ +static void mxc_nand_select_chip(struct mtd_info *mtd, int chip) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct mxc_nand_host *host = nand_chip->priv; + +	switch (chip) { +	case -1: +		/* TODO: Disable the NFC clock */ +		if (host->clk_act) +			host->clk_act = 0; +		break; +	case 0: +		/* TODO: Enable the NFC clock */ +		if (!host->clk_act) +			host->clk_act = 1; +		break; + +	default: +		break; +	} +} + +/* + * Used by the upper layer to write command to NAND Flash for + * different operations to be carried out on NAND Flash + */ +void mxc_nand_command(struct mtd_info *mtd, unsigned command, +				int column, int page_addr) +{ +	struct nand_chip *nand_chip = mtd->priv; +	struct mxc_nand_host *host = nand_chip->priv; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, +	      "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", +	      command, column, page_addr); + +	/* Reset command state information */ +	host->status_request = false; + +	/* Command pre-processing step */ +	switch (command) { + +	case NAND_CMD_STATUS: +		host->col_addr = 0; +		host->status_request = true; +		break; + +	case NAND_CMD_READ0: +		host->page_addr = page_addr; +		host->col_addr = column; +		host->spare_only = false; +		break; + +	case NAND_CMD_READOOB: +		host->col_addr = column; +		host->spare_only = true; +		if (host->pagesize_2k) +			command = NAND_CMD_READ0; /* only READ0 is valid */ +		break; + +	case NAND_CMD_SEQIN: +		if (column >= mtd->writesize) { +			/* +			 * before sending SEQIN command for partial write, +			 * we need read one page out. 
FSL NFC does not support +			 * partial write. It always sends out 512+ecc+512+ecc +			 * for large page nand flash. But for small page nand +			 * flash, it does support SPARE ONLY operation. +			 */ +			if (host->pagesize_2k) { +				/* call ourself to read a page */ +				mxc_nand_command(mtd, NAND_CMD_READ0, 0, +						page_addr); +			} + +			host->col_addr = column - mtd->writesize; +			host->spare_only = true; + +			/* Set program pointer to spare region */ +			if (!host->pagesize_2k) +				send_cmd(host, NAND_CMD_READOOB); +		} else { +			host->spare_only = false; +			host->col_addr = column; + +			/* Set program pointer to page start */ +			if (!host->pagesize_2k) +				send_cmd(host, NAND_CMD_READ0); +		} +		break; + +	case NAND_CMD_PAGEPROG: +		send_prog_page(host, 0, host->spare_only); + +		if (host->pagesize_2k && is_mxc_nfc_1()) { +			/* data in 4 areas */ +			send_prog_page(host, 1, host->spare_only); +			send_prog_page(host, 2, host->spare_only); +			send_prog_page(host, 3, host->spare_only); +		} + +		break; +	} + +	/* Write out the command to the device. */ +	send_cmd(host, command); + +	/* Write out column address, if necessary */ +	if (column != -1) { +		/* +		 * MXC NANDFC can only perform full page+spare or +		 * spare-only read/write. When the upper layers perform +		 * a read/write buffer operation, we will use the saved +		 * column address to index into the full page. 
+		 */ +		send_addr(host, 0); +		if (host->pagesize_2k) +			/* another col addr cycle for 2k page */ +			send_addr(host, 0); +	} + +	/* Write out page address, if necessary */ +	if (page_addr != -1) { +		u32 page_mask = nand_chip->pagemask; +		do { +			send_addr(host, page_addr & 0xFF); +			page_addr >>= 8; +			page_mask >>= 8; +		} while (page_mask); +	} + +	/* Command post-processing step */ +	switch (command) { + +	case NAND_CMD_RESET: +		break; + +	case NAND_CMD_READOOB: +	case NAND_CMD_READ0: +		if (host->pagesize_2k) { +			/* send read confirm command */ +			send_cmd(host, NAND_CMD_READSTART); +			/* read for each AREA */ +			send_read_page(host, 0, host->spare_only); +			if (is_mxc_nfc_1()) { +				send_read_page(host, 1, host->spare_only); +				send_read_page(host, 2, host->spare_only); +				send_read_page(host, 3, host->spare_only); +			} +		} else { +			send_read_page(host, 0, host->spare_only); +		} +		break; + +	case NAND_CMD_READID: +		host->col_addr = 0; +		send_read_id(host); +		break; + +	case NAND_CMD_PAGEPROG: +		break; + +	case NAND_CMD_STATUS: +		break; + +	case NAND_CMD_ERASE2: +		break; +	} +} + +#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT + +static u8 bbt_pattern[] = {'B', 'b', 't', '0' }; +static u8 mirror_pattern[] = {'1', 't', 'b', 'B' }; + +static struct nand_bbt_descr bbt_main_descr = { +	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | +		   NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, +	.offs =	0, +	.len = 4, +	.veroffs = 4, +	.maxblocks = 4, +	.pattern = bbt_pattern, +}; + +static struct nand_bbt_descr bbt_mirror_descr = { +	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE | +		   NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, +	.offs =	0, +	.len = 4, +	.veroffs = 4, +	.maxblocks = 4, +	.pattern = mirror_pattern, +}; + +#endif + +int board_nand_init(struct nand_chip *this) +{ +	struct mtd_info *mtd; +#if defined(MXC_NFC_V2_1) || defined(MXC_NFC_V3_2) +	uint32_t tmp; +#endif + +#ifdef 
CONFIG_SYS_NAND_USE_FLASH_BBT +	this->bbt_options |= NAND_BBT_USE_FLASH; +	this->bbt_td = &bbt_main_descr; +	this->bbt_md = &bbt_mirror_descr; +#endif + +	/* structures must be linked */ +	mtd = &host->mtd; +	mtd->priv = this; +	host->nand = this; + +	/* 5 us command delay time */ +	this->chip_delay = 5; + +	this->priv = host; +	this->dev_ready = mxc_nand_dev_ready; +	this->cmdfunc = mxc_nand_command; +	this->select_chip = mxc_nand_select_chip; +	this->read_byte = mxc_nand_read_byte; +	this->read_word = mxc_nand_read_word; +	this->write_buf = mxc_nand_write_buf; +	this->read_buf = mxc_nand_read_buf; +	this->verify_buf = mxc_nand_verify_buf; + +	host->regs = (struct mxc_nand_regs __iomem *)CONFIG_MXC_NAND_REGS_BASE; +#ifdef MXC_NFC_V3_2 +	host->ip_regs = +		(struct mxc_nand_ip_regs __iomem *)CONFIG_MXC_NAND_IP_REGS_BASE; +#endif +	host->clk_act = 1; + +#ifdef CONFIG_MXC_NAND_HWECC +	this->ecc.calculate = mxc_nand_calculate_ecc; +	this->ecc.hwctl = mxc_nand_enable_hwecc; +	this->ecc.correct = mxc_nand_correct_data; +	if (is_mxc_nfc_21() || is_mxc_nfc_32()) { +		this->ecc.mode = NAND_ECC_HW_SYNDROME; +		this->ecc.read_page = mxc_nand_read_page_syndrome; +		this->ecc.read_page_raw = mxc_nand_read_page_raw_syndrome; +		this->ecc.read_oob = mxc_nand_read_oob_syndrome; +		this->ecc.write_page = mxc_nand_write_page_syndrome; +		this->ecc.write_page_raw = mxc_nand_write_page_raw_syndrome; +		this->ecc.write_oob = mxc_nand_write_oob_syndrome; +		this->ecc.bytes = 9; +		this->ecc.prepad = 7; +	} else { +		this->ecc.mode = NAND_ECC_HW; +	} + +	if (is_mxc_nfc_1()) +		this->ecc.strength = 1; +	else +		this->ecc.strength = 4; + +	host->pagesize_2k = 0; + +	this->ecc.size = 512; +	_mxc_nand_enable_hwecc(mtd, 1); +#else +	this->ecc.layout = &nand_soft_eccoob; +	this->ecc.mode = NAND_ECC_SOFT; +	_mxc_nand_enable_hwecc(mtd, 0); +#endif +	/* Reset NAND */ +	this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); + +	/* NAND bus width determines access functions used by upper layer */ +	if 
(is_16bit_nand()) +		this->options |= NAND_BUSWIDTH_16; + +#ifdef CONFIG_SYS_NAND_LARGEPAGE +	host->pagesize_2k = 1; +	this->ecc.layout = &nand_hw_eccoob2k; +#else +	host->pagesize_2k = 0; +	this->ecc.layout = &nand_hw_eccoob; +#endif + +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +#ifdef MXC_NFC_V2_1 +	tmp = readnfc(&host->regs->config1); +	tmp |= NFC_V2_CONFIG1_ONE_CYCLE; +	tmp |= NFC_V2_CONFIG1_ECC_MODE_4; +	writenfc(tmp, &host->regs->config1); +	if (host->pagesize_2k) +		writenfc(64/2, &host->regs->spare_area_size); +	else +		writenfc(16/2, &host->regs->spare_area_size); +#endif + +	/* +	 * preset operation +	 * Unlock the internal RAM Buffer +	 */ +	writenfc(0x2, &host->regs->config); + +	/* Blocks to be unlocked */ +	writenfc(0x0, &host->regs->unlockstart_blkaddr); +	/* Originally (Freescale LTIB 2.6.21) 0x4000 was written to the +	 * unlockend_blkaddr, but the magic 0x4000 does not always work +	 * when writing more than some 32 megabytes (on 2k page nands) +	 * However 0xFFFF doesn't seem to have this kind +	 * of limitation (tried it back and forth several times). +	 * The linux kernel driver sets this to 0xFFFF for the v2 controller +	 * only, but probably this was not tested there for v1. +	 * The very same limitation seems to apply to this kernel driver. +	 * This might be NAND chip specific and the i.MX31 datasheet is +	 * extremely vague about the semantics of this register. 
+	 */ +	writenfc(0xFFFF, &host->regs->unlockend_blkaddr); + +	/* Unlock Block Command for given address range */ +	writenfc(0x4, &host->regs->wrprot); +#elif defined(MXC_NFC_V3_2) +	writenfc(NFC_V3_CONFIG1_RBA(0), &host->regs->config1); +	writenfc(NFC_V3_IPC_CREQ, &host->ip_regs->ipc); + +	/* Unlock the internal RAM Buffer */ +	writenfc(NFC_V3_WRPROT_BLS_UNLOCK | NFC_V3_WRPROT_UNLOCK, +			&host->ip_regs->wrprot); + +	/* Blocks to be unlocked */ +	for (tmp = 0; tmp < CONFIG_SYS_NAND_MAX_CHIPS; tmp++) +		writenfc(0x0 | 0xFFFF << 16, +				&host->ip_regs->wrprot_unlock_blkaddr[tmp]); + +	writenfc(0, &host->ip_regs->ipc); + +	tmp = readnfc(&host->ip_regs->config2); +	tmp &= ~(NFC_V3_CONFIG2_SPAS_MASK | NFC_V3_CONFIG2_EDC_MASK | +			NFC_V3_CONFIG2_ECC_MODE_8 | NFC_V3_CONFIG2_PS_MASK); +	tmp |= NFC_V3_CONFIG2_ONE_CYCLE; + +	if (host->pagesize_2k) { +		tmp |= NFC_V3_CONFIG2_SPAS(64/2); +		tmp |= NFC_V3_CONFIG2_PS_2048; +	} else { +		tmp |= NFC_V3_CONFIG2_SPAS(16/2); +		tmp |= NFC_V3_CONFIG2_PS_512; +	} + +	writenfc(tmp, &host->ip_regs->config2); + +	tmp = NFC_V3_CONFIG3_NUM_OF_DEVS(0) | +			NFC_V3_CONFIG3_NO_SDMA | +			NFC_V3_CONFIG3_RBB_MODE | +			NFC_V3_CONFIG3_SBB(6) | /* Reset default */ +			NFC_V3_CONFIG3_ADD_OP(0); + +	if (!(this->options & NAND_BUSWIDTH_16)) +		tmp |= NFC_V3_CONFIG3_FW8; + +	writenfc(tmp, &host->ip_regs->config3); + +	writenfc(0, &host->ip_regs->delay_line); +#endif + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/mxc_nand.h b/roms/u-boot/drivers/mtd/nand/mxc_nand.h new file mode 100644 index 00000000..a02d6e0a --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/mxc_nand.h @@ -0,0 +1,209 @@ +/* + * (c) 2009 Magnus Lilja <lilja.magnus@gmail.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#ifndef __MXC_NAND_H +#define __MXC_NAND_H + +/* + * Register map and bit definitions for the Freescale NAND Flash Controller + * present in various i.MX devices. 
+ * + * MX31 and MX27 have version 1, which has: + *	4 512-byte main buffers and + *	4 16-byte spare buffers + *	to support up to 2K byte pagesize nand. + *	Reading or writing a 2K page requires 4 FDI/FDO cycles. + * + * MX25 and MX35 have version 2.1, and MX51 and MX53 have version 3.2, which + * have: + *	8 512-byte main buffers and + *	8 64-byte spare buffers + *	to support up to 4K byte pagesize nand. + *	Reading or writing a 2K or 4K page requires only 1 FDI/FDO cycle. + *	Also some of registers are moved and/or changed meaning as seen below. + */ +#if defined(CONFIG_MX27) || defined(CONFIG_MX31) +#define MXC_NFC_V1 +#define is_mxc_nfc_1()		1 +#define is_mxc_nfc_21()		0 +#define is_mxc_nfc_32()		0 +#elif defined(CONFIG_MX25) || defined(CONFIG_MX35) +#define MXC_NFC_V2_1 +#define is_mxc_nfc_1()		0 +#define is_mxc_nfc_21()		1 +#define is_mxc_nfc_32()		0 +#elif defined(CONFIG_MX51) || defined(CONFIG_MX53) +#define MXC_NFC_V3 +#define MXC_NFC_V3_2 +#define is_mxc_nfc_1()		0 +#define is_mxc_nfc_21()		0 +#define is_mxc_nfc_32()		1 +#else +#error "MXC NFC implementation not supported" +#endif +#define is_mxc_nfc_3()		is_mxc_nfc_32() + +#if defined(MXC_NFC_V1) +#define NAND_MXC_NR_BUFS		4 +#define NAND_MXC_SPARE_BUF_SIZE		16 +#define NAND_MXC_REG_OFFSET		0xe00 +#define NAND_MXC_2K_MULTI_CYCLE +#elif defined(MXC_NFC_V2_1) || defined(MXC_NFC_V3_2) +#define NAND_MXC_NR_BUFS		8 +#define NAND_MXC_SPARE_BUF_SIZE		64 +#define NAND_MXC_REG_OFFSET		0x1e00 +#endif + +struct mxc_nand_regs { +	u8 main_area[NAND_MXC_NR_BUFS][0x200]; +	u8 spare_area[NAND_MXC_NR_BUFS][NAND_MXC_SPARE_BUF_SIZE]; +	/* +	 * reserved size is offset of nfc registers +	 * minus total main and spare sizes +	 */ +	u8 reserved1[NAND_MXC_REG_OFFSET +		- NAND_MXC_NR_BUFS * (512 + NAND_MXC_SPARE_BUF_SIZE)]; +#if defined(MXC_NFC_V1) +	u16 buf_size; +	u16 reserved2; +	u16 buf_addr; +	u16 flash_addr; +	u16 flash_cmd; +	u16 config; +	u16 ecc_status_result; +	u16 rsltmain_area; +	u16 rsltspare_area; +	u16 wrprot; +	
u16 unlockstart_blkaddr; +	u16 unlockend_blkaddr; +	u16 nf_wrprst; +	u16 config1; +	u16 config2; +#elif defined(MXC_NFC_V2_1) +	u16 reserved2[2]; +	u16 buf_addr; +	u16 flash_addr; +	u16 flash_cmd; +	u16 config; +	u32 ecc_status_result; +	u16 spare_area_size; +	u16 wrprot; +	u16 reserved3[2]; +	u16 nf_wrprst; +	u16 config1; +	u16 config2; +	u16 reserved4; +	u16 unlockstart_blkaddr; +	u16 unlockend_blkaddr; +	u16 unlockstart_blkaddr1; +	u16 unlockend_blkaddr1; +	u16 unlockstart_blkaddr2; +	u16 unlockend_blkaddr2; +	u16 unlockstart_blkaddr3; +	u16 unlockend_blkaddr3; +#elif defined(MXC_NFC_V3_2) +	u32 flash_cmd; +	u32 flash_addr[12]; +	u32 config1; +	u32 ecc_status_result; +	u32 status_sum; +	u32 launch; +#endif +}; + +#ifdef MXC_NFC_V3_2 +struct mxc_nand_ip_regs { +	u32 wrprot; +	u32 wrprot_unlock_blkaddr[8]; +	u32 config2; +	u32 config3; +	u32 ipc; +	u32 err_addr; +	u32 delay_line; +}; +#endif + +/* Set FCMD to 1, rest to 0 for Command operation */ +#define NFC_CMD				0x1 + +/* Set FADD to 1, rest to 0 for Address operation */ +#define NFC_ADDR			0x2 + +/* Set FDI to 1, rest to 0 for Input operation */ +#define NFC_INPUT			0x4 + +/* Set FDO to 001, rest to 0 for Data Output operation */ +#define NFC_OUTPUT			0x8 + +/* Set FDO to 010, rest to 0 for Read ID operation */ +#define NFC_ID				0x10 + +/* Set FDO to 100, rest to 0 for Read Status operation */ +#define NFC_STATUS			0x20 + +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +#define NFC_CONFIG1_SP_EN		(1 << 2) +#define NFC_CONFIG1_RST			(1 << 6) +#define NFC_CONFIG1_CE			(1 << 7) +#elif defined(MXC_NFC_V3_2) +#define NFC_CONFIG1_SP_EN		(1 << 0) +#define NFC_CONFIG1_CE			(1 << 1) +#define NFC_CONFIG1_RST			(1 << 2) +#endif +#define NFC_V1_V2_CONFIG1_ECC_EN	(1 << 3) +#define NFC_V1_V2_CONFIG1_INT_MSK	(1 << 4) +#define NFC_V1_V2_CONFIG1_BIG		(1 << 5) +#define NFC_V2_CONFIG1_ECC_MODE_4	(1 << 0) +#define NFC_V2_CONFIG1_ONE_CYCLE	(1 << 8) +#define NFC_V2_CONFIG1_FP_INT		(1 << 11) +#define NFC_V3_CONFIG1_RBA_MASK		
(0x7 << 4) +#define NFC_V3_CONFIG1_RBA(x)		(((x) & 0x7) << 4) + +#define NFC_V1_V2_CONFIG2_INT		(1 << 15) +#define NFC_V3_CONFIG2_PS_MASK		(0x3 << 0) +#define NFC_V3_CONFIG2_PS_512		(0 << 0) +#define NFC_V3_CONFIG2_PS_2048		(1 << 0) +#define NFC_V3_CONFIG2_PS_4096		(2 << 0) +#define NFC_V3_CONFIG2_ONE_CYCLE	(1 << 2) +#define NFC_V3_CONFIG2_ECC_EN		(1 << 3) +#define NFC_V3_CONFIG2_2CMD_PHASES	(1 << 4) +#define NFC_V3_CONFIG2_NUM_ADDR_PH0	(1 << 5) +#define NFC_V3_CONFIG2_ECC_MODE_8	(1 << 6) +#define NFC_V3_CONFIG2_PPB_MASK		(0x3 << 7) +#define NFC_V3_CONFIG2_PPB(x)		(((x) & 0x3) << 7) +#define NFC_V3_CONFIG2_EDC_MASK		(0x7 << 9) +#define NFC_V3_CONFIG2_EDC(x)		(((x) & 0x7) << 9) +#define NFC_V3_CONFIG2_NUM_ADDR_PH1(x)	(((x) & 0x3) << 12) +#define NFC_V3_CONFIG2_INT_MSK		(1 << 15) +#define NFC_V3_CONFIG2_SPAS_MASK	(0xff << 16) +#define NFC_V3_CONFIG2_SPAS(x)		(((x) & 0xff) << 16) +#define NFC_V3_CONFIG2_ST_CMD_MASK	(0xff << 24) +#define NFC_V3_CONFIG2_ST_CMD(x)	(((x) & 0xff) << 24) + +#define NFC_V3_CONFIG3_ADD_OP(x)	(((x) & 0x3) << 0) +#define NFC_V3_CONFIG3_FW8		(1 << 3) +#define NFC_V3_CONFIG3_SBB(x)		(((x) & 0x7) << 8) +#define NFC_V3_CONFIG3_NUM_OF_DEVS(x)	(((x) & 0x7) << 12) +#define NFC_V3_CONFIG3_RBB_MODE		(1 << 15) +#define NFC_V3_CONFIG3_NO_SDMA		(1 << 20) + +#define NFC_V3_WRPROT_UNLOCK		(1 << 2) +#define NFC_V3_WRPROT_BLS_UNLOCK	(2 << 6) + +#define NFC_V3_IPC_CREQ			(1 << 0) +#define NFC_V3_IPC_INT			(1 << 31) + +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +#define operation	config2 +#define readnfc		readw +#define writenfc	writew +#elif defined(MXC_NFC_V3_2) +#define operation	launch +#define readnfc		readl +#define writenfc	writel +#endif + +#endif /* __MXC_NAND_H */ diff --git a/roms/u-boot/drivers/mtd/nand/mxc_nand_spl.c b/roms/u-boot/drivers/mtd/nand/mxc_nand_spl.c new file mode 100644 index 00000000..69b736a8 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/mxc_nand_spl.c @@ -0,0 +1,351 @@ +/* + * (C) Copyright 2009 + * Magnus Lilja 
<lilja.magnus@gmail.com> + * + * (C) Copyright 2008 + * Maxim Artamonov, <scn1874 at yandex.ru> + * + * (C) Copyright 2006-2008 + * Stefan Roese, DENX Software Engineering, sr at denx.de. + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <nand.h> +#include <asm/arch/imx-regs.h> +#include <asm/io.h> +#include "mxc_nand.h" + +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +static struct mxc_nand_regs *const nfc = (void *)NFC_BASE_ADDR; +#elif defined(MXC_NFC_V3_2) +static struct mxc_nand_regs *const nfc = (void *)NFC_BASE_ADDR_AXI; +static struct mxc_nand_ip_regs *const nfc_ip = (void *)NFC_BASE_ADDR; +#endif + +static void nfc_wait_ready(void) +{ +	uint32_t tmp; + +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +	while (!(readnfc(&nfc->config2) & NFC_V1_V2_CONFIG2_INT)) +		; + +	/* Reset interrupt flag */ +	tmp = readnfc(&nfc->config2); +	tmp &= ~NFC_V1_V2_CONFIG2_INT; +	writenfc(tmp, &nfc->config2); +#elif defined(MXC_NFC_V3_2) +	while (!(readnfc(&nfc_ip->ipc) & NFC_V3_IPC_INT)) +		; + +	/* Reset interrupt flag */ +	tmp = readnfc(&nfc_ip->ipc); +	tmp &= ~NFC_V3_IPC_INT; +	writenfc(tmp, &nfc_ip->ipc); +#endif +} + +static void nfc_nand_init(void) +{ +#if defined(MXC_NFC_V3_2) +	int ecc_per_page = CONFIG_SYS_NAND_PAGE_SIZE / 512; +	int tmp; + +	tmp = (readnfc(&nfc_ip->config2) & ~(NFC_V3_CONFIG2_SPAS_MASK | +			NFC_V3_CONFIG2_EDC_MASK | NFC_V3_CONFIG2_PS_MASK)) | +		NFC_V3_CONFIG2_SPAS(CONFIG_SYS_NAND_OOBSIZE / 2) | +		NFC_V3_CONFIG2_INT_MSK | NFC_V3_CONFIG2_ECC_EN | +		NFC_V3_CONFIG2_ONE_CYCLE; +	if (CONFIG_SYS_NAND_PAGE_SIZE == 4096) +		tmp |= NFC_V3_CONFIG2_PS_4096; +	else if (CONFIG_SYS_NAND_PAGE_SIZE == 2048) +		tmp |= NFC_V3_CONFIG2_PS_2048; +	else if (CONFIG_SYS_NAND_PAGE_SIZE == 512) +		tmp |= NFC_V3_CONFIG2_PS_512; +	/* +	 * if spare size is larger that 16 bytes per 512 byte hunk +	 * then use 8 symbol correction instead of 4 +	 */ +	if (CONFIG_SYS_NAND_OOBSIZE / ecc_per_page > 16) +		tmp |= NFC_V3_CONFIG2_ECC_MODE_8; +	else 
+		tmp &= ~NFC_V3_CONFIG2_ECC_MODE_8; +	writenfc(tmp, &nfc_ip->config2); + +	tmp = NFC_V3_CONFIG3_NUM_OF_DEVS(0) | +			NFC_V3_CONFIG3_NO_SDMA | +			NFC_V3_CONFIG3_RBB_MODE | +			NFC_V3_CONFIG3_SBB(6) | /* Reset default */ +			NFC_V3_CONFIG3_ADD_OP(0); +#ifndef CONFIG_SYS_NAND_BUSWIDTH_16 +	tmp |= NFC_V3_CONFIG3_FW8; +#endif +	writenfc(tmp, &nfc_ip->config3); + +	writenfc(0, &nfc_ip->delay_line); +#elif defined(MXC_NFC_V2_1) +	int ecc_per_page = CONFIG_SYS_NAND_PAGE_SIZE / 512; +	int config1; + +	writenfc(CONFIG_SYS_NAND_OOBSIZE / 2, &nfc->spare_area_size); + +	/* unlocking RAM Buff */ +	writenfc(0x2, &nfc->config); + +	/* hardware ECC checking and correct */ +	config1 = readnfc(&nfc->config1) | NFC_V1_V2_CONFIG1_ECC_EN | +			NFC_V1_V2_CONFIG1_INT_MSK | NFC_V2_CONFIG1_ONE_CYCLE | +			NFC_V2_CONFIG1_FP_INT; +	/* +	 * if spare size is larger that 16 bytes per 512 byte hunk +	 * then use 8 symbol correction instead of 4 +	 */ +	if (CONFIG_SYS_NAND_OOBSIZE / ecc_per_page > 16) +		config1 &= ~NFC_V2_CONFIG1_ECC_MODE_4; +	else +		config1 |= NFC_V2_CONFIG1_ECC_MODE_4; +	writenfc(config1, &nfc->config1); +#elif defined(MXC_NFC_V1) +	/* unlocking RAM Buff */ +	writenfc(0x2, &nfc->config); + +	/* hardware ECC checking and correct */ +	writenfc(NFC_V1_V2_CONFIG1_ECC_EN | NFC_V1_V2_CONFIG1_INT_MSK, +			&nfc->config1); +#endif +} + +static void nfc_nand_command(unsigned short command) +{ +	writenfc(command, &nfc->flash_cmd); +	writenfc(NFC_CMD, &nfc->operation); +	nfc_wait_ready(); +} + +static void nfc_nand_address(unsigned short address) +{ +	writenfc(address, &nfc->flash_addr); +	writenfc(NFC_ADDR, &nfc->operation); +	nfc_wait_ready(); +} + +static void nfc_nand_page_address(unsigned int page_address) +{ +	unsigned int page_count; + +	nfc_nand_address(0x00); + +	/* code only for large page flash */ +	if (CONFIG_SYS_NAND_PAGE_SIZE > 512) +		nfc_nand_address(0x00); + +	page_count = CONFIG_SYS_NAND_SIZE / CONFIG_SYS_NAND_PAGE_SIZE; + +	if (page_address <= page_count) { +		
page_count--; /* transform 0x01000000 to 0x00ffffff */ +		do { +			nfc_nand_address(page_address & 0xff); +			page_address = page_address >> 8; +			page_count = page_count >> 8; +		} while (page_count); +	} + +	nfc_nand_address(0x00); +} + +static void nfc_nand_data_output(void) +{ +#ifdef NAND_MXC_2K_MULTI_CYCLE +	int i; +#endif + +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +	writenfc(0, &nfc->buf_addr); +#elif defined(MXC_NFC_V3_2) +	int config1 = readnfc(&nfc->config1); +	config1 &= ~NFC_V3_CONFIG1_RBA_MASK; +	writenfc(config1, &nfc->config1); +#endif +	writenfc(NFC_OUTPUT, &nfc->operation); +	nfc_wait_ready(); +#ifdef NAND_MXC_2K_MULTI_CYCLE +	/* +	 * This NAND controller requires multiple input commands +	 * for pages larger than 512 bytes. +	 */ +	for (i = 1; i < CONFIG_SYS_NAND_PAGE_SIZE / 512; i++) { +		writenfc(i, &nfc->buf_addr); +		writenfc(NFC_OUTPUT, &nfc->operation); +		nfc_wait_ready(); +	} +#endif +} + +static int nfc_nand_check_ecc(void) +{ +#if defined(MXC_NFC_V1) +	u16 ecc_status = readw(&nfc->ecc_status_result); +	return (ecc_status & 0x3) == 2 || (ecc_status >> 2) == 2; +#elif defined(MXC_NFC_V2_1) || defined(MXC_NFC_V3_2) +	u32 ecc_status = readl(&nfc->ecc_status_result); +	int ecc_per_page = CONFIG_SYS_NAND_PAGE_SIZE / 512; +	int err_limit = CONFIG_SYS_NAND_OOBSIZE / ecc_per_page > 16 ? 
8 : 4; +	int subpages = CONFIG_SYS_NAND_PAGE_SIZE / 512; + +	do { +		if ((ecc_status & 0xf) > err_limit) +			return 1; +		ecc_status >>= 4; +	} while (--subpages); + +	return 0; +#endif +} + +static void nfc_nand_read_page(unsigned int page_address) +{ +	/* read in first 0 buffer */ +#if defined(MXC_NFC_V1) || defined(MXC_NFC_V2_1) +	writenfc(0, &nfc->buf_addr); +#elif defined(MXC_NFC_V3_2) +	int config1 = readnfc(&nfc->config1); +	config1 &= ~NFC_V3_CONFIG1_RBA_MASK; +	writenfc(config1, &nfc->config1); +#endif +	nfc_nand_command(NAND_CMD_READ0); +	nfc_nand_page_address(page_address); + +	if (CONFIG_SYS_NAND_PAGE_SIZE > 512) +		nfc_nand_command(NAND_CMD_READSTART); + +	nfc_nand_data_output(); /* fill the main buffer 0 */ +} + +static int nfc_read_page(unsigned int page_address, unsigned char *buf) +{ +	int i; +	u32 *src; +	u32 *dst; + +	nfc_nand_read_page(page_address); + +	if (nfc_nand_check_ecc()) +		return -1; + +	src = (u32 *)&nfc->main_area[0][0]; +	dst = (u32 *)buf; + +	/* main copy loop from NAND-buffer to SDRAM memory */ +	for (i = 0; i < CONFIG_SYS_NAND_PAGE_SIZE / 4; i++) { +		writel(readl(src), dst); +		src++; +		dst++; +	} + +	return 0; +} + +static int is_badblock(int pagenumber) +{ +	int page = pagenumber; +	u32 badblock; +	u32 *src; + +	/* Check the first two pages for bad block markers */ +	for (page = pagenumber; page < pagenumber + 2; page++) { +		nfc_nand_read_page(page); + +		src = (u32 *)&nfc->spare_area[0][0]; + +		/* +		 * IMPORTANT NOTE: The nand flash controller uses a non- +		 * standard layout for large page devices. This can +		 * affect the position of the bad block marker. 
+		 */ +		/* Get the bad block marker */ +		badblock = readl(&src[CONFIG_SYS_NAND_BAD_BLOCK_POS / 4]); +		badblock >>= 8 * (CONFIG_SYS_NAND_BAD_BLOCK_POS % 4); +		badblock &= 0xff; + +		/* bad block marker verify */ +		if (badblock != 0xff) +			return 1; /* potential bad block */ +	} + +	return 0; +} + +int nand_spl_load_image(uint32_t from, unsigned int size, void *buf) +{ +	int i; +	unsigned int page; +	unsigned int maxpages = CONFIG_SYS_NAND_SIZE / +				CONFIG_SYS_NAND_PAGE_SIZE; + +	nfc_nand_init(); + +	/* Convert to page number */ +	page = from / CONFIG_SYS_NAND_PAGE_SIZE; +	i = 0; + +	size = roundup(size, CONFIG_SYS_NAND_PAGE_SIZE); +	while (i < size / CONFIG_SYS_NAND_PAGE_SIZE) { +		if (nfc_read_page(page, buf) < 0) +			return -1; + +		page++; +		i++; +		buf = buf + CONFIG_SYS_NAND_PAGE_SIZE; + +		/* +		 * Check if we have crossed a block boundary, and if so +		 * check for bad block. +		 */ +		if (!(page % CONFIG_SYS_NAND_PAGE_COUNT)) { +			/* +			 * Yes, new block. See if this block is good. If not, +			 * loop until we find a good block. +			 */ +			while (is_badblock(page)) { +				page = page + CONFIG_SYS_NAND_PAGE_COUNT; +				/* Check i we've reached the end of flash. */ +				if (page >= maxpages) +					return -1; +			} +		} +	} + +	return 0; +} + +#ifndef CONFIG_SPL_FRAMEWORK +/* + * The main entry for NAND booting. It's necessary that SDRAM is already + * configured and available since this code loads the main U-Boot image + * from NAND into SDRAM and starts it from there. 
+ */ +void nand_boot(void) +{ +	__attribute__((noreturn)) void (*uboot)(void); + +	/* +	 * CONFIG_SYS_NAND_U_BOOT_OFFS and CONFIG_SYS_NAND_U_BOOT_SIZE must +	 * be aligned to full pages +	 */ +	if (!nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS, +			CONFIG_SYS_NAND_U_BOOT_SIZE, +			(uchar *)CONFIG_SYS_NAND_U_BOOT_DST)) { +		/* Copy from NAND successful, start U-boot */ +		uboot = (void *)CONFIG_SYS_NAND_U_BOOT_START; +		uboot(); +	} else { +		/* Unrecoverable error when copying from NAND */ +		hang(); +	} +} +#endif + +void nand_init(void) {} +void nand_deselect(void) {} diff --git a/roms/u-boot/drivers/mtd/nand/mxs_nand.c b/roms/u-boot/drivers/mtd/nand/mxs_nand.c new file mode 100644 index 00000000..036c113a --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/mxs_nand.c @@ -0,0 +1,1179 @@ +/* + * Freescale i.MX28 NAND flash driver + * + * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com> + * on behalf of DENX Software Engineering GmbH + * + * Based on code from LTIB: + * Freescale GPMI NFC NAND Flash Driver + * + * Copyright (C) 2010 Freescale Semiconductor, Inc. + * Copyright (C) 2008 Embedded Alley Solutions, Inc. 
+ * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/types.h> +#include <malloc.h> +#include <asm/errno.h> +#include <asm/io.h> +#include <asm/arch/clock.h> +#include <asm/arch/imx-regs.h> +#include <asm/imx-common/regs-bch.h> +#include <asm/imx-common/regs-gpmi.h> +#include <asm/arch/sys_proto.h> +#include <asm/imx-common/dma.h> + +#define	MXS_NAND_DMA_DESCRIPTOR_COUNT		4 + +#define	MXS_NAND_CHUNK_DATA_CHUNK_SIZE		512 +#if defined(CONFIG_MX6) +#define	MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2 +#else +#define	MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0 +#endif +#define	MXS_NAND_METADATA_SIZE			10 + +#define	MXS_NAND_COMMAND_BUFFER_SIZE		32 + +#define	MXS_NAND_BCH_TIMEOUT			10000 + +struct mxs_nand_info { +	int		cur_chip; + +	uint32_t	cmd_queue_len; +	uint32_t	data_buf_size; + +	uint8_t		*cmd_buf; +	uint8_t		*data_buf; +	uint8_t		*oob_buf; + +	uint8_t		marking_block_bad; +	uint8_t		raw_oob_mode; + +	/* Functions with altered behaviour */ +	int		(*hooked_read_oob)(struct mtd_info *mtd, +				loff_t from, struct mtd_oob_ops *ops); +	int		(*hooked_write_oob)(struct mtd_info *mtd, +				loff_t to, struct mtd_oob_ops *ops); +	int		(*hooked_block_markbad)(struct mtd_info *mtd, +				loff_t ofs); + +	/* DMA descriptors */ +	struct mxs_dma_desc	**desc; +	uint32_t		desc_index; +}; + +struct nand_ecclayout fake_ecc_layout; + +/* + * Cache management functions + */ +#ifndef	CONFIG_SYS_DCACHE_OFF +static void mxs_nand_flush_data_buf(struct mxs_nand_info *info) +{ +	uint32_t addr = (uint32_t)info->data_buf; + +	flush_dcache_range(addr, addr + info->data_buf_size); +} + +static void mxs_nand_inval_data_buf(struct mxs_nand_info *info) +{ +	uint32_t addr = (uint32_t)info->data_buf; + +	invalidate_dcache_range(addr, addr + info->data_buf_size); +} + +static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) +{ +	uint32_t addr = (uint32_t)info->cmd_buf; + +	flush_dcache_range(addr, addr + 
MXS_NAND_COMMAND_BUFFER_SIZE); +} +#else +static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {} +static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {} +static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {} +#endif + +static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info) +{ +	struct mxs_dma_desc *desc; + +	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) { +		printf("MXS NAND: Too many DMA descriptors requested\n"); +		return NULL; +	} + +	desc = info->desc[info->desc_index]; +	info->desc_index++; + +	return desc; +} + +static void mxs_nand_return_dma_descs(struct mxs_nand_info *info) +{ +	int i; +	struct mxs_dma_desc *desc; + +	for (i = 0; i < info->desc_index; i++) { +		desc = info->desc[i]; +		memset(desc, 0, sizeof(struct mxs_dma_desc)); +		desc->address = (dma_addr_t)desc; +	} + +	info->desc_index = 0; +} + +static uint32_t mxs_nand_ecc_chunk_cnt(uint32_t page_data_size) +{ +	return page_data_size / MXS_NAND_CHUNK_DATA_CHUNK_SIZE; +} + +static uint32_t mxs_nand_ecc_size_in_bits(uint32_t ecc_strength) +{ +	return ecc_strength * 13; +} + +static uint32_t mxs_nand_aux_status_offset(void) +{ +	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3; +} + +static inline uint32_t mxs_nand_get_ecc_strength(uint32_t page_data_size, +						uint32_t page_oob_size) +{ +	if (page_data_size == 2048) +		return 8; + +	if (page_data_size == 4096) { +		if (page_oob_size == 128) +			return 8; + +		if (page_oob_size == 218) +			return 16; + +		if (page_oob_size == 224) +			return 16; +	} + +	return 0; +} + +static inline uint32_t mxs_nand_get_mark_offset(uint32_t page_data_size, +						uint32_t ecc_strength) +{ +	uint32_t chunk_data_size_in_bits; +	uint32_t chunk_ecc_size_in_bits; +	uint32_t chunk_total_size_in_bits; +	uint32_t block_mark_chunk_number; +	uint32_t block_mark_chunk_bit_offset; +	uint32_t block_mark_bit_offset; + +	chunk_data_size_in_bits = MXS_NAND_CHUNK_DATA_CHUNK_SIZE * 8; +	
chunk_ecc_size_in_bits  = mxs_nand_ecc_size_in_bits(ecc_strength); + +	chunk_total_size_in_bits = +			chunk_data_size_in_bits + chunk_ecc_size_in_bits; + +	/* Compute the bit offset of the block mark within the physical page. */ +	block_mark_bit_offset = page_data_size * 8; + +	/* Subtract the metadata bits. */ +	block_mark_bit_offset -= MXS_NAND_METADATA_SIZE * 8; + +	/* +	 * Compute the chunk number (starting at zero) in which the block mark +	 * appears. +	 */ +	block_mark_chunk_number = +			block_mark_bit_offset / chunk_total_size_in_bits; + +	/* +	 * Compute the bit offset of the block mark within its chunk, and +	 * validate it. +	 */ +	block_mark_chunk_bit_offset = block_mark_bit_offset - +			(block_mark_chunk_number * chunk_total_size_in_bits); + +	if (block_mark_chunk_bit_offset > chunk_data_size_in_bits) +		return 1; + +	/* +	 * Now that we know the chunk number in which the block mark appears, +	 * we can subtract all the ECC bits that appear before it. +	 */ +	block_mark_bit_offset -= +		block_mark_chunk_number * chunk_ecc_size_in_bits; + +	return block_mark_bit_offset; +} + +static uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd) +{ +	uint32_t ecc_strength; +	ecc_strength = mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize); +	return mxs_nand_get_mark_offset(mtd->writesize, ecc_strength) >> 3; +} + +static uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd) +{ +	uint32_t ecc_strength; +	ecc_strength = mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize); +	return mxs_nand_get_mark_offset(mtd->writesize, ecc_strength) & 0x7; +} + +/* + * Wait for BCH complete IRQ and clear the IRQ + */ +static int mxs_nand_wait_for_bch_complete(void) +{ +	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; +	int timeout = MXS_NAND_BCH_TIMEOUT; +	int ret; + +	ret = mxs_wait_mask_set(&bch_regs->hw_bch_ctrl_reg, +		BCH_CTRL_COMPLETE_IRQ, timeout); + +	writel(BCH_CTRL_COMPLETE_IRQ, &bch_regs->hw_bch_ctrl_clr); + +	return ret; +} + +/* + 
* This is the function that we install in the cmd_ctrl function pointer of the + * owning struct nand_chip. The only functions in the reference implementation + * that use these functions pointers are cmdfunc and select_chip. + * + * In this driver, we implement our own select_chip, so this function will only + * be called by the reference implementation's cmdfunc. For this reason, we can + * ignore the chip enable bit and concentrate only on sending bytes to the NAND + * Flash. + */ +static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl) +{ +	struct nand_chip *nand = mtd->priv; +	struct mxs_nand_info *nand_info = nand->priv; +	struct mxs_dma_desc *d; +	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; +	int ret; + +	/* +	 * If this condition is true, something is _VERY_ wrong in MTD +	 * subsystem! +	 */ +	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) { +		printf("MXS NAND: Command queue too long\n"); +		return; +	} + +	/* +	 * Every operation begins with a command byte and a series of zero or +	 * more address bytes. These are distinguished by either the Address +	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being +	 * asserted. When MTD is ready to execute the command, it will +	 * deasert both latch enables. +	 * +	 * Rather than run a separate DMA operation for every single byte, we +	 * queue them up and run a single DMA operation for the entire series +	 * of command and data bytes. +	 */ +	if (ctrl & (NAND_ALE | NAND_CLE)) { +		if (data != NAND_CMD_NONE) +			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data; +		return; +	} + +	/* +	 * If control arrives here, MTD has deasserted both the ALE and CLE, +	 * which means it's ready to run an operation. Check if we have any +	 * bytes to send. +	 */ +	if (nand_info->cmd_queue_len == 0) +		return; + +	/* Compile the DMA descriptor -- a descriptor that sends command. 
*/ +	d = mxs_nand_get_dma_desc(nand_info); +	d->cmd.data = +		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ | +		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM | +		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) | +		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET); + +	d->cmd.address = (dma_addr_t)nand_info->cmd_buf; + +	d->cmd.pio_words[0] = +		GPMI_CTRL0_COMMAND_MODE_WRITE | +		GPMI_CTRL0_WORD_LENGTH | +		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | +		GPMI_CTRL0_ADDRESS_NAND_CLE | +		GPMI_CTRL0_ADDRESS_INCREMENT | +		nand_info->cmd_queue_len; + +	mxs_dma_desc_append(channel, d); + +	/* Flush caches */ +	mxs_nand_flush_cmd_buf(nand_info); + +	/* Execute the DMA chain. */ +	ret = mxs_dma_go(channel); +	if (ret) +		printf("MXS NAND: Error sending command\n"); + +	mxs_nand_return_dma_descs(nand_info); + +	/* Reset the command queue. */ +	nand_info->cmd_queue_len = 0; +} + +/* + * Test if the NAND flash is ready. + */ +static int mxs_nand_device_ready(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	struct mxs_nand_info *nand_info = chip->priv; +	struct mxs_gpmi_regs *gpmi_regs = +		(struct mxs_gpmi_regs *)MXS_GPMI_BASE; +	uint32_t tmp; + +	/* Each chip has its own ready/busy bit in the GPMI status register. */ +	tmp = readl(&gpmi_regs->hw_gpmi_stat); +	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip); + +	return tmp & 1; +} + +/* + * Select the NAND chip. + */ +static void mxs_nand_select_chip(struct mtd_info *mtd, int chip) +{ +	struct nand_chip *nand = mtd->priv; +	struct mxs_nand_info *nand_info = nand->priv; + +	nand_info->cur_chip = chip; +} + +/* + * Handle block mark swapping. + * + * Note that, when this function is called, it doesn't know whether it's + * swapping the block mark, or swapping it *back* -- but it doesn't matter + * because the operation is the same. 
+ */ +static void mxs_nand_swap_block_mark(struct mtd_info *mtd, +					uint8_t *data_buf, uint8_t *oob_buf) +{ +	uint32_t bit_offset; +	uint32_t buf_offset; + +	uint32_t src; +	uint32_t dst; + +	bit_offset = mxs_nand_mark_bit_offset(mtd); +	buf_offset = mxs_nand_mark_byte_offset(mtd); + +	/* +	 * Get the byte from the data area that overlays the block mark. Since +	 * the ECC engine applies its own view to the bits in the page, the +	 * physical block mark won't (in general) appear on a byte boundary in +	 * the data. +	 */ +	src = data_buf[buf_offset] >> bit_offset; +	src |= data_buf[buf_offset + 1] << (8 - bit_offset); + +	dst = oob_buf[0]; + +	oob_buf[0] = src; + +	data_buf[buf_offset] &= ~(0xff << bit_offset); +	data_buf[buf_offset + 1] &= 0xff << bit_offset; + +	data_buf[buf_offset] |= dst << bit_offset; +	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset); +} + +/* + * Read data from NAND. + */ +static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length) +{ +	struct nand_chip *nand = mtd->priv; +	struct mxs_nand_info *nand_info = nand->priv; +	struct mxs_dma_desc *d; +	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; +	int ret; + +	if (length > NAND_MAX_PAGESIZE) { +		printf("MXS NAND: DMA buffer too big\n"); +		return; +	} + +	if (!buf) { +		printf("MXS NAND: DMA buffer is NULL\n"); +		return; +	} + +	/* Compile the DMA descriptor - a descriptor that reads data. 
*/ +	d = mxs_nand_get_dma_desc(nand_info); +	d->cmd.data = +		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ | +		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | +		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) | +		(length << MXS_DMA_DESC_BYTES_OFFSET); + +	d->cmd.address = (dma_addr_t)nand_info->data_buf; + +	d->cmd.pio_words[0] = +		GPMI_CTRL0_COMMAND_MODE_READ | +		GPMI_CTRL0_WORD_LENGTH | +		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | +		GPMI_CTRL0_ADDRESS_NAND_DATA | +		length; + +	mxs_dma_desc_append(channel, d); + +	/* +	 * A DMA descriptor that waits for the command to end and the chip to +	 * become ready. +	 * +	 * I think we actually should *not* be waiting for the chip to become +	 * ready because, after all, we don't care. I think the original code +	 * did that and no one has re-thought it yet. +	 */ +	d = mxs_nand_get_dma_desc(nand_info); +	d->cmd.data = +		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | +		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM | +		MXS_DMA_DESC_WAIT4END | (4 << MXS_DMA_DESC_PIO_WORDS_OFFSET); + +	d->cmd.address = 0; + +	d->cmd.pio_words[0] = +		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | +		GPMI_CTRL0_WORD_LENGTH | +		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | +		GPMI_CTRL0_ADDRESS_NAND_DATA; + +	mxs_dma_desc_append(channel, d); + +	/* Execute the DMA chain. */ +	ret = mxs_dma_go(channel); +	if (ret) { +		printf("MXS NAND: DMA read error\n"); +		goto rtn; +	} + +	/* Invalidate caches */ +	mxs_nand_inval_data_buf(nand_info); + +	memcpy(buf, nand_info->data_buf, length); + +rtn: +	mxs_nand_return_dma_descs(nand_info); +} + +/* + * Write data to NAND. 
+ */ +static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, +				int length) +{ +	struct nand_chip *nand = mtd->priv; +	struct mxs_nand_info *nand_info = nand->priv; +	struct mxs_dma_desc *d; +	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; +	int ret; + +	if (length > NAND_MAX_PAGESIZE) { +		printf("MXS NAND: DMA buffer too big\n"); +		return; +	} + +	if (!buf) { +		printf("MXS NAND: DMA buffer is NULL\n"); +		return; +	} + +	memcpy(nand_info->data_buf, buf, length); + +	/* Compile the DMA descriptor - a descriptor that writes data. */ +	d = mxs_nand_get_dma_desc(nand_info); +	d->cmd.data = +		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ | +		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | +		(4 << MXS_DMA_DESC_PIO_WORDS_OFFSET) | +		(length << MXS_DMA_DESC_BYTES_OFFSET); + +	d->cmd.address = (dma_addr_t)nand_info->data_buf; + +	d->cmd.pio_words[0] = +		GPMI_CTRL0_COMMAND_MODE_WRITE | +		GPMI_CTRL0_WORD_LENGTH | +		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | +		GPMI_CTRL0_ADDRESS_NAND_DATA | +		length; + +	mxs_dma_desc_append(channel, d); + +	/* Flush caches */ +	mxs_nand_flush_data_buf(nand_info); + +	/* Execute the DMA chain. */ +	ret = mxs_dma_go(channel); +	if (ret) +		printf("MXS NAND: DMA write error\n"); + +	mxs_nand_return_dma_descs(nand_info); +} + +/* + * Read a single byte from NAND. + */ +static uint8_t mxs_nand_read_byte(struct mtd_info *mtd) +{ +	uint8_t buf; +	mxs_nand_read_buf(mtd, &buf, 1); +	return buf; +} + +/* + * Read a page from NAND. + */ +static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand, +					uint8_t *buf, int oob_required, +					int page) +{ +	struct mxs_nand_info *nand_info = nand->priv; +	struct mxs_dma_desc *d; +	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; +	uint32_t corrected = 0, failed = 0; +	uint8_t	*status; +	int i, ret; + +	/* Compile the DMA descriptor - wait for ready. 
*/ +	d = mxs_nand_get_dma_desc(nand_info); +	d->cmd.data = +		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | +		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END | +		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET); + +	d->cmd.address = 0; + +	d->cmd.pio_words[0] = +		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | +		GPMI_CTRL0_WORD_LENGTH | +		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | +		GPMI_CTRL0_ADDRESS_NAND_DATA; + +	mxs_dma_desc_append(channel, d); + +	/* Compile the DMA descriptor - enable the BCH block and read. */ +	d = mxs_nand_get_dma_desc(nand_info); +	d->cmd.data = +		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | +		MXS_DMA_DESC_WAIT4END |	(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET); + +	d->cmd.address = 0; + +	d->cmd.pio_words[0] = +		GPMI_CTRL0_COMMAND_MODE_READ | +		GPMI_CTRL0_WORD_LENGTH | +		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | +		GPMI_CTRL0_ADDRESS_NAND_DATA | +		(mtd->writesize + mtd->oobsize); +	d->cmd.pio_words[1] = 0; +	d->cmd.pio_words[2] = +		GPMI_ECCCTRL_ENABLE_ECC | +		GPMI_ECCCTRL_ECC_CMD_DECODE | +		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE; +	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize; +	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf; +	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf; + +	mxs_dma_desc_append(channel, d); + +	/* Compile the DMA descriptor - disable the BCH block. */ +	d = mxs_nand_get_dma_desc(nand_info); +	d->cmd.data = +		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | +		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END | +		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET); + +	d->cmd.address = 0; + +	d->cmd.pio_words[0] = +		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | +		GPMI_CTRL0_WORD_LENGTH | +		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | +		GPMI_CTRL0_ADDRESS_NAND_DATA | +		(mtd->writesize + mtd->oobsize); +	d->cmd.pio_words[1] = 0; +	d->cmd.pio_words[2] = 0; + +	mxs_dma_desc_append(channel, d); + +	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. 
*/ +	d = mxs_nand_get_dma_desc(nand_info); +	d->cmd.data = +		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | +		MXS_DMA_DESC_DEC_SEM; + +	d->cmd.address = 0; + +	mxs_dma_desc_append(channel, d); + +	/* Execute the DMA chain. */ +	ret = mxs_dma_go(channel); +	if (ret) { +		printf("MXS NAND: DMA read error\n"); +		goto rtn; +	} + +	ret = mxs_nand_wait_for_bch_complete(); +	if (ret) { +		printf("MXS NAND: BCH read timeout\n"); +		goto rtn; +	} + +	/* Invalidate caches */ +	mxs_nand_inval_data_buf(nand_info); + +	/* Read DMA completed, now do the mark swapping. */ +	mxs_nand_swap_block_mark(mtd, nand_info->data_buf, nand_info->oob_buf); + +	/* Loop over status bytes, accumulating ECC status. */ +	status = nand_info->oob_buf + mxs_nand_aux_status_offset(); +	for (i = 0; i < mxs_nand_ecc_chunk_cnt(mtd->writesize); i++) { +		if (status[i] == 0x00) +			continue; + +		if (status[i] == 0xff) +			continue; + +		if (status[i] == 0xfe) { +			failed++; +			continue; +		} + +		corrected += status[i]; +	} + +	/* Propagate ECC status to the owning MTD. */ +	mtd->ecc_stats.failed += failed; +	mtd->ecc_stats.corrected += corrected; + +	/* +	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for +	 * details about our policy for delivering the OOB. +	 * +	 * We fill the caller's buffer with set bits, and then copy the block +	 * mark to the caller's buffer. Note that, if block mark swapping was +	 * necessary, it has already been done, so we can rely on the first +	 * byte of the auxiliary buffer to contain the block mark. +	 */ +	memset(nand->oob_poi, 0xff, mtd->oobsize); + +	nand->oob_poi[0] = nand_info->oob_buf[0]; + +	memcpy(buf, nand_info->data_buf, mtd->writesize); + +rtn: +	mxs_nand_return_dma_descs(nand_info); + +	return ret; +} + +/* + * Write a page to NAND. 
+ */ +static int mxs_nand_ecc_write_page(struct mtd_info *mtd, +				struct nand_chip *nand, const uint8_t *buf, +				int oob_required) +{ +	struct mxs_nand_info *nand_info = nand->priv; +	struct mxs_dma_desc *d; +	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; +	int ret; + +	memcpy(nand_info->data_buf, buf, mtd->writesize); +	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize); + +	/* Handle block mark swapping. */ +	mxs_nand_swap_block_mark(mtd, nand_info->data_buf, nand_info->oob_buf); + +	/* Compile the DMA descriptor - write data. */ +	d = mxs_nand_get_dma_desc(nand_info); +	d->cmd.data = +		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | +		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | +		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET); + +	d->cmd.address = 0; + +	d->cmd.pio_words[0] = +		GPMI_CTRL0_COMMAND_MODE_WRITE | +		GPMI_CTRL0_WORD_LENGTH | +		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | +		GPMI_CTRL0_ADDRESS_NAND_DATA; +	d->cmd.pio_words[1] = 0; +	d->cmd.pio_words[2] = +		GPMI_ECCCTRL_ENABLE_ECC | +		GPMI_ECCCTRL_ECC_CMD_ENCODE | +		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE; +	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize); +	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf; +	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf; + +	mxs_dma_desc_append(channel, d); + +	/* Flush caches */ +	mxs_nand_flush_data_buf(nand_info); + +	/* Execute the DMA chain. */ +	ret = mxs_dma_go(channel); +	if (ret) { +		printf("MXS NAND: DMA write error\n"); +		goto rtn; +	} + +	ret = mxs_nand_wait_for_bch_complete(); +	if (ret) { +		printf("MXS NAND: BCH write timeout\n"); +		goto rtn; +	} + +rtn: +	mxs_nand_return_dma_descs(nand_info); +	return 0; +} + +/* + * Read OOB from NAND. + * + * This function is a veneer that replaces the function originally installed by + * the NAND Flash MTD code. 
+ */ +static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from, +					struct mtd_oob_ops *ops) +{ +	struct nand_chip *chip = mtd->priv; +	struct mxs_nand_info *nand_info = chip->priv; +	int ret; + +	if (ops->mode == MTD_OPS_RAW) +		nand_info->raw_oob_mode = 1; +	else +		nand_info->raw_oob_mode = 0; + +	ret = nand_info->hooked_read_oob(mtd, from, ops); + +	nand_info->raw_oob_mode = 0; + +	return ret; +} + +/* + * Write OOB to NAND. + * + * This function is a veneer that replaces the function originally installed by + * the NAND Flash MTD code. + */ +static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to, +					struct mtd_oob_ops *ops) +{ +	struct nand_chip *chip = mtd->priv; +	struct mxs_nand_info *nand_info = chip->priv; +	int ret; + +	if (ops->mode == MTD_OPS_RAW) +		nand_info->raw_oob_mode = 1; +	else +		nand_info->raw_oob_mode = 0; + +	ret = nand_info->hooked_write_oob(mtd, to, ops); + +	nand_info->raw_oob_mode = 0; + +	return ret; +} + +/* + * Mark a block bad in NAND. + * + * This function is a veneer that replaces the function originally installed by + * the NAND Flash MTD code. + */ +static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ +	struct nand_chip *chip = mtd->priv; +	struct mxs_nand_info *nand_info = chip->priv; +	int ret; + +	nand_info->marking_block_bad = 1; + +	ret = nand_info->hooked_block_markbad(mtd, ofs); + +	nand_info->marking_block_bad = 0; + +	return ret; +} + +/* + * There are several places in this driver where we have to handle the OOB and + * block marks. This is the function where things are the most complicated, so + * this is where we try to explain it all. All the other places refer back to + * here. + * + * These are the rules, in order of decreasing importance: + * + * 1) Nothing the caller does can be allowed to imperil the block mark, so all + *    write operations take measures to protect it. 
+ * + * 2) In read operations, the first byte of the OOB we return must reflect the + *    true state of the block mark, no matter where that block mark appears in + *    the physical page. + * + * 3) ECC-based read operations return an OOB full of set bits (since we never + *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads + *    return). + * + * 4) "Raw" read operations return a direct view of the physical bytes in the + *    page, using the conventional definition of which bytes are data and which + *    are OOB. This gives the caller a way to see the actual, physical bytes + *    in the page, without the distortions applied by our ECC engine. + * + * What we do for this specific read operation depends on whether we're doing + * a "raw" read, or an ECC-based read. + * + * It turns out that knowing whether we want an "ECC-based" or "raw" read is not + * easy. When reading a page, for example, the NAND Flash MTD code calls our + * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an + * ECC-based or raw view of the page is implicit in which function it calls + * (there is a similar pair of ECC-based/raw functions for writing). + * + * Since MTD assumes the OOB is not covered by ECC, there is no pair of + * ECC-based/raw functions for reading or writing the OOB. The fact that the + * caller wants an ECC-based or raw view of the page is not propagated down to + * this driver. + * + * Since our OOB *is* covered by ECC, we need this information. So, we hook the + * ecc.read_oob and ecc.write_oob function pointers in the owning + * struct mtd_info with our own functions. These hook functions set the + * raw_oob_mode field so that, when control finally arrives here, we'll know + * what to do. + */ +static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand, +				int page) +{ +	struct mxs_nand_info *nand_info = nand->priv; + +	/* +	 * First, fill in the OOB buffer. 
If we're doing a raw read, we need to +	 * get the bytes from the physical page. If we're not doing a raw read, +	 * we need to fill the buffer with set bits. +	 */ +	if (nand_info->raw_oob_mode) { +		/* +		 * If control arrives here, we're doing a "raw" read. Send the +		 * command to read the conventional OOB and read it. +		 */ +		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); +		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize); +	} else { +		/* +		 * If control arrives here, we're not doing a "raw" read. Fill +		 * the OOB buffer with set bits and correct the block mark. +		 */ +		memset(nand->oob_poi, 0xff, mtd->oobsize); + +		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); +		mxs_nand_read_buf(mtd, nand->oob_poi, 1); +	} + +	return 0; + +} + +/* + * Write OOB data to NAND. + */ +static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand, +					int page) +{ +	struct mxs_nand_info *nand_info = nand->priv; +	uint8_t block_mark = 0; + +	/* +	 * There are fundamental incompatibilities between the i.MX GPMI NFC and +	 * the NAND Flash MTD model that make it essentially impossible to write +	 * the out-of-band bytes. +	 * +	 * We permit *ONE* exception. If the *intent* of writing the OOB is to +	 * mark a block bad, we can do that. +	 */ + +	if (!nand_info->marking_block_bad) { +		printf("NXS NAND: Writing OOB isn't supported\n"); +		return -EIO; +	} + +	/* Write the block mark. */ +	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page); +	nand->write_buf(mtd, &block_mark, 1); +	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); + +	/* Check if it worked. */ +	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL) +		return -EIO; + +	return 0; +} + +/* + * Claims all blocks are good. + * + * In principle, this function is *only* called when the NAND Flash MTD system + * isn't allowed to keep an in-memory bad block table, so it is forced to ask + * the driver for bad block information. 
+ * + * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so + * this function is *only* called when we take it away. + * + * Thus, this function is only called when we want *all* blocks to look good, + * so it *always* returns success. + */ +static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) +{ +	return 0; +} + +/* + * Nominally, the purpose of this function is to look for or create the bad + * block table. In fact, since we call this function at the very end of + * the initialization process started by nand_scan(), and we don't have a + * more formal mechanism, we "hook" this function to continue the init process. + * + * At this point, the physical NAND Flash chips have been identified and + * counted, so we know the physical geometry. This enables us to make some + * important configuration decisions. + * + * The return value of this function propagates directly back to this driver's + * call to nand_scan(). Anything other than zero will cause this driver to + * tear everything down and declare failure. 
+ */ +static int mxs_nand_scan_bbt(struct mtd_info *mtd) +{ +	struct nand_chip *nand = mtd->priv; +	struct mxs_nand_info *nand_info = nand->priv; +	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; +	uint32_t tmp; + +	/* Configure BCH and set NFC geometry */ +	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg); + +	/* Configure layout 0 */ +	tmp = (mxs_nand_ecc_chunk_cnt(mtd->writesize) - 1) +		<< BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; +	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET; +	tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1) +		<< BCH_FLASHLAYOUT0_ECC0_OFFSET; +	tmp |= MXS_NAND_CHUNK_DATA_CHUNK_SIZE +		>> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT; +	writel(tmp, &bch_regs->hw_bch_flash0layout0); + +	tmp = (mtd->writesize + mtd->oobsize) +		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET; +	tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1) +		<< BCH_FLASHLAYOUT1_ECCN_OFFSET; +	tmp |= MXS_NAND_CHUNK_DATA_CHUNK_SIZE +		>> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT; +	writel(tmp, &bch_regs->hw_bch_flash0layout1); + +	/* Set *all* chip selects to use layout 0 */ +	writel(0, &bch_regs->hw_bch_layoutselect); + +	/* Enable BCH complete interrupt */ +	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set); + +	/* Hook some operations at the MTD level. */ +	if (mtd->_read_oob != mxs_nand_hook_read_oob) { +		nand_info->hooked_read_oob = mtd->_read_oob; +		mtd->_read_oob = mxs_nand_hook_read_oob; +	} + +	if (mtd->_write_oob != mxs_nand_hook_write_oob) { +		nand_info->hooked_write_oob = mtd->_write_oob; +		mtd->_write_oob = mxs_nand_hook_write_oob; +	} + +	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) { +		nand_info->hooked_block_markbad = mtd->_block_markbad; +		mtd->_block_markbad = mxs_nand_hook_block_markbad; +	} + +	/* We use the reference implementation for bad block management. 
*/ +	return nand_default_bbt(mtd); +} + +/* + * Allocate DMA buffers + */ +int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info) +{ +	uint8_t *buf; +	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE; + +	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT); + +	/* DMA buffers */ +	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size); +	if (!buf) { +		printf("MXS NAND: Error allocating DMA buffers\n"); +		return -ENOMEM; +	} + +	memset(buf, 0, nand_info->data_buf_size); + +	nand_info->data_buf = buf; +	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE; +	/* Command buffers */ +	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT, +				MXS_NAND_COMMAND_BUFFER_SIZE); +	if (!nand_info->cmd_buf) { +		free(buf); +		printf("MXS NAND: Error allocating command buffers\n"); +		return -ENOMEM; +	} +	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE); +	nand_info->cmd_queue_len = 0; + +	return 0; +} + +/* + * Initializes the NFC hardware. + */ +int mxs_nand_init(struct mxs_nand_info *info) +{ +	struct mxs_gpmi_regs *gpmi_regs = +		(struct mxs_gpmi_regs *)MXS_GPMI_BASE; +	struct mxs_bch_regs *bch_regs = +		(struct mxs_bch_regs *)MXS_BCH_BASE; +	int i = 0, j; + +	info->desc = malloc(sizeof(struct mxs_dma_desc *) * +				MXS_NAND_DMA_DESCRIPTOR_COUNT); +	if (!info->desc) +		goto err1; + +	/* Allocate the DMA descriptors. */ +	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) { +		info->desc[i] = mxs_dma_desc_alloc(); +		if (!info->desc[i]) +			goto err2; +	} + +	/* Init the DMA controller. */ +	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0; +		j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) { +		if (mxs_dma_init_channel(j)) +			goto err3; +	} + +	/* Reset the GPMI block. */ +	mxs_reset_block(&gpmi_regs->hw_gpmi_ctrl0_reg); +	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg); + +	/* +	 * Choose NAND mode, set IRQ polarity, disable write protection and +	 * select BCH ECC. 
+	 */ +	clrsetbits_le32(&gpmi_regs->hw_gpmi_ctrl1, +			GPMI_CTRL1_GPMI_MODE, +			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET | +			GPMI_CTRL1_BCH_MODE); + +	return 0; + +err3: +	for (--j; j >= 0; j--) +		mxs_dma_release(j); +err2: +	/* +	 * Free the individual DMA descriptors *before* freeing the array +	 * that holds their pointers. The original ordering freed the array +	 * at err2 and then dereferenced it at err1 (use-after-free). +	 */ +	for (--i; i >= 0; i--) +		mxs_dma_desc_free(info->desc[i]); +	free(info->desc); +err1: +	printf("MXS NAND: Unable to allocate DMA descriptors\n"); +	return -ENOMEM; +} + +/*! + * This function is called during the driver binding process. + * + * @param   pdev  the device structure used to store device specific + *                information that is used by the suspend, resume and + *                remove functions + * + * @return  The function always returns 0. + */ +int board_nand_init(struct nand_chip *nand) +{ +	struct mxs_nand_info *nand_info; +	int err; + +	nand_info = malloc(sizeof(struct mxs_nand_info)); +	if (!nand_info) { +		printf("MXS NAND: Failed to allocate private data\n"); +		return -ENOMEM; +	} +	memset(nand_info, 0, sizeof(struct mxs_nand_info)); + +	err = mxs_nand_alloc_buffers(nand_info); +	if (err) +		goto err1; + +	err = mxs_nand_init(nand_info); +	if (err) +		goto err2; + +	memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout)); + +	nand->priv = nand_info; +	nand->options |= NAND_NO_SUBPAGE_WRITE; + +	nand->cmd_ctrl		= mxs_nand_cmd_ctrl; + +	nand->dev_ready		= mxs_nand_device_ready; +	nand->select_chip	= mxs_nand_select_chip; +	nand->block_bad		= mxs_nand_block_bad; +	nand->scan_bbt		= mxs_nand_scan_bbt; + +	nand->read_byte		= mxs_nand_read_byte; + +	nand->read_buf		= mxs_nand_read_buf; +	nand->write_buf		= mxs_nand_write_buf; + +	nand->ecc.read_page	= mxs_nand_ecc_read_page; +	nand->ecc.write_page	= mxs_nand_ecc_write_page; +	nand->ecc.read_oob	= mxs_nand_ecc_read_oob; +	nand->ecc.write_oob	= mxs_nand_ecc_write_oob; + +	nand->ecc.layout	= &fake_ecc_layout; +	nand->ecc.mode		= NAND_ECC_HW; +	nand->ecc.bytes		= 9; +	nand->ecc.size		= 512; +	nand->ecc.strength	= 8; + +	return 0; + +err2: +	
free(nand_info->data_buf); +	free(nand_info->cmd_buf); +err1: +	free(nand_info); +	return err; +} diff --git a/roms/u-boot/drivers/mtd/nand/nand.c b/roms/u-boot/drivers/mtd/nand/nand.c new file mode 100644 index 00000000..4cf4c1c7 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/nand.c @@ -0,0 +1,120 @@ +/* + * (C) Copyright 2005 + * 2N Telekomunikace, a.s. <www.2n.cz> + * Ladislav Michl <michl@2n.cz> + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ + +#include <common.h> +#include <nand.h> +#include <errno.h> + +#ifndef CONFIG_SYS_NAND_BASE_LIST +#define CONFIG_SYS_NAND_BASE_LIST { CONFIG_SYS_NAND_BASE } +#endif + +DECLARE_GLOBAL_DATA_PTR; + +int nand_curr_device = -1; + + +nand_info_t nand_info[CONFIG_SYS_MAX_NAND_DEVICE]; + +#ifndef CONFIG_SYS_NAND_SELF_INIT +static struct nand_chip nand_chip[CONFIG_SYS_MAX_NAND_DEVICE]; +static ulong base_address[CONFIG_SYS_MAX_NAND_DEVICE] = CONFIG_SYS_NAND_BASE_LIST; +#endif + +static char dev_name[CONFIG_SYS_MAX_NAND_DEVICE][8]; + +static unsigned long total_nand_size; /* in kiB */ + +/* Register an initialized NAND mtd device with the U-Boot NAND command. 
*/ +int nand_register(int devnum) +{ +	struct mtd_info *mtd; + +	if (devnum >= CONFIG_SYS_MAX_NAND_DEVICE) +		return -EINVAL; + +	mtd = &nand_info[devnum]; + +	sprintf(dev_name[devnum], "nand%d", devnum); +	mtd->name = dev_name[devnum]; + +#ifdef CONFIG_MTD_DEVICE +	/* +	 * Add MTD device so that we can reference it later +	 * via the mtdcore infrastructure (e.g. ubi). +	 */ +	add_mtd_device(mtd); +#endif + +	total_nand_size += mtd->size / 1024; + +	if (nand_curr_device == -1) +		nand_curr_device = devnum; + +	return 0; +} + +#ifndef CONFIG_SYS_NAND_SELF_INIT +static void nand_init_chip(int i) +{ +	struct mtd_info *mtd = &nand_info[i]; +	struct nand_chip *nand = &nand_chip[i]; +	ulong base_addr = base_address[i]; +	int maxchips = CONFIG_SYS_NAND_MAX_CHIPS; + +	if (maxchips < 1) +		maxchips = 1; + +	mtd->priv = nand; +	nand->IO_ADDR_R = nand->IO_ADDR_W = (void  __iomem *)base_addr; + +	if (board_nand_init(nand)) +		return; + +	if (nand_scan(mtd, maxchips)) +		return; + +	nand_register(i); +} +#endif + +void nand_init(void) +{ +#ifdef CONFIG_SYS_NAND_SELF_INIT +	board_nand_init(); +#else +	int i; + +	for (i = 0; i < CONFIG_SYS_MAX_NAND_DEVICE; i++) +		nand_init_chip(i); +#endif + +	printf("%lu MiB\n", total_nand_size / 1024); + +#ifdef CONFIG_SYS_NAND_SELECT_DEVICE +	/* +	 * Select the chip in the board/cpu specific driver +	 */ +	board_nand_select_device(nand_info[nand_curr_device].priv, nand_curr_device); +#endif +} diff --git a/roms/u-boot/drivers/mtd/nand/nand_base.c b/roms/u-boot/drivers/mtd/nand/nand_base.c new file mode 100644 index 00000000..1ce55fde --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/nand_base.c @@ -0,0 +1,3438 @@ +/* + *  drivers/mtd/nand.c + * + *  Overview: + *   This is the generic MTD driver for NAND flash devices. It should be + *   capable of working with almost all NAND chips currently available. + *   Basic support for AG-AND chips is provided. 
+ * + *	Additional technical information is available on + *	http://www.linux-mtd.infradead.org/doc/nand.html + * + *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) + *		  2002-2006 Thomas Gleixner (tglx@linutronix.de) + * + *  Credits: + *	David Woodhouse for adding multichip support + * + *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the + *	rework for 2K page size chips + * + *  TODO: + *	Enable cached programming for 2k page size chips + *	Check, if mtd->ecctype should be set to MTD_ECC_HW + *	if we have HW ECC support. + *	The AG-AND chips have nice features for speed improvement, + *	which are not supported yet. Read / program 4 pages in one go. + *	BBT table is not serialized, has to be fixed + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <common.h> + +#define ENOTSUPP	524	/* Operation is not supported */ + +#include <malloc.h> +#include <watchdog.h> +#include <linux/err.h> +#include <linux/compat.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/nand_ecc.h> +#include <linux/mtd/nand_bch.h> + +#ifdef CONFIG_MTD_PARTITIONS +#include <linux/mtd/partitions.h> +#endif + +#include <asm/io.h> +#include <asm/errno.h> + +/* + * CONFIG_SYS_NAND_RESET_CNT is used as a timeout mechanism when resetting + * a flash.  NAND flash is initialized prior to interrupts so standard timers + * can't be used.  CONFIG_SYS_NAND_RESET_CNT should be set to a value + * which is greater than (max NAND reset time / NAND status read time). + * A conservative default of 200000 (500 us / 25 ns) is used as a default. 
+ */ +#ifndef CONFIG_SYS_NAND_RESET_CNT +#define CONFIG_SYS_NAND_RESET_CNT 200000 +#endif + +/* Define default oob placement schemes for large and small page devices */ +static struct nand_ecclayout nand_oob_8 = { +	.eccbytes = 3, +	.eccpos = {0, 1, 2}, +	.oobfree = { +		{.offset = 3, +		 .length = 2}, +		{.offset = 6, +		 .length = 2} } +}; + +static struct nand_ecclayout nand_oob_16 = { +	.eccbytes = 6, +	.eccpos = {0, 1, 2, 3, 6, 7}, +	.oobfree = { +		{.offset = 8, +		 . length = 8} } +}; + +static struct nand_ecclayout nand_oob_64 = { +	.eccbytes = 24, +	.eccpos = { +		   40, 41, 42, 43, 44, 45, 46, 47, +		   48, 49, 50, 51, 52, 53, 54, 55, +		   56, 57, 58, 59, 60, 61, 62, 63}, +	.oobfree = { +		{.offset = 2, +		 .length = 38} } +}; + +static struct nand_ecclayout nand_oob_128 = { +	.eccbytes = 48, +	.eccpos = { +		   80, 81, 82, 83, 84, 85, 86, 87, +		   88, 89, 90, 91, 92, 93, 94, 95, +		   96, 97, 98, 99, 100, 101, 102, 103, +		   104, 105, 106, 107, 108, 109, 110, 111, +		   112, 113, 114, 115, 116, 117, 118, 119, +		   120, 121, 122, 123, 124, 125, 126, 127}, +	.oobfree = { +		{.offset = 2, +		 .length = 78} } +}; + +static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, +			   int new_state); + +static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, +			     struct mtd_oob_ops *ops); + +static int nand_wait(struct mtd_info *mtd, struct nand_chip *this); + +static int check_offs_len(struct mtd_info *mtd, +					loff_t ofs, uint64_t len) +{ +	struct nand_chip *chip = mtd->priv; +	int ret = 0; + +	/* Start address must align on block boundary */ +	if (ofs & ((1 << chip->phys_erase_shift) - 1)) { +		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); +		ret = -EINVAL; +	} + +	/* Length must align on block boundary */ +	if (len & ((1 << chip->phys_erase_shift) - 1)) { +		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", +					__func__); +		ret = -EINVAL; +	} + +	return ret; +} + +/** + * nand_release_device - 
[GENERIC] release chip
+ * @mtd: MTD device structure
+ *
+ * Deselect, release chip lock and wake up anyone waiting on the device.
+ */
+static void nand_release_device(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+
+	/* De-select the NAND device */
+	chip->select_chip(mtd, -1);
+}
+
+/**
+ * nand_read_byte - [DEFAULT] read one byte from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 8bit buswidth.
+ */
+uint8_t nand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	return readb(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
+ *
+ */
+static uint8_t nand_read_byte16(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
+}
+
+/**
+ * nand_read_word - [DEFAULT] read one word from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth without endianness conversion.
+ */
+static u16 nand_read_word(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	return readw(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_select_chip - [DEFAULT] control CE line
+ * @mtd: MTD device structure
+ * @chipnr: chipnumber to select, -1 for deselect
+ *
+ * Default select function for 1 chip devices.
+ */
+static void nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct nand_chip *chip = mtd->priv;
+
+	switch (chipnr) {
+	case -1:
+		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+		break;
+	case 0:
+		break;
+
+	default:
+		BUG();
+	}
+}
+
+/**
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 8bit buswidth.
+ */
+void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *chip = mtd->priv;
+
+	for (i = 0; i < len; i++)
+		writeb(buf[i], chip->IO_ADDR_W);
+}
+
+/**
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 8bit buswidth.
+ */
+void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *chip = mtd->priv;
+
+	for (i = 0; i < len; i++)
+		buf[i] = readb(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_verify_buf - [DEFAULT] Verify chip data against buffer
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
+ *
+ * Default verify function for 8bit buswidth.
+ */
+static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *chip = mtd->priv;
+
+	for (i = 0; i < len; i++)
+		if (buf[i] != readb(chip->IO_ADDR_R))
+			return -EFAULT;
+	return 0;
+}
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16bit buswidth.
+ */
+void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *chip = mtd->priv;
+	u16 *p = (u16 *) buf;
+	len >>= 1;
+
+	for (i = 0; i < len; i++)
+		writew(p[i], chip->IO_ADDR_W);
+
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 16bit buswidth.
+ */ +void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) +{ +	int i; +	struct nand_chip *chip = mtd->priv; +	u16 *p = (u16 *) buf; +	len >>= 1; + +	for (i = 0; i < len; i++) +		p[i] = readw(chip->IO_ADDR_R); +} + +/** + * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer + * @mtd: MTD device structure + * @buf: buffer containing the data to compare + * @len: number of bytes to compare + * + * Default verify function for 16bit buswidth. + */ +static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) +{ +	int i; +	struct nand_chip *chip = mtd->priv; +	u16 *p = (u16 *) buf; +	len >>= 1; + +	for (i = 0; i < len; i++) +		if (p[i] != readw(chip->IO_ADDR_R)) +			return -EFAULT; + +	return 0; +} + +/** + * nand_block_bad - [DEFAULT] Read bad block marker from the chip + * @mtd: MTD device structure + * @ofs: offset from device start + * @getchip: 0, if the chip is already selected + * + * Check, if the block is bad. + */ +static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) +{ +	int page, chipnr, res = 0, i = 0; +	struct nand_chip *chip = mtd->priv; +	u16 bad; + +	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) +		ofs += mtd->erasesize - mtd->writesize; + +	page = (int)(ofs >> chip->page_shift) & chip->pagemask; + +	if (getchip) { +		chipnr = (int)(ofs >> chip->chip_shift); + +		nand_get_device(chip, mtd, FL_READING); + +		/* Select the NAND device */ +		chip->select_chip(mtd, chipnr); +	} + +	do { +		if (chip->options & NAND_BUSWIDTH_16) { +			chip->cmdfunc(mtd, NAND_CMD_READOOB, +					chip->badblockpos & 0xFE, page); +			bad = cpu_to_le16(chip->read_word(mtd)); +			if (chip->badblockpos & 0x1) +				bad >>= 8; +			else +				bad &= 0xFF; +		} else { +			chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, +					page); +			bad = chip->read_byte(mtd); +		} + +		if (likely(chip->badblockbits == 8)) +			res = bad != 0xFF; +		else +			res = hweight8(bad) < chip->badblockbits; +		ofs += mtd->writesize; +		
page = (int)(ofs >> chip->page_shift) & chip->pagemask; +		i++; +	} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE)); + +	if (getchip) +		nand_release_device(mtd); + +	return res; +} + +/** + * nand_default_block_markbad - [DEFAULT] mark a block bad + * @mtd: MTD device structure + * @ofs: offset from device start + * + * This is the default implementation, which can be overridden by a hardware + * specific driver. We try operations in the following order, according to our + * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH): + *  (1) erase the affected block, to allow OOB marker to be written cleanly + *  (2) update in-memory BBT + *  (3) write bad block marker to OOB area of affected block + *  (4) update flash-based BBT + * Note that we retain the first error encountered in (3) or (4), finish the + * procedures, and dump the error in the end. +*/ +static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ +	struct nand_chip *chip = mtd->priv; +	uint8_t buf[2] = { 0, 0 }; +	int block, res, ret = 0, i = 0; +	int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM); + +	if (write_oob) { +		struct erase_info einfo; + +		/* Attempt erase before marking OOB */ +		memset(&einfo, 0, sizeof(einfo)); +		einfo.mtd = mtd; +		einfo.addr = ofs; +		einfo.len = 1 << chip->phys_erase_shift; +		nand_erase_nand(mtd, &einfo, 0); +	} + +	/* Get block number */ +	block = (int)(ofs >> chip->bbt_erase_shift); +	/* Mark block bad in memory-based BBT */ +	if (chip->bbt) +		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); + +	/* Write bad block marker to OOB */ +	if (write_oob) { +		struct mtd_oob_ops ops; +		loff_t wr_ofs = ofs; + +		nand_get_device(chip, mtd, FL_WRITING); + +		ops.datbuf = NULL; +		ops.oobbuf = buf; +		ops.ooboffs = chip->badblockpos; +		if (chip->options & NAND_BUSWIDTH_16) { +			ops.ooboffs &= ~0x01; +			ops.len = ops.ooblen = 2; +		} else { +			ops.len = ops.ooblen = 1; +		} +		ops.mode = MTD_OPS_PLACE_OOB; + +		/* 
Write to first/last page(s) if necessary */ +		if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) +			wr_ofs += mtd->erasesize - mtd->writesize; +		do { +			res = nand_do_write_oob(mtd, wr_ofs, &ops); +			if (!ret) +				ret = res; + +			i++; +			wr_ofs += mtd->writesize; +		} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2); + +		nand_release_device(mtd); +	} + +	/* Update flash-based bad block table */ +	if (chip->bbt_options & NAND_BBT_USE_FLASH) { +		res = nand_update_bbt(mtd, ofs); +		if (!ret) +			ret = res; +	} + +	if (!ret) +		mtd->ecc_stats.badblocks++; + +	return ret; +} + +/** + * nand_check_wp - [GENERIC] check if the chip is write protected + * @mtd: MTD device structure + * + * Check, if the device is write protected. The function expects, that the + * device is already selected. + */ +static int nand_check_wp(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; + +	/* Broken xD cards report WP despite being writable */ +	if (chip->options & NAND_BROKEN_XD) +		return 0; + +	/* Check the WP bit */ +	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); +	return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1; +} + +/** + * nand_block_checkbad - [GENERIC] Check if a block is marked bad + * @mtd: MTD device structure + * @ofs: offset from device start + * @getchip: 0, if the chip is already selected + * @allowbbt: 1, if its allowed to access the bbt area + * + * Check, if the block is bad. Either by reading the bad block table or + * calling of the scan function. + */ +static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip, +			       int allowbbt) +{ +	struct nand_chip *chip = mtd->priv; + +	if (!(chip->options & NAND_BBT_SCANNED)) { +		chip->options |= NAND_BBT_SCANNED; +		chip->scan_bbt(mtd); +	} + +	if (!chip->bbt) +		return chip->block_bad(mtd, ofs, getchip); + +	/* Return info from the table */ +	return nand_isbad_bbt(mtd, ofs, allowbbt); +} + +/* Wait for the ready pin, after a command. The timeout is caught later. 
*/ +void nand_wait_ready(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	u32 timeo = (CONFIG_SYS_HZ * 20) / 1000; +	u32 time_start; + +	time_start = get_timer(0); + +	/* Wait until command is processed or timeout occurs */ +	while (get_timer(time_start) < timeo) { +		if (chip->dev_ready) +			if (chip->dev_ready(mtd)) +				break; +	} +} + +/** + * nand_command - [DEFAULT] Send command to NAND device + * @mtd: MTD device structure + * @command: the command to be sent + * @column: the column address for this command, -1 if none + * @page_addr: the page address for this command, -1 if none + * + * Send command to NAND device. This function is used for small page devices + * (256/512 Bytes per page). + */ +static void nand_command(struct mtd_info *mtd, unsigned int command, +			 int column, int page_addr) +{ +	register struct nand_chip *chip = mtd->priv; +	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE; +	uint32_t rst_sts_cnt = CONFIG_SYS_NAND_RESET_CNT; + +	/* Write out the command to the device */ +	if (command == NAND_CMD_SEQIN) { +		int readcmd; + +		if (column >= mtd->writesize) { +			/* OOB area */ +			column -= mtd->writesize; +			readcmd = NAND_CMD_READOOB; +		} else if (column < 256) { +			/* First 256 bytes --> READ0 */ +			readcmd = NAND_CMD_READ0; +		} else { +			column -= 256; +			readcmd = NAND_CMD_READ1; +		} +		chip->cmd_ctrl(mtd, readcmd, ctrl); +		ctrl &= ~NAND_CTRL_CHANGE; +	} +	chip->cmd_ctrl(mtd, command, ctrl); + +	/* Address cycle, when necessary */ +	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; +	/* Serially input address */ +	if (column != -1) { +		/* Adjust columns for 16 bit buswidth */ +		if (chip->options & NAND_BUSWIDTH_16) +			column >>= 1; +		chip->cmd_ctrl(mtd, column, ctrl); +		ctrl &= ~NAND_CTRL_CHANGE; +	} +	if (page_addr != -1) { +		chip->cmd_ctrl(mtd, page_addr, ctrl); +		ctrl &= ~NAND_CTRL_CHANGE; +		chip->cmd_ctrl(mtd, page_addr >> 8, ctrl); +		/* One more address cycle for devices > 32MiB */ +		if (chip->chipsize > (32 
<< 20)) +			chip->cmd_ctrl(mtd, page_addr >> 16, ctrl); +	} +	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); + +	/* +	 * Program and erase have their own busy handlers status and sequential +	 * in needs no delay +	 */ +	switch (command) { + +	case NAND_CMD_PAGEPROG: +	case NAND_CMD_ERASE1: +	case NAND_CMD_ERASE2: +	case NAND_CMD_SEQIN: +	case NAND_CMD_STATUS: +		return; + +	case NAND_CMD_RESET: +		if (chip->dev_ready) +			break; +		udelay(chip->chip_delay); +		chip->cmd_ctrl(mtd, NAND_CMD_STATUS, +			       NAND_CTRL_CLE | NAND_CTRL_CHANGE); +		chip->cmd_ctrl(mtd, +			       NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); +		while (!(chip->read_byte(mtd) & NAND_STATUS_READY) && +			(rst_sts_cnt--)); +		return; + +		/* This applies to read commands */ +	default: +		/* +		 * If we don't have access to the busy pin, we apply the given +		 * command delay +		 */ +		if (!chip->dev_ready) { +			udelay(chip->chip_delay); +			return; +		} +	} +	/* +	 * Apply this short delay always to ensure that we do wait tWB in +	 * any case on any machine. +	 */ +	ndelay(100); + +	nand_wait_ready(mtd); +} + +/** + * nand_command_lp - [DEFAULT] Send command to NAND large page device + * @mtd: MTD device structure + * @command: the command to be sent + * @column: the column address for this command, -1 if none + * @page_addr: the page address for this command, -1 if none + * + * Send command to NAND device. This is the version for the new large page + * devices. We don't have the separate regions as we have in the small page + * devices. We must emulate NAND_CMD_READOOB to keep the code compatible. 
+ */ +static void nand_command_lp(struct mtd_info *mtd, unsigned int command, +			    int column, int page_addr) +{ +	register struct nand_chip *chip = mtd->priv; +	uint32_t rst_sts_cnt = CONFIG_SYS_NAND_RESET_CNT; + +	/* Emulate NAND_CMD_READOOB */ +	if (command == NAND_CMD_READOOB) { +		column += mtd->writesize; +		command = NAND_CMD_READ0; +	} + +	/* Command latch cycle */ +	chip->cmd_ctrl(mtd, command & 0xff, +		       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); + +	if (column != -1 || page_addr != -1) { +		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE; + +		/* Serially input address */ +		if (column != -1) { +			/* Adjust columns for 16 bit buswidth */ +			if (chip->options & NAND_BUSWIDTH_16) +				column >>= 1; +			chip->cmd_ctrl(mtd, column, ctrl); +			ctrl &= ~NAND_CTRL_CHANGE; +			chip->cmd_ctrl(mtd, column >> 8, ctrl); +		} +		if (page_addr != -1) { +			chip->cmd_ctrl(mtd, page_addr, ctrl); +			chip->cmd_ctrl(mtd, page_addr >> 8, +				       NAND_NCE | NAND_ALE); +			/* One more address cycle for devices > 128MiB */ +			if (chip->chipsize > (128 << 20)) +				chip->cmd_ctrl(mtd, page_addr >> 16, +					       NAND_NCE | NAND_ALE); +		} +	} +	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); + +	/* +	 * Program and erase have their own busy handlers status, sequential +	 * in, and deplete1 need no delay. 
+	 */ +	switch (command) { + +	case NAND_CMD_CACHEDPROG: +	case NAND_CMD_PAGEPROG: +	case NAND_CMD_ERASE1: +	case NAND_CMD_ERASE2: +	case NAND_CMD_SEQIN: +	case NAND_CMD_RNDIN: +	case NAND_CMD_STATUS: +	case NAND_CMD_DEPLETE1: +		return; + +	case NAND_CMD_STATUS_ERROR: +	case NAND_CMD_STATUS_ERROR0: +	case NAND_CMD_STATUS_ERROR1: +	case NAND_CMD_STATUS_ERROR2: +	case NAND_CMD_STATUS_ERROR3: +		/* Read error status commands require only a short delay */ +		udelay(chip->chip_delay); +		return; + +	case NAND_CMD_RESET: +		if (chip->dev_ready) +			break; +		udelay(chip->chip_delay); +		chip->cmd_ctrl(mtd, NAND_CMD_STATUS, +			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); +		chip->cmd_ctrl(mtd, NAND_CMD_NONE, +			       NAND_NCE | NAND_CTRL_CHANGE); +		while (!(chip->read_byte(mtd) & NAND_STATUS_READY) && +			(rst_sts_cnt--)); +		return; + +	case NAND_CMD_RNDOUT: +		/* No ready / busy check necessary */ +		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART, +			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); +		chip->cmd_ctrl(mtd, NAND_CMD_NONE, +			       NAND_NCE | NAND_CTRL_CHANGE); +		return; + +	case NAND_CMD_READ0: +		chip->cmd_ctrl(mtd, NAND_CMD_READSTART, +			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); +		chip->cmd_ctrl(mtd, NAND_CMD_NONE, +			       NAND_NCE | NAND_CTRL_CHANGE); + +		/* This applies to read commands */ +	default: +		/* +		 * If we don't have access to the busy pin, we apply the given +		 * command delay. +		 */ +		if (!chip->dev_ready) { +			udelay(chip->chip_delay); +			return; +		} +	} + +	/* +	 * Apply this short delay always to ensure that we do wait tWB in +	 * any case on any machine. 
+	 */ +	ndelay(100); + +	nand_wait_ready(mtd); +} + +/** + * nand_get_device - [GENERIC] Get chip for selected access + * @chip: the nand chip descriptor + * @mtd: MTD device structure + * @new_state: the state which is requested + * + * Get the device and lock it for exclusive access + */ +static int +nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state) +{ +	chip->state = new_state; +	return 0; +} + +/** + * nand_wait - [DEFAULT] wait until the command is done + * @mtd: MTD device structure + * @chip: NAND chip structure + * + * Wait for command done. This applies to erase and program only. Erase can + * take up to 400ms and program up to 20ms according to general NAND and + * SmartMedia specs. + */ +static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) +{ +	unsigned long	timeo; +	int state = chip->state; +	u32 time_start; + +	if (state == FL_ERASING) +		timeo = (CONFIG_SYS_HZ * 400) / 1000; +	else +		timeo = (CONFIG_SYS_HZ * 20) / 1000; + +	if ((state == FL_ERASING) && (chip->options & NAND_IS_AND)) +		chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1); +	else +		chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); + +	time_start = get_timer(0); + +	while (1) { +		if (get_timer(time_start) > timeo) { +			printf("Timeout!"); +			return 0x01; +		} + +		if (chip->dev_ready) { +			if (chip->dev_ready(mtd)) +				break; +		} else { +			if (chip->read_byte(mtd) & NAND_STATUS_READY) +				break; +		} +	} +#ifdef PPCHAMELON_NAND_TIMER_HACK +	time_start = get_timer(0); +	while (get_timer(time_start) < 10) +		; +#endif /*  PPCHAMELON_NAND_TIMER_HACK */ + +	return (int)chip->read_byte(mtd); +} + +/** + * nand_read_page_raw - [INTERN] read raw page data without ecc + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi + * @page: page number to read + * + * Not for syndrome calculating ECC controllers, which use a special oob layout. 
+ */ +static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, +			      uint8_t *buf, int oob_required, int page) +{ +	chip->read_buf(mtd, buf, mtd->writesize); +	if (oob_required) +		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); +	return 0; +} + +/** + * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi + * @page: page number to read + * + * We need a special oob layout and handling even when OOB isn't used. + */ +static int nand_read_page_raw_syndrome(struct mtd_info *mtd, +				       struct nand_chip *chip, uint8_t *buf, +				       int oob_required, int page) +{ +	int eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	uint8_t *oob = chip->oob_poi; +	int steps, size; + +	for (steps = chip->ecc.steps; steps > 0; steps--) { +		chip->read_buf(mtd, buf, eccsize); +		buf += eccsize; + +		if (chip->ecc.prepad) { +			chip->read_buf(mtd, oob, chip->ecc.prepad); +			oob += chip->ecc.prepad; +		} + +		chip->read_buf(mtd, oob, eccbytes); +		oob += eccbytes; + +		if (chip->ecc.postpad) { +			chip->read_buf(mtd, oob, chip->ecc.postpad); +			oob += chip->ecc.postpad; +		} +	} + +	size = mtd->oobsize - (oob - chip->oob_poi); +	if (size) +		chip->read_buf(mtd, oob, size); + +	return 0; +} + +/** + * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi + * @page: page number to read + */ +static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, +				uint8_t *buf, int oob_required, int page) +{ +	int i, eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccsteps = chip->ecc.steps; +	uint8_t *p = buf; +	uint8_t *ecc_calc = 
chip->buffers->ecccalc; +	uint8_t *ecc_code = chip->buffers->ecccode; +	uint32_t *eccpos = chip->ecc.layout->eccpos; + +	chip->ecc.read_page_raw(mtd, chip, buf, 1, page); + +	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) +		chip->ecc.calculate(mtd, p, &ecc_calc[i]); + +	for (i = 0; i < chip->ecc.total; i++) +		ecc_code[i] = chip->oob_poi[eccpos[i]]; + +	eccsteps = chip->ecc.steps; +	p = buf; + +	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		int stat; + +		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); +		if (stat < 0) +			mtd->ecc_stats.failed++; +		else +			mtd->ecc_stats.corrected += stat; +	} +	return 0; +} + +/** + * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function + * @mtd: mtd info structure + * @chip: nand chip info structure + * @data_offs: offset of requested data within the page + * @readlen: data length + * @bufpoi: buffer to store read data + */ +static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, +			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi) +{ +	int start_step, end_step, num_steps; +	uint32_t *eccpos = chip->ecc.layout->eccpos; +	uint8_t *p; +	int data_col_addr, i, gaps = 0; +	int datafrag_len, eccfrag_len, aligned_len, aligned_pos; +	int busw = (chip->options & NAND_BUSWIDTH_16) ? 
2 : 1; +	int index = 0; + +	/* Column address within the page aligned to ECC size (256bytes) */ +	start_step = data_offs / chip->ecc.size; +	end_step = (data_offs + readlen - 1) / chip->ecc.size; +	num_steps = end_step - start_step + 1; + +	/* Data size aligned to ECC ecc.size */ +	datafrag_len = num_steps * chip->ecc.size; +	eccfrag_len = num_steps * chip->ecc.bytes; + +	data_col_addr = start_step * chip->ecc.size; +	/* If we read not a page aligned data */ +	if (data_col_addr != 0) +		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1); + +	p = bufpoi + data_col_addr; +	chip->read_buf(mtd, p, datafrag_len); + +	/* Calculate ECC */ +	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) +		chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]); + +	/* +	 * The performance is faster if we position offsets according to +	 * ecc.pos. Let's make sure that there are no gaps in ECC positions. +	 */ +	for (i = 0; i < eccfrag_len - 1; i++) { +		if (eccpos[i + start_step * chip->ecc.bytes] + 1 != +			eccpos[i + start_step * chip->ecc.bytes + 1]) { +			gaps = 1; +			break; +		} +	} +	if (gaps) { +		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); +		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); +	} else { +		/* +		 * Send the command to read the particular ECC bytes take care +		 * about buswidth alignment in read_buf. 
+		 */ +		index = start_step * chip->ecc.bytes; + +		aligned_pos = eccpos[index] & ~(busw - 1); +		aligned_len = eccfrag_len; +		if (eccpos[index] & (busw - 1)) +			aligned_len++; +		if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1)) +			aligned_len++; + +		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, +					mtd->writesize + aligned_pos, -1); +		chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len); +	} + +	for (i = 0; i < eccfrag_len; i++) +		chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]]; + +	p = bufpoi + data_col_addr; +	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) { +		int stat; + +		stat = chip->ecc.correct(mtd, p, +			&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]); +		if (stat < 0) +			mtd->ecc_stats.failed++; +		else +			mtd->ecc_stats.corrected += stat; +	} +	return 0; +} + +/** + * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi + * @page: page number to read + * + * Not for syndrome calculating ECC controllers which need a special oob layout. 
+ */ +static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, +				uint8_t *buf, int oob_required, int page) +{ +	int i, eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccsteps = chip->ecc.steps; +	uint8_t *p = buf; +	uint8_t *ecc_calc = chip->buffers->ecccalc; +	uint8_t *ecc_code = chip->buffers->ecccode; +	uint32_t *eccpos = chip->ecc.layout->eccpos; + +	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		chip->ecc.hwctl(mtd, NAND_ECC_READ); +		chip->read_buf(mtd, p, eccsize); +		chip->ecc.calculate(mtd, p, &ecc_calc[i]); +	} +	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); + +	for (i = 0; i < chip->ecc.total; i++) +		ecc_code[i] = chip->oob_poi[eccpos[i]]; + +	eccsteps = chip->ecc.steps; +	p = buf; + +	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		int stat; + +		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); +		if (stat < 0) +			mtd->ecc_stats.failed++; +		else +			mtd->ecc_stats.corrected += stat; +	} +	return 0; +} + +/** + * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi + * @page: page number to read + * + * Hardware ECC for large page chips, require OOB to be read first. For this + * ECC mode, the write_page method is re-used from ECC_HW. These methods + * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with + * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from + * the data area, by overwriting the NAND manufacturer bad block markings. 
+ */ +static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, +	struct nand_chip *chip, uint8_t *buf, int oob_required, int page) +{ +	int i, eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccsteps = chip->ecc.steps; +	uint8_t *p = buf; +	uint8_t *ecc_code = chip->buffers->ecccode; +	uint32_t *eccpos = chip->ecc.layout->eccpos; +	uint8_t *ecc_calc = chip->buffers->ecccalc; + +	/* Read the OOB area first */ +	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); +	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); +	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); + +	for (i = 0; i < chip->ecc.total; i++) +		ecc_code[i] = chip->oob_poi[eccpos[i]]; + +	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		int stat; + +		chip->ecc.hwctl(mtd, NAND_ECC_READ); +		chip->read_buf(mtd, p, eccsize); +		chip->ecc.calculate(mtd, p, &ecc_calc[i]); + +		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL); +		if (stat < 0) +			mtd->ecc_stats.failed++; +		else +			mtd->ecc_stats.corrected += stat; +	} +	return 0; +} + +/** + * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: buffer to store read data + * @oob_required: caller requires OOB data read to chip->oob_poi + * @page: page number to read + * + * The hw generator calculates the error syndrome automatically. Therefore we + * need a special oob layout and handling. 
 */
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
				   uint8_t *buf, int oob_required, int page)
{
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;

	/*
	 * One iteration per ECC step: read the data chunk, then the
	 * interleaved prepad/ECC/postpad bytes that follow it on flash.
	 * The controller computes the syndrome in hardware between the
	 * NAND_ECC_READ and NAND_ECC_READSYN hwctl calls.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(mtd, NAND_ECC_READ);
		chip->read_buf(mtd, p, eccsize);

		if (chip->ecc.prepad) {
			chip->read_buf(mtd, oob, chip->ecc.prepad);
			oob += chip->ecc.prepad;
		}

		chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
		chip->read_buf(mtd, oob, eccbytes);
		/* Negative return = uncorrectable; >= 0 = bitflips fixed */
		stat = chip->ecc.correct(mtd, p, oob, NULL);

		if (stat < 0)
			mtd->ecc_stats.failed++;
		else
			mtd->ecc_stats.corrected += stat;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			chip->read_buf(mtd, oob, chip->ecc.postpad);
			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i)
		chip->read_buf(mtd, oob, i);

	return 0;
}

/**
 * nand_transfer_oob - [INTERN] Transfer oob to client buffer
 * @chip: nand chip structure
 * @oob: oob destination address
 * @ops: oob ops structure
 * @len: size of oob to transfer
 *
 * Copies @len OOB bytes out of chip->oob_poi into @oob, honoring the
 * requested OOB mode. Returns the advanced destination pointer.
 */
static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
				  struct mtd_oob_ops *ops, size_t len)
{
	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Raw/place modes: straight copy from the requested offset */
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB: {
		/* Auto mode: gather only the free (non-ECC) OOB regions
		 * described by the ECC layout. */
		struct nand_oobfree *free = chip->ecc.layout->oobfree;
		uint32_t boffs = 0, roffs = ops->ooboffs;
		size_t bytes = 0;

		for (; free->length && len; free++, len -= bytes) {
			/* Read request not from offset 0? Skip whole free
			 * regions until roffs lands inside one. */
			if (unlikely(roffs)) {
				if (roffs >= free->length) {
					roffs -= free->length;
					continue;
				}
				boffs = free->offset + roffs;
				bytes = min_t(size_t, len,
					      (free->length - roffs));
				roffs = 0;
			} else {
				bytes = min_t(size_t, len, free->length);
				boffs = free->offset;
			}
			memcpy(oob, chip->oob_poi + boffs, bytes);
			oob += bytes;
		}
		return oob;
	}
	default:
		BUG();
	}
	return NULL;
}

/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 *
 * Returns max bitflips per ECC step seen (>= 0), or a negative error;
 * -EBADMSG when any uncorrectable ECC error occurred.
 */
static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct nand_chip *chip = mtd->priv;
	struct mtd_ecc_stats stats;
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
		mtd->oobavail : mtd->oobsize;

	uint8_t *bufpoi, *oob, *buf;
	unsigned int max_bitflips = 0;

	/* Snapshot stats so we can detect new ECC failures afterwards */
	stats = mtd->ecc_stats;

	chipnr = (int)(from >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Byte offset into the first page (0 for page-aligned reads) */
	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		WATCHDOG_RESET();

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/* Is the current page in the buffer? */
		if (realpage != chip->pagebuf || oob) {
			/* Unaligned reads bounce through the page cache */
			bufpoi = aligned ? buf : chip->buffers->databuf;

			chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);

			/*
			 * Now read the page into the buffer.  Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
			    !oob)
				ret = chip->ecc.read_subpage(mtd, chip,
							col, bytes, bufpoi);
			else
				ret = chip->ecc.read_page(mtd, chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (!aligned)
					/* Invalidate page cache */
					chip->pagebuf = -1;
				break;
			}

			max_bitflips = max_t(unsigned int, max_bitflips, ret);

			/* Transfer not aligned data */
			if (!aligned) {
				/* Cache the page only if it was read cleanly
				 * with ECC (not raw, no new failures). */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagebuf = realpage;
					chip->pagebuf_bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagebuf = -1;
				}
				memcpy(buf, chip->buffers->databuf + col, bytes);
			}

			buf += bytes;

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip,
						oob, ops, toread);
					oobreadlen -= toread;
				}
			}
		} else {
			/* Page cache hit: serve from the buffered copy */
			memcpy(buf, chip->buffers->databuf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagebuf_bitflips);
		}

		readlen -= bytes;

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}

/**
 * nand_read - [MTD Interface] MTD
compatibility function for nand_do_read_ecc
 * @mtd: MTD device structure
 * @from: offset to read from
 * @len: number of bytes to read
 * @retlen: pointer to variable to store the number of read bytes
 * @buf: the databuffer to put data
 *
 * Get hold of the chip and call nand_do_read.
 */
static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, uint8_t *buf)
{
	struct nand_chip *chip = mtd->priv;
	struct mtd_oob_ops ops;
	int ret;

	nand_get_device(chip, mtd, FL_READING);
	ops.len = len;
	ops.datbuf = buf;
	ops.oobbuf = NULL;
	ops.mode = MTD_OPS_PLACE_OOB;
	ret = nand_do_read_ops(mtd, from, &ops);
	*retlen = ops.retlen;
	nand_release_device(mtd);
	return ret;
}

/**
 * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * Reads the whole OOB area of @page into chip->oob_poi.
 */
static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
			     int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}

/**
 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
 *			    with syndromes
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * OOB bytes are interleaved with data on flash, so each ECC step's OOB
 * chunk is fetched individually by repositioning the read pointer.
 */
static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
				  int page)
{
	uint8_t *buf = chip->oob_poi;
	int length = mtd->oobsize;
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size;
	uint8_t *bufpoi = buf;
	int i, toread, sndrnd = 0, pos;

	chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
	for (i = 0; i < chip->ecc.steps; i++) {
		/* First chunk is reached by the READ0 above; later chunks
		 * need an explicit reposition (RNDOUT on large-page). */
		if (sndrnd) {
			pos = eccsize + i * (eccsize + chunk);
			if (mtd->writesize > 512)
				chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
			else
				chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
		} else
			sndrnd = 1;
		toread = min_t(int, length, chunk);
		chip->read_buf(mtd, bufpoi, toread);
		bufpoi += toread;
		length -= toread;
	}
	/* Trailing OOB bytes after the last interleaved chunk */
	if (length > 0)
		chip->read_buf(mtd, bufpoi, length);

	return 0;
}

/**
 * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Programs chip->oob_poi into the OOB area of @page.
 */
static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
			      int page)
{
	int status = 0;
	const uint8_t *buf = chip->oob_poi;
	int length = mtd->oobsize;

	/* Start programming at the OOB column (== writesize) */
	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	chip->write_buf(mtd, buf, length);
	/* Send command to program the OOB data */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}

/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Mirror of nand_read_oob_syndrome: OOB chunks are interleaved with data,
 * so write position is advanced past each data region (RNDIN on large
 * page, 0xff padding on small page since RNDIN is unavailable there).
 */
static int nand_write_oob_syndrome(struct mtd_info *mtd,
				   struct nand_chip *chip, int page)
{
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * On-flash layout is either:
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad ....
 */
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int page, realpage, chipnr;
	struct nand_chip *chip = mtd->priv;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Snapshot so new ECC failures/corrections can be detected below */
	stats = mtd->ecc_stats;

	/* Per-page OOB capacity depends on the requested mode */
	if (ops->mode == MTD_OPS_AUTO_OOB)
		len = chip->ecc.layout->oobavail;
	else
		len = mtd->oobsize;

	if (unlikely(ops->ooboffs >= len)) {
		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read "
					"outside oob\n", __func__);
		return -EINVAL;
	}

	/* Do not allow reads past end of device */
	if (unlikely(from >= mtd->size ||
		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
					(from >> chip->page_shift)) * len)) {
		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end "
					"of device\n", __func__);
		return -EINVAL;
	}

	chipnr = (int)(from >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		WATCHDOG_RESET();
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(mtd, chip, page);
		else
			ret = chip->ecc.read_oob(mtd, chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	/* Report corrected-but-recovered bitflips to the caller */
	return  mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}

/**
 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob operation description structure
 *
 * NAND read data and/or out-of-band data. Dispatches to the OOB-only or
 * data+OOB path depending on whether ops->datbuf is set.
 */
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd->priv;
	int ret = -ENOTSUPP;

	ops->retlen = 0;

	/* Do not allow reads past end of device */
	if (ops->datbuf && (from + ops->len) > mtd->size) {
		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read "
				"beyond end of device\n", __func__);
		return -EINVAL;
	}

	nand_get_device(chip, mtd, FL_READING);

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	case MTD_OPS_RAW:
		break;

	default:
		/* Unknown OOB mode: keep -ENOTSUPP */
		goto out;
	}

	if (!ops->datbuf)
		ret = nand_do_read_oob(mtd, from, ops);
	else
		ret = nand_do_read_ops(mtd, from, ops);

out:
	nand_release_device(mtd);
	return ret;
}


/**
 * nand_write_page_raw - [INTERN] raw page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				const uint8_t *buf, int oob_required)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	if (oob_required)
		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 *
 * We need a special oob layout and handling even when ECC isn't checked.
+ */ +static int nand_write_page_raw_syndrome(struct mtd_info *mtd, +					struct nand_chip *chip, +					const uint8_t *buf, int oob_required) +{ +	int eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	uint8_t *oob = chip->oob_poi; +	int steps, size; + +	for (steps = chip->ecc.steps; steps > 0; steps--) { +		chip->write_buf(mtd, buf, eccsize); +		buf += eccsize; + +		if (chip->ecc.prepad) { +			chip->write_buf(mtd, oob, chip->ecc.prepad); +			oob += chip->ecc.prepad; +		} + +		chip->read_buf(mtd, oob, eccbytes); +		oob += eccbytes; + +		if (chip->ecc.postpad) { +			chip->write_buf(mtd, oob, chip->ecc.postpad); +			oob += chip->ecc.postpad; +		} +	} + +	size = mtd->oobsize - (oob - chip->oob_poi); +	if (size) +		chip->write_buf(mtd, oob, size); + +	return 0; +} +/** + * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB + */ +static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, +				  const uint8_t *buf, int oob_required) +{ +	int i, eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccsteps = chip->ecc.steps; +	uint8_t *ecc_calc = chip->buffers->ecccalc; +	const uint8_t *p = buf; +	uint32_t *eccpos = chip->ecc.layout->eccpos; + +	/* Software ECC calculation */ +	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) +		chip->ecc.calculate(mtd, p, &ecc_calc[i]); + +	for (i = 0; i < chip->ecc.total; i++) +		chip->oob_poi[eccpos[i]] = ecc_calc[i]; + +	return chip->ecc.write_page_raw(mtd, chip, buf, 1); +} + +/** + * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB + */ +static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, +				  const uint8_t *buf, int 
oob_required) +{ +	int i, eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccsteps = chip->ecc.steps; +	uint8_t *ecc_calc = chip->buffers->ecccalc; +	const uint8_t *p = buf; +	uint32_t *eccpos = chip->ecc.layout->eccpos; + +	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		chip->ecc.hwctl(mtd, NAND_ECC_WRITE); +		chip->write_buf(mtd, p, eccsize); +		chip->ecc.calculate(mtd, p, &ecc_calc[i]); +	} + +	for (i = 0; i < chip->ecc.total; i++) +		chip->oob_poi[eccpos[i]] = ecc_calc[i]; + +	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); + +	return 0; +} + +/** + * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write + * @mtd: mtd info structure + * @chip: nand chip info structure + * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB + * + * The hw generator calculates the error syndrome automatically. Therefore we + * need a special oob layout and handling. + */ +static int nand_write_page_syndrome(struct mtd_info *mtd, +				    struct nand_chip *chip, +				    const uint8_t *buf, int oob_required) +{ +	int i, eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccsteps = chip->ecc.steps; +	const uint8_t *p = buf; +	uint8_t *oob = chip->oob_poi; + +	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { + +		chip->ecc.hwctl(mtd, NAND_ECC_WRITE); +		chip->write_buf(mtd, p, eccsize); + +		if (chip->ecc.prepad) { +			chip->write_buf(mtd, oob, chip->ecc.prepad); +			oob += chip->ecc.prepad; +		} + +		chip->ecc.calculate(mtd, p, oob); +		chip->write_buf(mtd, oob, eccbytes); +		oob += eccbytes; + +		if (chip->ecc.postpad) { +			chip->write_buf(mtd, oob, chip->ecc.postpad); +			oob += chip->ecc.postpad; +		} +	} + +	/* Calculate remaining oob bytes */ +	i = mtd->oobsize - (oob - chip->oob_poi); +	if (i) +		chip->write_buf(mtd, oob, i); + +	return 0; +} + +/** + * nand_write_page - [REPLACEABLE] write one page + * @mtd: MTD device structure + * @chip: NAND chip descriptor 
 * @buf: the data to write
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 * @cached: cached programming
 * @raw: use _raw version of write_page
 */
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
			   const uint8_t *buf, int oob_required, int page,
			   int cached, int raw)
{
	int status;

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);

	if (unlikely(raw))
		status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
	else
		status = chip->ecc.write_page(mtd, chip, buf, oob_required);

	if (status < 0)
		return status;

	/*
	 * Cached progamming disabled for now. Not sure if it's worth the
	 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s).
	 */
	cached = 0;

	if (!cached || !(chip->options & NAND_CACHEPRG)) {

		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
		status = chip->waitfunc(mtd, chip);
		/*
		 * See if operation failed and additional status checks are
		 * available.
		 */
		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
			status = chip->errstat(mtd, chip, FL_WRITING, status,
					       page);

		if (status & NAND_STATUS_FAIL)
			return -EIO;
	} else {
		/* Dead path while cached is forced to 0 above */
		chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
		status = chip->waitfunc(mtd, chip);
	}

#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
	/* Send command to read back the data */
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	if (chip->verify_buf(mtd, buf, mtd->writesize))
		return -EIO;

	/* Make sure the next page prog is preceded by a status read */
	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
#endif
	return 0;
}

/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @mtd: MTD device structure
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 *
 * Inverse of nand_transfer_oob: scatters @len bytes from the client
 * buffer into chip->oob_poi according to the OOB mode. Returns the
 * advanced source pointer.
 */
static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd->priv;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
	 * data from a previous OOB read.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB: {
		/* Scatter into the free (non-ECC) regions of the layout */
		struct nand_oobfree *free = chip->ecc.layout->oobfree;
		uint32_t boffs = 0, woffs = ops->ooboffs;
		size_t bytes = 0;

		for (; free->length && len; free++, len -= bytes) {
			/* Write request not from offset 0? Skip whole free
			 * regions until woffs lands inside one. */
			if (unlikely(woffs)) {
				if (woffs >= free->length) {
					woffs -= free->length;
					continue;
				}
				boffs = free->offset + woffs;
				bytes = min_t(size_t, len,
					      (free->length - woffs));
				woffs = 0;
			} else {
				bytes = min_t(size_t, len, free->length);
				boffs = free->offset;
			}
			memcpy(chip->oob_poi + boffs, oob, bytes);
			oob += bytes;
		}
		return oob;
	}
	default:
		BUG();
	}
	return NULL;
}

/* True if x is not aligned to the (sub)page write granularity */
#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)

/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC. Called with the chip held.
 */
static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	int chipnr, realpage, page, blockmask, column;
	struct nand_chip *chip = mtd->priv;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
				mtd->oobavail : mtd->oobsize;

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret, subpage;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	column = to & (mtd->writesize - 1);
	subpage = column || (writelen & (mtd->writesize - 1));

	/* Partial-page writes cannot carry explicit OOB data */
	if (subpage && oob)
		return -EINVAL;

	chipnr = (int)(to >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		printk (KERN_NOTICE "nand_do_write_ops: Device is write protected\n");
		return -EIO;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;
	blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;

	/* Invalidate the page cache, when we write to the cached page */
	/* NOTE(review): chip->pagebuf is int; pagebuf << page_shift may
	 * overflow on large devices — verify against upstream fix. */
	if (to <= (chip->pagebuf << chip->page_shift) &&
	    (chip->pagebuf << chip->page_shift) < (to + ops->len))
		chip->pagebuf = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
		return -EINVAL;

	while (1) {
		WATCHDOG_RESET();

		int bytes = mtd->writesize;
		int cached = writelen > bytes && page != blockmask;
		uint8_t *wbuf = buf;

		/* Partial page write? Pad to a full page with 0xff so
		 * untouched bytes stay erased. */
		if (unlikely(column || writelen < mtd->writesize)) {
			cached = 0;
			bytes = min_t(int, bytes - column, (int) writelen);
			chip->pagebuf = -1;
			memset(chip->buffers->databuf, 0xff, mtd->writesize);
			memcpy(&chip->buffers->databuf[column], buf, bytes);
			wbuf = chip->buffers->databuf;
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(mtd, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
				       cached, (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;
	return ret;
}

/**
 * nand_write - [MTD Interface] NAND write with ECC
 * @mtd: MTD device structure
 * @to: offset to write to
 * @len: number of bytes to write
 * @retlen: pointer to variable to store the number of written bytes
 * @buf: the data to write
 *
 * NAND write with ECC.
 */
static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			  size_t *retlen, const uint8_t *buf)
{
	struct nand_chip *chip = mtd->priv;
	struct mtd_oob_ops ops;
	int ret;

	nand_get_device(chip, mtd, FL_WRITING);
	ops.len = len;
	ops.datbuf = (uint8_t *)buf;
	ops.oobbuf = NULL;
	ops.mode = MTD_OPS_PLACE_OOB;
	ret = nand_do_write_ops(mtd, to, &ops);
	*retlen = ops.retlen;
	nand_release_device(mtd);
	return ret;
}

/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band. Validates the request, then programs the OOB
 * area of a single page.
 */
static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	int chipnr, page, status, len;
	struct nand_chip *chip = mtd->priv;

	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	/* Per-page OOB capacity depends on the requested mode */
	if (ops->mode == MTD_OPS_AUTO_OOB)
		len = chip->ecc.layout->oobavail;
	else
		len = mtd->oobsize;

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write "
				"past end of page\n", __func__);
		return -EINVAL;
	}

	if (unlikely(ops->ooboffs >= len)) {
		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start "
				"write outside oob\n", __func__);
		return -EINVAL;
	}

	/* Do not allow write past end of device */
	if (unlikely(to >= mtd->size ||
		     ops->ooboffs + ops->ooblen >
			((mtd->size >> chip->page_shift) -
			 (to >> chip->page_shift)) * len)) {
		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
				"end of device\n", __func__);
		return -EINVAL;
	}

	chipnr = (int)(to >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd))
		return -EROFS;

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagebuf)
		chip->pagebuf = -1;

	/* Stage the client's OOB bytes into chip->oob_poi */
	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}

/**
 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
 * @mtd: MTD device structure
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * Dispatches to the OOB-only or data+OOB write path depending on
 * whether ops->datbuf is set.
 */
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd->priv;
	int ret = -ENOTSUPP;

	ops->retlen = 0;

	/* Do not allow writes past end of device */
	if (ops->datbuf && (to + ops->len) > mtd->size) {
		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
				"end of device\n", __func__);
		return -EINVAL;
	}

	nand_get_device(chip, mtd, FL_WRITING);

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	case MTD_OPS_RAW:
		break;

	default:
		/* Unknown OOB mode: keep -ENOTSUPP */
		goto out;
	}

	if (!ops->datbuf)
		ret = nand_do_write_oob(mtd, to, ops);
	else
		ret = nand_do_write_ops(mtd, to, ops);

out:
	nand_release_device(mtd);
	return ret;
}

/**
 * single_erase_cmd - [GENERIC] NAND standard block erase command function
 * @mtd: MTD device structure
 * @page: the page address of the block which will be erased
 *
 * Standard erase command for NAND chips.
 */
static void single_erase_cmd(struct mtd_info *mtd, int page)
{
	struct nand_chip *chip = mtd->priv;
	/* Send commands to erase a block */
	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
}

/**
 * multi_erase_cmd - [GENERIC] AND specific block erase command function
 * @mtd: MTD device structure
 * @page: the page address of the block which will be erased
 *
 * AND multi block erase command function. Erase 4 consecutive blocks.
 */
static void multi_erase_cmd(struct mtd_info *mtd, int page)
{
	struct nand_chip *chip = mtd->priv;
	/* Send commands to erase a block: four ERASE1 address cycles
	 * queue four consecutive blocks, one ERASE2 confirms them all. */
	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
}

/**
 * nand_erase - [MTD Interface] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 *
 * Erase one or more blocks. Thin wrapper that disallows erasing the
 * bad block table area.
 */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	return nand_erase_nand(mtd, instr, 0);
}

/* Mask used to compare BBT page addresses ignoring the low block bits */
#define BBT_PAGE_MASK	0xffffff3f
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @mtd: MTD device structure
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks.
 */
int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
		    int allowbbt)
{
	int page, status, pages_per_block, ret, chipnr;
	struct nand_chip *chip = mtd->priv;
	loff_t rewrite_bbt[CONFIG_SYS_NAND_MAX_CHIPS] = {0};
	unsigned int bbt_masked_page = 0xffffffff;
	loff_t len;

	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
				__func__, (unsigned long long)instr->addr,
				(unsigned long long)instr->len);

	if (check_offs_len(mtd, instr->addr, instr->len))
		return -EINVAL;

	/* Grab the lock and see if the device is available */
	nand_get_device(chip, mtd, FL_ERASING);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	chip->select_chip(mtd, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(mtd)) {
		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
					__func__);
		instr->state = MTD_ERASE_FAILED;
		goto erase_exit;
	}

	/*
	 * If BBT requires refresh, set the BBT page mask to see if the BBT
	 * should be rewritten. Otherwise the mask is set to 0xffffffff which
	 * can not be matched. This is also done when the bbt is actually
	 * erased to avoid recursive updates.
	 */
	if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
		bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;

	/* Loop through the pages */
	len = instr->len;

	instr->state = MTD_ERASING;

	while (len) {
		WATCHDOG_RESET();
		/* Check if we have a bad block, we do not erase bad blocks!
		 * (scrub mode deliberately bypasses the check) */
		if (!instr->scrub && nand_block_checkbad(mtd, ((loff_t) page) <<
					chip->page_shift, 0, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				   __func__, page);
			instr->state = MTD_ERASE_FAILED;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagebuf && chip->pagebuf <
		    (page + pages_per_block))
			chip->pagebuf = -1;

		chip->erase_cmd(mtd, page & chip->pagemask);

		status = chip->waitfunc(mtd, chip);

		/*
		 * See if operation failed and additional status checks are
		 * available
		 */
		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
			status = chip->errstat(mtd, chip, FL_ERASING,
					       status, page);

		/* See if block erase succeeded */
		if (status & NAND_STATUS_FAIL) {
			MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, "
					"page 0x%08x\n", __func__, page);
			instr->state = MTD_ERASE_FAILED;
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/*
		 * If BBT requires refresh, set the BBT rewrite flag to the
		 * page being erased.
		 */
		if (bbt_masked_page != 0xffffffff &&
		    (page & BBT_PAGE_MASK) == bbt_masked_page)
			rewrite_bbt[chipnr] =
				((loff_t)page << chip->page_shift);

		/* Increment page address and decrement length */
		len -= (1 << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			chip->select_chip(mtd, -1);
			chip->select_chip(mtd, chipnr);

			/*
			 * If BBT requires refresh and BBT-PERCHIP, set the BBT
			 * page mask to see if this BBT should be rewritten.
			 */
			if (bbt_masked_page != 0xffffffff &&
			    (chip->bbt_td->options & NAND_BBT_PERCHIP))
				bbt_masked_page = chip->bbt_td->pages[chipnr] &
					BBT_PAGE_MASK;
		}
	}
	instr->state = MTD_ERASE_DONE;

erase_exit:

	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;

	/* Deselect and wake up anyone waiting on the device */
	nand_release_device(mtd);

	/* Do call back function */
	if (!ret)
		mtd_erase_callback(instr);

	/*
	 * If BBT requires refresh and erase was successful, rewrite any
	 * selected bad block tables.
	 */
	if (bbt_masked_page == 0xffffffff || ret)
		return ret;

	for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
		if (!rewrite_bbt[chipnr])
			continue;
		/* Update the BBT for chip */
		MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt "
			"(%d:0x%0llx 0x%0x)\n", __func__, chipnr,
			rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]);
		nand_update_bbt(mtd, rewrite_bbt[chipnr]);
	}

	/* Return more or less happy */
	return ret;
}

/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function.
 */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;

	MTDDEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);

	/* Grab the lock and see if the device is available */
	nand_get_device(chip, mtd, FL_SYNCING);
	/* Release it and go back */
	nand_release_device(mtd);
}

/**
 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
 * @mtd: MTD device structure
 * @offs: offset relative to mtd start
 *
 * Returns non-zero if the block containing @offs is marked bad.
 */
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	return nand_block_checkbad(mtd, offs, 1, 0);
}

/**
 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
 * @mtd: MTD device structure
 * @ofs: offset relative to mtd start
 *
 * Returns 0 on success (including when the block was already bad),
 * otherwise a negative error code.
 */
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd->priv;
	int ret;

	ret = nand_block_isbad(mtd, ofs);
	if (ret) {
		/* If it was bad already, return success and do nothing */
		if (ret > 0)
			return 0;
		return ret;
	}

	return chip->block_markbad(mtd, ofs);
}

/**
 * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
 * @mtd: MTD device structure
 * @chip: nand chip info structure
 * @addr: feature address.
 * @subfeature_param: the subfeature parameters, a four bytes array.
 *
 * Returns -EINVAL for non-ONFI chips, -EIO if the chip reports failure.
 */
static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
			int addr, uint8_t *subfeature_param)
{
	int status;

	if (!chip->onfi_version)
		return -EINVAL;

	chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
	chip->write_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
	status = chip->waitfunc(mtd, chip);
	if (status & NAND_STATUS_FAIL)
		return -EIO;
	return 0;
}

/**
 * nand_onfi_get_features- [REPLACEABLE] get features for ONFI nand
 * @mtd: MTD device structure
 * @chip: nand chip info structure
 * @addr: feature address.
 * @subfeature_param: the subfeature parameters, a four bytes array.
+ */ +static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip, +			int addr, uint8_t *subfeature_param) +{ +	if (!chip->onfi_version) +		return -EINVAL; + +	/* clear the sub feature parameters */ +	memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN); + +	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1); +	chip->read_buf(mtd, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN); +	return 0; +} + +/* Set default functions */ +static void nand_set_defaults(struct nand_chip *chip, int busw) +{ +	/* check for proper chip_delay setup, set 20us if not */ +	if (!chip->chip_delay) +		chip->chip_delay = 20; + +	/* check, if a user supplied command function given */ +	if (chip->cmdfunc == NULL) +		chip->cmdfunc = nand_command; + +	/* check, if a user supplied wait function given */ +	if (chip->waitfunc == NULL) +		chip->waitfunc = nand_wait; + +	if (!chip->select_chip) +		chip->select_chip = nand_select_chip; +	if (!chip->read_byte) +		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte; +	if (!chip->read_word) +		chip->read_word = nand_read_word; +	if (!chip->block_bad) +		chip->block_bad = nand_block_bad; +	if (!chip->block_markbad) +		chip->block_markbad = nand_default_block_markbad; +	if (!chip->write_buf) +		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf; +	if (!chip->read_buf) +		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf; +	if (!chip->verify_buf) +		chip->verify_buf = busw ? 
nand_verify_buf16 : nand_verify_buf; +	if (!chip->scan_bbt) +		chip->scan_bbt = nand_default_bbt; +	if (!chip->controller) +		chip->controller = &chip->hwcontrol; +} + +#ifdef CONFIG_SYS_NAND_ONFI_DETECTION +/* Sanitize ONFI strings so we can safely print them */ +static void sanitize_string(char *s, size_t len) +{ +	ssize_t i; + +	/* Null terminate */ +	s[len - 1] = 0; + +	/* Remove non printable chars */ +	for (i = 0; i < len - 1; i++) { +		if (s[i] < ' ' || s[i] > 127) +			s[i] = '?'; +	} + +	/* Remove trailing spaces */ +	strim(s); +} + +static u16 onfi_crc16(u16 crc, u8 const *p, size_t len) +{ +	int i; +	while (len--) { +		crc ^= *p++ << 8; +		for (i = 0; i < 8; i++) +			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0); +	} + +	return crc; +} + +/* + * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise. + */ +static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, +					int *busw) +{ +	struct nand_onfi_params *p = &chip->onfi_params; +	int i; +	int val; + +	/* Try ONFI for unknown chip or LP */ +	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1); +	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' || +		chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') +		return 0; + +	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); +	for (i = 0; i < 3; i++) { +		chip->read_buf(mtd, (uint8_t *)p, sizeof(*p)); +		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) == +				le16_to_cpu(p->crc)) { +			pr_info("ONFI param page %d valid\n", i); +			break; +		} +	} + +	if (i == 3) +		return 0; + +	/* Check version */ +	val = le16_to_cpu(p->revision); +	if (val & (1 << 5)) +		chip->onfi_version = 23; +	else if (val & (1 << 4)) +		chip->onfi_version = 22; +	else if (val & (1 << 3)) +		chip->onfi_version = 21; +	else if (val & (1 << 2)) +		chip->onfi_version = 20; +	else if (val & (1 << 1)) +		chip->onfi_version = 10; +	else +		chip->onfi_version = 0; + +	if (!chip->onfi_version) { +		pr_info("%s: unsupported ONFI version: 
%d\n", __func__, val); +		return 0; +	} + +	sanitize_string(p->manufacturer, sizeof(p->manufacturer)); +	sanitize_string(p->model, sizeof(p->model)); +	if (!mtd->name) +		mtd->name = p->model; +	mtd->writesize = le32_to_cpu(p->byte_per_page); +	mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; +	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); +	chip->chipsize = le32_to_cpu(p->blocks_per_lun); +	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count; +	*busw = 0; +	if (le16_to_cpu(p->features) & 1) +		*busw = NAND_BUSWIDTH_16; + +	pr_info("ONFI flash detected\n"); +	return 1; +} +#else +static inline int nand_flash_detect_onfi(struct mtd_info *mtd, +					struct nand_chip *chip, +					int *busw) +{ +	return 0; +} +#endif + +/* + * nand_id_has_period - Check if an ID string has a given wraparound period + * @id_data: the ID string + * @arrlen: the length of the @id_data array + * @period: the period of repitition + * + * Check if an ID string is repeated within a given sequence of bytes at + * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a + * period of 2). This is a helper function for nand_id_len(). Returns non-zero + * if the repetition has a period of @period; otherwise, returns zero. + */ +static int nand_id_has_period(u8 *id_data, int arrlen, int period) +{ +	int i, j; +	for (i = 0; i < period; i++) +		for (j = i + period; j < arrlen; j += period) +			if (id_data[i] != id_data[j]) +				return 0; +	return 1; +} + +/* + * nand_id_len - Get the length of an ID string returned by CMD_READID + * @id_data: the ID string + * @arrlen: the length of the @id_data array + + * Returns the length of the ID string, according to known wraparound/trailing + * zero patterns. If no pattern exists, returns the length of the array. 
 */
static int nand_id_len(u8 *id_data, int arrlen)
{
	int last_nonzero, period;

	/* Find last non-zero byte (the ID may be zero-padded at the tail) */
	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
		if (id_data[last_nonzero])
			break;

	/* All zeros: no usable ID at all */
	if (last_nonzero < 0)
		return 0;

	/* Calculate wraparound period, i.e. where the ID starts repeating */
	for (period = 1; period < arrlen; period++)
		if (nand_id_has_period(id_data, arrlen, period))
			break;

	/* There's a repeated pattern: the real ID is only 'period' bytes */
	if (period < arrlen)
		return period;

	/* There are trailing zeros: ID ends at the last non-zero byte */
	if (last_nonzero < arrlen - 1)
		return last_nonzero + 1;

	/* No pattern detected: assume the full array is the ID */
	return arrlen;
}

/*
 * Many new NAND share similar device ID codes, which represent the size of the
 * chip. The rest of the parameters must be decoded according to generic or
 * manufacturer-specific "extended ID" decoding patterns.
 */
static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
				u8 id_data[8], int *busw)
{
	int extid, id_len;
	/* The 3rd id byte holds MLC / multichip data */
	chip->cellinfo = id_data[2];
	/* The 4th id byte is the important one */
	extid = id_data[3];

	id_len = nand_id_len(id_data, 8);

	/*
	 * Field definitions are in the following datasheets:
	 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
	 * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
	 * Hynix MLC   (6 byte ID): Hynix H27UBG8T2B (p.22)
	 *
	 * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
	 * ID to decide what to do.
+	 */ +	if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG && +			(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && +			id_data[5] != 0x00) { +		/* Calc pagesize */ +		mtd->writesize = 2048 << (extid & 0x03); +		extid >>= 2; +		/* Calc oobsize */ +		switch (((extid >> 2) & 0x04) | (extid & 0x03)) { +		case 1: +			mtd->oobsize = 128; +			break; +		case 2: +			mtd->oobsize = 218; +			break; +		case 3: +			mtd->oobsize = 400; +			break; +		case 4: +			mtd->oobsize = 436; +			break; +		case 5: +			mtd->oobsize = 512; +			break; +		case 6: +		default: /* Other cases are "reserved" (unknown) */ +			mtd->oobsize = 640; +			break; +		} +		extid >>= 2; +		/* Calc blocksize */ +		mtd->erasesize = (128 * 1024) << +			(((extid >> 1) & 0x04) | (extid & 0x03)); +		*busw = 0; +	} else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX && +			(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { +		unsigned int tmp; + +		/* Calc pagesize */ +		mtd->writesize = 2048 << (extid & 0x03); +		extid >>= 2; +		/* Calc oobsize */ +		switch (((extid >> 2) & 0x04) | (extid & 0x03)) { +		case 0: +			mtd->oobsize = 128; +			break; +		case 1: +			mtd->oobsize = 224; +			break; +		case 2: +			mtd->oobsize = 448; +			break; +		case 3: +			mtd->oobsize = 64; +			break; +		case 4: +			mtd->oobsize = 32; +			break; +		case 5: +			mtd->oobsize = 16; +			break; +		default: +			mtd->oobsize = 640; +			break; +		} +		extid >>= 2; +		/* Calc blocksize */ +		tmp = ((extid >> 1) & 0x04) | (extid & 0x03); +		if (tmp < 0x03) +			mtd->erasesize = (128 * 1024) << tmp; +		else if (tmp == 0x03) +			mtd->erasesize = 768 * 1024; +		else +			mtd->erasesize = (64 * 1024) << tmp; +		*busw = 0; +	} else { +		/* Calc pagesize */ +		mtd->writesize = 1024 << (extid & 0x03); +		extid >>= 2; +		/* Calc oobsize */ +		mtd->oobsize = (8 << (extid & 0x01)) * +			(mtd->writesize >> 9); +		extid >>= 2; +		/* Calc blocksize. 
Blocksize is multiples of 64KiB */ +		mtd->erasesize = (64 * 1024) << (extid & 0x03); +		extid >>= 2; +		/* Get buswidth information */ +		*busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0; +	} +} + + /* + * Old devices have chip data hardcoded in the device ID table. nand_decode_id + * decodes a matching ID table entry and assigns the MTD size parameters for + * the chip. + */ +static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip, +				const struct nand_flash_dev *type, u8 id_data[8], +				int *busw) +{ +	int maf_id = id_data[0]; + +	mtd->erasesize = type->erasesize; +	mtd->writesize = type->pagesize; +	mtd->oobsize = mtd->writesize / 32; +	*busw = type->options & NAND_BUSWIDTH_16; + +	/* +	 * Check for Spansion/AMD ID + repeating 5th, 6th byte since +	 * some Spansion chips have erasesize that conflicts with size +	 * listed in nand_ids table. +	 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39) +	 */ +	if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00 +			&& id_data[6] == 0x00 && id_data[7] == 0x00 +			&& mtd->writesize == 512) { +		mtd->erasesize = 128 * 1024; +		mtd->erasesize <<= ((id_data[3] & 0x03) << 1); +	} +} + + /* + * Set the bad block marker/indicator (BBM/BBI) patterns according to some + * heuristic patterns using various detected parameters (e.g., manufacturer, + * page size, cell-type information). + */ +static void nand_decode_bbm_options(struct mtd_info *mtd, +				    struct nand_chip *chip, u8 id_data[8]) +{ +	int maf_id = id_data[0]; + +	/* Set the bad block position */ +	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16)) +		chip->badblockpos = NAND_LARGE_BADBLOCK_POS; +	else +		chip->badblockpos = NAND_SMALL_BADBLOCK_POS; + +	/* +	 * Bad block marker is stored in the last page of each block on Samsung +	 * and Hynix MLC devices; stored in first two pages of each block on +	 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba, +	 * AMD/Spansion, and Macronix.  
All others scan only the first page. +	 */ +	if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) && +			(maf_id == NAND_MFR_SAMSUNG || +			 maf_id == NAND_MFR_HYNIX)) +		chip->bbt_options |= NAND_BBT_SCANLASTPAGE; +	else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) && +				(maf_id == NAND_MFR_SAMSUNG || +				 maf_id == NAND_MFR_HYNIX || +				 maf_id == NAND_MFR_TOSHIBA || +				 maf_id == NAND_MFR_AMD || +				 maf_id == NAND_MFR_MACRONIX)) || +			(mtd->writesize == 2048 && +			 maf_id == NAND_MFR_MICRON)) +		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; +} + +/* + * Get the flash and manufacturer id and lookup if the type is supported. + */ +static const struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, +						  struct nand_chip *chip, +						  int busw, +						  int *maf_id, int *dev_id, +						  const struct nand_flash_dev *type) +{ +	const char *name; +	int i, maf_idx; +	u8 id_data[8]; + +	/* Select the device */ +	chip->select_chip(mtd, 0); + +	/* +	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) +	 * after power-up. +	 */ +	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); + +	/* Send the command for reading device ID */ +	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); + +	/* Read manufacturer and device IDs */ +	*maf_id = chip->read_byte(mtd); +	*dev_id = chip->read_byte(mtd); + +	/* +	 * Try again to make sure, as some systems the bus-hold or other +	 * interface concerns can cause random data which looks like a +	 * possibly credible NAND flash to appear. If the two results do +	 * not match, ignore the device completely. 
+	 */ + +	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); + +	/* Read entire ID string */ +	for (i = 0; i < 8; i++) +		id_data[i] = chip->read_byte(mtd); + +	if (id_data[0] != *maf_id || id_data[1] != *dev_id) { +		pr_info("%s: second ID read did not match " +			"%02x,%02x against %02x,%02x\n", __func__, +			*maf_id, *dev_id, id_data[0], id_data[1]); +		return ERR_PTR(-ENODEV); +	} + +	if (!type) +		type = nand_flash_ids; + +	for (; type->name != NULL; type++) +		if (*dev_id == type->id) +			break; + +	chip->onfi_version = 0; +	if (!type->name || !type->pagesize) { +		/* Check is chip is ONFI compliant */ +		if (nand_flash_detect_onfi(mtd, chip, &busw)) +			goto ident_done; +	} + +	if (!type->name) +		return ERR_PTR(-ENODEV); + +	if (!mtd->name) +		mtd->name = type->name; + +	chip->chipsize = (uint64_t)type->chipsize << 20; + +	if (!type->pagesize && chip->init_size) { +		/* Set the pagesize, oobsize, erasesize by the driver */ +		busw = chip->init_size(mtd, chip, id_data); +	} else if (!type->pagesize) { +		/* Decode parameters from extended ID */ +		nand_decode_ext_id(mtd, chip, id_data, &busw); +	} else { +		nand_decode_id(mtd, chip, type, id_data, &busw); +	} +	/* Get chip options, preserve non chip based options */ +	chip->options |= type->options; + +	/* +	 * Check if chip is not a Samsung device. Do not clear the +	 * options for chips which do not have an extended id. +	 */ +	if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize) +		chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; +ident_done: + +	/* Try to identify manufacturer */ +	for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) { +		if (nand_manuf_ids[maf_idx].id == *maf_id) +			break; +	} + +	/* +	 * Check, if buswidth is correct. Hardware drivers should set +	 * chip correct! 
+	 */ +	if (busw != (chip->options & NAND_BUSWIDTH_16)) { +		pr_info("NAND device: Manufacturer ID:" +			" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, +			*dev_id, nand_manuf_ids[maf_idx].name, mtd->name); +		pr_warn("NAND bus width %d instead %d bit\n", +			   (chip->options & NAND_BUSWIDTH_16) ? 16 : 8, +			   busw ? 16 : 8); +		return ERR_PTR(-EINVAL); +	} + +	nand_decode_bbm_options(mtd, chip, id_data); + +	/* Calculate the address shift from the page size */ +	chip->page_shift = ffs(mtd->writesize) - 1; +	/* Convert chipsize to number of pages per chip -1 */ +	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1; + +	chip->bbt_erase_shift = chip->phys_erase_shift = +		ffs(mtd->erasesize) - 1; +	if (chip->chipsize & 0xffffffff) +		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1; +	else { +		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32)); +		chip->chip_shift += 32 - 1; +	} + +	chip->badblockbits = 8; + +	/* Check for AND chips with 4 page planes */ +	if (chip->options & NAND_4PAGE_ARRAY) +		chip->erase_cmd = multi_erase_cmd; +	else +		chip->erase_cmd = single_erase_cmd; + +	/* Do not replace user supplied command function! */ +	if (mtd->writesize > 512 && chip->cmdfunc == nand_command) +		chip->cmdfunc = nand_command_lp; + +	name = type->name; +#ifdef CONFIG_SYS_NAND_ONFI_DETECTION +	if (chip->onfi_version) +		name = chip->onfi_params.model; +#endif +	pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s)," +		" page size: %d, OOB size: %d\n", +		*maf_id, *dev_id, nand_manuf_ids[maf_idx].name, +		name, +		mtd->writesize, mtd->oobsize); + +	return type; +} + +/** + * nand_scan_ident - [NAND Interface] Scan for the NAND device + * @mtd: MTD device structure + * @maxchips: number of chips to scan for + * @table: alternative NAND ID table + * + * This is the first phase of the normal nand_scan() function. It reads the + * flash ID and sets up MTD fields accordingly. 
+ * + * The mtd->owner field must be set to the module of the caller. + */ +int nand_scan_ident(struct mtd_info *mtd, int maxchips, +		    const struct nand_flash_dev *table) +{ +	int i, busw, nand_maf_id, nand_dev_id; +	struct nand_chip *chip = mtd->priv; +	const struct nand_flash_dev *type; + +	/* Get buswidth to select the correct functions */ +	busw = chip->options & NAND_BUSWIDTH_16; +	/* Set the default functions */ +	nand_set_defaults(chip, busw); + +	/* Read the flash type */ +	type = nand_get_flash_type(mtd, chip, busw, +				&nand_maf_id, &nand_dev_id, table); + +	if (IS_ERR(type)) { +#ifndef CONFIG_SYS_NAND_QUIET_TEST +		pr_warn("No NAND device found\n"); +#endif +		chip->select_chip(mtd, -1); +		return PTR_ERR(type); +	} + +	/* Check for a chip array */ +	for (i = 1; i < maxchips; i++) { +		chip->select_chip(mtd, i); +		/* See comment in nand_get_flash_type for reset */ +		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); +		/* Send the command for reading device ID */ +		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); +		/* Read manufacturer and device IDs */ +		if (nand_maf_id != chip->read_byte(mtd) || +		    nand_dev_id != chip->read_byte(mtd)) +			break; +	} +#ifdef DEBUG +	if (i > 1) +		pr_info("%d NAND chips detected\n", i); +#endif + +	/* Store the number of chips and calc total size for mtd */ +	chip->numchips = i; +	mtd->size = i * chip->chipsize; + +	return 0; +} + + +/** + * nand_scan_tail - [NAND Interface] Scan for the NAND device + * @mtd: MTD device structure + * + * This is the second phase of the normal nand_scan() function. It fills out + * all the uninitialized function pointers with the defaults and scans for a + * bad block table if appropriate. 
+ */ +int nand_scan_tail(struct mtd_info *mtd) +{ +	int i; +	struct nand_chip *chip = mtd->priv; + +	/* New bad blocks should be marked in OOB, flash-based BBT, or both */ +	BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && +			!(chip->bbt_options & NAND_BBT_USE_FLASH)); + +	if (!(chip->options & NAND_OWN_BUFFERS)) +		chip->buffers = memalign(ARCH_DMA_MINALIGN, +					 sizeof(*chip->buffers)); +	if (!chip->buffers) +		return -ENOMEM; + +	/* Set the internal oob buffer location, just after the page data */ +	chip->oob_poi = chip->buffers->databuf + mtd->writesize; + +	/* +	 * If no default placement scheme is given, select an appropriate one. +	 */ +	if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) { +		switch (mtd->oobsize) { +		case 8: +			chip->ecc.layout = &nand_oob_8; +			break; +		case 16: +			chip->ecc.layout = &nand_oob_16; +			break; +		case 64: +			chip->ecc.layout = &nand_oob_64; +			break; +		case 128: +			chip->ecc.layout = &nand_oob_128; +			break; +		default: +			pr_warn("No oob scheme defined for oobsize %d\n", +				   mtd->oobsize); +		} +	} + +	if (!chip->write_page) +		chip->write_page = nand_write_page; + +	/* set for ONFI nand */ +	if (!chip->onfi_set_features) +		chip->onfi_set_features = nand_onfi_set_features; +	if (!chip->onfi_get_features) +		chip->onfi_get_features = nand_onfi_get_features; + +	/* +	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is +	 * selected and we have 256 byte pagesize fallback to software ECC +	 */ + +	switch (chip->ecc.mode) { +	case NAND_ECC_HW_OOB_FIRST: +		/* Similar to NAND_ECC_HW, but a separate read_page handle */ +		if (!chip->ecc.calculate || !chip->ecc.correct || +		     !chip->ecc.hwctl) { +			pr_warn("No ECC functions supplied; " +				   "hardware ECC not possible\n"); +			BUG(); +		} +		if (!chip->ecc.read_page) +			chip->ecc.read_page = nand_read_page_hwecc_oob_first; + +	case NAND_ECC_HW: +		/* Use standard hwecc read page function? 
*/ +		if (!chip->ecc.read_page) +			chip->ecc.read_page = nand_read_page_hwecc; +		if (!chip->ecc.write_page) +			chip->ecc.write_page = nand_write_page_hwecc; +		if (!chip->ecc.read_page_raw) +			chip->ecc.read_page_raw = nand_read_page_raw; +		if (!chip->ecc.write_page_raw) +			chip->ecc.write_page_raw = nand_write_page_raw; +		if (!chip->ecc.read_oob) +			chip->ecc.read_oob = nand_read_oob_std; +		if (!chip->ecc.write_oob) +			chip->ecc.write_oob = nand_write_oob_std; + +	case NAND_ECC_HW_SYNDROME: +		if ((!chip->ecc.calculate || !chip->ecc.correct || +		     !chip->ecc.hwctl) && +		    (!chip->ecc.read_page || +		     chip->ecc.read_page == nand_read_page_hwecc || +		     !chip->ecc.write_page || +		     chip->ecc.write_page == nand_write_page_hwecc)) { +			pr_warn("No ECC functions supplied; " +				   "hardware ECC not possible\n"); +			BUG(); +		} +		/* Use standard syndrome read/write page function? */ +		if (!chip->ecc.read_page) +			chip->ecc.read_page = nand_read_page_syndrome; +		if (!chip->ecc.write_page) +			chip->ecc.write_page = nand_write_page_syndrome; +		if (!chip->ecc.read_page_raw) +			chip->ecc.read_page_raw = nand_read_page_raw_syndrome; +		if (!chip->ecc.write_page_raw) +			chip->ecc.write_page_raw = nand_write_page_raw_syndrome; +		if (!chip->ecc.read_oob) +			chip->ecc.read_oob = nand_read_oob_syndrome; +		if (!chip->ecc.write_oob) +			chip->ecc.write_oob = nand_write_oob_syndrome; + +		if (mtd->writesize >= chip->ecc.size) { +			if (!chip->ecc.strength) { +				pr_warn("Driver must set ecc.strength when using hardware ECC\n"); +				BUG(); +			} +			break; +		} +		pr_warn("%d byte HW ECC not possible on " +			   "%d byte page size, fallback to SW ECC\n", +			   chip->ecc.size, mtd->writesize); +		chip->ecc.mode = NAND_ECC_SOFT; + +	case NAND_ECC_SOFT: +		chip->ecc.calculate = nand_calculate_ecc; +		chip->ecc.correct = nand_correct_data; +		chip->ecc.read_page = nand_read_page_swecc; +		chip->ecc.read_subpage = nand_read_subpage; +		
chip->ecc.write_page = nand_write_page_swecc; +		chip->ecc.read_page_raw = nand_read_page_raw; +		chip->ecc.write_page_raw = nand_write_page_raw; +		chip->ecc.read_oob = nand_read_oob_std; +		chip->ecc.write_oob = nand_write_oob_std; +		if (!chip->ecc.size) +			chip->ecc.size = 256; +		chip->ecc.bytes = 3; +		chip->ecc.strength = 1; +		break; + +	case NAND_ECC_SOFT_BCH: +		if (!mtd_nand_has_bch()) { +			pr_warn("CONFIG_MTD_ECC_BCH not enabled\n"); +			return -EINVAL; +		} +		chip->ecc.calculate = nand_bch_calculate_ecc; +		chip->ecc.correct = nand_bch_correct_data; +		chip->ecc.read_page = nand_read_page_swecc; +		chip->ecc.read_subpage = nand_read_subpage; +		chip->ecc.write_page = nand_write_page_swecc; +		chip->ecc.read_page_raw = nand_read_page_raw; +		chip->ecc.write_page_raw = nand_write_page_raw; +		chip->ecc.read_oob = nand_read_oob_std; +		chip->ecc.write_oob = nand_write_oob_std; +		/* +		 * Board driver should supply ecc.size and ecc.bytes values to +		 * select how many bits are correctable; see nand_bch_init() +		 * for details. Otherwise, default to 4 bits for large page +		 * devices. +		 */ +		if (!chip->ecc.size && (mtd->oobsize >= 64)) { +			chip->ecc.size = 512; +			chip->ecc.bytes = 7; +		} +		chip->ecc.priv = nand_bch_init(mtd, +					       chip->ecc.size, +					       chip->ecc.bytes, +					       &chip->ecc.layout); +		if (!chip->ecc.priv) +			pr_warn("BCH ECC initialization failed!\n"); + 		chip->ecc.strength = +			chip->ecc.bytes * 8 / fls(8 * chip->ecc.size); +		break; + +	case NAND_ECC_NONE: +		pr_warn("NAND_ECC_NONE selected by board driver. 
" +			"This is not recommended !!\n"); +		chip->ecc.read_page = nand_read_page_raw; +		chip->ecc.write_page = nand_write_page_raw; +		chip->ecc.read_oob = nand_read_oob_std; +		chip->ecc.read_page_raw = nand_read_page_raw; +		chip->ecc.write_page_raw = nand_write_page_raw; +		chip->ecc.write_oob = nand_write_oob_std; +		chip->ecc.size = mtd->writesize; +		chip->ecc.bytes = 0; +		break; + +	default: +		pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode); +		BUG(); +	} + +	/* For many systems, the standard OOB write also works for raw */ +	if (!chip->ecc.read_oob_raw) +		chip->ecc.read_oob_raw = chip->ecc.read_oob; +	if (!chip->ecc.write_oob_raw) +		chip->ecc.write_oob_raw = chip->ecc.write_oob; + +	/* +	 * The number of bytes available for a client to place data into +	 * the out of band area. +	 */ +	chip->ecc.layout->oobavail = 0; +	for (i = 0; chip->ecc.layout->oobfree[i].length +			&& i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++) +		chip->ecc.layout->oobavail += +			chip->ecc.layout->oobfree[i].length; +	mtd->oobavail = chip->ecc.layout->oobavail; + +	/* +	 * Set the number of read / write steps for one page depending on ECC +	 * mode. +	 */ +	chip->ecc.steps = mtd->writesize / chip->ecc.size; +	if (chip->ecc.steps * chip->ecc.size != mtd->writesize) { +		pr_warn("Invalid ECC parameters\n"); +		BUG(); +	} +	chip->ecc.total = chip->ecc.steps * chip->ecc.bytes; + +	/* Allow subpage writes up to ecc.steps. 
Not possible for MLC flash */ +	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && +	    !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) { +		switch (chip->ecc.steps) { +		case 2: +			mtd->subpage_sft = 1; +			break; +		case 4: +		case 8: +		case 16: +			mtd->subpage_sft = 2; +			break; +		} +	} +	chip->subpagesize = mtd->writesize >> mtd->subpage_sft; + +	/* Initialize state */ +	chip->state = FL_READY; + +	/* De-select the device */ +	chip->select_chip(mtd, -1); + +	/* Invalidate the pagebuffer reference */ +	chip->pagebuf = -1; + +	/* Large page NAND with SOFT_ECC should support subpage reads */ +	if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9)) +		chip->options |= NAND_SUBPAGE_READ; + +	/* Fill in remaining MTD driver data */ +	mtd->type = MTD_NANDFLASH; +	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : +						MTD_CAP_NANDFLASH; +	mtd->_erase = nand_erase; +	mtd->_point = NULL; +	mtd->_unpoint = NULL; +	mtd->_read = nand_read; +	mtd->_write = nand_write; +	mtd->_read_oob = nand_read_oob; +	mtd->_write_oob = nand_write_oob; +	mtd->_sync = nand_sync; +	mtd->_lock = NULL; +	mtd->_unlock = NULL; +	mtd->_block_isbad = nand_block_isbad; +	mtd->_block_markbad = nand_block_markbad; + +	/* propagate ecc info to mtd_info */ +	mtd->ecclayout = chip->ecc.layout; +	mtd->ecc_strength = chip->ecc.strength; +	/* +	 * Initialize bitflip_threshold to its default prior scan_bbt() call. +	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be +	 * properly set. +	 */ +	if (!mtd->bitflip_threshold) +		mtd->bitflip_threshold = mtd->ecc_strength; + +	/* Check, if we should skip the bad block table scan */ +	if (chip->options & NAND_SKIP_BBTSCAN) +		chip->options |= NAND_BBT_SCANNED; + +	return 0; +} + +/** + * nand_scan - [NAND Interface] Scan for the NAND device + * @mtd: MTD device structure + * @maxchips: number of chips to scan for + * + * This fills out all the uninitialized function pointers with the defaults. 
+ * The flash ID is read and the mtd/chip structures are filled with the + * appropriate values. The mtd->owner field must be set to the module of the + * caller. + */ +int nand_scan(struct mtd_info *mtd, int maxchips) +{ +	int ret; + +	ret = nand_scan_ident(mtd, maxchips, NULL); +	if (!ret) +		ret = nand_scan_tail(mtd); +	return ret; +} + +/** + * nand_release - [NAND Interface] Free resources held by the NAND device + * @mtd: MTD device structure + */ +void nand_release(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; + +	if (chip->ecc.mode == NAND_ECC_SOFT_BCH) +		nand_bch_free((struct nand_bch_control *)chip->ecc.priv); + +#ifdef CONFIG_MTD_PARTITIONS +	/* Deregister partitions */ +	del_mtd_partitions(mtd); +#endif + +	/* Free bad block table memory */ +	kfree(chip->bbt); +	if (!(chip->options & NAND_OWN_BUFFERS)) +		kfree(chip->buffers); + +	/* Free bad block descriptor memory */ +	if (chip->badblock_pattern && chip->badblock_pattern->options +			& NAND_BBT_DYNAMICSTRUCT) +		kfree(chip->badblock_pattern); +} diff --git a/roms/u-boot/drivers/mtd/nand/nand_bbt.c b/roms/u-boot/drivers/mtd/nand/nand_bbt.c new file mode 100644 index 00000000..8ef58451 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/nand_bbt.c @@ -0,0 +1,1397 @@ +/* + *  drivers/mtd/nand_bbt.c + * + *  Overview: + *   Bad block table support for the NAND driver + * + *  Copyright © 2004 Thomas Gleixner (tglx@linutronix.de) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Description: + * + * When nand_scan_bbt is called, then it tries to find the bad block table + * depending on the options in the BBT descriptor(s). If no flash based BBT + * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory + * marked good / bad blocks. This information is used to create a memory BBT. 
+ * Once a new bad block is discovered then the "factory" information is updated + * on the device. + * If a flash based BBT is specified then the function first tries to find the + * BBT on flash. If a BBT is found then the contents are read and the memory + * based BBT is created. If a mirrored BBT is selected then the mirror is + * searched too and the versions are compared. If the mirror has a greater + * version number, then the mirror BBT is used to build the memory based BBT. + * If the tables are not versioned, then we "or" the bad block information. + * If one of the BBTs is out of date or does not exist it is (re)created. + * If no BBT exists at all then the device is scanned for factory marked + * good / bad blocks and the bad block tables are created. + * + * For manufacturer created BBTs like the one found on M-SYS DOC devices + * the BBT is searched and read but never created + * + * The auto generated bad block table is located in the last good blocks + * of the device. The table is mirrored, so it can be updated eventually. + * The table is marked in the OOB area with an ident pattern and a version + * number which indicates which of both tables is more up to date. If the NAND + * controller needs the complete OOB area for the ECC information then the + * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of + * course): it moves the ident pattern and the version byte into the data area + * and the OOB area will remain untouched. + * + * The table uses 2 bits per block + * 11b:		block is good + * 00b:		block is factory marked bad + * 01b, 10b:	block is marked bad due to wear + * + * The memory bad block table uses the following scheme: + * 00b:		block is good + * 01b:		block is marked bad due to wear + * 10b:		block is reserved (to protect the bbt area) + * 11b:		block is factory marked bad + * + * Multichip devices like DOC store the bad block info per floor. 
+ * + * Following assumptions are made: + * - bbts start at a page boundary, if autolocated on a block boundary + * - the space necessary for a bbt in FLASH does not exceed a block boundary + * + */ + +#include <common.h> +#include <malloc.h> +#include <linux/compat.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/bbm.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/nand_ecc.h> +#include <linux/bitops.h> +#include <linux/string.h> + +#include <asm/errno.h> + +static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td) +{ +	if (memcmp(buf, td->pattern, td->len)) +		return -1; +	return 0; +} + +/** + * check_pattern - [GENERIC] check if a pattern is in the buffer + * @buf: the buffer to search + * @len: the length of buffer to search + * @paglen: the pagelength + * @td: search pattern descriptor + * + * Check for a pattern at the given place. Used to search bad block tables and + * good / bad block identifiers. If the SCAN_EMPTY option is set then check, if + * all bytes except the pattern area contain 0xff. + */ +static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) +{ +	int end = 0; +	uint8_t *p = buf; + +	if (td->options & NAND_BBT_NO_OOB) +		return check_pattern_no_oob(buf, td); + +	end = paglen + td->offs; +	if (td->options & NAND_BBT_SCANEMPTY) +		if (memchr_inv(p, 0xff, end)) +			return -1; +	p += end; + +	/* Compare the pattern */ +	if (memcmp(p, td->pattern, td->len)) +		return -1; + +	if (td->options & NAND_BBT_SCANEMPTY) { +		p += td->len; +		end += td->len; +		if (memchr_inv(p, 0xff, len - end)) +			return -1; +	} +	return 0; +} + +/** + * check_short_pattern - [GENERIC] check if a pattern is in the buffer + * @buf: the buffer to search + * @td:	search pattern descriptor + * + * Check for a pattern at the given place. Used to search bad block tables and + * good / bad block identifiers. Same as check_pattern, but no optional empty + * check. 
+ */ +static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td) +{ +	/* Compare the pattern */ +	if (memcmp(buf + td->offs, td->pattern, td->len)) +		return -1; +	return 0; +} + +/** + * add_marker_len - compute the length of the marker in data area + * @td: BBT descriptor used for computation + * + * The length will be 0 if the marker is located in OOB area. + */ +static u32 add_marker_len(struct nand_bbt_descr *td) +{ +	u32 len; + +	if (!(td->options & NAND_BBT_NO_OOB)) +		return 0; + +	len = td->len; +	if (td->options & NAND_BBT_VERSION) +		len++; +	return len; +} + +/** + * read_bbt - [GENERIC] Read the bad block table starting from page + * @mtd: MTD device structure + * @buf: temporary buffer + * @page: the starting page + * @num: the number of bbt descriptors to read + * @td: the bbt describtion table + * @offs: offset in the memory table + * + * Read the bad block table starting from page. + */ +static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num, +		struct nand_bbt_descr *td, int offs) +{ +	int res, ret = 0, i, j, act = 0; +	struct nand_chip *this = mtd->priv; +	size_t retlen, len, totlen; +	loff_t from; +	int bits = td->options & NAND_BBT_NRBITS_MSK; +	uint8_t msk = (uint8_t)((1 << bits) - 1); +	u32 marker_len; +	int reserved_block_code = td->reserved_block_code; + +	totlen = (num * bits) >> 3; +	marker_len = add_marker_len(td); +	from = ((loff_t)page) << this->page_shift; + +	while (totlen) { +		len = min(totlen, (size_t)(1 << this->bbt_erase_shift)); +		if (marker_len) { +			/* +			 * In case the BBT marker is not in the OOB area it +			 * will be just in the first page. 
+			 */ +			len -= marker_len; +			from += marker_len; +			marker_len = 0; +		} +		res = mtd_read(mtd, from, len, &retlen, buf); +		if (res < 0) { +			if (mtd_is_eccerr(res)) { +				pr_info("nand_bbt: ECC error in BBT at " +					"0x%012llx\n", from & ~mtd->writesize); +				return res; +			} else if (mtd_is_bitflip(res)) { +				pr_info("nand_bbt: corrected error in BBT at " +					"0x%012llx\n", from & ~mtd->writesize); +				ret = res; +			} else { +				pr_info("nand_bbt: error reading BBT\n"); +				return res; +			} +		} + +		/* Analyse data */ +		for (i = 0; i < len; i++) { +			uint8_t dat = buf[i]; +			for (j = 0; j < 8; j += bits, act += 2) { +				uint8_t tmp = (dat >> j) & msk; +				if (tmp == msk) +					continue; +				if (reserved_block_code && (tmp == reserved_block_code)) { +					pr_info("nand_read_bbt: reserved block at 0x%012llx\n", +						 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift); +					this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); +					mtd->ecc_stats.bbtblocks++; +					continue; +				} +				pr_info("nand_read_bbt: Bad block at 0x%012llx\n", +					(loff_t)((offs << 2) + (act >> 1)) +					<< this->bbt_erase_shift); +				/* Factory marked bad or worn out? */ +				if (tmp == 0) +					this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); +				else +					this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06); +				mtd->ecc_stats.badblocks++; +			} +		} +		totlen -= len; +		from += len; +	} +	return ret; +} + +/** + * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page + * @mtd: MTD device structure + * @buf: temporary buffer + * @td: descriptor for the bad block table + * @chip: read the table for a specific chip, -1 read all chips; applies only if + *        NAND_BBT_PERCHIP option is set + * + * Read the bad block table for all chips starting at a given page. We assume + * that the bbt bits are in consecutive order. 
 */
static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
{
	struct nand_chip *this = mtd->priv;
	int res = 0, i;

	if (td->options & NAND_BBT_PERCHIP) {
		/* One BBT per chip: walk all chips, keeping a running
		 * offset into the (2-bit-per-block) memory table. */
		int offs = 0;
		for (i = 0; i < this->numchips; i++) {
			if (chip == -1 || chip == i)
				res = read_bbt(mtd, buf, td->pages[i],
					this->chipsize >> this->bbt_erase_shift,
					td, offs);
			if (res)
				return res;
			offs += this->chipsize >> (this->bbt_erase_shift + 2);
		}
	} else {
		/* Single table covering the whole device */
		res = read_bbt(mtd, buf, td->pages[0],
				mtd->size >> this->bbt_erase_shift, td, 0);
		if (res)
			return res;
	}
	return 0;
}

/* BBT marker is in the first page, no OOB: read pattern (+version byte) only */
static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
			 struct nand_bbt_descr *td)
{
	size_t retlen;
	size_t len;

	len = td->len;
	if (td->options & NAND_BBT_VERSION)
		len++;

	return mtd_read(mtd, offs, len, &retlen, buf);
}

/**
 * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
 * @mtd: MTD device structure
 * @buf: temporary buffer
 * @offs: offset at which to scan
 * @len: length of data region to read
 *
 * Scan read data from data+OOB. May traverse multiple pages, interleaving
 * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
 * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
 */
static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
			 size_t len)
{
	struct mtd_oob_ops ops;
	int res, ret = 0;

	ops.mode = MTD_OPS_PLACE_OOB;
	ops.ooboffs = 0;
	ops.ooblen = mtd->oobsize;

	while (len > 0) {
		/* One page of data followed by its full OOB in buf */
		ops.datbuf = buf;
		ops.len = min(len, (size_t)mtd->writesize);
		ops.oobbuf = buf + ops.len;

		res = mtd_read_oob(mtd, offs, &ops);
		if (res) {
			/* Abort on hard errors; remember the strongest
			 * ECC condition (eccerr beats bitflip) otherwise */
			if (!mtd_is_bitflip_or_eccerr(res))
				return res;
			else if (mtd_is_eccerr(res) || !ret)
				ret = res;
		}

		buf += mtd->oobsize + mtd->writesize;
		len -= mtd->writesize;
		offs += mtd->writesize;
	}
	return ret;
}

/* Dispatch: marker in data area (no OOB) vs. interleaved data+OOB read */
static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
			 size_t len, struct nand_bbt_descr *td)
{
	if (td->options & NAND_BBT_NO_OOB)
		return scan_read_data(mtd, buf, offs, td);
	else
		return scan_read_oob(mtd, buf, offs, len);
}

/* Scan write data with oob to flash */
static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
			  uint8_t *buf, uint8_t *oob)
{
	struct mtd_oob_ops ops;

	ops.mode = MTD_OPS_PLACE_OOB;
	ops.ooboffs = 0;
	ops.ooblen = mtd->oobsize;
	ops.datbuf = buf;
	ops.oobbuf = oob;
	ops.len = len;

	return mtd_write_oob(mtd, offs, &ops);
}

/* Offset of the version byte in buf: in OOB it follows the page data */
static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
{
	u32 ver_offs = td->veroffs;

	if (!(td->options & NAND_BBT_NO_OOB))
		ver_offs += mtd->writesize;
	return ver_offs;
}

/**
 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
 * @mtd: MTD device structure
 * @buf: temporary buffer
 * @td: descriptor for the bad block table
 * @md:	descriptor for the bad block table mirror
 *
 * Read the bad block table(s) for all chips starting at a given page. We
 * assume that the bbt bits are in consecutive order.
 */
static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
			  struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
	struct nand_chip *this = mtd->priv;

	/* Read the primary version, if available */
	if (td->options & NAND_BBT_VERSION) {
		scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
			      mtd->writesize, td);
		td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
		pr_info("Bad block table at page %d, version 0x%02X\n",
			 td->pages[0], td->version[0]);
	}

	/* Read the mirror version, if available */
	if (md && (md->options & NAND_BBT_VERSION)) {
		scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
			      mtd->writesize, md);
		md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
		pr_info("Bad block table at page %d, version 0x%02X\n",
			 md->pages[0], md->version[0]);
	}
}

/* Scan a given block full: read all pages and check each for the BBM pattern */
static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
			   loff_t offs, uint8_t *buf, size_t readlen,
			   int scanlen, int numpages)
{
	int ret, j;

	ret = scan_read_oob(mtd, buf, offs, readlen);
	/* Ignore ECC errors when checking for BBM */
	if (ret && !mtd_is_bitflip_or_eccerr(ret))
		return ret;

	for (j = 0; j < numpages; j++, buf += scanlen) {
		if (check_pattern(buf, scanlen, mtd->writesize, bd))
			return 1;	/* block is bad */
	}
	return 0;
}

/* Scan a given block partially: check only the OOB of the first numpages */
static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
			   loff_t offs, uint8_t *buf, int numpages)
{
	struct mtd_oob_ops ops;
	int j, ret;

	ops.ooblen = mtd->oobsize;
	ops.oobbuf = buf;
	ops.ooboffs = 0;
	ops.datbuf = NULL;
	ops.mode = MTD_OPS_PLACE_OOB;

	for (j = 0; j < numpages; j++) {
		/*
		 * Read the full oob until read_oob is fixed to handle single
		 * byte reads for 16 bit buswidth.
		 */
		ret = mtd_read_oob(mtd, offs, &ops);
		/* Ignore ECC errors when checking for BBM */
		if (ret && !mtd_is_bitflip_or_eccerr(ret))
			return ret;

		if (check_short_pattern(buf, bd))
			return 1;	/* block is bad */

		offs += mtd->writesize;
	}
	return 0;
}

/**
 * create_bbt - [GENERIC] Create a bad block table by scanning the device
 * @mtd: MTD device structure
 * @buf: temporary buffer
 * @bd: descriptor for the good/bad block search pattern
 * @chip: create the table for a specific chip, -1 read all chips; applies only
 *        if NAND_BBT_PERCHIP option is set
 *
 * Create a bad block table by scanning the device for the given good/bad block
 * identify pattern.
 */
static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
	struct nand_bbt_descr *bd, int chip)
{
	struct nand_chip *this = mtd->priv;
	int i, numblocks, numpages, scanlen;
	int startblock;
	loff_t from;
	size_t readlen;

	pr_info("Scanning device for bad blocks\n");

	/* How many pages per block must be examined for the marker? */
	if (bd->options & NAND_BBT_SCANALLPAGES)
		numpages = 1 << (this->bbt_erase_shift - this->page_shift);
	else if (bd->options & NAND_BBT_SCAN2NDPAGE)
		numpages = 2;
	else
		numpages = 1;

	if (!(bd->options & NAND_BBT_SCANEMPTY)) {
		/* We need only read few bytes from the OOB area */
		scanlen = 0;
		readlen = bd->len;
	} else {
		/* Full page content should be read */
		scanlen = mtd->writesize + mtd->oobsize;
		readlen = numpages * mtd->writesize;
	}

	if (chip == -1) {
		/*
		 * Note that numblocks is 2 * (real numblocks) here, see i+=2
		 * below as it makes shifting and masking less painful
		 */
		numblocks = mtd->size >> (this->bbt_erase_shift - 1);
		startblock = 0;
		from = 0;
	} else {
		if (chip >= this->numchips) {
			pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
			       chip + 1, this->numchips);
			return -EINVAL;
		}
		numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
		startblock = chip * numblocks;
		numblocks += startblock;
		from = (loff_t)startblock << (this->bbt_erase_shift - 1);
	}

	/* Some devices put the marker in the last page(s) of the block */
	if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
		from += mtd->erasesize - (mtd->writesize * numpages);

	for (i = startblock; i < numblocks;) {
		int ret;

		BUG_ON(bd->options & NAND_BBT_NO_OOB);

		if (bd->options & NAND_BBT_SCANALLPAGES)
			ret = scan_block_full(mtd, bd, from, buf, readlen,
					      scanlen, numpages);
		else
			ret = scan_block_fast(mtd, bd, from, buf, numpages);

		if (ret < 0)
			return ret;

		if (ret) {
			/* Mark as factory bad (0b11) in the 2-bit table */
			this->bbt[i >> 3] |= 0x03 << (i & 0x6);
			pr_warn("Bad eraseblock %d at 0x%012llx\n",
				  i >> 1, (unsigned long long)from);
			mtd->ecc_stats.badblocks++;
		}

		i += 2;
		from += (1 << this->bbt_erase_shift);
	}
	return 0;
}

/**
 * search_bbt - [GENERIC] scan the device for a specific bad block table
 * @mtd: MTD device structure
 * @buf: temporary buffer
 * @td: descriptor for the bad block table
 *
 * Read the bad block table by searching for a given ident pattern. Search is
 * preformed either from the beginning up or from the end of the device
 * downwards. The search starts always at the start of a block. If the option
 * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
 * the bad block information of this chip. This is necessary to provide support
 * for certain DOC devices.
 *
 * The bbt ident pattern resides in the oob area of the first page in a block.
 */
static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
{
	struct nand_chip *this = mtd->priv;
	int i, chips;
	int startblock, block, dir;
	int scanlen = mtd->writesize + mtd->oobsize;
	int bbtblocks;
	int blocktopage = this->bbt_erase_shift - this->page_shift;

	/* Search direction top -> down? */
	if (td->options & NAND_BBT_LASTBLOCK) {
		startblock = (mtd->size >> this->bbt_erase_shift) - 1;
		dir = -1;
	} else {
		startblock = 0;
		dir = 1;
	}

	/* Do we have a bbt per chip? */
	if (td->options & NAND_BBT_PERCHIP) {
		chips = this->numchips;
		bbtblocks = this->chipsize >> this->bbt_erase_shift;
		startblock &= bbtblocks - 1;
	} else {
		chips = 1;
		bbtblocks = mtd->size >> this->bbt_erase_shift;
	}

	for (i = 0; i < chips; i++) {
		/* Reset version information */
		td->version[i] = 0;
		td->pages[i] = -1;
		/* Scan the maximum number of blocks */
		for (block = 0; block < td->maxblocks; block++) {

			int actblock = startblock + dir * block;
			loff_t offs = (loff_t)actblock << this->bbt_erase_shift;

			/* Read first page */
			scan_read(mtd, buf, offs, mtd->writesize, td);
			if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
				td->pages[i] = actblock << blocktopage;
				if (td->options & NAND_BBT_VERSION) {
					offs = bbt_get_ver_offs(mtd, td);
					td->version[i] = buf[offs];
				}
				break;
			}
		}
		startblock += this->chipsize >> this->bbt_erase_shift;
	}
	/* Check, if we found a bbt for each requested chip */
	for (i = 0; i < chips; i++) {
		if (td->pages[i] == -1)
			pr_warn("Bad block table not found for chip %d\n", i);
		else
			pr_info("Bad block table found at page %d, version 0x%02X\n", td->pages[i],
				td->version[i]);
	}
	return 0;
}

/**
 * search_read_bbts - [GENERIC] scan the device for bad block table(s)
 * @mtd: MTD device structure
 * @buf: temporary buffer
 * @td: descriptor for the bad block table
 * @md: descriptor for the bad block table mirror
 *
 * Search and read the bad block table(s).
 */
static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
			     struct nand_bbt_descr *td,
			     struct nand_bbt_descr *md)
{
	/* Search the primary table */
	search_bbt(mtd, buf, td);

	/* Search the mirror table */
	if (md)
		search_bbt(mtd, buf, md);
}

/**
 * write_bbt - [GENERIC] (Re)write the bad block table
 * @mtd: MTD device structure
 * @buf: temporary buffer
 * @td: descriptor for the bad block table
 * @md: descriptor for the bad block table mirror
 * @chipsel: selector for a specific chip, -1 for all
 *
 * (Re)write the bad block table. The table is packed @bits bits per block
 * into a freshly erased block, with the ident pattern (and optional version
 * byte) placed either in OOB or at the start of the data area (NO_OOB).
 * Returns 0 on success, a negative error code otherwise.
 */
static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
		     struct nand_bbt_descr *td, struct nand_bbt_descr *md,
		     int chipsel)
{
	struct nand_chip *this = mtd->priv;
	struct erase_info einfo;
	int i, j, res, chip = 0;
	int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
	int nrchips, bbtoffs, pageoffs, ooboffs;
	uint8_t msk[4];
	uint8_t rcode = td->reserved_block_code;
	size_t retlen, len = 0;
	loff_t to;
	struct mtd_oob_ops ops;

	ops.ooblen = mtd->oobsize;
	ops.ooboffs = 0;
	ops.datbuf = NULL;
	ops.mode = MTD_OPS_PLACE_OOB;

	if (!rcode)
		rcode = 0xff;
	/* Write bad block table per chip rather than per device? */
	if (td->options & NAND_BBT_PERCHIP) {
		numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
		/* Full device write or specific chip? */
		if (chipsel == -1) {
			nrchips = this->numchips;
		} else {
			nrchips = chipsel + 1;
			chip = chipsel;
		}
	} else {
		numblocks = (int)(mtd->size >> this->bbt_erase_shift);
		nrchips = 1;
	}

	/* Loop through the chips */
	for (; chip < nrchips; chip++) {
		/*
		 * There was already a version of the table, reuse the page
		 * This applies for absolute placement too, as we have the
		 * page nr. in td->pages.
		 */
		if (td->pages[chip] != -1) {
			page = td->pages[chip];
			goto write;
		}

		/*
		 * Automatic placement of the bad block table. Search direction
		 * top -> down?
		 */
		if (td->options & NAND_BBT_LASTBLOCK) {
			startblock = numblocks * (chip + 1) - 1;
			dir = -1;
		} else {
			startblock = chip * numblocks;
			dir = 1;
		}

		for (i = 0; i < td->maxblocks; i++) {
			int block = startblock + dir * i;
			/* Check, if the block is bad */
			switch ((this->bbt[block >> 2] >>
				 (2 * (block & 0x03))) & 0x03) {
			case 0x01:
			case 0x03:
				continue;
			}
			page = block <<
				(this->bbt_erase_shift - this->page_shift);
			/* Check, if the block is used by the mirror table */
			if (!md || md->pages[chip] != page)
				goto write;
		}
		pr_err("No space left to write bad block table\n");
		return -ENOSPC;
	write:

		/* Set up shift count and masks for the flash table */
		bits = td->options & NAND_BBT_NRBITS_MSK;
		msk[2] = ~rcode;	/* reserved blocks keep the rcode */
		switch (bits) {
		case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
			msk[3] = 0x01;
			break;
		case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
			msk[3] = 0x03;
			break;
		case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
			msk[3] = 0x0f;
			break;
		case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
			msk[3] = 0xff;
			break;
		default: return -EINVAL;
		}

		bbtoffs = chip * (numblocks >> 2);

		to = ((loff_t)page) << this->page_shift;

		/* Must we save the block contents? */
		if (td->options & NAND_BBT_SAVECONTENT) {
			/* Make it block aligned */
			to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
			len = 1 << this->bbt_erase_shift;
			res = mtd_read(mtd, to, len, &retlen, buf);
			if (res < 0) {
				if (retlen != len) {
					pr_info("nand_bbt: error reading block "
						"for writing the bad block table\n");
					return res;
				}
				pr_warn("nand_bbt: ECC error while reading "
					"block for writing bad block table\n");
			}
			/* Read oob data */
			ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
			ops.oobbuf = &buf[len];
			res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
			if (res < 0 || ops.oobretlen != ops.ooblen)
				goto outerr;

			/* Calc the byte offset in the buffer */
			pageoffs = page - (int)(to >> this->page_shift);
			offs = pageoffs << this->page_shift;
			/* Preset the bbt area with 0xff */
			memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
			ooboffs = len + (pageoffs * mtd->oobsize);

		} else if (td->options & NAND_BBT_NO_OOB) {
			/* Marker (and version) live at the start of the data */
			ooboffs = 0;
			offs = td->len;
			/* The version byte */
			if (td->options & NAND_BBT_VERSION)
				offs++;
			/* Calc length */
			len = (size_t)(numblocks >> sft);
			len += offs;
			/* Make it page aligned! */
			len = ALIGN(len, mtd->writesize);
			/* Preset the buffer with 0xff */
			memset(buf, 0xff, len);
			/* Pattern is located at the begin of first page */
			memcpy(buf, td->pattern, td->len);
		} else {
			/* Calc length */
			len = (size_t)(numblocks >> sft);
			/* Make it page aligned! */
			len = ALIGN(len, mtd->writesize);
			/* Preset the buffer with 0xff */
			memset(buf, 0xff, len +
			       (len >> this->page_shift)* mtd->oobsize);
			offs = 0;
			ooboffs = len;
			/* Pattern is located in oob area of first page */
			memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
		}

		if (td->options & NAND_BBT_VERSION)
			buf[ooboffs + td->veroffs] = td->version[chip];

		/* Walk through the memory table */
		for (i = 0; i < numblocks;) {
			uint8_t dat;
			dat = this->bbt[bbtoffs + (i >> 2)];
			for (j = 0; j < 4; j++, i++) {
				int sftcnt = (i << (3 - sft)) & sftmsk;
				/* Do not store the reserved bbt blocks! */
				buf[offs + (i >> sft)] &=
					~(msk[dat & 0x03] << sftcnt);
				dat >>= 2;
			}
		}

		memset(&einfo, 0, sizeof(einfo));
		einfo.mtd = mtd;
		einfo.addr = to;
		einfo.len = 1 << this->bbt_erase_shift;
		res = nand_erase_nand(mtd, &einfo, 1);
		if (res < 0)
			goto outerr;

		res = scan_write_bbt(mtd, to, len, buf,
				td->options & NAND_BBT_NO_OOB ? NULL :
				&buf[len]);
		if (res < 0)
			goto outerr;

		pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
			 (unsigned long long)to, td->version[chip]);

		/* Mark it as used */
		td->pages[chip] = page;
	}
	return 0;

 outerr:
	pr_warn("nand_bbt: error while writing bad block table %d\n", res);
	return res;
}

/**
 * nand_memory_bbt - [GENERIC] create a memory based bad block table
 * @mtd: MTD device structure
 * @bd: descriptor for the good/bad block search pattern
 *
 * The function creates a memory based bbt by scanning the device for
 * manufacturer / software marked good / bad blocks.
 */
static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
	struct nand_chip *this = mtd->priv;

	bd->options &= ~NAND_BBT_SCANEMPTY;
	return create_bbt(mtd, this->buffers->databuf, bd, -1);
}

/**
 * check_create - [GENERIC] create and write bbt(s) if necessary
 * @mtd: MTD device structure
 * @buf: temporary buffer
 * @bd: descriptor for the good/bad block search pattern
 *
 * The function checks the results of the previous call to read_bbt and creates
 * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
 * for the chip/device. Update is necessary if one of the tables is missing or
 * the version nr. of one table is less than the other.
 */
static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
{
	int i, chips, writeops, create, chipsel, res, res2;
	struct nand_chip *this = mtd->priv;
	struct nand_bbt_descr *td = this->bbt_td;
	struct nand_bbt_descr *md = this->bbt_md;
	struct nand_bbt_descr *rd, *rd2;

	/* Do we have a bbt per chip? */
	if (td->options & NAND_BBT_PERCHIP)
		chips = this->numchips;
	else
		chips = 1;

	for (i = 0; i < chips; i++) {
		/* writeops: bit 0 = rewrite primary, bit 1 = rewrite mirror */
		writeops = 0;
		create = 0;
		rd = NULL;
		rd2 = NULL;
		res = res2 = 0;
		/* Per chip or per device? */
		chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
		/* Mirrored table available? */
		if (md) {
			if (td->pages[i] == -1 && md->pages[i] == -1) {
				create = 1;
				writeops = 0x03;
			} else if (td->pages[i] == -1) {
				rd = md;
				writeops = 0x01;
			} else if (md->pages[i] == -1) {
				rd = td;
				writeops = 0x02;
			} else if (td->version[i] == md->version[i]) {
				rd = td;
				if (!(td->options & NAND_BBT_VERSION))
					rd2 = md;
			} else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
				/* signed diff handles version wrap-around */
				rd = td;
				writeops = 0x02;
			} else {
				rd = md;
				writeops = 0x01;
			}
		} else {
			if (td->pages[i] == -1) {
				create = 1;
				writeops = 0x01;
			} else {
				rd = td;
			}
		}

		if (create) {
			/* Create the bad block table by scanning the device? */
			if (!(td->options & NAND_BBT_CREATE))
				continue;

			/* Create the table in memory by scanning the chip(s) */
			if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
				create_bbt(mtd, buf, bd, chipsel);

			td->version[i] = 1;
			if (md)
				md->version[i] = 1;
		}

		/* Read back first? */
		if (rd) {
			res = read_abs_bbt(mtd, buf, rd, chipsel);
			if (mtd_is_eccerr(res)) {
				/* Mark table as invalid and retry this chip */
				rd->pages[i] = -1;
				rd->version[i] = 0;
				i--;
				continue;
			}
		}
		/* If they weren't versioned, read both */
		if (rd2) {
			res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
			if (mtd_is_eccerr(res2)) {
				/* Mark table as invalid and retry this chip */
				rd2->pages[i] = -1;
				rd2->version[i] = 0;
				i--;
				continue;
			}
		}

		/* Scrub the flash table(s)? */
		if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
			writeops = 0x03;

		/* Update version numbers before writing */
		if (md) {
			td->version[i] = max(td->version[i], md->version[i]);
			md->version[i] = td->version[i];
		}

		/* Write the bad block table to the device? */
		if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
			res = write_bbt(mtd, buf, td, md, chipsel);
			if (res < 0)
				return res;
		}

		/* Write the mirror bad block table to the device? */
		if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
			res = write_bbt(mtd, buf, md, td, chipsel);
			if (res < 0)
				return res;
		}
	}
	return 0;
}

/**
 * mark_bbt_regions - [GENERIC] mark the bad block table regions
 * @mtd: MTD device structure
 * @td: bad block table descriptor
 *
 * The bad block table regions are marked as "bad" to prevent accidental
 * erasures / writes. The regions are identified by the mark 0x02.
 */
static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
{
	struct nand_chip *this = mtd->priv;
	int i, j, chips, block, nrblocks, update;
	uint8_t oldval, newval;

	/* Do we have a bbt per chip? */
	if (td->options & NAND_BBT_PERCHIP) {
		chips = this->numchips;
		nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
	} else {
		chips = 1;
		nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
	}

	for (i = 0; i < chips; i++) {
		if ((td->options & NAND_BBT_ABSPAGE) ||
		    !(td->options & NAND_BBT_WRITE)) {
			/* Fixed placement: reserve only the block holding the table */
			if (td->pages[i] == -1)
				continue;
			block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
			block <<= 1;	/* block number * 2 for 2-bit entries */
			oldval = this->bbt[(block >> 3)];
			newval = oldval | (0x2 << (block & 0x06));
			this->bbt[(block >> 3)] = newval;
			if ((oldval != newval) && td->reserved_block_code)
				nand_update_bbt(mtd, (loff_t)block << (this->bbt_erase_shift - 1));
			continue;
		}
		/* Automatic placement: reserve the whole maxblocks window */
		update = 0;
		if (td->options & NAND_BBT_LASTBLOCK)
			block = ((i + 1) * nrblocks) - td->maxblocks;
		else
			block = i * nrblocks;
		block <<= 1;
		for (j = 0; j < td->maxblocks; j++) {
			oldval = this->bbt[(block >> 3)];
			newval = oldval | (0x2 << (block & 0x06));
			this->bbt[(block >> 3)] = newval;
			if (oldval != newval)
				update = 1;
			block += 2;
		}
		/*
		 * If we want reserved blocks to be recorded to flash, and some
		 * new ones have been marked, then we need to update the stored
		 * bbts.  This should only happen once.
		 */
		if (update && td->reserved_block_code)
			nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
	}
}

/**
 * verify_bbt_descr - verify the bad block description
 * @mtd: MTD device structure
 * @bd: the table to verify
 *
 * This functions performs a few sanity checks on the bad block description
 * table.
 */
static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
	struct nand_chip *this = mtd->priv;
	u32 pattern_len;
	u32 bits;
	u32 table_size;

	if (!bd)
		return;

	pattern_len = bd->len;
	bits = bd->options & NAND_BBT_NRBITS_MSK;

	BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
			!(this->bbt_options & NAND_BBT_USE_FLASH));
	BUG_ON(!bits);

	if (bd->options & NAND_BBT_VERSION)
		pattern_len++;

	if (bd->options & NAND_BBT_NO_OOB) {
		BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
		BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
		BUG_ON(bd->offs);
		if (bd->options & NAND_BBT_VERSION)
			BUG_ON(bd->veroffs != bd->len);
		BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
	}

	/* The packed table (plus in-data marker) must fit in one eraseblock */
	if (bd->options & NAND_BBT_PERCHIP)
		table_size = this->chipsize >> this->bbt_erase_shift;
	else
		table_size = mtd->size >> this->bbt_erase_shift;
	table_size >>= 3;
	table_size *= bits;
	if (bd->options & NAND_BBT_NO_OOB)
		table_size += pattern_len;
	BUG_ON(table_size > (1 << this->bbt_erase_shift));
}

/**
 * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
 * @mtd: MTD device structure
 * @bd: descriptor for the good/bad block search pattern
 *
 * The function checks, if a bad block table(s) is/are already available.
If
 * not it scans the device for manufacturer marked good / bad blocks and writes
 * the bad block table(s) to the selected place.
 *
 * The bad block table memory is allocated here. It must be freed by calling
 * the nand_free_bbt function.
 */
int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
	struct nand_chip *this = mtd->priv;
	int len, res = 0;
	uint8_t *buf;
	struct nand_bbt_descr *td = this->bbt_td;
	struct nand_bbt_descr *md = this->bbt_md;

	len = mtd->size >> (this->bbt_erase_shift + 2);
	/*
	 * Allocate memory (2bit per block) and clear the memory bad block
	 * table.
	 */
	this->bbt = kzalloc(len, GFP_KERNEL);
	if (!this->bbt)
		return -ENOMEM;

	/*
	 * If no primary table decriptor is given, scan the device to build a
	 * memory based bad block table.
	 */
	if (!td) {
		if ((res = nand_memory_bbt(mtd, bd))) {
			pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
			kfree(this->bbt);
			this->bbt = NULL;
		}
		return res;
	}
	verify_bbt_descr(mtd, td);
	verify_bbt_descr(mtd, md);

	/* Allocate a temporary buffer for one eraseblock incl. oob */
	len = (1 << this->bbt_erase_shift);
	len += (len >> this->page_shift) * mtd->oobsize;
	buf = vmalloc(len);
	if (!buf) {
		kfree(this->bbt);
		this->bbt = NULL;
		return -ENOMEM;
	}

	/* Is the bbt at a given page? */
	if (td->options & NAND_BBT_ABSPAGE) {
		read_abs_bbts(mtd, buf, td, md);
	} else {
		/* Search the bad block table using a pattern in oob */
		search_read_bbts(mtd, buf, td, md);
	}

	/* Create/update the on-flash table(s) if missing or stale */
	res = check_create(mtd, buf, bd);

	/* Prevent the bbt regions from erasing / writing */
	mark_bbt_region(mtd, td);
	if (md)
		mark_bbt_region(mtd, md);

	vfree(buf);
	return res;
}

/**
 * nand_update_bbt - [NAND Interface] update bad block table(s)
 * @mtd: MTD device structure
 * @offs: the offset of the newly marked block
 *
 * The function updates the bad block table(s).
 */
int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
{
	struct nand_chip *this = mtd->priv;
	int len, res = 0;
	int chip, chipsel;
	uint8_t *buf;
	struct nand_bbt_descr *td = this->bbt_td;
	struct nand_bbt_descr *md = this->bbt_md;

	/* Nothing to update without a memory table and a flash descriptor */
	if (!this->bbt || !td)
		return -EINVAL;

	/* Allocate a temporary buffer for one eraseblock incl. oob */
	len = (1 << this->bbt_erase_shift);
	len += (len >> this->page_shift) * mtd->oobsize;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Do we have a bbt per chip? */
	if (td->options & NAND_BBT_PERCHIP) {
		chip = (int)(offs >> this->chip_shift);
		chipsel = chip;
	} else {
		chip = 0;
		chipsel = -1;
	}

	/* Bump the version so readers pick the rewritten table */
	td->version[chip]++;
	if (md)
		md->version[chip]++;

	/* Write the bad block table to the device? */
	if (td->options & NAND_BBT_WRITE) {
		res = write_bbt(mtd, buf, td, md, chipsel);
		if (res < 0)
			goto out;
	}
	/* Write the mirror bad block table to the device? */
	if (md && (md->options & NAND_BBT_WRITE)) {
		res = write_bbt(mtd, buf, md, td, chipsel);
	}

 out:
	kfree(buf);
	return res;
}

/*
 * Define some generic bad / good block scan pattern which are used
 * while scanning a device for factory marked good / bad blocks.
 */
/* Factory bad block marker: erased (0xff) OOB bytes mean "good" */
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

/* AG-AND devices mark _good_ blocks with this pattern */
static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };

static struct nand_bbt_descr agand_flashbased = {
	.options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES,
	.offs = 0x20,
	.len = 6,
	.pattern = scan_agand_pattern
};

/* Generic flash bbt descriptors */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

/* Primary table: ident pattern and version byte live in the OOB area */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
	.pattern = mirror_pattern
};

/* Variants for controllers that need the whole OOB for ECC (NAND_BBT_NO_OOB):
 * pattern and version byte are stored at the start of the data area. */
static struct nand_bbt_descr bbt_main_no_oob_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
		| NAND_BBT_NO_OOB,
	.len = 4,
	.veroffs = 4,
	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
		| NAND_BBT_NO_OOB,
	.len = 4,
	.veroffs = 4,
	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
	.pattern = mirror_pattern
};

#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
/**
 * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
 * @this: NAND chip to create descriptor for
 *
 * This function allocates and initializes a nand_bbt_descr for BBM detection
 * based on the properties of @this.
The new descriptor is stored in + * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when + * passed to this function. + */ +static int nand_create_badblock_pattern(struct nand_chip *this) +{ +	struct nand_bbt_descr *bd; +	if (this->badblock_pattern) { +		pr_warn("Bad block pattern already allocated; not replacing\n"); +		return -EINVAL; +	} +	bd = kzalloc(sizeof(*bd), GFP_KERNEL); +	if (!bd) +		return -ENOMEM; +	bd->options = this->bbt_options & BADBLOCK_SCAN_MASK; +	bd->offs = this->badblockpos; +	bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1; +	bd->pattern = scan_ff_pattern; +	bd->options |= NAND_BBT_DYNAMICSTRUCT; +	this->badblock_pattern = bd; +	return 0; +} + +/** + * nand_default_bbt - [NAND Interface] Select a default bad block table for the device + * @mtd: MTD device structure + * + * This function selects the default bad block table support for the device and + * calls the nand_scan_bbt function. + */ +int nand_default_bbt(struct mtd_info *mtd) +{ +	struct nand_chip *this = mtd->priv; + +	/* +	 * Default for AG-AND. We must use a flash based bad block table as the +	 * devices have factory marked _good_ blocks. Erasing those blocks +	 * leads to loss of the good / bad information, so we _must_ store this +	 * information in a good / bad table during startup. +	 */ +	if (this->options & NAND_IS_AND) { +		/* Use the default pattern descriptors */ +		if (!this->bbt_td) { +			this->bbt_td = &bbt_main_descr; +			this->bbt_md = &bbt_mirror_descr; +		} +		this->bbt_options |= NAND_BBT_USE_FLASH; +		return nand_scan_bbt(mtd, &agand_flashbased); +	} + +	/* Is a flash based bad block table requested? 
*/ +	if (this->bbt_options & NAND_BBT_USE_FLASH) { +		/* Use the default pattern descriptors */ +		if (!this->bbt_td) { +			if (this->bbt_options & NAND_BBT_NO_OOB) { +				this->bbt_td = &bbt_main_no_oob_descr; +				this->bbt_md = &bbt_mirror_no_oob_descr; +			} else { +				this->bbt_td = &bbt_main_descr; +				this->bbt_md = &bbt_mirror_descr; +			} +		} +	} else { +		this->bbt_td = NULL; +		this->bbt_md = NULL; +	} + +	if (!this->badblock_pattern) +		nand_create_badblock_pattern(this); + +	return nand_scan_bbt(mtd, this->badblock_pattern); +} + +/** + * nand_isbad_bbt - [NAND Interface] Check if a block is bad + * @mtd: MTD device structure + * @offs: offset in the device + * @allowbbt: allow access to bad block table region + */ +int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) +{ +	struct nand_chip *this = mtd->priv; +	int block; +	uint8_t res; + +	/* Get block number * 2 */ +	block = (int)(offs >> (this->bbt_erase_shift - 1)); +	res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; + +	MTDDEBUG(MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n", +	      (unsigned int)offs, block >> 1, res); + +	switch ((int)res) { +	case 0x00: +		return 0; +	case 0x01: +		return 1; +	case 0x02: +		return allowbbt ? 0 : 1; +	} +	return 1; +} diff --git a/roms/u-boot/drivers/mtd/nand/nand_bch.c b/roms/u-boot/drivers/mtd/nand/nand_bch.c new file mode 100644 index 00000000..35d2140d --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/nand_bch.c @@ -0,0 +1,224 @@ +/* + * This file provides ECC correction for more than 1 bit per block of data, + * using binary BCH codes. It relies on the generic BCH library lib/bch.c. 
+ * + * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> + * +  * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +/*#include <asm/io.h>*/ +#include <linux/types.h> + +#include <linux/bitops.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/nand_bch.h> +#include <linux/bch.h> +#include <malloc.h> + +/** + * struct nand_bch_control - private NAND BCH control structure + * @bch:       BCH control structure + * @ecclayout: private ecc layout for this BCH configuration + * @errloc:    error location array + * @eccmask:   XOR ecc mask, allows erased pages to be decoded as valid + */ +struct nand_bch_control { +	struct bch_control   *bch; +	struct nand_ecclayout ecclayout; +	unsigned int         *errloc; +	unsigned char        *eccmask; +}; + +/** + * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block + * @mtd:	MTD block structure + * @buf:	input buffer with raw data + * @code:	output buffer with ECC + */ +int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, +			   unsigned char *code) +{ +	const struct nand_chip *chip = mtd->priv; +	struct nand_bch_control *nbc = chip->ecc.priv; +	unsigned int i; + +	memset(code, 0, chip->ecc.bytes); +	encode_bch(nbc->bch, buf, chip->ecc.size, code); + +	/* apply mask so that an erased page is a valid codeword */ +	for (i = 0; i < chip->ecc.bytes; i++) +		code[i] ^= nbc->eccmask[i]; + +	return 0; +} + +/** + * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s) + * @mtd:	MTD block structure + * @buf:	raw data read from the chip + * @read_ecc:	ECC from the chip + * @calc_ecc:	the ECC calculated from raw data + * + * Detect and correct bit errors for a data byte block + */ +int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, +			  unsigned char *read_ecc, unsigned char *calc_ecc) +{ +	const struct nand_chip *chip = mtd->priv; +	struct nand_bch_control *nbc = chip->ecc.priv; +	unsigned int *errloc = nbc->errloc; 
+	int i, count; + +	count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc, +			   NULL, errloc); +	if (count > 0) { +		for (i = 0; i < count; i++) { +			if (errloc[i] < (chip->ecc.size*8)) +				/* error is located in data, correct it */ +				buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); +			/* else error in ecc, no action needed */ + +			MTDDEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n", +			      __func__, errloc[i]); +		} +	} else if (count < 0) { +		printk(KERN_ERR "ecc unrecoverable error\n"); +		count = -1; +	} +	return count; +} + +/** + * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction + * @mtd:	MTD block structure + * @eccsize:	ecc block size in bytes + * @eccbytes:	ecc length in bytes + * @ecclayout:	output default layout + * + * Returns: + *  a pointer to a new NAND BCH control structure, or NULL upon failure + * + * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes + * are used to compute BCH parameters m (Galois field order) and t (error + * correction capability). @eccbytes should be equal to the number of bytes + * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8. 
+ * + * Example: to configure 4 bit correction per 512 bytes, you should pass + * @eccsize = 512  (thus, m=13 is the smallest integer such that 2^m-1 > 512*8) + * @eccbytes = 7   (7 bytes are required to store m*t = 13*4 = 52 bits) + */ +struct nand_bch_control * +nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes, +	      struct nand_ecclayout **ecclayout) +{ +	unsigned int m, t, eccsteps, i; +	struct nand_ecclayout *layout; +	struct nand_bch_control *nbc = NULL; +	unsigned char *erased_page; + +	if (!eccsize || !eccbytes) { +		printk(KERN_WARNING "ecc parameters not supplied\n"); +		goto fail; +	} + +	m = fls(1+8*eccsize); +	t = (eccbytes*8)/m; + +	nbc = kzalloc(sizeof(*nbc), GFP_KERNEL); +	if (!nbc) +		goto fail; + +	nbc->bch = init_bch(m, t, 0); +	if (!nbc->bch) +		goto fail; + +	/* verify that eccbytes has the expected value */ +	if (nbc->bch->ecc_bytes != eccbytes) { +		printk(KERN_WARNING "invalid eccbytes %u, should be %u\n", +		       eccbytes, nbc->bch->ecc_bytes); +		goto fail; +	} + +	eccsteps = mtd->writesize/eccsize; + +	/* if no ecc placement scheme was provided, build one */ +	if (!*ecclayout) { + +		/* handle large page devices only */ +		if (mtd->oobsize < 64) { +			printk(KERN_WARNING "must provide an oob scheme for " +			       "oobsize %d\n", mtd->oobsize); +			goto fail; +		} + +		layout = &nbc->ecclayout; +		layout->eccbytes = eccsteps*eccbytes; + +		/* reserve 2 bytes for bad block marker */ +		if (layout->eccbytes+2 > mtd->oobsize) { +			printk(KERN_WARNING "no suitable oob scheme available " +			       "for oobsize %d eccbytes %u\n", mtd->oobsize, +			       eccbytes); +			goto fail; +		} +		/* put ecc bytes at oob tail */ +		for (i = 0; i < layout->eccbytes; i++) +			layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i; + +		layout->oobfree[0].offset = 2; +		layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes; + +		*ecclayout = layout; +	} + +	/* sanity checks */ +	if (8*(eccsize+eccbytes) >= (1 << m)) 
{ +		printk(KERN_WARNING "eccsize %u is too large\n", eccsize); +		goto fail; +	} +	if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) { +		printk(KERN_WARNING "invalid ecc layout\n"); +		goto fail; +	} + +	nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL); +	nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL); +	if (!nbc->eccmask || !nbc->errloc) +		goto fail; +	/* +	 * compute and store the inverted ecc of an erased ecc block +	 */ +	erased_page = kmalloc(eccsize, GFP_KERNEL); +	if (!erased_page) +		goto fail; + +	memset(erased_page, 0xff, eccsize); +	memset(nbc->eccmask, 0, eccbytes); +	encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask); +	kfree(erased_page); + +	for (i = 0; i < eccbytes; i++) +		nbc->eccmask[i] ^= 0xff; + +	return nbc; +fail: +	nand_bch_free(nbc); +	return NULL; +} + +/** + * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources + * @nbc:	NAND BCH control structure + */ +void nand_bch_free(struct nand_bch_control *nbc) +{ +	if (nbc) { +		free_bch(nbc->bch); +		kfree(nbc->errloc); +		kfree(nbc->eccmask); +		kfree(nbc); +	} +} diff --git a/roms/u-boot/drivers/mtd/nand/nand_ecc.c b/roms/u-boot/drivers/mtd/nand/nand_ecc.c new file mode 100644 index 00000000..083e0e99 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/nand_ecc.c @@ -0,0 +1,191 @@ +/* + * This file contains an ECC algorithm from Toshiba that detects and + * corrects 1 bit errors in a 256 byte block of data. + * + * drivers/mtd/nand/nand_ecc.c + * + * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com) + *                         Toshiba America Electronics Components, Inc. 
+ * + * Copyright (C) 2006 Thomas Gleixner <tglx@linutronix.de> + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * As a special exception, if other files instantiate templates or use + * macros or inline functions from these files, or you compile these + * files and link them with other works to produce a work based on these + * files, these files do not by themselves cause the resulting work to be + * covered by the GNU General Public License. However the source code for + * these files must still be made available in accordance with section (3) + * of the GNU General Public License. + * + * This exception does not invalidate any other reasons why a work based on + * this file might be covered by the GNU General Public License. + */ + +#include <common.h> + +#include <asm/errno.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand_ecc.h> + +/* The PPC4xx NDFC uses Smart Media (SMC) bytes order */ +#ifdef CONFIG_NAND_NDFC +#define CONFIG_MTD_NAND_ECC_SMC +#endif + +/* + * NAND-SPL has no sofware ECC for now, so don't include nand_calculate_ecc(), + * only nand_correct_data() is needed + */ + +#if !defined(CONFIG_NAND_SPL) || defined(CONFIG_SPL_NAND_SOFTECC) +/* + * Pre-calculated 256-way 1 byte column parity + */ +static const u_char nand_ecc_precalc_table[] = { +	0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00, +	0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65, +	0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66, +	0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03, +	0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69, +	0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c, +	0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f, +	0x6a, 0x3f, 0x3c, 0x69, 
0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a, +	0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a, +	0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f, +	0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c, +	0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69, +	0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03, +	0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66, +	0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65, +	0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00 +}; + +/** + * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block + * @mtd:	MTD block structure + * @dat:	raw data + * @ecc_code:	buffer for ECC + */ +int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, +		       u_char *ecc_code) +{ +	uint8_t idx, reg1, reg2, reg3, tmp1, tmp2; +	int i; + +	/* Initialize variables */ +	reg1 = reg2 = reg3 = 0; + +	/* Build up column parity */ +	for(i = 0; i < 256; i++) { +		/* Get CP0 - CP5 from table */ +		idx = nand_ecc_precalc_table[*dat++]; +		reg1 ^= (idx & 0x3f); + +		/* All bit XOR = 1 ? 
*/ +		if (idx & 0x40) { +			reg3 ^= (uint8_t) i; +			reg2 ^= ~((uint8_t) i); +		} +	} + +	/* Create non-inverted ECC code from line parity */ +	tmp1  = (reg3 & 0x80) >> 0; /* B7 -> B7 */ +	tmp1 |= (reg2 & 0x80) >> 1; /* B7 -> B6 */ +	tmp1 |= (reg3 & 0x40) >> 1; /* B6 -> B5 */ +	tmp1 |= (reg2 & 0x40) >> 2; /* B6 -> B4 */ +	tmp1 |= (reg3 & 0x20) >> 2; /* B5 -> B3 */ +	tmp1 |= (reg2 & 0x20) >> 3; /* B5 -> B2 */ +	tmp1 |= (reg3 & 0x10) >> 3; /* B4 -> B1 */ +	tmp1 |= (reg2 & 0x10) >> 4; /* B4 -> B0 */ + +	tmp2  = (reg3 & 0x08) << 4; /* B3 -> B7 */ +	tmp2 |= (reg2 & 0x08) << 3; /* B3 -> B6 */ +	tmp2 |= (reg3 & 0x04) << 3; /* B2 -> B5 */ +	tmp2 |= (reg2 & 0x04) << 2; /* B2 -> B4 */ +	tmp2 |= (reg3 & 0x02) << 2; /* B1 -> B3 */ +	tmp2 |= (reg2 & 0x02) << 1; /* B1 -> B2 */ +	tmp2 |= (reg3 & 0x01) << 1; /* B0 -> B1 */ +	tmp2 |= (reg2 & 0x01) << 0; /* B7 -> B0 */ + +	/* Calculate final ECC code */ +#ifdef CONFIG_MTD_NAND_ECC_SMC +	ecc_code[0] = ~tmp2; +	ecc_code[1] = ~tmp1; +#else +	ecc_code[0] = ~tmp1; +	ecc_code[1] = ~tmp2; +#endif +	ecc_code[2] = ((~reg1) << 2) | 0x03; + +	return 0; +} +#endif /* CONFIG_NAND_SPL */ + +static inline int countbits(uint32_t byte) +{ +	int res = 0; + +	for (;byte; byte >>= 1) +		res += byte & 0x01; +	return res; +} + +/** + * nand_correct_data - [NAND Interface] Detect and correct bit error(s) + * @mtd:	MTD block structure + * @dat:	raw data read from the chip + * @read_ecc:	ECC from the chip + * @calc_ecc:	the ECC calculated from raw data + * + * Detect and correct a 1 bit error for 256 byte block + */ +int nand_correct_data(struct mtd_info *mtd, u_char *dat, +		      u_char *read_ecc, u_char *calc_ecc) +{ +	uint8_t s0, s1, s2; + +#ifdef CONFIG_MTD_NAND_ECC_SMC +	s0 = calc_ecc[0] ^ read_ecc[0]; +	s1 = calc_ecc[1] ^ read_ecc[1]; +	s2 = calc_ecc[2] ^ read_ecc[2]; +#else +	s1 = calc_ecc[0] ^ read_ecc[0]; +	s0 = calc_ecc[1] ^ read_ecc[1]; +	s2 = calc_ecc[2] ^ read_ecc[2]; +#endif +	if ((s0 | s1 | s2) == 0) +		return 0; + +	/* Check for a single 
bit error */ +	if( ((s0 ^ (s0 >> 1)) & 0x55) == 0x55 && +	    ((s1 ^ (s1 >> 1)) & 0x55) == 0x55 && +	    ((s2 ^ (s2 >> 1)) & 0x54) == 0x54) { + +		uint32_t byteoffs, bitnum; + +		byteoffs = (s1 << 0) & 0x80; +		byteoffs |= (s1 << 1) & 0x40; +		byteoffs |= (s1 << 2) & 0x20; +		byteoffs |= (s1 << 3) & 0x10; + +		byteoffs |= (s0 >> 4) & 0x08; +		byteoffs |= (s0 >> 3) & 0x04; +		byteoffs |= (s0 >> 2) & 0x02; +		byteoffs |= (s0 >> 1) & 0x01; + +		bitnum = (s2 >> 5) & 0x04; +		bitnum |= (s2 >> 4) & 0x02; +		bitnum |= (s2 >> 3) & 0x01; + +		dat[byteoffs] ^= (1 << bitnum); + +		return 1; +	} + +	if(countbits(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 <<16)) == 1) +		return 1; + +	return -EBADMSG; +} diff --git a/roms/u-boot/drivers/mtd/nand/nand_ids.c b/roms/u-boot/drivers/mtd/nand/nand_ids.c new file mode 100644 index 00000000..f3f0cb67 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/nand_ids.c @@ -0,0 +1,182 @@ +/* + *  drivers/mtd/nandids.c + * + *  Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <common.h> +#include <linux/mtd/nand.h> +/* +*	Chip ID list +* +*	Name. 
ID code, pagesize, chipsize in MegaByte, eraseblock size, +*	options +* +*	Pagesize; 0, 256, 512 +*	0	get this information from the extended chip ID ++	256	256 Byte page size +*	512	512 Byte page size +*/ +const struct nand_flash_dev nand_flash_ids[] = { + +#ifdef CONFIG_MTD_NAND_MUSEUM_IDS +	{"NAND 1MiB 5V 8-bit",		0x6e, 256, 1, 0x1000, 0}, +	{"NAND 2MiB 5V 8-bit",		0x64, 256, 2, 0x1000, 0}, +	{"NAND 4MiB 5V 8-bit",		0x6b, 512, 4, 0x2000, 0}, +	{"NAND 1MiB 3,3V 8-bit",	0xe8, 256, 1, 0x1000, 0}, +	{"NAND 1MiB 3,3V 8-bit",	0xec, 256, 1, 0x1000, 0}, +	{"NAND 2MiB 3,3V 8-bit",	0xea, 256, 2, 0x1000, 0}, +	{"NAND 4MiB 3,3V 8-bit", 	0xd5, 512, 4, 0x2000, 0}, +	{"NAND 4MiB 3,3V 8-bit",	0xe3, 512, 4, 0x2000, 0}, +	{"NAND 4MiB 3,3V 8-bit",	0xe5, 512, 4, 0x2000, 0}, +	{"NAND 8MiB 3,3V 8-bit",	0xd6, 512, 8, 0x2000, 0}, + +	{"NAND 8MiB 1,8V 8-bit",	0x39, 512, 8, 0x2000, 0}, +	{"NAND 8MiB 3,3V 8-bit",	0xe6, 512, 8, 0x2000, 0}, +	{"NAND 8MiB 1,8V 16-bit",	0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, +	{"NAND 8MiB 3,3V 16-bit",	0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, +#endif + +	{"NAND 16MiB 1,8V 8-bit",	0x33, 512, 16, 0x4000, 0}, +	{"NAND 16MiB 3,3V 8-bit",	0x73, 512, 16, 0x4000, 0}, +	{"NAND 16MiB 1,8V 16-bit",	0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16}, +	{"NAND 16MiB 3,3V 16-bit",	0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16}, + +	{"NAND 32MiB 1,8V 8-bit",	0x35, 512, 32, 0x4000, 0}, +	{"NAND 32MiB 3,3V 8-bit",	0x75, 512, 32, 0x4000, 0}, +	{"NAND 32MiB 1,8V 16-bit",	0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16}, +	{"NAND 32MiB 3,3V 16-bit",	0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16}, + +	{"NAND 64MiB 1,8V 8-bit",	0x36, 512, 64, 0x4000, 0}, +	{"NAND 64MiB 3,3V 8-bit",	0x76, 512, 64, 0x4000, 0}, +	{"NAND 64MiB 1,8V 16-bit",	0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16}, +	{"NAND 64MiB 3,3V 16-bit",	0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16}, + +	{"NAND 128MiB 1,8V 8-bit",	0x78, 512, 128, 0x4000, 0}, +	{"NAND 128MiB 1,8V 8-bit",	0x39, 512, 128, 0x4000, 0}, +	{"NAND 128MiB 3,3V 8-bit",	0x79, 512, 128, 
0x4000, 0}, +	{"NAND 128MiB 1,8V 16-bit",	0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16}, +	{"NAND 128MiB 1,8V 16-bit",	0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16}, +	{"NAND 128MiB 3,3V 16-bit",	0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16}, +	{"NAND 128MiB 3,3V 16-bit",	0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16}, + +	{"NAND 256MiB 3,3V 8-bit",	0x71, 512, 256, 0x4000, 0}, + +	/* +	 * These are the new chips with large page size. The pagesize and the +	 * erasesize is determined from the extended id bytes +	 */ +#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS +#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) + +	/* 512 Megabit */ +	{"NAND 64MiB 1,8V 8-bit",	0xA2, 0,  64, 0, LP_OPTIONS}, +	{"NAND 64MiB 1,8V 8-bit",	0xA0, 0,  64, 0, LP_OPTIONS}, +	{"NAND 64MiB 3,3V 8-bit",	0xF2, 0,  64, 0, LP_OPTIONS}, +	{"NAND 64MiB 3,3V 8-bit",	0xD0, 0,  64, 0, LP_OPTIONS}, +	{"NAND 64MiB 3,3V 8-bit",	0xF0, 0,  64, 0, LP_OPTIONS}, +	{"NAND 64MiB 1,8V 16-bit",	0xB2, 0,  64, 0, LP_OPTIONS16}, +	{"NAND 64MiB 1,8V 16-bit",	0xB0, 0,  64, 0, LP_OPTIONS16}, +	{"NAND 64MiB 3,3V 16-bit",	0xC2, 0,  64, 0, LP_OPTIONS16}, +	{"NAND 64MiB 3,3V 16-bit",	0xC0, 0,  64, 0, LP_OPTIONS16}, + +	/* 1 Gigabit */ +	{"NAND 128MiB 1,8V 8-bit",	0xA1, 0, 128, 0, LP_OPTIONS}, +	{"NAND 128MiB 3,3V 8-bit",	0xF1, 0, 128, 0, LP_OPTIONS}, +	{"NAND 128MiB 3,3V 8-bit",	0xD1, 0, 128, 0, LP_OPTIONS}, +	{"NAND 128MiB 1,8V 16-bit",	0xB1, 0, 128, 0, LP_OPTIONS16}, +	{"NAND 128MiB 3,3V 16-bit",	0xC1, 0, 128, 0, LP_OPTIONS16}, +	{"NAND 128MiB 1,8V 16-bit",     0xAD, 0, 128, 0, LP_OPTIONS16}, + +	/* 2 Gigabit */ +	{"NAND 256MiB 1,8V 8-bit",	0xAA, 0, 256, 0, LP_OPTIONS}, +	{"NAND 256MiB 3,3V 8-bit",	0xDA, 0, 256, 0, LP_OPTIONS}, +	{"NAND 256MiB 1,8V 16-bit",	0xBA, 0, 256, 0, LP_OPTIONS16}, +	{"NAND 256MiB 3,3V 16-bit",	0xCA, 0, 256, 0, LP_OPTIONS16}, + +	/* 4 Gigabit */ +	{"NAND 512MiB 1,8V 8-bit",	0xAC, 0, 512, 0, LP_OPTIONS}, +	{"NAND 512MiB 3,3V 8-bit",	0xDC, 0, 512, 0, LP_OPTIONS}, +	{"NAND 512MiB 1,8V 16-bit",	0xBC, 0, 512, 0, 
LP_OPTIONS16}, +	{"NAND 512MiB 3,3V 16-bit",	0xCC, 0, 512, 0, LP_OPTIONS16}, + +	/* 8 Gigabit */ +	{"NAND 1GiB 1,8V 8-bit",	0xA3, 0, 1024, 0, LP_OPTIONS}, +	{"NAND 1GiB 3,3V 8-bit",	0xD3, 0, 1024, 0, LP_OPTIONS}, +	{"NAND 1GiB 1,8V 16-bit",	0xB3, 0, 1024, 0, LP_OPTIONS16}, +	{"NAND 1GiB 3,3V 16-bit",	0xC3, 0, 1024, 0, LP_OPTIONS16}, + +	/* 16 Gigabit */ +	{"NAND 2GiB 1,8V 8-bit",	0xA5, 0, 2048, 0, LP_OPTIONS}, +	{"NAND 2GiB 3,3V 8-bit",	0xD5, 0, 2048, 0, LP_OPTIONS}, +	{"NAND 2GiB 1,8V 16-bit",	0xB5, 0, 2048, 0, LP_OPTIONS16}, +	{"NAND 2GiB 3,3V 16-bit",	0xC5, 0, 2048, 0, LP_OPTIONS16}, + +	/* 32 Gigabit */ +	{"NAND 4GiB 1,8V 8-bit",	0xA7, 0, 4096, 0, LP_OPTIONS}, +	{"NAND 4GiB 3,3V 8-bit",	0xD7, 0, 4096, 0, LP_OPTIONS}, +	{"NAND 4GiB 1,8V 16-bit",	0xB7, 0, 4096, 0, LP_OPTIONS16}, +	{"NAND 4GiB 3,3V 16-bit",	0xC7, 0, 4096, 0, LP_OPTIONS16}, + +	/* 64 Gigabit */ +	{"NAND 8GiB 1,8V 8-bit",	0xAE, 0, 8192, 0, LP_OPTIONS}, +	{"NAND 8GiB 3,3V 8-bit",	0xDE, 0, 8192, 0, LP_OPTIONS}, +	{"NAND 8GiB 1,8V 16-bit",	0xBE, 0, 8192, 0, LP_OPTIONS16}, +	{"NAND 8GiB 3,3V 16-bit",	0xCE, 0, 8192, 0, LP_OPTIONS16}, + +	/* 128 Gigabit */ +	{"NAND 16GiB 1,8V 8-bit",	0x1A, 0, 16384, 0, LP_OPTIONS}, +	{"NAND 16GiB 3,3V 8-bit",	0x3A, 0, 16384, 0, LP_OPTIONS}, +	{"NAND 16GiB 1,8V 16-bit",	0x2A, 0, 16384, 0, LP_OPTIONS16}, +	{"NAND 16GiB 3,3V 16-bit",	0x4A, 0, 16384, 0, LP_OPTIONS16}, + +	/* 256 Gigabit */ +	{"NAND 32GiB 1,8V 8-bit",	0x1C, 0, 32768, 0, LP_OPTIONS}, +	{"NAND 32GiB 3,3V 8-bit",	0x3C, 0, 32768, 0, LP_OPTIONS}, +	{"NAND 32GiB 1,8V 16-bit",	0x2C, 0, 32768, 0, LP_OPTIONS16}, +	{"NAND 32GiB 3,3V 16-bit",	0x4C, 0, 32768, 0, LP_OPTIONS16}, + +	/* 512 Gigabit */ +	{"NAND 64GiB 1,8V 8-bit",	0x1E, 0, 65536, 0, LP_OPTIONS}, +	{"NAND 64GiB 3,3V 8-bit",	0x3E, 0, 65536, 0, LP_OPTIONS}, +	{"NAND 64GiB 1,8V 16-bit",	0x2E, 0, 65536, 0, LP_OPTIONS16}, +	{"NAND 64GiB 3,3V 16-bit",	0x4E, 0, 65536, 0, LP_OPTIONS16}, + +	/* +	 * Renesas AND 1 Gigabit. 
Those chips do not support extended id and +	 * have a strange page/block layout !  The chosen minimum erasesize is +	 * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page +	 * planes 1 block = 2 pages, but due to plane arrangement the blocks +	 * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would +	 * increase the eraseblock size so we chose a combined one which can be +	 * erased in one go There are more speed improvements for reads and +	 * writes possible, but not implemented now +	 */ +	{"AND 128MiB 3,3V 8-bit",	0x01, 2048, 128, 0x4000, +	 NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH}, + +	{NULL,} +}; + +/* +*	Manufacturer ID list +*/ +const struct nand_manufacturers nand_manuf_ids[] = { +	{NAND_MFR_TOSHIBA, "Toshiba"}, +	{NAND_MFR_SAMSUNG, "Samsung"}, +	{NAND_MFR_FUJITSU, "Fujitsu"}, +	{NAND_MFR_NATIONAL, "National"}, +	{NAND_MFR_RENESAS, "Renesas"}, +	{NAND_MFR_STMICRO, "ST Micro"}, +	{NAND_MFR_HYNIX, "Hynix"}, +	{NAND_MFR_MICRON, "Micron"}, +	{NAND_MFR_AMD, "AMD/Spansion"}, +	{NAND_MFR_MACRONIX, "Macronix"}, +	{NAND_MFR_EON, "Eon"}, +	{0x0, "Unknown"} +}; diff --git a/roms/u-boot/drivers/mtd/nand/nand_plat.c b/roms/u-boot/drivers/mtd/nand/nand_plat.c new file mode 100644 index 00000000..37a0206a --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/nand_plat.c @@ -0,0 +1,64 @@ +/* + * Genericish driver for memory mapped NAND devices + * + * Copyright (c) 2006-2009 Analog Devices Inc. + * Licensed under the GPL-2 or later. 
+ */ + +/* Your board must implement the following macros: + *  NAND_PLAT_WRITE_CMD(chip, cmd) + *  NAND_PLAT_WRITE_ADR(chip, cmd) + *  NAND_PLAT_INIT() + * + * It may also implement the following: + *  NAND_PLAT_DEV_READY(chip) + */ + +#include <common.h> +#include <asm/io.h> +#ifdef NAND_PLAT_GPIO_DEV_READY +# include <asm/gpio.h> +# define NAND_PLAT_DEV_READY(chip) gpio_get_value(NAND_PLAT_GPIO_DEV_READY) +#endif + +#include <nand.h> + +static void plat_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) +{ +	struct nand_chip *this = mtd->priv; + +	if (cmd == NAND_CMD_NONE) +		return; + +	if (ctrl & NAND_CLE) +		NAND_PLAT_WRITE_CMD(this, cmd); +	else +		NAND_PLAT_WRITE_ADR(this, cmd); +} + +#ifdef NAND_PLAT_DEV_READY +static int plat_dev_ready(struct mtd_info *mtd) +{ +	return NAND_PLAT_DEV_READY((struct nand_chip *)mtd->priv); +} +#else +# define plat_dev_ready NULL +#endif + +int board_nand_init(struct nand_chip *nand) +{ +#ifdef NAND_PLAT_GPIO_DEV_READY +	gpio_request(NAND_PLAT_GPIO_DEV_READY, "nand-plat"); +	gpio_direction_input(NAND_PLAT_GPIO_DEV_READY); +#endif + +#ifdef NAND_PLAT_INIT +	NAND_PLAT_INIT(); +#endif + +	nand->cmd_ctrl = plat_cmd_ctrl; +	nand->dev_ready = plat_dev_ready; +	nand->ecc.mode = NAND_ECC_SOFT; + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/nand_spl_load.c b/roms/u-boot/drivers/mtd/nand/nand_spl_load.c new file mode 100644 index 00000000..5a256446 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/nand_spl_load.c @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2011 + * Heiko Schocher, DENX Software Engineering, hs@denx.de. + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <nand.h> + +/* + * The main entry for NAND booting. It's necessary that SDRAM is already + * configured and available since this code loads the main U-Boot image + * from NAND into SDRAM and starts it from there. 
+ */ +void nand_boot(void) +{ +	__attribute__((noreturn)) void (*uboot)(void); + +	/* +	 * Load U-Boot image from NAND into RAM +	 */ +	nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS, +			CONFIG_SYS_NAND_U_BOOT_SIZE, +			(void *)CONFIG_SYS_NAND_U_BOOT_DST); + +#ifdef CONFIG_NAND_ENV_DST +	nand_spl_load_image(CONFIG_ENV_OFFSET, CONFIG_ENV_SIZE, +			(void *)CONFIG_NAND_ENV_DST); + +#ifdef CONFIG_ENV_OFFSET_REDUND +	nand_spl_load_image(CONFIG_ENV_OFFSET_REDUND, CONFIG_ENV_SIZE, +			(void *)CONFIG_NAND_ENV_DST + CONFIG_ENV_SIZE); +#endif +#endif + +	/* +	 * Jump to U-Boot image +	 */ +	uboot = (void *)CONFIG_SYS_NAND_U_BOOT_START; +	(*uboot)(); +} diff --git a/roms/u-boot/drivers/mtd/nand/nand_spl_simple.c b/roms/u-boot/drivers/mtd/nand/nand_spl_simple.c new file mode 100644 index 00000000..cead4b50 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/nand_spl_simple.c @@ -0,0 +1,270 @@ +/* + * (C) Copyright 2006-2008 + * Stefan Roese, DENX Software Engineering, sr@denx.de. + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <nand.h> +#include <asm/io.h> +#include <linux/mtd/nand_ecc.h> + +static int nand_ecc_pos[] = CONFIG_SYS_NAND_ECCPOS; +static nand_info_t mtd; +static struct nand_chip nand_chip; + +#define ECCSTEPS	(CONFIG_SYS_NAND_PAGE_SIZE / \ +					CONFIG_SYS_NAND_ECCSIZE) +#define ECCTOTAL	(ECCSTEPS * CONFIG_SYS_NAND_ECCBYTES) + + +#if (CONFIG_SYS_NAND_PAGE_SIZE <= 512) +/* + * NAND command for small page NAND devices (512) + */ +static int nand_command(int block, int page, uint32_t offs, +	u8 cmd) +{ +	struct nand_chip *this = mtd.priv; +	int page_addr = page + block * CONFIG_SYS_NAND_PAGE_COUNT; + +	while (!this->dev_ready(&mtd)) +		; + +	/* Begin command latch cycle */ +	this->cmd_ctrl(&mtd, cmd, NAND_CTRL_CLE | NAND_CTRL_CHANGE); +	/* Set ALE and clear CLE to start address cycle */ +	/* Column address */ +	this->cmd_ctrl(&mtd, offs, NAND_CTRL_ALE | NAND_CTRL_CHANGE); +	this->cmd_ctrl(&mtd, page_addr & 0xff, NAND_CTRL_ALE); /* 
A[16:9] */ +	this->cmd_ctrl(&mtd, (page_addr >> 8) & 0xff, +		       NAND_CTRL_ALE); /* A[24:17] */ +#ifdef CONFIG_SYS_NAND_4_ADDR_CYCLE +	/* One more address cycle for devices > 32MiB */ +	this->cmd_ctrl(&mtd, (page_addr >> 16) & 0x0f, +		       NAND_CTRL_ALE); /* A[28:25] */ +#endif +	/* Latch in address */ +	this->cmd_ctrl(&mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); + +	/* +	 * Wait a while for the data to be ready +	 */ +	while (!this->dev_ready(&mtd)) +		; + +	return 0; +} +#else +/* + * NAND command for large page NAND devices (2k) + */ +static int nand_command(int block, int page, uint32_t offs, +	u8 cmd) +{ +	struct nand_chip *this = mtd.priv; +	int page_addr = page + block * CONFIG_SYS_NAND_PAGE_COUNT; +	void (*hwctrl)(struct mtd_info *mtd, int cmd, +			unsigned int ctrl) = this->cmd_ctrl; + +	while (!this->dev_ready(&mtd)) +		; + +	/* Emulate NAND_CMD_READOOB */ +	if (cmd == NAND_CMD_READOOB) { +		offs += CONFIG_SYS_NAND_PAGE_SIZE; +		cmd = NAND_CMD_READ0; +	} + +	/* Shift the offset from byte addressing to word addressing. 
*/ +	if (this->options & NAND_BUSWIDTH_16) +		offs >>= 1; + +	/* Begin command latch cycle */ +	hwctrl(&mtd, cmd, NAND_CTRL_CLE | NAND_CTRL_CHANGE); +	/* Set ALE and clear CLE to start address cycle */ +	/* Column address */ +	hwctrl(&mtd, offs & 0xff, +		       NAND_CTRL_ALE | NAND_CTRL_CHANGE); /* A[7:0] */ +	hwctrl(&mtd, (offs >> 8) & 0xff, NAND_CTRL_ALE); /* A[11:9] */ +	/* Row address */ +	hwctrl(&mtd, (page_addr & 0xff), NAND_CTRL_ALE); /* A[19:12] */ +	hwctrl(&mtd, ((page_addr >> 8) & 0xff), +		       NAND_CTRL_ALE); /* A[27:20] */ +#ifdef CONFIG_SYS_NAND_5_ADDR_CYCLE +	/* One more address cycle for devices > 128MiB */ +	hwctrl(&mtd, (page_addr >> 16) & 0x0f, +		       NAND_CTRL_ALE); /* A[31:28] */ +#endif +	/* Latch in address */ +	hwctrl(&mtd, NAND_CMD_READSTART, +		       NAND_CTRL_CLE | NAND_CTRL_CHANGE); +	hwctrl(&mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); + +	/* +	 * Wait a while for the data to be ready +	 */ +	while (!this->dev_ready(&mtd)) +		; + +	return 0; +} +#endif + +static int nand_is_bad_block(int block) +{ +	struct nand_chip *this = mtd.priv; + +	nand_command(block, 0, CONFIG_SYS_NAND_BAD_BLOCK_POS, +		NAND_CMD_READOOB); + +	/* +	 * Read one byte (or two if it's a 16 bit chip). 
+	 */ +	if (this->options & NAND_BUSWIDTH_16) { +		if (readw(this->IO_ADDR_R) != 0xffff) +			return 1; +	} else { +		if (readb(this->IO_ADDR_R) != 0xff) +			return 1; +	} + +	return 0; +} + +#if defined(CONFIG_SYS_NAND_HW_ECC_OOBFIRST) +static int nand_read_page(int block, int page, uchar *dst) +{ +	struct nand_chip *this = mtd.priv; +	u_char ecc_calc[ECCTOTAL]; +	u_char ecc_code[ECCTOTAL]; +	u_char oob_data[CONFIG_SYS_NAND_OOBSIZE]; +	int i; +	int eccsize = CONFIG_SYS_NAND_ECCSIZE; +	int eccbytes = CONFIG_SYS_NAND_ECCBYTES; +	int eccsteps = ECCSTEPS; +	uint8_t *p = dst; + +	nand_command(block, page, 0, NAND_CMD_READOOB); +	this->read_buf(&mtd, oob_data, CONFIG_SYS_NAND_OOBSIZE); +	nand_command(block, page, 0, NAND_CMD_READ0); + +	/* Pick the ECC bytes out of the oob data */ +	for (i = 0; i < ECCTOTAL; i++) +		ecc_code[i] = oob_data[nand_ecc_pos[i]]; + + +	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		this->ecc.hwctl(&mtd, NAND_ECC_READ); +		this->read_buf(&mtd, p, eccsize); +		this->ecc.calculate(&mtd, p, &ecc_calc[i]); +		this->ecc.correct(&mtd, p, &ecc_code[i], &ecc_calc[i]); +	} + +	return 0; +} +#else +static int nand_read_page(int block, int page, void *dst) +{ +	struct nand_chip *this = mtd.priv; +	u_char ecc_calc[ECCTOTAL]; +	u_char ecc_code[ECCTOTAL]; +	u_char oob_data[CONFIG_SYS_NAND_OOBSIZE]; +	int i; +	int eccsize = CONFIG_SYS_NAND_ECCSIZE; +	int eccbytes = CONFIG_SYS_NAND_ECCBYTES; +	int eccsteps = ECCSTEPS; +	uint8_t *p = dst; + +	nand_command(block, page, 0, NAND_CMD_READ0); + +	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		if (this->ecc.mode != NAND_ECC_SOFT) +			this->ecc.hwctl(&mtd, NAND_ECC_READ); +		this->read_buf(&mtd, p, eccsize); +		this->ecc.calculate(&mtd, p, &ecc_calc[i]); +	} +	this->read_buf(&mtd, oob_data, CONFIG_SYS_NAND_OOBSIZE); + +	/* Pick the ECC bytes out of the oob data */ +	for (i = 0; i < ECCTOTAL; i++) +		ecc_code[i] = oob_data[nand_ecc_pos[i]]; + +	eccsteps = ECCSTEPS; +	p = dst; + 
+	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		/* No chance to do something with the possible error message +		 * from correct_data(). We just hope that all possible errors +		 * are corrected by this routine. +		 */ +		this->ecc.correct(&mtd, p, &ecc_code[i], &ecc_calc[i]); +	} + +	return 0; +} +#endif + +int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst) +{ +	unsigned int block, lastblock; +	unsigned int page; + +	/* +	 * offs has to be aligned to a page address! +	 */ +	block = offs / CONFIG_SYS_NAND_BLOCK_SIZE; +	lastblock = (offs + size - 1) / CONFIG_SYS_NAND_BLOCK_SIZE; +	page = (offs % CONFIG_SYS_NAND_BLOCK_SIZE) / CONFIG_SYS_NAND_PAGE_SIZE; + +	while (block <= lastblock) { +		if (!nand_is_bad_block(block)) { +			/* +			 * Skip bad blocks +			 */ +			while (page < CONFIG_SYS_NAND_PAGE_COUNT) { +				nand_read_page(block, page, dst); +				dst += CONFIG_SYS_NAND_PAGE_SIZE; +				page++; +			} + +			page = 0; +		} else { +			lastblock++; +		} + +		block++; +	} + +	return 0; +} + +/* nand_init() - initialize data to make nand usable by SPL */ +void nand_init(void) +{ +	/* +	 * Init board specific nand support +	 */ +	mtd.priv = &nand_chip; +	nand_chip.IO_ADDR_R = nand_chip.IO_ADDR_W = +		(void  __iomem *)CONFIG_SYS_NAND_BASE; +	board_nand_init(&nand_chip); + +#ifdef CONFIG_SPL_NAND_SOFTECC +	if (nand_chip.ecc.mode == NAND_ECC_SOFT) { +		nand_chip.ecc.calculate = nand_calculate_ecc; +		nand_chip.ecc.correct = nand_correct_data; +	} +#endif + +	if (nand_chip.select_chip) +		nand_chip.select_chip(&mtd, 0); +} + +/* Unselect after operation */ +void nand_deselect(void) +{ +	if (nand_chip.select_chip) +		nand_chip.select_chip(&mtd, -1); +} diff --git a/roms/u-boot/drivers/mtd/nand/nand_util.c b/roms/u-boot/drivers/mtd/nand/nand_util.c new file mode 100644 index 00000000..b2928260 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/nand_util.c @@ -0,0 +1,861 @@ +/* + * drivers/mtd/nand/nand_util.c + * + * Copyright (C) 2006 by 
Weiss-Electronic GmbH. + * All rights reserved. + * + * @author:	Guido Classen <clagix@gmail.com> + * @descr:	NAND Flash support + * @references: borrowed heavily from Linux mtd-utils code: + *		flash_eraseall.c by Arcom Control System Ltd + *		nandwrite.c by Steven J. Hill (sjhill@realitydiluted.com) + *			       and Thomas Gleixner (tglx@linutronix.de) + * + * Copyright (C) 2008 Nokia Corporation: drop_ffs() function by + * Artem Bityutskiy <dedekind1@gmail.com> from mtd-utils + * + * Copyright 2010 Freescale Semiconductor + * + * SPDX-License-Identifier:	GPL-2.0 + */ + +#include <common.h> +#include <command.h> +#include <watchdog.h> +#include <malloc.h> +#include <div64.h> + +#include <asm/errno.h> +#include <linux/mtd/mtd.h> +#include <nand.h> +#include <jffs2/jffs2.h> + +typedef struct erase_info	erase_info_t; +typedef struct mtd_info		mtd_info_t; + +/* support only for native endian JFFS2 */ +#define cpu_to_je16(x) (x) +#define cpu_to_je32(x) (x) + +/** + * nand_erase_opts: - erase NAND flash with support for various options + *		      (jffs2 formatting) + * + * @param meminfo	NAND device to erase + * @param opts		options,  @see struct nand_erase_options + * @return		0 in case of success + * + * This code is ported from flash_eraseall.c from Linux mtd utils by + * Arcom Control System Ltd. 
+ */ +int nand_erase_opts(nand_info_t *meminfo, const nand_erase_options_t *opts) +{ +	struct jffs2_unknown_node cleanmarker; +	erase_info_t erase; +	unsigned long erase_length, erased_length; /* in blocks */ +	int result; +	int percent_complete = -1; +	const char *mtd_device = meminfo->name; +	struct mtd_oob_ops oob_opts; +	struct nand_chip *chip = meminfo->priv; + +	if ((opts->offset & (meminfo->erasesize - 1)) != 0) { +		printf("Attempt to erase non block-aligned data\n"); +		return -1; +	} + +	memset(&erase, 0, sizeof(erase)); +	memset(&oob_opts, 0, sizeof(oob_opts)); + +	erase.mtd = meminfo; +	erase.len  = meminfo->erasesize; +	erase.addr = opts->offset; +	erase_length = lldiv(opts->length + meminfo->erasesize - 1, +			     meminfo->erasesize); + +	cleanmarker.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); +	cleanmarker.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER); +	cleanmarker.totlen = cpu_to_je32(8); + +	/* scrub option allows to erase badblock. To prevent internal +	 * check from erase() method, set block check method to dummy +	 * and disable bad block table while erasing. +	 */ +	if (opts->scrub) { +		erase.scrub = opts->scrub; +		/* +		 * We don't need the bad block table anymore... +		 * after scrub, there are no bad blocks left! 
+		 */ +		if (chip->bbt) { +			kfree(chip->bbt); +		} +		chip->bbt = NULL; +	} + +	for (erased_length = 0; +	     erased_length < erase_length; +	     erase.addr += meminfo->erasesize) { + +		WATCHDOG_RESET(); + +		if (opts->lim && (erase.addr >= (opts->offset + opts->lim))) { +			puts("Size of erase exceeds limit\n"); +			return -EFBIG; +		} +		if (!opts->scrub) { +			int ret = mtd_block_isbad(meminfo, erase.addr); +			if (ret > 0) { +				if (!opts->quiet) +					printf("\rSkipping bad block at  " +					       "0x%08llx                 " +					       "                         \n", +					       erase.addr); + +				if (!opts->spread) +					erased_length++; + +				continue; + +			} else if (ret < 0) { +				printf("\n%s: MTD get bad block failed: %d\n", +				       mtd_device, +				       ret); +				return -1; +			} +		} + +		erased_length++; + +		result = mtd_erase(meminfo, &erase); +		if (result != 0) { +			printf("\n%s: MTD Erase failure: %d\n", +			       mtd_device, result); +			continue; +		} + +		/* format for JFFS2 ? 
*/ +		if (opts->jffs2 && chip->ecc.layout->oobavail >= 8) { +			struct mtd_oob_ops ops; +			ops.ooblen = 8; +			ops.datbuf = NULL; +			ops.oobbuf = (uint8_t *)&cleanmarker; +			ops.ooboffs = 0; +			ops.mode = MTD_OPS_AUTO_OOB; + +			result = mtd_write_oob(meminfo, +						    erase.addr, +						    &ops); +			if (result != 0) { +				printf("\n%s: MTD writeoob failure: %d\n", +				       mtd_device, result); +				continue; +			} +		} + +		if (!opts->quiet) { +			unsigned long long n = erased_length * 100ULL; +			int percent; + +			do_div(n, erase_length); +			percent = (int)n; + +			/* output progress message only at whole percent +			 * steps to reduce the number of messages printed +			 * on (slow) serial consoles +			 */ +			if (percent != percent_complete) { +				percent_complete = percent; + +				printf("\rErasing at 0x%llx -- %3d%% complete.", +				       erase.addr, percent); + +				if (opts->jffs2 && result == 0) +					printf(" Cleanmarker written at 0x%llx.", +					       erase.addr); +			} +		} +	} +	if (!opts->quiet) +		printf("\n"); + +	if (opts->scrub) +		chip->scan_bbt(meminfo); + +	return 0; +} + +#ifdef CONFIG_CMD_NAND_LOCK_UNLOCK + +/****************************************************************************** + * Support for locking / unlocking operations of some NAND devices + *****************************************************************************/ + +/** + * nand_lock: Set all pages of NAND flash chip to the LOCK or LOCK-TIGHT + *	      state + * + * @param mtd		nand mtd instance + * @param tight		bring device in lock tight mode + * + * @return		0 on success, -1 in case of error + * + * The lock / lock-tight command only applies to the whole chip. 
To get some + * parts of the chip lock and others unlocked use the following sequence: + * + * - Lock all pages of the chip using nand_lock(mtd, 0) (or the lockpre pin) + * - Call nand_unlock() once for each consecutive area to be unlocked + * - If desired: Bring the chip to the lock-tight state using nand_lock(mtd, 1) + * + *   If the device is in lock-tight state software can't change the + *   current active lock/unlock state of all pages. nand_lock() / nand_unlock() + *   calls will fail. It is only posible to leave lock-tight state by + *   an hardware signal (low pulse on _WP pin) or by power down. + */ +int nand_lock(struct mtd_info *mtd, int tight) +{ +	int ret = 0; +	int status; +	struct nand_chip *chip = mtd->priv; + +	/* select the NAND device */ +	chip->select_chip(mtd, 0); + +	/* check the Lock Tight Status */ +	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, 0); +	if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) { +		printf("nand_lock: Device is locked tight!\n"); +		ret = -1; +		goto out; +	} + +	chip->cmdfunc(mtd, +		      (tight ? NAND_CMD_LOCK_TIGHT : NAND_CMD_LOCK), +		      -1, -1); + +	/* call wait ready function */ +	status = chip->waitfunc(mtd, chip); + +	/* see if device thinks it succeeded */ +	if (status & 0x01) { +		ret = -1; +	} + + out: +	/* de-select the NAND device */ +	chip->select_chip(mtd, -1); +	return ret; +} + +/** + * nand_get_lock_status: - query current lock state from one page of NAND + *			   flash + * + * @param mtd		nand mtd instance + * @param offset	page address to query (must be page-aligned!) 
+ * + * @return		-1 in case of error + *			>0 lock status: + *			  bitfield with the following combinations: + *			  NAND_LOCK_STATUS_TIGHT: page in tight state + *			  NAND_LOCK_STATUS_UNLOCK: page unlocked + * + */ +int nand_get_lock_status(struct mtd_info *mtd, loff_t offset) +{ +	int ret = 0; +	int chipnr; +	int page; +	struct nand_chip *chip = mtd->priv; + +	/* select the NAND device */ +	chipnr = (int)(offset >> chip->chip_shift); +	chip->select_chip(mtd, chipnr); + + +	if ((offset & (mtd->writesize - 1)) != 0) { +		printf("nand_get_lock_status: " +			"Start address must be beginning of " +			"nand page!\n"); +		ret = -1; +		goto out; +	} + +	/* check the Lock Status */ +	page = (int)(offset >> chip->page_shift); +	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask); + +	ret = chip->read_byte(mtd) & (NAND_LOCK_STATUS_TIGHT +					  | NAND_LOCK_STATUS_UNLOCK); + + out: +	/* de-select the NAND device */ +	chip->select_chip(mtd, -1); +	return ret; +} + +/** + * nand_unlock: - Unlock area of NAND pages + *		  only one consecutive area can be unlocked at one time! + * + * @param mtd		nand mtd instance + * @param start		start byte address + * @param length	number of bytes to unlock (must be a multiple of + *			page size nand->writesize) + * @param allexcept	if set, unlock everything not selected + * + * @return		0 on success, -1 in case of error + */ +int nand_unlock(struct mtd_info *mtd, loff_t start, size_t length, +	int allexcept) +{ +	int ret = 0; +	int chipnr; +	int status; +	int page; +	struct nand_chip *chip = mtd->priv; + +	debug("nand_unlock%s: start: %08llx, length: %zd!\n", +		allexcept ? 
" (allexcept)" : "", start, length); + +	/* select the NAND device */ +	chipnr = (int)(start >> chip->chip_shift); +	chip->select_chip(mtd, chipnr); + +	/* check the WP bit */ +	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); +	if (!(chip->read_byte(mtd) & NAND_STATUS_WP)) { +		printf("nand_unlock: Device is write protected!\n"); +		ret = -1; +		goto out; +	} + +	/* check the Lock Tight Status */ +	page = (int)(start >> chip->page_shift); +	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask); +	if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) { +		printf("nand_unlock: Device is locked tight!\n"); +		ret = -1; +		goto out; +	} + +	if ((start & (mtd->erasesize - 1)) != 0) { +		printf("nand_unlock: Start address must be beginning of " +			"nand block!\n"); +		ret = -1; +		goto out; +	} + +	if (length == 0 || (length & (mtd->erasesize - 1)) != 0) { +		printf("nand_unlock: Length must be a multiple of nand block " +			"size %08x!\n", mtd->erasesize); +		ret = -1; +		goto out; +	} + +	/* +	 * Set length so that the last address is set to the +	 * starting address of the last block +	 */ +	length -= mtd->erasesize; + +	/* submit address of first page to unlock */ +	chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask); + +	/* submit ADDRESS of LAST page to unlock */ +	page += (int)(length >> chip->page_shift); + +	/* +	 * Page addresses for unlocking are supposed to be block-aligned. +	 * At least some NAND chips use the low bit to indicate that the +	 * page range should be inverted. 
+	 */ +	if (allexcept) +		page |= 1; + +	chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, page & chip->pagemask); + +	/* call wait ready function */ +	status = chip->waitfunc(mtd, chip); +	/* see if device thinks it succeeded */ +	if (status & 0x01) { +		/* there was an error */ +		ret = -1; +		goto out; +	} + + out: +	/* de-select the NAND device */ +	chip->select_chip(mtd, -1); +	return ret; +} +#endif + +/** + * check_skip_len + * + * Check if there are any bad blocks, and whether length including bad + * blocks fits into device + * + * @param nand NAND device + * @param offset offset in flash + * @param length image length + * @param used length of flash needed for the requested length + * @return 0 if the image fits and there are no bad blocks + *         1 if the image fits, but there are bad blocks + *        -1 if the image does not fit + */ +static int check_skip_len(nand_info_t *nand, loff_t offset, size_t length, +		size_t *used) +{ +	size_t len_excl_bad = 0; +	int ret = 0; + +	while (len_excl_bad < length) { +		size_t block_len, block_off; +		loff_t block_start; + +		if (offset >= nand->size) +			return -1; + +		block_start = offset & ~(loff_t)(nand->erasesize - 1); +		block_off = offset & (nand->erasesize - 1); +		block_len = nand->erasesize - block_off; + +		if (!nand_block_isbad(nand, block_start)) +			len_excl_bad += block_len; +		else +			ret = 1; + +		offset += block_len; +		*used += block_len; +	} + +	/* If the length is not a multiple of block_len, adjust. 
*/ +	if (len_excl_bad > length) +		*used -= (len_excl_bad - length); + +	return ret; +} + +#ifdef CONFIG_CMD_NAND_TRIMFFS +static size_t drop_ffs(const nand_info_t *nand, const u_char *buf, +			const size_t *len) +{ +	size_t l = *len; +	ssize_t i; + +	for (i = l - 1; i >= 0; i--) +		if (buf[i] != 0xFF) +			break; + +	/* The resulting length must be aligned to the minimum flash I/O size */ +	l = i + 1; +	l = (l + nand->writesize - 1) / nand->writesize; +	l *=  nand->writesize; + +	/* +	 * since the input length may be unaligned, prevent access past the end +	 * of the buffer +	 */ +	return min(l, *len); +} +#endif + +/** + * nand_write_skip_bad: + * + * Write image to NAND flash. + * Blocks that are marked bad are skipped and the is written to the next + * block instead as long as the image is short enough to fit even after + * skipping the bad blocks.  Due to bad blocks we may not be able to + * perform the requested write.  In the case where the write would + * extend beyond the end of the NAND device, both length and actual (if + * not NULL) are set to 0.  In the case where the write would extend + * beyond the limit we are passed, length is set to 0 and actual is set + * to the required length. 
+ * + * @param nand  	NAND device + * @param offset	offset in flash + * @param length	buffer length + * @param actual	set to size required to write length worth of + *			buffer or 0 on error, if not NULL + * @param lim		maximum size that actual may be in order to not + *			exceed the buffer + * @param buffer        buffer to read from + * @param flags		flags modifying the behaviour of the write to NAND + * @return		0 in case of success + */ +int nand_write_skip_bad(nand_info_t *nand, loff_t offset, size_t *length, +		size_t *actual, loff_t lim, u_char *buffer, int flags) +{ +	int rval = 0, blocksize; +	size_t left_to_write = *length; +	size_t used_for_write = 0; +	u_char *p_buffer = buffer; +	int need_skip; + +	if (actual) +		*actual = 0; + +#ifdef CONFIG_CMD_NAND_YAFFS +	if (flags & WITH_YAFFS_OOB) { +		if (flags & ~WITH_YAFFS_OOB) +			return -EINVAL; + +		int pages; +		pages = nand->erasesize / nand->writesize; +		blocksize = (pages * nand->oobsize) + nand->erasesize; +		if (*length % (nand->writesize + nand->oobsize)) { +			printf("Attempt to write incomplete page" +				" in yaffs mode\n"); +			return -EINVAL; +		} +	} else +#endif +	{ +		blocksize = nand->erasesize; +	} + +	/* +	 * nand_write() handles unaligned, partial page writes. +	 * +	 * We allow length to be unaligned, for convenience in +	 * using the $filesize variable. +	 * +	 * However, starting at an unaligned offset makes the +	 * semantics of bad block skipping ambiguous (really, +	 * you should only start a block skipping access at a +	 * partition boundary).  So don't try to handle that. 
+	 */ +	if ((offset & (nand->writesize - 1)) != 0) { +		printf("Attempt to write non page-aligned data\n"); +		*length = 0; +		return -EINVAL; +	} + +	need_skip = check_skip_len(nand, offset, *length, &used_for_write); + +	if (actual) +		*actual = used_for_write; + +	if (need_skip < 0) { +		printf("Attempt to write outside the flash area\n"); +		*length = 0; +		return -EINVAL; +	} + +	if (used_for_write > lim) { +		puts("Size of write exceeds partition or device limit\n"); +		*length = 0; +		return -EFBIG; +	} + +	if (!need_skip && !(flags & WITH_DROP_FFS)) { +		rval = nand_write(nand, offset, length, buffer); +		if (rval == 0) +			return 0; + +		*length = 0; +		printf("NAND write to offset %llx failed %d\n", +			offset, rval); +		return rval; +	} + +	while (left_to_write > 0) { +		size_t block_offset = offset & (nand->erasesize - 1); +		size_t write_size, truncated_write_size; + +		WATCHDOG_RESET(); + +		if (nand_block_isbad(nand, offset & ~(nand->erasesize - 1))) { +			printf("Skip bad block 0x%08llx\n", +				offset & ~(nand->erasesize - 1)); +			offset += nand->erasesize - block_offset; +			continue; +		} + +		if (left_to_write < (blocksize - block_offset)) +			write_size = left_to_write; +		else +			write_size = blocksize - block_offset; + +#ifdef CONFIG_CMD_NAND_YAFFS +		if (flags & WITH_YAFFS_OOB) { +			int page, pages; +			size_t pagesize = nand->writesize; +			size_t pagesize_oob = pagesize + nand->oobsize; +			struct mtd_oob_ops ops; + +			ops.len = pagesize; +			ops.ooblen = nand->oobsize; +			ops.mode = MTD_OPS_AUTO_OOB; +			ops.ooboffs = 0; + +			pages = write_size / pagesize_oob; +			for (page = 0; page < pages; page++) { +				WATCHDOG_RESET(); + +				ops.datbuf = p_buffer; +				ops.oobbuf = ops.datbuf + pagesize; + +				rval = mtd_write_oob(nand, offset, &ops); +				if (rval != 0) +					break; + +				offset += pagesize; +				p_buffer += pagesize_oob; +			} +		} +		else +#endif +		{ +			truncated_write_size = write_size; +#ifdef CONFIG_CMD_NAND_TRIMFFS 
+			if (flags & WITH_DROP_FFS) +				truncated_write_size = drop_ffs(nand, p_buffer, +						&write_size); +#endif + +			rval = nand_write(nand, offset, &truncated_write_size, +					p_buffer); +			offset += write_size; +			p_buffer += write_size; +		} + +		if (rval != 0) { +			printf("NAND write to offset %llx failed %d\n", +				offset, rval); +			*length -= left_to_write; +			return rval; +		} + +		left_to_write -= write_size; +	} + +	return 0; +} + +/** + * nand_read_skip_bad: + * + * Read image from NAND flash. + * Blocks that are marked bad are skipped and the next block is read + * instead as long as the image is short enough to fit even after + * skipping the bad blocks.  Due to bad blocks we may not be able to + * perform the requested read.  In the case where the read would extend + * beyond the end of the NAND device, both length and actual (if not + * NULL) are set to 0.  In the case where the read would extend beyond + * the limit we are passed, length is set to 0 and actual is set to the + * required length. 
+ * + * @param nand NAND device + * @param offset offset in flash + * @param length buffer length, on return holds number of read bytes + * @param actual set to size required to read length worth of buffer or 0 + * on error, if not NULL + * @param lim maximum size that actual may be in order to not exceed the + * buffer + * @param buffer buffer to write to + * @return 0 in case of success + */ +int nand_read_skip_bad(nand_info_t *nand, loff_t offset, size_t *length, +		size_t *actual, loff_t lim, u_char *buffer) +{ +	int rval; +	size_t left_to_read = *length; +	size_t used_for_read = 0; +	u_char *p_buffer = buffer; +	int need_skip; + +	if ((offset & (nand->writesize - 1)) != 0) { +		printf("Attempt to read non page-aligned data\n"); +		*length = 0; +		if (actual) +			*actual = 0; +		return -EINVAL; +	} + +	need_skip = check_skip_len(nand, offset, *length, &used_for_read); + +	if (actual) +		*actual = used_for_read; + +	if (need_skip < 0) { +		printf("Attempt to read outside the flash area\n"); +		*length = 0; +		return -EINVAL; +	} + +	if (used_for_read > lim) { +		puts("Size of read exceeds partition or device limit\n"); +		*length = 0; +		return -EFBIG; +	} + +	if (!need_skip) { +		rval = nand_read(nand, offset, length, buffer); +		if (!rval || rval == -EUCLEAN) +			return 0; + +		*length = 0; +		printf("NAND read from offset %llx failed %d\n", +			offset, rval); +		return rval; +	} + +	while (left_to_read > 0) { +		size_t block_offset = offset & (nand->erasesize - 1); +		size_t read_length; + +		WATCHDOG_RESET(); + +		if (nand_block_isbad(nand, offset & ~(nand->erasesize - 1))) { +			printf("Skipping bad block 0x%08llx\n", +				offset & ~(nand->erasesize - 1)); +			offset += nand->erasesize - block_offset; +			continue; +		} + +		if (left_to_read < (nand->erasesize - block_offset)) +			read_length = left_to_read; +		else +			read_length = nand->erasesize - block_offset; + +		rval = nand_read(nand, offset, &read_length, p_buffer); +		if (rval && rval != -EUCLEAN) 
{ +			printf("NAND read from offset %llx failed %d\n", +				offset, rval); +			*length -= left_to_read; +			return rval; +		} + +		left_to_read -= read_length; +		offset       += read_length; +		p_buffer     += read_length; +	} + +	return 0; +} + +#ifdef CONFIG_CMD_NAND_TORTURE + +/** + * check_pattern: + * + * Check if buffer contains only a certain byte pattern. + * + * @param buf buffer to check + * @param patt the pattern to check + * @param size buffer size in bytes + * @return 1 if there are only patt bytes in buf + *         0 if something else was found + */ +static int check_pattern(const u_char *buf, u_char patt, int size) +{ +	int i; + +	for (i = 0; i < size; i++) +		if (buf[i] != patt) +			return 0; +	return 1; +} + +/** + * nand_torture: + * + * Torture a block of NAND flash. + * This is useful to determine if a block that caused a write error is still + * good or should be marked as bad. + * + * @param nand NAND device + * @param offset offset in flash + * @return 0 if the block is still good + */ +int nand_torture(nand_info_t *nand, loff_t offset) +{ +	u_char patterns[] = {0xa5, 0x5a, 0x00}; +	struct erase_info instr = { +		.mtd = nand, +		.addr = offset, +		.len = nand->erasesize, +	}; +	size_t retlen; +	int err, ret = -1, i, patt_count; +	u_char *buf; + +	if ((offset & (nand->erasesize - 1)) != 0) { +		puts("Attempt to torture a block at a non block-aligned offset\n"); +		return -EINVAL; +	} + +	if (offset + nand->erasesize > nand->size) { +		puts("Attempt to torture a block outside the flash area\n"); +		return -EINVAL; +	} + +	patt_count = ARRAY_SIZE(patterns); + +	buf = malloc(nand->erasesize); +	if (buf == NULL) { +		puts("Out of memory for erase block buffer\n"); +		return -ENOMEM; +	} + +	for (i = 0; i < patt_count; i++) { +		err = nand->erase(nand, &instr); +		if (err) { +			printf("%s: erase() failed for block at 0x%llx: %d\n", +				nand->name, instr.addr, err); +			goto out; +		} + +		/* Make sure the block contains only 0xff bytes */ +		
err = nand->read(nand, offset, nand->erasesize, &retlen, buf); +		if ((err && err != -EUCLEAN) || retlen != nand->erasesize) { +			printf("%s: read() failed for block at 0x%llx: %d\n", +				nand->name, instr.addr, err); +			goto out; +		} + +		err = check_pattern(buf, 0xff, nand->erasesize); +		if (!err) { +			printf("Erased block at 0x%llx, but a non-0xff byte was found\n", +				offset); +			ret = -EIO; +			goto out; +		} + +		/* Write a pattern and check it */ +		memset(buf, patterns[i], nand->erasesize); +		err = nand->write(nand, offset, nand->erasesize, &retlen, buf); +		if (err || retlen != nand->erasesize) { +			printf("%s: write() failed for block at 0x%llx: %d\n", +				nand->name, instr.addr, err); +			goto out; +		} + +		err = nand->read(nand, offset, nand->erasesize, &retlen, buf); +		if ((err && err != -EUCLEAN) || retlen != nand->erasesize) { +			printf("%s: read() failed for block at 0x%llx: %d\n", +				nand->name, instr.addr, err); +			goto out; +		} + +		err = check_pattern(buf, patterns[i], nand->erasesize); +		if (!err) { +			printf("Pattern 0x%.2x checking failed for block at " +					"0x%llx\n", patterns[i], offset); +			ret = -EIO; +			goto out; +		} +	} + +	ret = 0; + +out: +	free(buf); +	return ret; +} + +#endif diff --git a/roms/u-boot/drivers/mtd/nand/ndfc.c b/roms/u-boot/drivers/mtd/nand/ndfc.c new file mode 100644 index 00000000..5510b13c --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/ndfc.c @@ -0,0 +1,214 @@ +/* + * Overview: + *   Platform independend driver for NDFC (NanD Flash Controller) + *   integrated into IBM/AMCC PPC4xx cores + * + * (C) Copyright 2006-2009 + * Stefan Roese, DENX Software Engineering, sr@denx.de. 
+ * + * Based on original work by + *	Thomas Gleixner + *	Copyright 2006 IBM + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <nand.h> +#include <linux/mtd/ndfc.h> +#include <linux/mtd/nand_ecc.h> +#include <asm/processor.h> +#include <asm/io.h> +#include <asm/ppc4xx.h> + +#ifndef CONFIG_SYS_NAND_BCR +#define CONFIG_SYS_NAND_BCR 0x80002222 +#endif +#ifndef CONFIG_SYS_NDFC_EBC0_CFG +#define CONFIG_SYS_NDFC_EBC0_CFG 0xb8400000 +#endif + +/* + * We need to store the info, which chip-select (CS) is used for the + * chip number. For example on Sequoia NAND chip #0 uses + * CS #3. + */ +static int ndfc_cs[NDFC_MAX_BANKS]; + +static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) +{ +	struct nand_chip *this = mtd->priv; +	ulong base = (ulong) this->IO_ADDR_W & 0xffffff00; + +	if (cmd == NAND_CMD_NONE) +		return; + +	if (ctrl & NAND_CLE) +		out_8((u8 *)(base + NDFC_CMD), cmd & 0xFF); +	else +		out_8((u8 *)(base + NDFC_ALE), cmd & 0xFF); +} + +static int ndfc_dev_ready(struct mtd_info *mtdinfo) +{ +	struct nand_chip *this = mtdinfo->priv; +	ulong base = (ulong) this->IO_ADDR_W & 0xffffff00; + +	return (in_be32((u32 *)(base + NDFC_STAT)) & NDFC_STAT_IS_READY); +} + +static void ndfc_enable_hwecc(struct mtd_info *mtdinfo, int mode) +{ +	struct nand_chip *this = mtdinfo->priv; +	ulong base = (ulong) this->IO_ADDR_W & 0xffffff00; +	u32 ccr; + +	ccr = in_be32((u32 *)(base + NDFC_CCR)); +	ccr |= NDFC_CCR_RESET_ECC; +	out_be32((u32 *)(base + NDFC_CCR), ccr); +} + +static int ndfc_calculate_ecc(struct mtd_info *mtdinfo, +			      const u_char *dat, u_char *ecc_code) +{ +	struct nand_chip *this = mtdinfo->priv; +	ulong base = (ulong) this->IO_ADDR_W & 0xffffff00; +	u32 ecc; +	u8 *p = (u8 *)&ecc; + +	ecc = in_be32((u32 *)(base + NDFC_ECC)); + +	/* The NDFC uses Smart Media (SMC) bytes order +	 */ +	ecc_code[0] = p[1]; +	ecc_code[1] = p[2]; +	ecc_code[2] = p[3]; + +	return 0; +} + +/* + * Speedups for buffer read/write/verify + * + 
* NDFC allows 32bit read/write of data. So we can speed up the buffer + * functions. No further checking, as nand_base will always read/write + * page aligned. + */ +static void ndfc_read_buf(struct mtd_info *mtdinfo, uint8_t *buf, int len) +{ +	struct nand_chip *this = mtdinfo->priv; +	ulong base = (ulong) this->IO_ADDR_W & 0xffffff00; +	uint32_t *p = (uint32_t *) buf; + +	for (;len > 0; len -= 4) +		*p++ = in_be32((u32 *)(base + NDFC_DATA)); +} + +/* + * Don't use these speedup functions in NAND boot image, since the image + * has to fit into 4kByte. + */ +static void ndfc_write_buf(struct mtd_info *mtdinfo, const uint8_t *buf, int len) +{ +	struct nand_chip *this = mtdinfo->priv; +	ulong base = (ulong) this->IO_ADDR_W & 0xffffff00; +	uint32_t *p = (uint32_t *) buf; + +	for (; len > 0; len -= 4) +		out_be32((u32 *)(base + NDFC_DATA), *p++); +} + +static int ndfc_verify_buf(struct mtd_info *mtdinfo, const uint8_t *buf, int len) +{ +	struct nand_chip *this = mtdinfo->priv; +	ulong base = (ulong) this->IO_ADDR_W & 0xffffff00; +	uint32_t *p = (uint32_t *) buf; + +	for (; len > 0; len -= 4) +		if (*p++ != in_be32((u32 *)(base + NDFC_DATA))) +			return -1; + +	return 0; +} + +/* + * Read a byte from the NDFC. + */ +static uint8_t ndfc_read_byte(struct mtd_info *mtd) +{ + +	struct nand_chip *chip = mtd->priv; + +#ifdef CONFIG_SYS_NAND_BUSWIDTH_16BIT +	return (uint8_t) readw(chip->IO_ADDR_R); +#else +	return readb(chip->IO_ADDR_R); +#endif + +} + +void board_nand_select_device(struct nand_chip *nand, int chip) +{ +	/* +	 * Don't use "chip" to address the NAND device, +	 * generate the cs from the address where it is encoded. 
+	 */ +	ulong base = (ulong)nand->IO_ADDR_W & 0xffffff00; +	int cs = ndfc_cs[chip]; + +	/* Set NandFlash Core Configuration Register */ +	/* 1 col x 2 rows */ +	out_be32((u32 *)(base + NDFC_CCR), 0x00000000 | (cs << 24)); +	out_be32((u32 *)(base + NDFC_BCFG0 + (cs << 2)), CONFIG_SYS_NAND_BCR); +} + +static void ndfc_select_chip(struct mtd_info *mtd, int chip) +{ +	/* +	 * Nothing to do here! +	 */ +} + +int board_nand_init(struct nand_chip *nand) +{ +	int cs = (ulong)nand->IO_ADDR_W & 0x00000003; +	ulong base = (ulong)nand->IO_ADDR_W & 0xffffff00; +	static int chip = 0; + +	/* +	 * Save chip-select for this chip # +	 */ +	ndfc_cs[chip] = cs; + +	/* +	 * Select required NAND chip in NDFC +	 */ +	board_nand_select_device(nand, chip); + +	nand->IO_ADDR_R = (void __iomem *)(base + NDFC_DATA); +	nand->IO_ADDR_W = (void __iomem *)(base + NDFC_DATA); +	nand->cmd_ctrl = ndfc_hwcontrol; +	nand->chip_delay = 50; +	nand->read_buf = ndfc_read_buf; +	nand->dev_ready = ndfc_dev_ready; +	nand->ecc.correct = nand_correct_data; +	nand->ecc.hwctl = ndfc_enable_hwecc; +	nand->ecc.calculate = ndfc_calculate_ecc; +	nand->ecc.mode = NAND_ECC_HW; +	nand->ecc.size = 256; +	nand->ecc.bytes = 3; +	nand->ecc.strength = 1; +	nand->select_chip = ndfc_select_chip; + +#ifdef CONFIG_SYS_NAND_BUSWIDTH_16BIT +	nand->options |= NAND_BUSWIDTH_16; +#endif + +	nand->write_buf  = ndfc_write_buf; +	nand->verify_buf = ndfc_verify_buf; +	nand->read_byte = ndfc_read_byte; + +	chip++; + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/nomadik.c b/roms/u-boot/drivers/mtd/nand/nomadik.c new file mode 100644 index 00000000..a7cee513 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/nomadik.c @@ -0,0 +1,206 @@ +/* + * (C) Copyright 2007 STMicroelectronics, <www.st.com> + * (C) Copyright 2009 Alessandro Rubini <rubini@unipv.it> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <nand.h> +#include <asm/io.h> + +static inline int parity(int b) /* b is really a byte; returns 0 or 
~0 */ +{ +	__asm__ __volatile__( +		"eor   %0, %0, %0, lsr #4\n\t" +		"eor   %0, %0, %0, lsr #2\n\t" +		"eor   %0, %0, %0, lsr #1\n\t" +		"ands  %0, %0, #1\n\t" +		"subne %0, %0, #2\t" +		: "=r" (b) : "0" (b)); +	return b; +} + +/* + * This is the ECC routine used in hardware, according to the manual. + * HW claims to make the calculation but not the correction; so we must + * recalculate the bytes for a comparison. + */ +static int ecc512(const unsigned char *data, unsigned char *ecc) +{ +	int gpar = 0; +	int i, val, par; +	int pbits = 0;		/* P8, P16, ... P2048 */ +	int pprime = 0;		/* P8', P16', ... P2048' */ +	int lowbits;		/* P1, P2, P4 and primes */ + +	for (i = 0; i < 512; i++) { +		par = parity((val = data[i])); +		gpar ^= val; +		pbits ^= (i & par); +	} +	/* +	 * Ok, now gpar is global parity (xor of all bytes) +	 * pbits are all the parity bits (non-prime ones) +	 */ +	par = parity(gpar); +	pprime = pbits ^ par; +	/* Put low bits in the right position for ecc[2] (bits 7..2) */ +	lowbits = 0 +		| (parity(gpar & 0xf0) & 0x80)	/* P4  */ +		| (parity(gpar & 0x0f) & 0x40)	/* P4' */ +		| (parity(gpar & 0xcc) & 0x20)	/* P2  */ +		| (parity(gpar & 0x33) & 0x10)	/* P2' */ +		| (parity(gpar & 0xaa) & 0x08)	/* P1  */ +		| (parity(gpar & 0x55) & 0x04);	/* P1' */ + +	ecc[2] = ~(lowbits | ((pbits & 0x100) >> 7) | ((pprime & 0x100) >> 8)); +	/* now intermix bits for ecc[1] (P1024..P128') and ecc[0] (P64..P8') */ +	ecc[1] = ~(    (pbits & 0x80) >> 0  | ((pprime & 0x80) >> 1) +		    | ((pbits & 0x40) >> 1) | ((pprime & 0x40) >> 2) +		    | ((pbits & 0x20) >> 2) | ((pprime & 0x20) >> 3) +		    | ((pbits & 0x10) >> 3) | ((pprime & 0x10) >> 4)); + +	ecc[0] = ~(    (pbits & 0x8) << 4  | ((pprime & 0x8) << 3) +		    | ((pbits & 0x4) << 3) | ((pprime & 0x4) << 2) +		    | ((pbits & 0x2) << 2) | ((pprime & 0x2) << 1) +		    | ((pbits & 0x1) << 1) | ((pprime & 0x1) << 0)); +	return 0; +} + +/* This is the method in the chip->ecc field */ +static int nomadik_ecc_calculate(struct 
mtd_info *mtd, const uint8_t *dat, +				 uint8_t *ecc_code) +{ +	return ecc512(dat, ecc_code); +} + +static int nomadik_ecc_correct(struct mtd_info *mtd, uint8_t *dat, +				uint8_t *r_ecc, uint8_t *c_ecc) +{ +	struct nand_chip *chip = mtd->priv; +	uint32_t r, c, d, diff; /*read, calculated, xor of them */ + +	if (!memcmp(r_ecc, c_ecc, chip->ecc.bytes)) +		return 0; + +	/* Reorder the bytes into ascending-order 24 bits -- see manual */ +	r = r_ecc[2] << 22 | r_ecc[1] << 14 | r_ecc[0] << 6 | r_ecc[2] >> 2; +	c = c_ecc[2] << 22 | c_ecc[1] << 14 | c_ecc[0] << 6 | c_ecc[2] >> 2; +	diff = (r ^ c) & ((1<<24)-1); /* use 24 bits only */ + +	/* If 12 bits are different, one per pair, it's correctable */ +	if (((diff | (diff>>1)) & 0x555555) == 0x555555) { +		int bit = ((diff & 2) >> 1) +			| ((diff & 0x8) >> 2) | ((diff & 0x20) >> 3); +		int byte; + +		d = diff >> 6; /* remove bit-order info */ +		byte =  ((d & 2) >> 1) +			| ((d & 0x8) >> 2) | ((d & 0x20) >> 3) +			| ((d & 0x80) >> 4) | ((d & 0x200) >> 5) +			| ((d & 0x800) >> 6) | ((d & 0x2000) >> 7) +			| ((d & 0x8000) >> 8) | ((d & 0x20000) >> 9); +		/* correct the single bit */ +		dat[byte] ^= 1<<bit; +		return 0; +	} +	/* If 1 bit only differs, it's one bit error in ECC, ignore */ +	if ((diff ^ (1 << (ffs(diff) - 1))) == 0) +		return 0; +	/* Otherwise, uncorrectable */ +	return -1; +} + +static void nomadik_ecc_hwctl(struct mtd_info *mtd, int mode) +{ /* mandatory in the structure but not used here */ } + + +/* This is the layout used by older installations, we keep compatible */ +struct nand_ecclayout nomadik_ecc_layout = { +	.eccbytes = 3 * 4, +	.eccpos = { /* each subpage has 16 bytes: pos 2,3,4 hosts ECC */ +		0x02, 0x03, 0x04, +		0x12, 0x13, 0x14, +		0x22, 0x23, 0x24, +		0x32, 0x33, 0x34}, +	.oobfree = { {0x08, 0x08}, {0x18, 0x08}, {0x28, 0x08}, {0x38, 0x08} }, +}; + +#define MASK_ALE	(1 << 24)	/* our ALE is AD21 */ +#define MASK_CLE	(1 << 23)	/* our CLE is AD22 */ + +/* This is copied from the AT91SAM9 devices 
(Stelian Pop, Lead Tech Design) */
+static void nomadik_nand_hwcontrol(struct mtd_info *mtd,
+				   int cmd, unsigned int ctrl)
+{
+	struct nand_chip *this = mtd->priv;
+	u32 pcr0 = readl(REG_FSMC_PCR0);
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		ulong IO_ADDR_W = (ulong) this->IO_ADDR_W;
+		IO_ADDR_W &= ~(MASK_ALE | MASK_CLE);
+
+		if (ctrl & NAND_CLE)
+			IO_ADDR_W |= MASK_CLE;
+		if (ctrl & NAND_ALE)
+			IO_ADDR_W |= MASK_ALE;
+
+		if (ctrl & NAND_NCE)
+			writel(pcr0 | 0x4, REG_FSMC_PCR0);
+		else
+			writel(pcr0 & ~0x4, REG_FSMC_PCR0);
+
+		this->IO_ADDR_W = (void *) IO_ADDR_W;
+		this->IO_ADDR_R = (void *) IO_ADDR_W;
+	}
+
+	if (cmd != NAND_CMD_NONE)
+		writeb(cmd, this->IO_ADDR_W);
+}
+
+/* Returns 1 when ready; upper layers timeout at 20ms with timer routines */
+static int nomadik_nand_ready(struct mtd_info *mtd)
+{
+	return 1; /* The ready bit is handled in hardware */
+}
+
+/* Copy a buffer 32bits at a time: faster than default method which is 8bit */
+static void nomadik_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *chip = mtd->priv;
+	u32 *p = (u32 *) buf;
+
+	len >>= 2;
+	writel(0, REG_FSMC_ECCR0);
+	for (i = 0; i < len; i++)
+		p[i] = readl(chip->IO_ADDR_R);
+}
+
+int board_nand_init(struct nand_chip *chip)
+{
+	/* Set up the FSMC_PCR0 for nand access */
+	writel(0x0000004a, REG_FSMC_PCR0);
+	/* Set up FSMC_PMEM0, FSMC_PATT0 with timing data for access */
+	writel(0x00020401, REG_FSMC_PMEM0);
+	writel(0x00020404, REG_FSMC_PATT0);
+
+	chip->options = NAND_COPYBACK |	NAND_CACHEPRG | NAND_NO_PADDING;
+	chip->cmd_ctrl = nomadik_nand_hwcontrol;
+	chip->dev_ready = nomadik_nand_ready;
+	/* The chip allows 32bit reads, so avoid the default 8bit copy */
+	chip->read_buf = nomadik_nand_read_buf;
+
+	/* ECC: follow the hardware-defined rules, but do it in sw */
+	chip->ecc.mode = NAND_ECC_HW;
+	chip->ecc.bytes = 3;
+	chip->ecc.size = 512;
+	chip->ecc.strength = 1;
+	chip->ecc.layout = &nomadik_ecc_layout;
+	
chip->ecc.calculate = nomadik_ecc_calculate; +	chip->ecc.hwctl = nomadik_ecc_hwctl; +	chip->ecc.correct = nomadik_ecc_correct; + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/omap_elm.c b/roms/u-boot/drivers/mtd/nand/omap_elm.c new file mode 100644 index 00000000..47b1f1bf --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/omap_elm.c @@ -0,0 +1,196 @@ +/* + * (C) Copyright 2010-2011 Texas Instruments, <www.ti.com> + * Mansoor Ahamed <mansoor.ahamed@ti.com> + * + * BCH Error Location Module (ELM) support. + * + * NOTE: + * 1. Supports only continuous mode. Dont see need for page mode in uboot + * 2. Supports only syndrome polynomial 0. i.e. poly local variable is + *    always set to ELM_DEFAULT_POLY. Dont see need for other polynomial + *    sets in uboot + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <asm/io.h> +#include <asm/errno.h> +#include <linux/mtd/omap_gpmc.h> +#include <linux/mtd/omap_elm.h> +#include <asm/arch/hardware.h> + +#define ELM_DEFAULT_POLY (0) + +struct elm *elm_cfg; + +/** + * elm_load_syndromes - Load BCH syndromes based on nibble selection + * @syndrome: BCH syndrome + * @nibbles: + * @poly: Syndrome Polynomial set to use + * + * Load BCH syndromes based on nibble selection + */ +static void elm_load_syndromes(u8 *syndrome, u32 nibbles, u8 poly) +{ +	u32 *ptr; +	u32 val; + +	/* reg 0 */ +	ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[0]; +	val = syndrome[0] | (syndrome[1] << 8) | (syndrome[2] << 16) | +				(syndrome[3] << 24); +	writel(val, ptr); +	/* reg 1 */ +	ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[1]; +	val = syndrome[4] | (syndrome[5] << 8) | (syndrome[6] << 16) | +				(syndrome[7] << 24); +	writel(val, ptr); + +	/* BCH 8-bit with 26 nibbles (4*8=32) */ +	if (nibbles > 13) { +		/* reg 2 */ +		ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[2]; +		val = syndrome[8] | (syndrome[9] << 8) | (syndrome[10] << 16) | +				(syndrome[11] << 24); +		
writel(val, ptr);
+		/* reg 3 */
+		ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[3];
+		val = syndrome[12] | (syndrome[13] << 8) |
+			(syndrome[14] << 16) | (syndrome[15] << 24);
+		writel(val, ptr);
+	}
+
+	/* BCH 16-bit with 52 nibbles (7*8=56) */
+	if (nibbles > 26) {
+		/* reg 4 */
+		ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[4];
+		val = syndrome[16] | (syndrome[17] << 8) |
+			(syndrome[18] << 16) | (syndrome[19] << 24);
+		writel(val, ptr);
+
+		/* reg 5 */
+		ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[5];
+		val = syndrome[20] | (syndrome[21] << 8) |
+			(syndrome[22] << 16) | (syndrome[23] << 24);
+		writel(val, ptr);
+
+		/* reg 6 */
+		ptr = &elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[6];
+		val = syndrome[24] | (syndrome[25] << 8) |
+			(syndrome[26] << 16) | (syndrome[27] << 24);
+		writel(val, ptr);
+	}
+}
+
+/**
+ * elm_check_errors - Check for BCH errors and return error locations
+ * @syndrome: BCH syndrome
+ * @nibbles: syndrome length in nibbles (13/26/52 for BCH4/BCH8/BCH16)
+ * @error_count: Returns number of errors in the syndrome
+ * @error_locations: Returns error locations (in decimal) in this array
+ *
+ * Check the provided syndrome for BCH errors and return error count
+ * and locations in the array passed. 
Returns -1 if error is not correctable, + * else returns 0 + */ +int elm_check_error(u8 *syndrome, u32 nibbles, u32 *error_count, +		u32 *error_locations) +{ +	u8 poly = ELM_DEFAULT_POLY; +	s8 i; +	u32 location_status; + +	elm_load_syndromes(syndrome, nibbles, poly); + +	/* start processing */ +	writel((readl(&elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[6]) +				| ELM_SYNDROME_FRAGMENT_6_SYNDROME_VALID), +		&elm_cfg->syndrome_fragments[poly].syndrome_fragment_x[6]); + +	/* wait for processing to complete */ +	while ((readl(&elm_cfg->irqstatus) & (0x1 << poly)) != 0x1) +		; +	/* clear status */ +	writel((readl(&elm_cfg->irqstatus) | (0x1 << poly)), +			&elm_cfg->irqstatus); + +	/* check if correctable */ +	location_status = readl(&elm_cfg->error_location[poly].location_status); +	if (!(location_status & ELM_LOCATION_STATUS_ECC_CORRECTABLE_MASK)) +		return -1; + +	/* get error count */ +	*error_count = readl(&elm_cfg->error_location[poly].location_status) & +					ELM_LOCATION_STATUS_ECC_NB_ERRORS_MASK; + +	for (i = 0; i < *error_count; i++) { +		error_locations[i] = +		     readl(&elm_cfg->error_location[poly].error_location_x[i]); +	} + +	return 0; +} + + +/** + * elm_config - Configure ELM module + * @level: 4 / 8 / 16 bit BCH + * + * Configure ELM module based on BCH level. + * Set mode as continuous mode. + * Currently we are using only syndrome 0 and syndromes 1 to 6 are not used. 
+ * Also, the mode is set only for syndrome 0
+ */
+int elm_config(enum bch_level level)
+{
+	u32 val;
+	u8 poly = ELM_DEFAULT_POLY;
+	u32 buffer_size = 0x7FF;
+
+	/* config size and level */
+	val = (u32)(level) & ELM_LOCATION_CONFIG_ECC_BCH_LEVEL_MASK;
+	val |= ((buffer_size << ELM_LOCATION_CONFIG_ECC_SIZE_POS) &
+				ELM_LOCATION_CONFIG_ECC_SIZE_MASK);
+	writel(val, &elm_cfg->location_config);
+
+	/* config continuous mode */
+	/* enable interrupt generation for syndrome polynomial set */
+	writel((readl(&elm_cfg->irqenable) | (0x1 << poly)),
+			&elm_cfg->irqenable);
+	/* set continuous mode for the syndrome polynomial set */
+	writel((readl(&elm_cfg->page_ctrl) & ~(0x1 << poly)),
+			&elm_cfg->page_ctrl);
+
+	return 0;
+}
+
+/**
+ * elm_reset - Do a soft reset of ELM
+ *
+ * Perform a soft reset of ELM and return after reset is done.
+ */
+void elm_reset(void)
+{
+	/* initiate reset */
+	writel((readl(&elm_cfg->sysconfig) | ELM_SYSCONFIG_SOFTRESET),
+			&elm_cfg->sysconfig);
+
+	/* wait for reset complete and normal operation */
+	while ((readl(&elm_cfg->sysstatus) & ELM_SYSSTATUS_RESETDONE) !=
+		ELM_SYSSTATUS_RESETDONE)
+		;
+}
+
+/**
+ * elm_init - Initialize ELM module
+ *
+ * Initialize ELM support. Currently it does only base address init
+ * and ELM reset. 
+ */ +void elm_init(void) +{ +	elm_cfg = (struct elm *)ELM_BASE; +	elm_reset(); +} diff --git a/roms/u-boot/drivers/mtd/nand/omap_gpmc.c b/roms/u-boot/drivers/mtd/nand/omap_gpmc.c new file mode 100644 index 00000000..881a6361 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/omap_gpmc.c @@ -0,0 +1,836 @@ +/* + * (C) Copyright 2004-2008 Texas Instruments, <www.ti.com> + * Rohit Choraria <rohitkc@ti.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <asm/io.h> +#include <asm/errno.h> +#include <asm/arch/mem.h> +#include <linux/mtd/omap_gpmc.h> +#include <linux/mtd/nand_ecc.h> +#include <linux/bch.h> +#include <linux/compiler.h> +#include <nand.h> +#include <linux/mtd/omap_elm.h> + +#define BADBLOCK_MARKER_LENGTH	2 +#define SECTOR_BYTES		512 +#define ECCCLEAR		(0x1 << 8) +#define ECCRESULTREG1		(0x1 << 0) +/* 4 bit padding to make byte aligned, 56 = 52 + 4 */ +#define BCH4_BIT_PAD		4 + +#ifdef CONFIG_BCH +static u8  bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2, +				0x97, 0x79, 0xe5, 0x24, 0xb5}; +#endif +static uint8_t cs; +static __maybe_unused struct nand_ecclayout omap_ecclayout; + +/* + * omap_nand_hwcontrol - Set the address pointers corretly for the + *			following address/data/command operation + */ +static void omap_nand_hwcontrol(struct mtd_info *mtd, int32_t cmd, +				uint32_t ctrl) +{ +	register struct nand_chip *this = mtd->priv; + +	/* +	 * Point the IO_ADDR to DATA and ADDRESS registers instead +	 * of chip address +	 */ +	switch (ctrl) { +	case NAND_CTRL_CHANGE | NAND_CTRL_CLE: +		this->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_cmd; +		break; +	case NAND_CTRL_CHANGE | NAND_CTRL_ALE: +		this->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_adr; +		break; +	case NAND_CTRL_CHANGE | NAND_NCE: +		this->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_dat; +		break; +	} + +	if (cmd != NAND_CMD_NONE) +		writeb(cmd, this->IO_ADDR_W); +} + +#ifdef CONFIG_SPL_BUILD +/* Check wait pin as dev 
ready indicator */
+int omap_spl_dev_ready(struct mtd_info *mtd)
+{
+	return gpmc_cfg->status & (1 << 8);
+}
+#endif
+
+
+/*
+ * gen_true_ecc - This function will generate true ECC value, which
+ * can be used when correcting data read from NAND flash memory core
+ *
+ * @ecc_buf:	buffer to store ecc code
+ *
+ * @return:	re-formatted ECC value
+ */
+static uint32_t gen_true_ecc(uint8_t *ecc_buf)
+{
+	return ecc_buf[0] | (ecc_buf[1] << 16) | ((ecc_buf[2] & 0xF0) << 20) |
+		((ecc_buf[2] & 0x0F) << 8);
+}
+
+/*
+ * omap_correct_data - Compares the ecc read from nand spare area with ECC
+ * registers values and corrects one bit error if it has occurred
+ * Further details can be had from OMAP TRM and the following selected links:
+ * http://en.wikipedia.org/wiki/Hamming_code
+ * http://www.cs.utexas.edu/users/plaxton/c/337/05f/slides/ErrorCorrection-4.pdf
+ *
+ * @mtd:		 MTD device structure
+ * @dat:		 page data
+ * @read_ecc:		 ecc read from nand flash
+ * @calc_ecc:		 ecc read from ECC registers
+ *
+ * @return 0 if data is OK or corrected, 2 if the ECC itself is corrupted,
+ * else returns -1 (uncorrectable, e.g. 2-bit error)
+ */
+static int __maybe_unused omap_correct_data(struct mtd_info *mtd, uint8_t *dat,
+				uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+	uint32_t orig_ecc, new_ecc, res, hm;
+	uint16_t parity_bits, byte;
+	uint8_t bit;
+
+	/* Regenerate the original ECC */
+	orig_ecc = gen_true_ecc(read_ecc);
+	new_ecc = gen_true_ecc(calc_ecc);
+	/* Get the XOR of real ecc */
+	res = orig_ecc ^ new_ecc;
+	if (res) {
+		/* Get the hamming width */
+		hm = hweight32(res);
+		/* Single bit errors can be corrected! */
+		if (hm == 12) {
+			/* Correctable data! 
*/ +			parity_bits = res >> 16; +			bit = (parity_bits & 0x7); +			byte = (parity_bits >> 3) & 0x1FF; +			/* Flip the bit to correct */ +			dat[byte] ^= (0x1 << bit); +		} else if (hm == 1) { +			printf("Error: Ecc is wrong\n"); +			/* ECC itself is corrupted */ +			return 2; +		} else { +			/* +			 * hm distance != parity pairs OR one, could mean 2 bit +			 * error OR potentially be on a blank page.. +			 * orig_ecc: contains spare area data from nand flash. +			 * new_ecc: generated ecc while reading data area. +			 * Note: if the ecc = 0, all data bits from which it was +			 * generated are 0xFF. +			 * The 3 byte(24 bits) ecc is generated per 512byte +			 * chunk of a page. If orig_ecc(from spare area) +			 * is 0xFF && new_ecc(computed now from data area)=0x0, +			 * this means that data area is 0xFF and spare area is +			 * 0xFF. A sure sign of a erased page! +			 */ +			if ((orig_ecc == 0x0FFF0FFF) && (new_ecc == 0x00000000)) +				return 0; +			printf("Error: Bad compare! failed\n"); +			/* detected 2 bit error */ +			return -1; +		} +	} +	return 0; +} + +/* + * Generic BCH interface + */ +struct nand_bch_priv { +	uint8_t mode; +	uint8_t type; +	uint8_t nibbles; +	struct bch_control *control; +	enum omap_ecc ecc_scheme; +}; + +/* bch types */ +#define ECC_BCH4	0 +#define ECC_BCH8	1 +#define ECC_BCH16	2 + +/* BCH nibbles for diff bch levels */ +#define ECC_BCH4_NIBBLES	13 +#define ECC_BCH8_NIBBLES	26 +#define ECC_BCH16_NIBBLES	52 + +/* + * This can be a single instance cause all current users have only one NAND + * with nearly the same setup (BCH8, some with ELM and others with sw BCH + * library). + * When some users with other BCH strength will exists this have to change! 
+ */ +static __maybe_unused struct nand_bch_priv bch_priv = { +	.type = ECC_BCH8, +	.nibbles = ECC_BCH8_NIBBLES, +	.control = NULL +}; + +/* + * omap_reverse_list - re-orders list elements in reverse order [internal] + * @list:	pointer to start of list + * @length:	length of list +*/ +void omap_reverse_list(u8 *list, unsigned int length) +{ +	unsigned int i, j; +	unsigned int half_length = length / 2; +	u8 tmp; +	for (i = 0, j = length - 1; i < half_length; i++, j--) { +		tmp = list[i]; +		list[i] = list[j]; +		list[j] = tmp; +	} +} + +/* + * omap_enable_hwecc - configures GPMC as per ECC scheme before read/write + * @mtd:	MTD device structure + * @mode:	Read/Write mode + */ +__maybe_unused +static void omap_enable_hwecc(struct mtd_info *mtd, int32_t mode) +{ +	struct nand_chip	*nand	= mtd->priv; +	struct nand_bch_priv	*bch	= nand->priv; +	unsigned int dev_width = (nand->options & NAND_BUSWIDTH_16) ? 1 : 0; +	unsigned int ecc_algo = 0; +	unsigned int bch_type = 0; +	unsigned int eccsize1 = 0x00, eccsize0 = 0x00, bch_wrapmode = 0x00; +	u32 ecc_size_config_val = 0; +	u32 ecc_config_val = 0; + +	/* configure GPMC for specific ecc-scheme */ +	switch (bch->ecc_scheme) { +	case OMAP_ECC_HAM1_CODE_SW: +		return; +	case OMAP_ECC_HAM1_CODE_HW: +		ecc_algo = 0x0; +		bch_type = 0x0; +		bch_wrapmode = 0x00; +		eccsize0 = 0xFF; +		eccsize1 = 0xFF; +		break; +	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: +	case OMAP_ECC_BCH8_CODE_HW: +		ecc_algo = 0x1; +		bch_type = 0x1; +		if (mode == NAND_ECC_WRITE) { +			bch_wrapmode = 0x01; +			eccsize0 = 0;  /* extra bits in nibbles per sector */ +			eccsize1 = 28; /* OOB bits in nibbles per sector */ +		} else { +			bch_wrapmode = 0x01; +			eccsize0 = 26; /* ECC bits in nibbles per sector */ +			eccsize1 = 2;  /* non-ECC bits in nibbles per sector */ +		} +		break; +	default: +		return; +	} +	/* Clear ecc and enable bits */ +	writel(ECCCLEAR | ECCRESULTREG1, &gpmc_cfg->ecc_control); +	/* Configure ecc size for BCH */ +	ecc_size_config_val = 
(eccsize1 << 22) | (eccsize0 << 12); +	writel(ecc_size_config_val, &gpmc_cfg->ecc_size_config); + +	/* Configure device details for BCH engine */ +	ecc_config_val = ((ecc_algo << 16)	| /* HAM1 | BCHx */ +			(bch_type << 12)	| /* BCH4/BCH8/BCH16 */ +			(bch_wrapmode << 8)	| /* wrap mode */ +			(dev_width << 7)	| /* bus width */ +			(0x0 << 4)		| /* number of sectors */ +			(cs <<  1)		| /* ECC CS */ +			(0x1));			  /* enable ECC */ +	writel(ecc_config_val, &gpmc_cfg->ecc_config); +} + +/* + *  omap_calculate_ecc - Read ECC result + *  @mtd:	MTD structure + *  @dat:	unused + *  @ecc_code:	ecc_code buffer + *  Using noninverted ECC can be considered ugly since writing a blank + *  page ie. padding will clear the ECC bytes. This is no problem as + *  long nobody is trying to write data on the seemingly unused page. + *  Reading an erased page will produce an ECC mismatch between + *  generated and read ECC bytes that has to be dealt with separately. + *  E.g. if page is 0xFF (fresh erased), and if HW ECC engine within GPMC + *  is used, the result of read will be 0x0 while the ECC offsets of the + *  spare area will be 0xFF which will result in an ECC mismatch. 
+ */ +static int omap_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat, +				uint8_t *ecc_code) +{ +	struct nand_chip *chip = mtd->priv; +	struct nand_bch_priv *bch = chip->priv; +	uint32_t *ptr, val = 0; +	int8_t i = 0, j; + +	switch (bch->ecc_scheme) { +	case OMAP_ECC_HAM1_CODE_HW: +		val = readl(&gpmc_cfg->ecc1_result); +		ecc_code[0] = val & 0xFF; +		ecc_code[1] = (val >> 16) & 0xFF; +		ecc_code[2] = ((val >> 8) & 0x0F) | ((val >> 20) & 0xF0); +		break; +#ifdef CONFIG_BCH +	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: +#endif +	case OMAP_ECC_BCH8_CODE_HW: +		ptr = &gpmc_cfg->bch_result_0_3[0].bch_result_x[3]; +		val = readl(ptr); +		ecc_code[i++] = (val >>  0) & 0xFF; +		ptr--; +		for (j = 0; j < 3; j++) { +			val = readl(ptr); +			ecc_code[i++] = (val >> 24) & 0xFF; +			ecc_code[i++] = (val >> 16) & 0xFF; +			ecc_code[i++] = (val >>  8) & 0xFF; +			ecc_code[i++] = (val >>  0) & 0xFF; +			ptr--; +		} +		break; +	default: +		return -EINVAL; +	} +	/* ECC scheme specific syndrome customizations */ +	switch (bch->ecc_scheme) { +	case OMAP_ECC_HAM1_CODE_HW: +		break; +#ifdef CONFIG_BCH +	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: + +		for (i = 0; i < chip->ecc.bytes; i++) +			*(ecc_code + i) = *(ecc_code + i) ^ +						bch8_polynomial[i]; +		break; +#endif +	case OMAP_ECC_BCH8_CODE_HW: +		ecc_code[chip->ecc.bytes - 1] = 0x00; +		break; +	default: +		return -EINVAL; +	} +	return 0; +} + +#ifdef CONFIG_NAND_OMAP_ELM +/* + * omap_correct_data_bch - Compares the ecc read from nand spare area + * with ECC registers values and corrects one bit error if it has occured + * + * @mtd:	MTD device structure + * @dat:	page data + * @read_ecc:	ecc read from nand flash (ignored) + * @calc_ecc:	ecc read from ECC registers + * + * @return 0 if data is OK or corrected, else returns -1 + */ +static int omap_correct_data_bch(struct mtd_info *mtd, uint8_t *dat, +				uint8_t *read_ecc, uint8_t *calc_ecc) +{ +	struct nand_chip *chip = mtd->priv; +	struct nand_bch_priv *bch = chip->priv; +	
uint32_t eccbytes = chip->ecc.bytes; +	uint32_t error_count = 0, error_max; +	uint32_t error_loc[8]; +	uint32_t i, ecc_flag = 0; +	uint8_t count, err = 0; +	uint32_t byte_pos, bit_pos; + +	/* check calculated ecc */ +	for (i = 0; i < chip->ecc.bytes && !ecc_flag; i++) { +		if (calc_ecc[i] != 0x00) +			ecc_flag = 1; +	} +	if (!ecc_flag) +		return 0; + +	/* check for whether its a erased-page */ +	ecc_flag = 0; +	for (i = 0; i < chip->ecc.bytes && !ecc_flag; i++) { +		if (read_ecc[i] != 0xff) +			ecc_flag = 1; +	} +	if (!ecc_flag) +		return 0; + +	/* +	 * while reading ECC result we read it in big endian. +	 * Hence while loading to ELM we have rotate to get the right endian. +	 */ +	switch (bch->ecc_scheme) { +	case OMAP_ECC_BCH8_CODE_HW: +		omap_reverse_list(calc_ecc, eccbytes - 1); +		break; +	default: +		return -EINVAL; +	} +	/* use elm module to check for errors */ +	elm_config((enum bch_level)(bch->type)); +	if (elm_check_error(calc_ecc, bch->nibbles, &error_count, error_loc)) { +		printf("nand: error: uncorrectable ECC errors\n"); +		return -EINVAL; +	} +	/* correct bch error */ +	for (count = 0; count < error_count; count++) { +		switch (bch->type) { +		case ECC_BCH8: +			/* 14th byte in ECC is reserved to match ROM layout */ +			error_max = SECTOR_BYTES + (eccbytes - 1); +			break; +		default: +			return -EINVAL; +		} +		byte_pos = error_max - (error_loc[count] / 8) - 1; +		bit_pos  = error_loc[count] % 8; +		if (byte_pos < SECTOR_BYTES) { +			dat[byte_pos] ^= 1 << bit_pos; +			printf("nand: bit-flip corrected @data=%d\n", byte_pos); +		} else if (byte_pos < error_max) { +			read_ecc[byte_pos - SECTOR_BYTES] = 1 << bit_pos; +			printf("nand: bit-flip corrected @oob=%d\n", byte_pos - +								SECTOR_BYTES); +		} else { +			err = -EBADMSG; +			printf("nand: error: invalid bit-flip location\n"); +		} +	} +	return (err) ? 
err : error_count; +} + +/** + * omap_read_page_bch - hardware ecc based page read function + * @mtd:	mtd info structure + * @chip:	nand chip info structure + * @buf:	buffer to store read data + * @oob_required: caller expects OOB data read to chip->oob_poi + * @page:	page number to read + * + */ +static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, +				uint8_t *buf, int oob_required, int page) +{ +	int i, eccsize = chip->ecc.size; +	int eccbytes = chip->ecc.bytes; +	int eccsteps = chip->ecc.steps; +	uint8_t *p = buf; +	uint8_t *ecc_calc = chip->buffers->ecccalc; +	uint8_t *ecc_code = chip->buffers->ecccode; +	uint32_t *eccpos = chip->ecc.layout->eccpos; +	uint8_t *oob = chip->oob_poi; +	uint32_t data_pos; +	uint32_t oob_pos; + +	data_pos = 0; +	/* oob area start */ +	oob_pos = (eccsize * eccsteps) + chip->ecc.layout->eccpos[0]; +	oob += chip->ecc.layout->eccpos[0]; + +	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize, +				oob += eccbytes) { +		chip->ecc.hwctl(mtd, NAND_ECC_READ); +		/* read data */ +		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_pos, page); +		chip->read_buf(mtd, p, eccsize); + +		/* read respective ecc from oob area */ +		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, page); +		chip->read_buf(mtd, oob, eccbytes); +		/* read syndrome */ +		chip->ecc.calculate(mtd, p, &ecc_calc[i]); + +		data_pos += eccsize; +		oob_pos += eccbytes; +	} + +	for (i = 0; i < chip->ecc.total; i++) +		ecc_code[i] = chip->oob_poi[eccpos[i]]; + +	eccsteps = chip->ecc.steps; +	p = buf; + +	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { +		int stat; + +		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); +		if (stat < 0) +			mtd->ecc_stats.failed++; +		else +			mtd->ecc_stats.corrected += stat; +	} +	return 0; +} +#endif /* CONFIG_NAND_OMAP_ELM */ + +/* + * OMAP3 BCH8 support (with BCH library) + */ +#ifdef CONFIG_BCH +/** + * omap_correct_data_bch_sw - Decode received data and correct errors + * @mtd: MTD device 
structure + * @data: page data + * @read_ecc: ecc read from nand flash + * @calc_ecc: ecc read from HW ECC registers + */ +static int omap_correct_data_bch_sw(struct mtd_info *mtd, u_char *data, +				 u_char *read_ecc, u_char *calc_ecc) +{ +	int i, count; +	/* cannot correct more than 8 errors */ +	unsigned int errloc[8]; +	struct nand_chip *chip = mtd->priv; +	struct nand_bch_priv *chip_priv = chip->priv; +	struct bch_control *bch = chip_priv->control; + +	count = decode_bch(bch, NULL, 512, read_ecc, calc_ecc, NULL, errloc); +	if (count > 0) { +		/* correct errors */ +		for (i = 0; i < count; i++) { +			/* correct data only, not ecc bytes */ +			if (errloc[i] < 8*512) +				data[errloc[i]/8] ^= 1 << (errloc[i] & 7); +			printf("corrected bitflip %u\n", errloc[i]); +#ifdef DEBUG +			puts("read_ecc: "); +			/* +			 * BCH8 have 13 bytes of ECC; BCH4 needs adoption +			 * here! +			 */ +			for (i = 0; i < 13; i++) +				printf("%02x ", read_ecc[i]); +			puts("\n"); +			puts("calc_ecc: "); +			for (i = 0; i < 13; i++) +				printf("%02x ", calc_ecc[i]); +			puts("\n"); +#endif +		} +	} else if (count < 0) { +		puts("ecc unrecoverable error\n"); +	} +	return count; +} + +/** + * omap_free_bch - Release BCH ecc resources + * @mtd: MTD device structure + */ +static void __maybe_unused omap_free_bch(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	struct nand_bch_priv *chip_priv = chip->priv; +	struct bch_control *bch = NULL; + +	if (chip_priv) +		bch = chip_priv->control; + +	if (bch) { +		free_bch(bch); +		chip_priv->control = NULL; +	} +} +#endif /* CONFIG_BCH */ + +/** + * omap_select_ecc_scheme - configures driver for particular ecc-scheme + * @nand: NAND chip device structure + * @ecc_scheme: ecc scheme to configure + * @pagesize: number of main-area bytes per page of NAND device + * @oobsize: number of OOB/spare bytes per page of NAND device + */ +static int omap_select_ecc_scheme(struct nand_chip *nand, +	enum omap_ecc ecc_scheme, unsigned int 
pagesize, unsigned int oobsize) { +	struct nand_bch_priv	*bch		= nand->priv; +	struct nand_ecclayout	*ecclayout	= &omap_ecclayout; +	int eccsteps = pagesize / SECTOR_BYTES; +	int i; + +	switch (ecc_scheme) { +	case OMAP_ECC_HAM1_CODE_SW: +		debug("nand: selected OMAP_ECC_HAM1_CODE_SW\n"); +		/* For this ecc-scheme, ecc.bytes, ecc.layout, ... are +		 * initialized in nand_scan_tail(), so just set ecc.mode */ +		bch_priv.control	= NULL; +		bch_priv.type		= 0; +		nand->ecc.mode		= NAND_ECC_SOFT; +		nand->ecc.layout	= NULL; +		nand->ecc.size		= 0; +		bch->ecc_scheme		= OMAP_ECC_HAM1_CODE_SW; +		break; + +	case OMAP_ECC_HAM1_CODE_HW: +		debug("nand: selected OMAP_ECC_HAM1_CODE_HW\n"); +		/* check ecc-scheme requirements before updating ecc info */ +		if ((3 * eccsteps) + BADBLOCK_MARKER_LENGTH > oobsize) { +			printf("nand: error: insufficient OOB: require=%d\n", ( +				(3 * eccsteps) + BADBLOCK_MARKER_LENGTH)); +			return -EINVAL; +		} +		bch_priv.control	= NULL; +		bch_priv.type		= 0; +		/* populate ecc specific fields */ +		memset(&nand->ecc, 0, sizeof(struct nand_ecc_ctrl)); +		nand->ecc.mode		= NAND_ECC_HW; +		nand->ecc.strength	= 1; +		nand->ecc.size		= SECTOR_BYTES; +		nand->ecc.bytes		= 3; +		nand->ecc.hwctl		= omap_enable_hwecc; +		nand->ecc.correct	= omap_correct_data; +		nand->ecc.calculate	= omap_calculate_ecc; +		/* define ecc-layout */ +		ecclayout->eccbytes	= nand->ecc.bytes * eccsteps; +		for (i = 0; i < ecclayout->eccbytes; i++) { +			if (nand->options & NAND_BUSWIDTH_16) +				ecclayout->eccpos[i] = i + 2; +			else +				ecclayout->eccpos[i] = i + 1; +		} +		ecclayout->oobfree[0].offset = i + BADBLOCK_MARKER_LENGTH; +		ecclayout->oobfree[0].length = oobsize - ecclayout->eccbytes - +						BADBLOCK_MARKER_LENGTH; +		bch->ecc_scheme		= OMAP_ECC_HAM1_CODE_HW; +		break; + +	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: +#ifdef CONFIG_BCH +		debug("nand: selected OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n"); +		/* check ecc-scheme requirements before updating ecc info */ 
+		if ((13 * eccsteps) + BADBLOCK_MARKER_LENGTH > oobsize) { +			printf("nand: error: insufficient OOB: require=%d\n", ( +				(13 * eccsteps) + BADBLOCK_MARKER_LENGTH)); +			return -EINVAL; +		} +		/* check if BCH S/W library can be used for error detection */ +		bch_priv.control = init_bch(13, 8, 0x201b); +		if (!bch_priv.control) { +			printf("nand: error: could not init_bch()\n"); +			return -ENODEV; +		} +		bch_priv.type = ECC_BCH8; +		/* populate ecc specific fields */ +		memset(&nand->ecc, 0, sizeof(struct nand_ecc_ctrl)); +		nand->ecc.mode		= NAND_ECC_HW; +		nand->ecc.strength	= 8; +		nand->ecc.size		= SECTOR_BYTES; +		nand->ecc.bytes		= 13; +		nand->ecc.hwctl		= omap_enable_hwecc; +		nand->ecc.correct	= omap_correct_data_bch_sw; +		nand->ecc.calculate	= omap_calculate_ecc; +		/* define ecc-layout */ +		ecclayout->eccbytes	= nand->ecc.bytes * eccsteps; +		ecclayout->eccpos[0]	= BADBLOCK_MARKER_LENGTH; +		for (i = 1; i < ecclayout->eccbytes; i++) { +			if (i % nand->ecc.bytes) +				ecclayout->eccpos[i] = +						ecclayout->eccpos[i - 1] + 1; +			else +				ecclayout->eccpos[i] = +						ecclayout->eccpos[i - 1] + 2; +		} +		ecclayout->oobfree[0].offset = i + BADBLOCK_MARKER_LENGTH; +		ecclayout->oobfree[0].length = oobsize - ecclayout->eccbytes - +						BADBLOCK_MARKER_LENGTH; +		bch->ecc_scheme		= OMAP_ECC_BCH8_CODE_HW_DETECTION_SW; +		break; +#else +		printf("nand: error: CONFIG_BCH required for ECC\n"); +		return -EINVAL; +#endif + +	case OMAP_ECC_BCH8_CODE_HW: +#ifdef CONFIG_NAND_OMAP_ELM +		debug("nand: selected OMAP_ECC_BCH8_CODE_HW\n"); +		/* check ecc-scheme requirements before updating ecc info */ +		if ((14 * eccsteps) + BADBLOCK_MARKER_LENGTH > oobsize) { +			printf("nand: error: insufficient OOB: require=%d\n", ( +				(14 * eccsteps) + BADBLOCK_MARKER_LENGTH)); +			return -EINVAL; +		} +		/* intialize ELM for ECC error detection */ +		elm_init(); +		bch_priv.type		= ECC_BCH8; +		/* populate ecc specific fields */ +		memset(&nand->ecc, 0, 
sizeof(struct nand_ecc_ctrl)); +		nand->ecc.mode		= NAND_ECC_HW; +		nand->ecc.strength	= 8; +		nand->ecc.size		= SECTOR_BYTES; +		nand->ecc.bytes		= 14; +		nand->ecc.hwctl		= omap_enable_hwecc; +		nand->ecc.correct	= omap_correct_data_bch; +		nand->ecc.calculate	= omap_calculate_ecc; +		nand->ecc.read_page	= omap_read_page_bch; +		/* define ecc-layout */ +		ecclayout->eccbytes	= nand->ecc.bytes * eccsteps; +		for (i = 0; i < ecclayout->eccbytes; i++) +			ecclayout->eccpos[i] = i + BADBLOCK_MARKER_LENGTH; +		ecclayout->oobfree[0].offset = i + BADBLOCK_MARKER_LENGTH; +		ecclayout->oobfree[0].length = oobsize - ecclayout->eccbytes - +						BADBLOCK_MARKER_LENGTH; +		bch->ecc_scheme		= OMAP_ECC_BCH8_CODE_HW; +		break; +#else +		printf("nand: error: CONFIG_NAND_OMAP_ELM required for ECC\n"); +		return -EINVAL; +#endif + +	default: +		debug("nand: error: ecc scheme not enabled or supported\n"); +		return -EINVAL; +	} + +	/* nand_scan_tail() sets ham1 sw ecc; hw ecc layout is set by driver */ +	if (ecc_scheme != OMAP_ECC_HAM1_CODE_SW) +		nand->ecc.layout = ecclayout; + +	return 0; +} + +#ifndef CONFIG_SPL_BUILD +/* + * omap_nand_switch_ecc - switch the ECC operation between different engines + * (h/w and s/w) and different algorithms (hamming and BCHx) + * + * @hardware		- true if one of the HW engines should be used + * @eccstrength		- the number of bits that could be corrected + *			  (1 - hamming, 4 - BCH4, 8 - BCH8, 16 - BCH16) + */ +int __maybe_unused omap_nand_switch_ecc(uint32_t hardware, uint32_t eccstrength) +{ +	struct nand_chip *nand; +	struct mtd_info *mtd; +	int err = 0; + +	if (nand_curr_device < 0 || +	    nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE || +	    !nand_info[nand_curr_device].name) { +		printf("nand: error: no NAND devices found\n"); +		return -ENODEV; +	} + +	mtd = &nand_info[nand_curr_device]; +	nand = mtd->priv; +	nand->options |= NAND_OWN_BUFFERS; +	nand->options &= ~NAND_SUBPAGE_READ; +	/* Setup the ecc configurations again */ +	if 
(hardware) { +		if (eccstrength == 1) { +			err = omap_select_ecc_scheme(nand, +					OMAP_ECC_HAM1_CODE_HW, +					mtd->writesize, mtd->oobsize); +		} else if (eccstrength == 8) { +			err = omap_select_ecc_scheme(nand, +					OMAP_ECC_BCH8_CODE_HW, +					mtd->writesize, mtd->oobsize); +		} else { +			printf("nand: error: unsupported ECC scheme\n"); +			return -EINVAL; +		} +	} else { +		err = omap_select_ecc_scheme(nand, OMAP_ECC_HAM1_CODE_SW, +					mtd->writesize, mtd->oobsize); +	} + +	/* Update NAND handling after ECC mode switch */ +	if (!err) +		err = nand_scan_tail(mtd); +	return err; +} +#endif /* CONFIG_SPL_BUILD */ + +/* + * Board-specific NAND initialization. The following members of the + * argument are board-specific: + * - IO_ADDR_R: address to read the 8 I/O lines of the flash device + * - IO_ADDR_W: address to write the 8 I/O lines of the flash device + * - cmd_ctrl: hardwarespecific function for accesing control-lines + * - waitfunc: hardwarespecific function for accesing device ready/busy line + * - ecc.hwctl: function to enable (reset) hardware ecc generator + * - ecc.mode: mode of ecc, see defines + * - chip_delay: chip dependent delay for transfering data from array to + *   read regs (tR) + * - options: various chip options. They can partly be set to inform + *   nand_scan about special functionality. See the defines for further + *   explanation + */ +int board_nand_init(struct nand_chip *nand) +{ +	int32_t gpmc_config = 0; +	cs = 0; +	int err = 0; +	/* +	 * xloader/Uboot's gpmc configuration would have configured GPMC for +	 * nand type of memory. The following logic scans and latches on to the +	 * first CS with NAND type memory. +	 * TBD: need to make this logic generic to handle multiple CS NAND +	 * devices. +	 */ +	while (cs < GPMC_MAX_CS) { +		/* Check if NAND type is set */ +		if ((readl(&gpmc_cfg->cs[cs].config1) & 0xC00) == 0x800) { +			/* Found it!! 
*/ +			break; +		} +		cs++; +	} +	if (cs >= GPMC_MAX_CS) { +		printf("nand: error: Unable to find NAND settings in " +			"GPMC Configuration - quitting\n"); +		return -ENODEV; +	} + +	gpmc_config = readl(&gpmc_cfg->config); +	/* Disable Write protect */ +	gpmc_config |= 0x10; +	writel(gpmc_config, &gpmc_cfg->config); + +	nand->IO_ADDR_R = (void __iomem *)&gpmc_cfg->cs[cs].nand_dat; +	nand->IO_ADDR_W = (void __iomem *)&gpmc_cfg->cs[cs].nand_cmd; +	nand->priv	= &bch_priv; +	nand->cmd_ctrl	= omap_nand_hwcontrol; +	nand->options	|= NAND_NO_PADDING | NAND_CACHEPRG; +	/* If we are 16 bit dev, our gpmc config tells us that */ +	if ((readl(&gpmc_cfg->cs[cs].config1) & 0x3000) == 0x1000) +		nand->options |= NAND_BUSWIDTH_16; + +	nand->chip_delay = 100; +	nand->ecc.layout = &omap_ecclayout; + +	/* select ECC scheme */ +#if defined(CONFIG_NAND_OMAP_ECCSCHEME) +	err = omap_select_ecc_scheme(nand, CONFIG_NAND_OMAP_ECCSCHEME, +			CONFIG_SYS_NAND_PAGE_SIZE, CONFIG_SYS_NAND_OOBSIZE); +#else +	/* pagesize and oobsize are not required to configure sw ecc-scheme */ +	err = omap_select_ecc_scheme(nand, OMAP_ECC_HAM1_CODE_SW, +			0, 0); +#endif +	if (err) +		return err; + +#ifdef CONFIG_SPL_BUILD +	if (nand->options & NAND_BUSWIDTH_16) +		nand->read_buf = nand_read_buf16; +	else +		nand->read_buf = nand_read_buf; +	nand->dev_ready = omap_spl_dev_ready; +#endif + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/s3c2410_nand.c b/roms/u-boot/drivers/mtd/nand/s3c2410_nand.c new file mode 100644 index 00000000..db87d072 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/s3c2410_nand.c @@ -0,0 +1,175 @@ +/* + * (C) Copyright 2006 OpenMoko, Inc. 
+ * Author: Harald Welte <laforge@openmoko.org> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> + +#include <nand.h> +#include <asm/arch/s3c24x0_cpu.h> +#include <asm/io.h> + +#define S3C2410_NFCONF_EN          (1<<15) +#define S3C2410_NFCONF_512BYTE     (1<<14) +#define S3C2410_NFCONF_4STEP       (1<<13) +#define S3C2410_NFCONF_INITECC     (1<<12) +#define S3C2410_NFCONF_nFCE        (1<<11) +#define S3C2410_NFCONF_TACLS(x)    ((x)<<8) +#define S3C2410_NFCONF_TWRPH0(x)   ((x)<<4) +#define S3C2410_NFCONF_TWRPH1(x)   ((x)<<0) + +#define S3C2410_ADDR_NALE 4 +#define S3C2410_ADDR_NCLE 8 + +#ifdef CONFIG_NAND_SPL + +/* in the early stage of NAND flash booting, printf() is not available */ +#define printf(fmt, args...) + +static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) +{ +	int i; +	struct nand_chip *this = mtd->priv; + +	for (i = 0; i < len; i++) +		buf[i] = readb(this->IO_ADDR_R); +} +#endif + +static void s3c2410_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) +{ +	struct nand_chip *chip = mtd->priv; +	struct s3c2410_nand *nand = s3c2410_get_base_nand(); + +	debug("hwcontrol(): 0x%02x 0x%02x\n", cmd, ctrl); + +	if (ctrl & NAND_CTRL_CHANGE) { +		ulong IO_ADDR_W = (ulong)nand; + +		if (!(ctrl & NAND_CLE)) +			IO_ADDR_W |= S3C2410_ADDR_NCLE; +		if (!(ctrl & NAND_ALE)) +			IO_ADDR_W |= S3C2410_ADDR_NALE; + +		chip->IO_ADDR_W = (void *)IO_ADDR_W; + +		if (ctrl & NAND_NCE) +			writel(readl(&nand->nfconf) & ~S3C2410_NFCONF_nFCE, +			       &nand->nfconf); +		else +			writel(readl(&nand->nfconf) | S3C2410_NFCONF_nFCE, +			       &nand->nfconf); +	} + +	if (cmd != NAND_CMD_NONE) +		writeb(cmd, chip->IO_ADDR_W); +} + +static int s3c2410_dev_ready(struct mtd_info *mtd) +{ +	struct s3c2410_nand *nand = s3c2410_get_base_nand(); +	debug("dev_ready\n"); +	return readl(&nand->nfstat) & 0x01; +} + +#ifdef CONFIG_S3C2410_NAND_HWECC +void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode) +{ +	struct s3c2410_nand *nand = 
s3c2410_get_base_nand(); +	debug("s3c2410_nand_enable_hwecc(%p, %d)\n", mtd, mode); +	writel(readl(&nand->nfconf) | S3C2410_NFCONF_INITECC, &nand->nfconf); +} + +static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, +				      u_char *ecc_code) +{ +	struct s3c2410_nand *nand = s3c2410_get_base_nand(); +	ecc_code[0] = readb(&nand->nfecc); +	ecc_code[1] = readb(&nand->nfecc + 1); +	ecc_code[2] = readb(&nand->nfecc + 2); +	debug("s3c2410_nand_calculate_hwecc(%p,): 0x%02x 0x%02x 0x%02x\n", +	       mtd , ecc_code[0], ecc_code[1], ecc_code[2]); + +	return 0; +} + +static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, +				     u_char *read_ecc, u_char *calc_ecc) +{ +	if (read_ecc[0] == calc_ecc[0] && +	    read_ecc[1] == calc_ecc[1] && +	    read_ecc[2] == calc_ecc[2]) +		return 0; + +	printf("s3c2410_nand_correct_data: not implemented\n"); +	return -1; +} +#endif + +int board_nand_init(struct nand_chip *nand) +{ +	u_int32_t cfg; +	u_int8_t tacls, twrph0, twrph1; +	struct s3c24x0_clock_power *clk_power = s3c24x0_get_base_clock_power(); +	struct s3c2410_nand *nand_reg = s3c2410_get_base_nand(); + +	debug("board_nand_init()\n"); + +	writel(readl(&clk_power->clkcon) | (1 << 4), &clk_power->clkcon); + +	/* initialize hardware */ +#if defined(CONFIG_S3C24XX_CUSTOM_NAND_TIMING) +	tacls  = CONFIG_S3C24XX_TACLS; +	twrph0 = CONFIG_S3C24XX_TWRPH0; +	twrph1 =  CONFIG_S3C24XX_TWRPH1; +#else +	tacls = 4; +	twrph0 = 8; +	twrph1 = 8; +#endif + +	cfg = S3C2410_NFCONF_EN; +	cfg |= S3C2410_NFCONF_TACLS(tacls - 1); +	cfg |= S3C2410_NFCONF_TWRPH0(twrph0 - 1); +	cfg |= S3C2410_NFCONF_TWRPH1(twrph1 - 1); +	writel(cfg, &nand_reg->nfconf); + +	/* initialize nand_chip data structure */ +	nand->IO_ADDR_R = (void *)&nand_reg->nfdata; +	nand->IO_ADDR_W = (void *)&nand_reg->nfdata; + +	nand->select_chip = NULL; + +	/* read_buf and write_buf are default */ +	/* read_byte and write_byte are default */ +#ifdef CONFIG_NAND_SPL +	nand->read_buf = nand_read_buf; 
+#endif + +	/* hwcontrol always must be implemented */ +	nand->cmd_ctrl = s3c2410_hwcontrol; + +	nand->dev_ready = s3c2410_dev_ready; + +#ifdef CONFIG_S3C2410_NAND_HWECC +	nand->ecc.hwctl = s3c2410_nand_enable_hwecc; +	nand->ecc.calculate = s3c2410_nand_calculate_ecc; +	nand->ecc.correct = s3c2410_nand_correct_data; +	nand->ecc.mode = NAND_ECC_HW; +	nand->ecc.size = CONFIG_SYS_NAND_ECCSIZE; +	nand->ecc.bytes = CONFIG_SYS_NAND_ECCBYTES; +	nand->ecc.strength = 1; +#else +	nand->ecc.mode = NAND_ECC_SOFT; +#endif + +#ifdef CONFIG_S3C2410_NAND_BBT +	nand->bbt_options |= NAND_BBT_USE_FLASH; +#endif + +	debug("end of nand_init\n"); + +	return 0; +} diff --git a/roms/u-boot/drivers/mtd/nand/tegra_nand.c b/roms/u-boot/drivers/mtd/nand/tegra_nand.c new file mode 100644 index 00000000..163cf29a --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/tegra_nand.c @@ -0,0 +1,1041 @@ +/* + * Copyright (c) 2011 The Chromium OS Authors. + * (C) Copyright 2011 NVIDIA Corporation <www.nvidia.com> + * (C) Copyright 2006 Detlev Zundel, dzu@denx.de + * (C) Copyright 2006 DENX Software Engineering + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <asm/io.h> +#include <nand.h> +#include <asm/arch/clock.h> +#include <asm/arch/funcmux.h> +#include <asm/arch-tegra/clk_rst.h> +#include <asm/errno.h> +#include <asm/gpio.h> +#include <fdtdec.h> +#include "tegra_nand.h" + +DECLARE_GLOBAL_DATA_PTR; + +#define NAND_CMD_TIMEOUT_MS		10 + +#define SKIPPED_SPARE_BYTES		4 + +/* ECC bytes to be generated for tag data */ +#define TAG_ECC_BYTES			4 + +/* 64 byte oob block info for large page (== 2KB) device + * + * OOB flash layout for Tegra with Reed-Solomon 4 symbol correct ECC: + *      Skipped bytes(4) + *      Main area Ecc(36) + *      Tag data(20) + *      Tag data Ecc(4) + * + * Yaffs2 will use 16 tag bytes. 
+ */ +static struct nand_ecclayout eccoob = { +	.eccbytes = 36, +	.eccpos = { +		4,  5,  6,  7,  8,  9,  10, 11, 12, +		13, 14, 15, 16, 17, 18, 19, 20, 21, +		22, 23, 24, 25, 26, 27, 28, 29, 30, +		31, 32, 33, 34, 35, 36, 37, 38, 39, +	}, +	.oobavail = 20, +	.oobfree = { +			{ +			.offset = 40, +			.length = 20, +			}, +	} +}; + +enum { +	ECC_OK, +	ECC_TAG_ERROR = 1 << 0, +	ECC_DATA_ERROR = 1 << 1 +}; + +/* Timing parameters */ +enum { +	FDT_NAND_MAX_TRP_TREA, +	FDT_NAND_TWB, +	FDT_NAND_MAX_TCR_TAR_TRR, +	FDT_NAND_TWHR, +	FDT_NAND_MAX_TCS_TCH_TALS_TALH, +	FDT_NAND_TWH, +	FDT_NAND_TWP, +	FDT_NAND_TRH, +	FDT_NAND_TADL, + +	FDT_NAND_TIMING_COUNT +}; + +/* Information about an attached NAND chip */ +struct fdt_nand { +	struct nand_ctlr *reg; +	int enabled;		/* 1 to enable, 0 to disable */ +	struct fdt_gpio_state wp_gpio;	/* write-protect GPIO */ +	s32 width;		/* bit width, normally 8 */ +	u32 timing[FDT_NAND_TIMING_COUNT]; +}; + +struct nand_drv { +	struct nand_ctlr *reg; + +	/* +	* When running in PIO mode to get READ ID bytes from register +	* RESP_0, we need this variable as an index to know which byte in +	* register RESP_0 should be read. +	* Because common code in nand_base.c invokes read_byte function two +	* times for NAND_CMD_READID. +	* And our controller returns 4 bytes at once in register RESP_0. +	*/ +	int pio_byte_index; +	struct fdt_nand config; +}; + +static struct nand_drv nand_ctrl; +static struct mtd_info *our_mtd; +static struct nand_chip nand_chip[CONFIG_SYS_MAX_NAND_DEVICE]; + +#ifdef CONFIG_SYS_DCACHE_OFF +static inline void dma_prepare(void *start, unsigned long length, +			       int is_writing) +{ +} +#else +/** + * Prepare for a DMA transaction + * + * For a write we flush out our data. For a read we invalidate, since we + * need to do this before we read from the buffer after the DMA has + * completed, so may as well do it now. 
+ * + * @param start		Start address for DMA buffer (should be cache-aligned) + * @param length	Length of DMA buffer in bytes + * @param is_writing	0 if reading, non-zero if writing + */ +static void dma_prepare(void *start, unsigned long length, int is_writing) +{ +	unsigned long addr = (unsigned long)start; + +	length = ALIGN(length, ARCH_DMA_MINALIGN); +	if (is_writing) +		flush_dcache_range(addr, addr + length); +	else +		invalidate_dcache_range(addr, addr + length); +} +#endif + +/** + * Wait for command completion + * + * @param reg	nand_ctlr structure + * @return + *	1 - Command completed + *	0 - Timeout + */ +static int nand_waitfor_cmd_completion(struct nand_ctlr *reg) +{ +	u32 reg_val; +	int running; +	int i; + +	for (i = 0; i < NAND_CMD_TIMEOUT_MS * 1000; i++) { +		if ((readl(®->command) & CMD_GO) || +				!(readl(®->status) & STATUS_RBSY0) || +				!(readl(®->isr) & ISR_IS_CMD_DONE)) { +			udelay(1); +			continue; +		} +		reg_val = readl(®->dma_mst_ctrl); +		/* +		 * If DMA_MST_CTRL_EN_A_ENABLE or DMA_MST_CTRL_EN_B_ENABLE +		 * is set, that means DMA engine is running. +		 * +		 * Then we have to wait until DMA_MST_CTRL_IS_DMA_DONE +		 * is cleared, indicating DMA transfer completion. +		 */ +		running = reg_val & (DMA_MST_CTRL_EN_A_ENABLE | +				DMA_MST_CTRL_EN_B_ENABLE); +		if (!running || (reg_val & DMA_MST_CTRL_IS_DMA_DONE)) +			return 1; +		udelay(1); +	} +	return 0; +} + +/** + * Read one byte from the chip + * + * @param mtd	MTD device structure + * @return	data byte + * + * Read function for 8bit bus-width + */ +static uint8_t read_byte(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	u32 dword_read; +	struct nand_drv *info; + +	info = (struct nand_drv *)chip->priv; + +	/* In PIO mode, only 4 bytes can be transferred with single CMD_GO. 
*/ +	if (info->pio_byte_index > 3) { +		info->pio_byte_index = 0; +		writel(CMD_GO | CMD_PIO +			| CMD_RX | CMD_CE0, +			&info->reg->command); +		if (!nand_waitfor_cmd_completion(info->reg)) +			printf("Command timeout\n"); +	} + +	dword_read = readl(&info->reg->resp); +	dword_read = dword_read >> (8 * info->pio_byte_index); +	info->pio_byte_index++; +	return (uint8_t)dword_read; +} + +/** + * Read len bytes from the chip into a buffer + * + * @param mtd	MTD device structure + * @param buf	buffer to store data to + * @param len	number of bytes to read + * + * Read function for 8bit bus-width + */ +static void read_buf(struct mtd_info *mtd, uint8_t *buf, int len) +{ +	int i, s; +	unsigned int reg; +	struct nand_chip *chip = mtd->priv; +	struct nand_drv *info = (struct nand_drv *)chip->priv; + +	for (i = 0; i < len; i += 4) { +		s = (len - i) > 4 ? 4 : len - i; +		writel(CMD_PIO | CMD_RX | CMD_A_VALID | CMD_CE0 | +			((s - 1) << CMD_TRANS_SIZE_SHIFT) | CMD_GO, +			&info->reg->command); +		if (!nand_waitfor_cmd_completion(info->reg)) +			puts("Command timeout during read_buf\n"); +		reg = readl(&info->reg->resp); +		memcpy(buf + i, ®, s); +	} +} + +/** + * Check NAND status to see if it is ready or not + * + * @param mtd	MTD device structure + * @return + *	1 - ready + *	0 - not ready + */ +static int nand_dev_ready(struct mtd_info *mtd) +{ +	struct nand_chip *chip = mtd->priv; +	int reg_val; +	struct nand_drv *info; + +	info = (struct nand_drv *)chip->priv; + +	reg_val = readl(&info->reg->status); +	if (reg_val & STATUS_RBSY0) +		return 1; +	else +		return 0; +} + +/* Dummy implementation: we don't support multiple chips */ +static void nand_select_chip(struct mtd_info *mtd, int chipnr) +{ +	switch (chipnr) { +	case -1: +	case 0: +		break; + +	default: +		BUG(); +	} +} + +/** + * Clear all interrupt status bits + * + * @param reg	nand_ctlr structure + */ +static void nand_clear_interrupt_status(struct nand_ctlr *reg) +{ +	u32 reg_val; + +	/* Clear interrupt status */ 
+	reg_val = readl(®->isr); +	writel(reg_val, ®->isr); +} + +/** + * Send command to NAND device + * + * @param mtd		MTD device structure + * @param command	the command to be sent + * @param column	the column address for this command, -1 if none + * @param page_addr	the page address for this command, -1 if none + */ +static void nand_command(struct mtd_info *mtd, unsigned int command, +	int column, int page_addr) +{ +	struct nand_chip *chip = mtd->priv; +	struct nand_drv *info; + +	info = (struct nand_drv *)chip->priv; + +	/* +	 * Write out the command to the device. +	 * +	 * Only command NAND_CMD_RESET or NAND_CMD_READID will come +	 * here before mtd->writesize is initialized. +	 */ + +	/* Emulate NAND_CMD_READOOB */ +	if (command == NAND_CMD_READOOB) { +		assert(mtd->writesize != 0); +		column += mtd->writesize; +		command = NAND_CMD_READ0; +	} + +	/* Adjust columns for 16 bit bus-width */ +	if (column != -1 && (chip->options & NAND_BUSWIDTH_16)) +		column >>= 1; + +	nand_clear_interrupt_status(info->reg); + +	/* Stop DMA engine, clear DMA completion status */ +	writel(DMA_MST_CTRL_EN_A_DISABLE +		| DMA_MST_CTRL_EN_B_DISABLE +		| DMA_MST_CTRL_IS_DMA_DONE, +		&info->reg->dma_mst_ctrl); + +	/* +	 * Program and erase have their own busy handlers +	 * status and sequential in needs no delay +	 */ +	switch (command) { +	case NAND_CMD_READID: +		writel(NAND_CMD_READID, &info->reg->cmd_reg1); +		writel(column & 0xFF, &info->reg->addr_reg1); +		writel(CMD_GO | CMD_CLE | CMD_ALE | CMD_PIO +			| CMD_RX | +			((4 - 1) << CMD_TRANS_SIZE_SHIFT) +			| CMD_CE0, +			&info->reg->command); +		info->pio_byte_index = 0; +		break; +	case NAND_CMD_PARAM: +		writel(NAND_CMD_PARAM, &info->reg->cmd_reg1); +		writel(column & 0xFF, &info->reg->addr_reg1); +		writel(CMD_GO | CMD_CLE | CMD_ALE | CMD_CE0, +			&info->reg->command); +		break; +	case NAND_CMD_READ0: +		writel(NAND_CMD_READ0, &info->reg->cmd_reg1); +		writel(NAND_CMD_READSTART, &info->reg->cmd_reg2); +		writel((page_addr << 16) 
| (column & 0xFFFF), +			&info->reg->addr_reg1); +		writel(page_addr >> 16, &info->reg->addr_reg2); +		return; +	case NAND_CMD_SEQIN: +		writel(NAND_CMD_SEQIN, &info->reg->cmd_reg1); +		writel(NAND_CMD_PAGEPROG, &info->reg->cmd_reg2); +		writel((page_addr << 16) | (column & 0xFFFF), +			&info->reg->addr_reg1); +		writel(page_addr >> 16, +			&info->reg->addr_reg2); +		return; +	case NAND_CMD_PAGEPROG: +		return; +	case NAND_CMD_ERASE1: +		writel(NAND_CMD_ERASE1, &info->reg->cmd_reg1); +		writel(NAND_CMD_ERASE2, &info->reg->cmd_reg2); +		writel(page_addr, &info->reg->addr_reg1); +		writel(CMD_GO | CMD_CLE | CMD_ALE | +			CMD_SEC_CMD | CMD_CE0 | CMD_ALE_BYTES3, +			&info->reg->command); +		break; +	case NAND_CMD_ERASE2: +		return; +	case NAND_CMD_STATUS: +		writel(NAND_CMD_STATUS, &info->reg->cmd_reg1); +		writel(CMD_GO | CMD_CLE | CMD_PIO | CMD_RX +			| ((1 - 0) << CMD_TRANS_SIZE_SHIFT) +			| CMD_CE0, +			&info->reg->command); +		info->pio_byte_index = 0; +		break; +	case NAND_CMD_RESET: +		writel(NAND_CMD_RESET, &info->reg->cmd_reg1); +		writel(CMD_GO | CMD_CLE | CMD_CE0, +			&info->reg->command); +		break; +	case NAND_CMD_RNDOUT: +	default: +		printf("%s: Unsupported command %d\n", __func__, command); +		return; +	} +	if (!nand_waitfor_cmd_completion(info->reg)) +		printf("Command 0x%02X timeout\n", command); +} + +/** + * Check whether the pointed buffer are all 0xff (blank). + * + * @param buf	data buffer for blank check + * @param len	length of the buffer in byte + * @return + *	1 - blank + *	0 - non-blank + */ +static int blank_check(u8 *buf, int len) +{ +	int i; + +	for (i = 0; i < len; i++) +		if (buf[i] != 0xFF) +			return 0; +	return 1; +} + +/** + * After a DMA transfer for read, we call this function to see whether there + * is any uncorrectable error on the pointed data buffer or oob buffer. 
+ * + * @param reg		nand_ctlr structure + * @param databuf	data buffer + * @param a_len		data buffer length + * @param oobbuf	oob buffer + * @param b_len		oob buffer length + * @return + *	ECC_OK - no ECC error or correctable ECC error + *	ECC_TAG_ERROR - uncorrectable tag ECC error + *	ECC_DATA_ERROR - uncorrectable data ECC error + *	ECC_DATA_ERROR + ECC_TAG_ERROR - uncorrectable data+tag ECC error + */ +static int check_ecc_error(struct nand_ctlr *reg, u8 *databuf, +	int a_len, u8 *oobbuf, int b_len) +{ +	int return_val = ECC_OK; +	u32 reg_val; + +	if (!(readl(®->isr) & ISR_IS_ECC_ERR)) +		return ECC_OK; + +	/* +	 * Area A is used for the data block (databuf). Area B is used for +	 * the spare block (oobbuf) +	 */ +	reg_val = readl(®->dec_status); +	if ((reg_val & DEC_STATUS_A_ECC_FAIL) && databuf) { +		reg_val = readl(®->bch_dec_status_buf); +		/* +		 * If uncorrectable error occurs on data area, then see whether +		 * they are all FF. If all are FF, it's a blank page. +		 * Not error. +		 */ +		if ((reg_val & BCH_DEC_STATUS_FAIL_SEC_FLAG_MASK) && +				!blank_check(databuf, a_len)) +			return_val |= ECC_DATA_ERROR; +	} + +	if ((reg_val & DEC_STATUS_B_ECC_FAIL) && oobbuf) { +		reg_val = readl(®->bch_dec_status_buf); +		/* +		 * If uncorrectable error occurs on tag area, then see whether +		 * they are all FF. If all are FF, it's a blank page. +		 * Not error. 
+		 */ +		if ((reg_val & BCH_DEC_STATUS_FAIL_TAG_MASK) && +				!blank_check(oobbuf, b_len)) +			return_val |= ECC_TAG_ERROR; +	} + +	return return_val; +} + +/** + * Set GO bit to send command to device + * + * @param reg	nand_ctlr structure + */ +static void start_command(struct nand_ctlr *reg) +{ +	u32 reg_val; + +	reg_val = readl(®->command); +	reg_val |= CMD_GO; +	writel(reg_val, ®->command); +} + +/** + * Clear command GO bit, DMA GO bit, and DMA completion status + * + * @param reg	nand_ctlr structure + */ +static void stop_command(struct nand_ctlr *reg) +{ +	/* Stop command */ +	writel(0, ®->command); + +	/* Stop DMA engine and clear DMA completion status */ +	writel(DMA_MST_CTRL_GO_DISABLE +		| DMA_MST_CTRL_IS_DMA_DONE, +		®->dma_mst_ctrl); +} + +/** + * Set up NAND bus width and page size + * + * @param info		nand_info structure + * @param *reg_val	address of reg_val + * @return 0 if ok, -1 on error + */ +static int set_bus_width_page_size(struct fdt_nand *config, +	u32 *reg_val) +{ +	if (config->width == 8) +		*reg_val = CFG_BUS_WIDTH_8BIT; +	else if (config->width == 16) +		*reg_val = CFG_BUS_WIDTH_16BIT; +	else { +		debug("%s: Unsupported bus width %d\n", __func__, +		      config->width); +		return -1; +	} + +	if (our_mtd->writesize == 512) +		*reg_val |= CFG_PAGE_SIZE_512; +	else if (our_mtd->writesize == 2048) +		*reg_val |= CFG_PAGE_SIZE_2048; +	else if (our_mtd->writesize == 4096) +		*reg_val |= CFG_PAGE_SIZE_4096; +	else { +		debug("%s: Unsupported page size %d\n", __func__, +		      our_mtd->writesize); +		return -1; +	} + +	return 0; +} + +/** + * Page read/write function + * + * @param mtd		mtd info structure + * @param chip		nand chip info structure + * @param buf		data buffer + * @param page		page number + * @param with_ecc	1 to enable ECC, 0 to disable ECC + * @param is_writing	0 for read, 1 for write + * @return	0 when successfully completed + *		-EIO when command timeout + */ +static int nand_rw_page(struct mtd_info *mtd, struct nand_chip 
*chip, +	uint8_t *buf, int page, int with_ecc, int is_writing) +{ +	u32 reg_val; +	int tag_size; +	struct nand_oobfree *free = chip->ecc.layout->oobfree; +	/* 4*128=512 (byte) is the value that our HW can support. */ +	ALLOC_CACHE_ALIGN_BUFFER(u32, tag_buf, 128); +	char *tag_ptr; +	struct nand_drv *info; +	struct fdt_nand *config; + +	if ((uintptr_t)buf & 0x03) { +		printf("buf %p has to be 4-byte aligned\n", buf); +		return -EINVAL; +	} + +	info = (struct nand_drv *)chip->priv; +	config = &info->config; +	if (set_bus_width_page_size(config, ®_val)) +		return -EINVAL; + +	/* Need to be 4-byte aligned */ +	tag_ptr = (char *)tag_buf; + +	stop_command(info->reg); + +	writel((1 << chip->page_shift) - 1, &info->reg->dma_cfg_a); +	writel(virt_to_phys(buf), &info->reg->data_block_ptr); + +	if (with_ecc) { +		writel(virt_to_phys(tag_ptr), &info->reg->tag_ptr); +		if (is_writing) +			memcpy(tag_ptr, chip->oob_poi + free->offset, +				chip->ecc.layout->oobavail + +				TAG_ECC_BYTES); +	} else { +		writel(virt_to_phys(chip->oob_poi), &info->reg->tag_ptr); +	} + +	/* Set ECC selection, configure ECC settings */ +	if (with_ecc) { +		tag_size = chip->ecc.layout->oobavail + TAG_ECC_BYTES; +		reg_val |= (CFG_SKIP_SPARE_SEL_4 +			| CFG_SKIP_SPARE_ENABLE +			| CFG_HW_ECC_CORRECTION_ENABLE +			| CFG_ECC_EN_TAG_DISABLE +			| CFG_HW_ECC_SEL_RS +			| CFG_HW_ECC_ENABLE +			| CFG_TVAL4 +			| (tag_size - 1)); + +		if (!is_writing) +			tag_size += SKIPPED_SPARE_BYTES; +		dma_prepare(tag_ptr, tag_size, is_writing); +	} else { +		tag_size = mtd->oobsize; +		reg_val |= (CFG_SKIP_SPARE_DISABLE +			| CFG_HW_ECC_CORRECTION_DISABLE +			| CFG_ECC_EN_TAG_DISABLE +			| CFG_HW_ECC_DISABLE +			| (tag_size - 1)); +		dma_prepare(chip->oob_poi, tag_size, is_writing); +	} +	writel(reg_val, &info->reg->config); + +	dma_prepare(buf, 1 << chip->page_shift, is_writing); + +	writel(BCH_CONFIG_BCH_ECC_DISABLE, &info->reg->bch_config); + +	writel(tag_size - 1, &info->reg->dma_cfg_b); + +	
nand_clear_interrupt_status(info->reg); + +	reg_val = CMD_CLE | CMD_ALE +		| CMD_SEC_CMD +		| (CMD_ALE_BYTES5 << CMD_ALE_BYTE_SIZE_SHIFT) +		| CMD_A_VALID +		| CMD_B_VALID +		| (CMD_TRANS_SIZE_PAGE << CMD_TRANS_SIZE_SHIFT) +		| CMD_CE0; +	if (!is_writing) +		reg_val |= (CMD_AFT_DAT_DISABLE | CMD_RX); +	else +		reg_val |= (CMD_AFT_DAT_ENABLE | CMD_TX); +	writel(reg_val, &info->reg->command); + +	/* Setup DMA engine */ +	reg_val = DMA_MST_CTRL_GO_ENABLE +		| DMA_MST_CTRL_BURST_8WORDS +		| DMA_MST_CTRL_EN_A_ENABLE +		| DMA_MST_CTRL_EN_B_ENABLE; + +	if (!is_writing) +		reg_val |= DMA_MST_CTRL_DIR_READ; +	else +		reg_val |= DMA_MST_CTRL_DIR_WRITE; + +	writel(reg_val, &info->reg->dma_mst_ctrl); + +	start_command(info->reg); + +	if (!nand_waitfor_cmd_completion(info->reg)) { +		if (!is_writing) +			printf("Read Page 0x%X timeout ", page); +		else +			printf("Write Page 0x%X timeout ", page); +		if (with_ecc) +			printf("with ECC"); +		else +			printf("without ECC"); +		printf("\n"); +		return -EIO; +	} + +	if (with_ecc && !is_writing) { +		memcpy(chip->oob_poi, tag_ptr, +			SKIPPED_SPARE_BYTES); +		memcpy(chip->oob_poi + free->offset, +			tag_ptr + SKIPPED_SPARE_BYTES, +			chip->ecc.layout->oobavail); +		reg_val = (u32)check_ecc_error(info->reg, (u8 *)buf, +			1 << chip->page_shift, +			(u8 *)(tag_ptr + SKIPPED_SPARE_BYTES), +			chip->ecc.layout->oobavail); +		if (reg_val & ECC_TAG_ERROR) +			printf("Read Page 0x%X tag ECC error\n", page); +		if (reg_val & ECC_DATA_ERROR) +			printf("Read Page 0x%X data ECC error\n", +				page); +		if (reg_val & (ECC_DATA_ERROR | ECC_TAG_ERROR)) +			return -EIO; +	} +	return 0; +} + +/** + * Hardware ecc based page read function + * + * @param mtd	mtd info structure + * @param chip	nand chip info structure + * @param buf	buffer to store read data + * @param page	page number to read + * @return	0 when successfully completed + *		-EIO when command timeout + */ +static int nand_read_page_hwecc(struct mtd_info *mtd, +	struct nand_chip *chip, 
uint8_t *buf, int oob_required, int page) +{ +	return nand_rw_page(mtd, chip, buf, page, 1, 0); +} + +/** + * Hardware ecc based page write function + * + * @param mtd	mtd info structure + * @param chip	nand chip info structure + * @param buf	data buffer + */ +static int nand_write_page_hwecc(struct mtd_info *mtd, +	struct nand_chip *chip, const uint8_t *buf, int oob_required) +{ +	int page; +	struct nand_drv *info; + +	info = (struct nand_drv *)chip->priv; + +	page = (readl(&info->reg->addr_reg1) >> 16) | +		(readl(&info->reg->addr_reg2) << 16); + +	nand_rw_page(mtd, chip, (uint8_t *)buf, page, 1, 1); +	return 0; +} + + +/** + * Read raw page data without ecc + * + * @param mtd	mtd info structure + * @param chip	nand chip info structure + * @param buf	buffer to store read data + * @param page	page number to read + * @return	0 when successfully completed + *		-EINVAL when chip->oob_poi is not double-word aligned + *		-EIO when command timeout + */ +static int nand_read_page_raw(struct mtd_info *mtd, +	struct nand_chip *chip, uint8_t *buf, int oob_required, int page) +{ +	return nand_rw_page(mtd, chip, buf, page, 0, 0); +} + +/** + * Raw page write function + * + * @param mtd	mtd info structure + * @param chip	nand chip info structure + * @param buf	data buffer + */ +static int nand_write_page_raw(struct mtd_info *mtd, +		struct nand_chip *chip,	const uint8_t *buf, int oob_required) +{ +	int page; +	struct nand_drv *info; + +	info = (struct nand_drv *)chip->priv; +	page = (readl(&info->reg->addr_reg1) >> 16) | +		(readl(&info->reg->addr_reg2) << 16); + +	nand_rw_page(mtd, chip, (uint8_t *)buf, page, 0, 1); +	return 0; +} + +/** + * OOB data read/write function + * + * @param mtd		mtd info structure + * @param chip		nand chip info structure + * @param page		page number to read + * @param with_ecc	1 to enable ECC, 0 to disable ECC + * @param is_writing	0 for read, 1 for write + * @return	0 when successfully completed + *		-EINVAL when chip->oob_poi is not double-word 
aligned + *		-EIO when command timeout + */ +static int nand_rw_oob(struct mtd_info *mtd, struct nand_chip *chip, +	int page, int with_ecc, int is_writing) +{ +	u32 reg_val; +	int tag_size; +	struct nand_oobfree *free = chip->ecc.layout->oobfree; +	struct nand_drv *info; + +	if (((int)chip->oob_poi) & 0x03) +		return -EINVAL; +	info = (struct nand_drv *)chip->priv; +	if (set_bus_width_page_size(&info->config, ®_val)) +		return -EINVAL; + +	stop_command(info->reg); + +	writel(virt_to_phys(chip->oob_poi), &info->reg->tag_ptr); + +	/* Set ECC selection */ +	tag_size = mtd->oobsize; +	if (with_ecc) +		reg_val |= CFG_ECC_EN_TAG_ENABLE; +	else +		reg_val |= (CFG_ECC_EN_TAG_DISABLE); + +	reg_val |= ((tag_size - 1) | +		CFG_SKIP_SPARE_DISABLE | +		CFG_HW_ECC_CORRECTION_DISABLE | +		CFG_HW_ECC_DISABLE); +	writel(reg_val, &info->reg->config); + +	dma_prepare(chip->oob_poi, tag_size, is_writing); + +	writel(BCH_CONFIG_BCH_ECC_DISABLE, &info->reg->bch_config); + +	if (is_writing && with_ecc) +		tag_size -= TAG_ECC_BYTES; + +	writel(tag_size - 1, &info->reg->dma_cfg_b); + +	nand_clear_interrupt_status(info->reg); + +	reg_val = CMD_CLE | CMD_ALE +		| CMD_SEC_CMD +		| (CMD_ALE_BYTES5 << CMD_ALE_BYTE_SIZE_SHIFT) +		| CMD_B_VALID +		| CMD_CE0; +	if (!is_writing) +		reg_val |= (CMD_AFT_DAT_DISABLE | CMD_RX); +	else +		reg_val |= (CMD_AFT_DAT_ENABLE | CMD_TX); +	writel(reg_val, &info->reg->command); + +	/* Setup DMA engine */ +	reg_val = DMA_MST_CTRL_GO_ENABLE +		| DMA_MST_CTRL_BURST_8WORDS +		| DMA_MST_CTRL_EN_B_ENABLE; +	if (!is_writing) +		reg_val |= DMA_MST_CTRL_DIR_READ; +	else +		reg_val |= DMA_MST_CTRL_DIR_WRITE; + +	writel(reg_val, &info->reg->dma_mst_ctrl); + +	start_command(info->reg); + +	if (!nand_waitfor_cmd_completion(info->reg)) { +		if (!is_writing) +			printf("Read OOB of Page 0x%X timeout\n", page); +		else +			printf("Write OOB of Page 0x%X timeout\n", page); +		return -EIO; +	} + +	if (with_ecc && !is_writing) { +		reg_val = (u32)check_ecc_error(info->reg, 0, 0, +	
		(u8 *)(chip->oob_poi + free->offset), +			chip->ecc.layout->oobavail); +		if (reg_val & ECC_TAG_ERROR) +			printf("Read OOB of Page 0x%X tag ECC error\n", page); +	} +	return 0; +} + +/** + * OOB data read function + * + * @param mtd		mtd info structure + * @param chip		nand chip info structure + * @param page		page number to read + */ +static int nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, +	int page) +{ +	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); +	nand_rw_oob(mtd, chip, page, 0, 0); +	return 0; +} + +/** + * OOB data write function + * + * @param mtd	mtd info structure + * @param chip	nand chip info structure + * @param page	page number to write + * @return	0 when successfully completed + *		-EINVAL when chip->oob_poi is not double-word aligned + *		-EIO when command timeout + */ +static int nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, +	int page) +{ +	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page); + +	return nand_rw_oob(mtd, chip, page, 0, 1); +} + +/** + * Set up NAND memory timings according to the provided parameters + * + * @param timing	Timing parameters + * @param reg		NAND controller register address + */ +static void setup_timing(unsigned timing[FDT_NAND_TIMING_COUNT], +			 struct nand_ctlr *reg) +{ +	u32 reg_val, clk_rate, clk_period, time_val; + +	clk_rate = (u32)clock_get_periph_rate(PERIPH_ID_NDFLASH, +		CLOCK_ID_PERIPH) / 1000000; +	clk_period = 1000 / clk_rate; +	reg_val = ((timing[FDT_NAND_MAX_TRP_TREA] / clk_period) << +		TIMING_TRP_RESP_CNT_SHIFT) & TIMING_TRP_RESP_CNT_MASK; +	reg_val |= ((timing[FDT_NAND_TWB] / clk_period) << +		TIMING_TWB_CNT_SHIFT) & TIMING_TWB_CNT_MASK; +	time_val = timing[FDT_NAND_MAX_TCR_TAR_TRR] / clk_period; +	if (time_val > 2) +		reg_val |= ((time_val - 2) << TIMING_TCR_TAR_TRR_CNT_SHIFT) & +			TIMING_TCR_TAR_TRR_CNT_MASK; +	reg_val |= ((timing[FDT_NAND_TWHR] / clk_period) << +		TIMING_TWHR_CNT_SHIFT) & TIMING_TWHR_CNT_MASK; +	time_val = 
timing[FDT_NAND_MAX_TCS_TCH_TALS_TALH] / clk_period; +	if (time_val > 1) +		reg_val |= ((time_val - 1) << TIMING_TCS_CNT_SHIFT) & +			TIMING_TCS_CNT_MASK; +	reg_val |= ((timing[FDT_NAND_TWH] / clk_period) << +		TIMING_TWH_CNT_SHIFT) & TIMING_TWH_CNT_MASK; +	reg_val |= ((timing[FDT_NAND_TWP] / clk_period) << +		TIMING_TWP_CNT_SHIFT) & TIMING_TWP_CNT_MASK; +	reg_val |= ((timing[FDT_NAND_TRH] / clk_period) << +		TIMING_TRH_CNT_SHIFT) & TIMING_TRH_CNT_MASK; +	reg_val |= ((timing[FDT_NAND_MAX_TRP_TREA] / clk_period) << +		TIMING_TRP_CNT_SHIFT) & TIMING_TRP_CNT_MASK; +	writel(reg_val, ®->timing); + +	reg_val = 0; +	time_val = timing[FDT_NAND_TADL] / clk_period; +	if (time_val > 2) +		reg_val = (time_val - 2) & TIMING2_TADL_CNT_MASK; +	writel(reg_val, ®->timing2); +} + +/** + * Decode NAND parameters from the device tree + * + * @param blob	Device tree blob + * @param node	Node containing "nand-flash" compatble node + * @return 0 if ok, -ve on error (FDT_ERR_...) + */ +static int fdt_decode_nand(const void *blob, int node, struct fdt_nand *config) +{ +	int err; + +	config->reg = (struct nand_ctlr *)fdtdec_get_addr(blob, node, "reg"); +	config->enabled = fdtdec_get_is_enabled(blob, node); +	config->width = fdtdec_get_int(blob, node, "nvidia,nand-width", 8); +	err = fdtdec_decode_gpio(blob, node, "nvidia,wp-gpios", +				 &config->wp_gpio); +	if (err) +		return err; +	err = fdtdec_get_int_array(blob, node, "nvidia,timing", +			config->timing, FDT_NAND_TIMING_COUNT); +	if (err < 0) +		return err; + +	/* Now look up the controller and decode that */ +	node = fdt_next_node(blob, node, NULL); +	if (node < 0) +		return node; + +	return 0; +} + +/** + * Board-specific NAND initialization + * + * @param nand	nand chip info structure + * @return 0, after initialized, -1 on error + */ +int tegra_nand_init(struct nand_chip *nand, int devnum) +{ +	struct nand_drv *info = &nand_ctrl; +	struct fdt_nand *config = &info->config; +	int node, ret; + +	node = 
fdtdec_next_compatible(gd->fdt_blob, 0, +				      COMPAT_NVIDIA_TEGRA20_NAND); +	if (node < 0) +		return -1; +	if (fdt_decode_nand(gd->fdt_blob, node, config)) { +		printf("Could not decode nand-flash in device tree\n"); +		return -1; +	} +	if (!config->enabled) +		return -1; +	info->reg = config->reg; +	nand->ecc.mode = NAND_ECC_HW; +	nand->ecc.layout = &eccoob; + +	nand->options = LP_OPTIONS; +	nand->cmdfunc = nand_command; +	nand->read_byte = read_byte; +	nand->read_buf = read_buf; +	nand->ecc.read_page = nand_read_page_hwecc; +	nand->ecc.write_page = nand_write_page_hwecc; +	nand->ecc.read_page_raw = nand_read_page_raw; +	nand->ecc.write_page_raw = nand_write_page_raw; +	nand->ecc.read_oob = nand_read_oob; +	nand->ecc.write_oob = nand_write_oob; +	nand->ecc.strength = 1; +	nand->select_chip = nand_select_chip; +	nand->dev_ready  = nand_dev_ready; +	nand->priv = &nand_ctrl; + +	/* Adjust controller clock rate */ +	clock_start_periph_pll(PERIPH_ID_NDFLASH, CLOCK_ID_PERIPH, 52000000); + +	/* Adjust timing for NAND device */ +	setup_timing(config->timing, info->reg); + +	fdtdec_setup_gpio(&config->wp_gpio); +	gpio_direction_output(config->wp_gpio.gpio, 1); + +	our_mtd = &nand_info[devnum]; +	our_mtd->priv = nand; +	ret = nand_scan_ident(our_mtd, CONFIG_SYS_NAND_MAX_CHIPS, NULL); +	if (ret) +		return ret; + +	nand->ecc.size = our_mtd->writesize; +	nand->ecc.bytes = our_mtd->oobsize; + +	ret = nand_scan_tail(our_mtd); +	if (ret) +		return ret; + +	ret = nand_register(devnum); +	if (ret) +		return ret; + +	return 0; +} + +void board_nand_init(void) +{ +	struct nand_chip *nand = &nand_chip[0]; + +	if (tegra_nand_init(nand, 0)) +		puts("Tegra NAND init failed\n"); +} diff --git a/roms/u-boot/drivers/mtd/nand/tegra_nand.h b/roms/u-boot/drivers/mtd/nand/tegra_nand.h new file mode 100644 index 00000000..ded9d710 --- /dev/null +++ b/roms/u-boot/drivers/mtd/nand/tegra_nand.h @@ -0,0 +1,241 @@ +/* + * (C) Copyright 2011 NVIDIA Corporation <www.nvidia.com> + * + * 
SPDX-License-Identifier:	GPL-2.0+ + */ + +/* register offset */ +#define COMMAND_0		0x00 +#define CMD_GO			(1 << 31) +#define CMD_CLE			(1 << 30) +#define CMD_ALE			(1 << 29) +#define CMD_PIO			(1 << 28) +#define CMD_TX			(1 << 27) +#define CMD_RX			(1 << 26) +#define CMD_SEC_CMD		(1 << 25) +#define CMD_AFT_DAT_MASK	(1 << 24) +#define CMD_AFT_DAT_DISABLE	0 +#define CMD_AFT_DAT_ENABLE	(1 << 24) +#define CMD_TRANS_SIZE_SHIFT	20 +#define CMD_TRANS_SIZE_PAGE	8 +#define CMD_A_VALID		(1 << 19) +#define CMD_B_VALID		(1 << 18) +#define CMD_RD_STATUS_CHK	(1 << 17) +#define CMD_R_BSY_CHK		(1 << 16) +#define CMD_CE7			(1 << 15) +#define CMD_CE6			(1 << 14) +#define CMD_CE5			(1 << 13) +#define CMD_CE4			(1 << 12) +#define CMD_CE3			(1 << 11) +#define CMD_CE2			(1 << 10) +#define CMD_CE1			(1 << 9) +#define CMD_CE0			(1 << 8) +#define CMD_CLE_BYTE_SIZE_SHIFT	4 +enum { +	CMD_CLE_BYTES1 = 0, +	CMD_CLE_BYTES2, +	CMD_CLE_BYTES3, +	CMD_CLE_BYTES4, +}; +#define CMD_ALE_BYTE_SIZE_SHIFT	0 +enum { +	CMD_ALE_BYTES1 = 0, +	CMD_ALE_BYTES2, +	CMD_ALE_BYTES3, +	CMD_ALE_BYTES4, +	CMD_ALE_BYTES5, +	CMD_ALE_BYTES6, +	CMD_ALE_BYTES7, +	CMD_ALE_BYTES8 +}; + +#define STATUS_0			0x04 +#define STATUS_RBSY0			(1 << 8) + +#define ISR_0				0x08 +#define ISR_IS_CMD_DONE			(1 << 5) +#define ISR_IS_ECC_ERR			(1 << 4) + +#define IER_0				0x0C + +#define CFG_0				0x10 +#define CFG_HW_ECC_MASK			(1 << 31) +#define CFG_HW_ECC_DISABLE		0 +#define CFG_HW_ECC_ENABLE		(1 << 31) +#define CFG_HW_ECC_SEL_MASK		(1 << 30) +#define CFG_HW_ECC_SEL_HAMMING		0 +#define CFG_HW_ECC_SEL_RS		(1 << 30) +#define CFG_HW_ECC_CORRECTION_MASK	(1 << 29) +#define CFG_HW_ECC_CORRECTION_DISABLE	0 +#define CFG_HW_ECC_CORRECTION_ENABLE	(1 << 29) +#define CFG_PIPELINE_EN_MASK		(1 << 28) +#define CFG_PIPELINE_EN_DISABLE		0 +#define CFG_PIPELINE_EN_ENABLE		(1 << 28) +#define CFG_ECC_EN_TAG_MASK		(1 << 27) +#define CFG_ECC_EN_TAG_DISABLE		0 +#define CFG_ECC_EN_TAG_ENABLE		(1 << 27) +#define CFG_TVALUE_MASK			(3 << 24) +enum { +	CFG_TVAL4 = 
0 << 24, +	CFG_TVAL6 = 1 << 24, +	CFG_TVAL8 = 2 << 24 +}; +#define CFG_SKIP_SPARE_MASK		(1 << 23) +#define CFG_SKIP_SPARE_DISABLE		0 +#define CFG_SKIP_SPARE_ENABLE		(1 << 23) +#define CFG_COM_BSY_MASK		(1 << 22) +#define CFG_COM_BSY_DISABLE		0 +#define CFG_COM_BSY_ENABLE		(1 << 22) +#define CFG_BUS_WIDTH_MASK		(1 << 21) +#define CFG_BUS_WIDTH_8BIT		0 +#define CFG_BUS_WIDTH_16BIT		(1 << 21) +#define CFG_LPDDR1_MODE_MASK		(1 << 20) +#define CFG_LPDDR1_MODE_DISABLE		0 +#define CFG_LPDDR1_MODE_ENABLE		(1 << 20) +#define CFG_EDO_MODE_MASK		(1 << 19) +#define CFG_EDO_MODE_DISABLE		0 +#define CFG_EDO_MODE_ENABLE		(1 << 19) +#define CFG_PAGE_SIZE_SEL_MASK		(7 << 16) +enum { +	CFG_PAGE_SIZE_256	= 0 << 16, +	CFG_PAGE_SIZE_512	= 1 << 16, +	CFG_PAGE_SIZE_1024	= 2 << 16, +	CFG_PAGE_SIZE_2048	= 3 << 16, +	CFG_PAGE_SIZE_4096	= 4 << 16 +}; +#define CFG_SKIP_SPARE_SEL_MASK		(3 << 14) +enum { +	CFG_SKIP_SPARE_SEL_4	= 0 << 14, +	CFG_SKIP_SPARE_SEL_8	= 1 << 14, +	CFG_SKIP_SPARE_SEL_12	= 2 << 14, +	CFG_SKIP_SPARE_SEL_16	= 3 << 14 +}; +#define CFG_TAG_BYTE_SIZE_MASK	0x1FF + +#define TIMING_0			0x14 +#define TIMING_TRP_RESP_CNT_SHIFT	28 +#define TIMING_TRP_RESP_CNT_MASK	(0xf << TIMING_TRP_RESP_CNT_SHIFT) +#define TIMING_TWB_CNT_SHIFT		24 +#define TIMING_TWB_CNT_MASK		(0xf << TIMING_TWB_CNT_SHIFT) +#define TIMING_TCR_TAR_TRR_CNT_SHIFT	20 +#define TIMING_TCR_TAR_TRR_CNT_MASK	(0xf << TIMING_TCR_TAR_TRR_CNT_SHIFT) +#define TIMING_TWHR_CNT_SHIFT		16 +#define TIMING_TWHR_CNT_MASK		(0xf << TIMING_TWHR_CNT_SHIFT) +#define TIMING_TCS_CNT_SHIFT		14 +#define TIMING_TCS_CNT_MASK		(3 << TIMING_TCS_CNT_SHIFT) +#define TIMING_TWH_CNT_SHIFT		12 +#define TIMING_TWH_CNT_MASK		(3 << TIMING_TWH_CNT_SHIFT) +#define TIMING_TWP_CNT_SHIFT		8 +#define TIMING_TWP_CNT_MASK		(0xf << TIMING_TWP_CNT_SHIFT) +#define TIMING_TRH_CNT_SHIFT		4 +#define TIMING_TRH_CNT_MASK		(3 << TIMING_TRH_CNT_SHIFT) +#define TIMING_TRP_CNT_SHIFT		0 +#define TIMING_TRP_CNT_MASK		(0xf << TIMING_TRP_CNT_SHIFT) + +#define RESP_0				0x18 + 
+#define TIMING2_0			0x1C +#define TIMING2_TADL_CNT_SHIFT		0 +#define TIMING2_TADL_CNT_MASK		(0xf << TIMING2_TADL_CNT_SHIFT) + +#define CMD_REG1_0			0x20 +#define CMD_REG2_0			0x24 +#define ADDR_REG1_0			0x28 +#define ADDR_REG2_0			0x2C + +#define DMA_MST_CTRL_0			0x30 +#define DMA_MST_CTRL_GO_MASK		(1 << 31) +#define DMA_MST_CTRL_GO_DISABLE		0 +#define DMA_MST_CTRL_GO_ENABLE		(1 << 31) +#define DMA_MST_CTRL_DIR_MASK		(1 << 30) +#define DMA_MST_CTRL_DIR_READ		0 +#define DMA_MST_CTRL_DIR_WRITE		(1 << 30) +#define DMA_MST_CTRL_PERF_EN_MASK	(1 << 29) +#define DMA_MST_CTRL_PERF_EN_DISABLE	0 +#define DMA_MST_CTRL_PERF_EN_ENABLE	(1 << 29) +#define DMA_MST_CTRL_REUSE_BUFFER_MASK	(1 << 27) +#define DMA_MST_CTRL_REUSE_BUFFER_DISABLE	0 +#define DMA_MST_CTRL_REUSE_BUFFER_ENABLE	(1 << 27) +#define DMA_MST_CTRL_BURST_SIZE_SHIFT	24 +#define DMA_MST_CTRL_BURST_SIZE_MASK	(7 << DMA_MST_CTRL_BURST_SIZE_SHIFT) +enum { +	DMA_MST_CTRL_BURST_1WORDS	= 2 << DMA_MST_CTRL_BURST_SIZE_SHIFT, +	DMA_MST_CTRL_BURST_4WORDS	= 3 << DMA_MST_CTRL_BURST_SIZE_SHIFT, +	DMA_MST_CTRL_BURST_8WORDS	= 4 << DMA_MST_CTRL_BURST_SIZE_SHIFT, +	DMA_MST_CTRL_BURST_16WORDS	= 5 << DMA_MST_CTRL_BURST_SIZE_SHIFT +}; +#define DMA_MST_CTRL_IS_DMA_DONE	(1 << 20) +#define DMA_MST_CTRL_EN_A_MASK		(1 << 2) +#define DMA_MST_CTRL_EN_A_DISABLE	0 +#define DMA_MST_CTRL_EN_A_ENABLE	(1 << 2) +#define DMA_MST_CTRL_EN_B_MASK		(1 << 1) +#define DMA_MST_CTRL_EN_B_DISABLE	0 +#define DMA_MST_CTRL_EN_B_ENABLE	(1 << 1) + +#define DMA_CFG_A_0			0x34 +#define DMA_CFG_B_0			0x38 +#define FIFO_CTRL_0			0x3C +#define DATA_BLOCK_PTR_0		0x40 +#define TAG_PTR_0			0x44 +#define ECC_PTR_0			0x48 + +#define DEC_STATUS_0			0x4C +#define DEC_STATUS_A_ECC_FAIL		(1 << 1) +#define DEC_STATUS_B_ECC_FAIL		(1 << 0) + +#define BCH_CONFIG_0			0xCC +#define BCH_CONFIG_BCH_TVALUE_SHIFT	4 +#define BCH_CONFIG_BCH_TVALUE_MASK	(3 << BCH_CONFIG_BCH_TVALUE_SHIFT) +enum { +	BCH_CONFIG_BCH_TVAL4	= 0 << BCH_CONFIG_BCH_TVALUE_SHIFT, +	BCH_CONFIG_BCH_TVAL8	= 1 << 
BCH_CONFIG_BCH_TVALUE_SHIFT, +	BCH_CONFIG_BCH_TVAL14	= 2 << BCH_CONFIG_BCH_TVALUE_SHIFT, +	BCH_CONFIG_BCH_TVAL16	= 3 << BCH_CONFIG_BCH_TVALUE_SHIFT +}; +#define BCH_CONFIG_BCH_ECC_MASK		(1 << 0) +#define BCH_CONFIG_BCH_ECC_DISABLE	0 +#define BCH_CONFIG_BCH_ECC_ENABLE	(1 << 0) + +#define BCH_DEC_RESULT_0			0xD0 +#define BCH_DEC_RESULT_CORRFAIL_ERR_MASK	(1 << 8) +#define BCH_DEC_RESULT_PAGE_COUNT_MASK		0xFF + +#define BCH_DEC_STATUS_BUF_0			0xD4 +#define BCH_DEC_STATUS_FAIL_SEC_FLAG_MASK	0xFF000000 +#define BCH_DEC_STATUS_CORR_SEC_FLAG_MASK	0x00FF0000 +#define BCH_DEC_STATUS_FAIL_TAG_MASK		(1 << 14) +#define BCH_DEC_STATUS_CORR_TAG_MASK		(1 << 13) +#define BCH_DEC_STATUS_MAX_CORR_CNT_MASK	(0x1f << 8) +#define BCH_DEC_STATUS_PAGE_NUMBER_MASK		0xFF + +#define LP_OPTIONS	0 + +struct nand_ctlr { +	u32	command;	/* offset 00h */ +	u32	status;		/* offset 04h */ +	u32	isr;		/* offset 08h */ +	u32	ier;		/* offset 0Ch */ +	u32	config;		/* offset 10h */ +	u32	timing;		/* offset 14h */ +	u32	resp;		/* offset 18h */ +	u32	timing2;	/* offset 1Ch */ +	u32	cmd_reg1;	/* offset 20h */ +	u32	cmd_reg2;	/* offset 24h */ +	u32	addr_reg1;	/* offset 28h */ +	u32	addr_reg2;	/* offset 2Ch */ +	u32	dma_mst_ctrl;	/* offset 30h */ +	u32	dma_cfg_a;	/* offset 34h */ +	u32	dma_cfg_b;	/* offset 38h */ +	u32	fifo_ctrl;	/* offset 3Ch */ +	u32	data_block_ptr;	/* offset 40h */ +	u32	tag_ptr;	/* offset 44h */ +	u32	resv1;		/* offset 48h */ +	u32	dec_status;	/* offset 4Ch */ +	u32	hwstatus_cmd;	/* offset 50h */ +	u32	hwstatus_mask;	/* offset 54h */ +	u32	resv2[29]; +	u32	bch_config;	/* offset CCh */ +	u32	bch_dec_result;	/* offset D0h */ +	u32	bch_dec_status_buf; +				/* offset D4h */ +}; diff --git a/roms/u-boot/drivers/mtd/onenand/Makefile b/roms/u-boot/drivers/mtd/onenand/Makefile new file mode 100644 index 00000000..b2493488 --- /dev/null +++ b/roms/u-boot/drivers/mtd/onenand/Makefile @@ -0,0 +1,13 @@ +# +# Copyright (C) 2005-2007 Samsung Electronics. 
+# Kyungmin Park <kyungmin.park@samsung.com> +# +# SPDX-License-Identifier:	GPL-2.0+ +# + +ifndef	CONFIG_SPL_BUILD +obj-$(CONFIG_CMD_ONENAND)	:= onenand_uboot.o onenand_base.o onenand_bbt.o +obj-$(CONFIG_SAMSUNG_ONENAND)	+= samsung.o +else +obj-y				:= onenand_spl.o +endif diff --git a/roms/u-boot/drivers/mtd/onenand/onenand_base.c b/roms/u-boot/drivers/mtd/onenand/onenand_base.c new file mode 100644 index 00000000..e33e8d38 --- /dev/null +++ b/roms/u-boot/drivers/mtd/onenand/onenand_base.c @@ -0,0 +1,2784 @@ +/* + *  linux/drivers/mtd/onenand/onenand_base.c + * + *  Copyright (C) 2005-2007 Samsung Electronics + *  Kyungmin Park <kyungmin.park@samsung.com> + * + *  Credits: + *      Adrian Hunter <ext-adrian.hunter@nokia.com>: + *      auto-placement support, read-while load support, various fixes + *      Copyright (C) Nokia Corporation, 2007 + * + *      Rohit Hagargundgi <h.rohit at samsung.com>, + *      Amul Kumar Saha <amul.saha@samsung.com>: + *      Flex-OneNAND support + *      Copyright (C) Samsung Electronics, 2009 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <common.h> +#include <linux/compat.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/onenand.h> + +#include <asm/io.h> +#include <asm/errno.h> +#include <malloc.h> + +/* It should access 16-bit instead of 8-bit */ +static void *memcpy_16(void *dst, const void *src, unsigned int len) +{ +	void *ret = dst; +	short *d = dst; +	const short *s = src; + +	len >>= 1; +	while (len-- > 0) +		*d++ = *s++; +	return ret; +} + +/** + *  onenand_oob_128 - oob info for Flex-Onenand with 4KB page + *  For now, we expose only 64 out of 80 ecc bytes + */ +static struct nand_ecclayout onenand_oob_128 = { +	.eccbytes	= 64, +	.eccpos		= { +		6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +		22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +		38, 39, 40, 41, 42, 43, 44, 45, 46, 47, +		54, 55, 56, 57, 58, 59, 60, 61, 62, 63, +		70, 71, 72, 73, 74, 75, 76, 77, 78, 79, +		86, 87, 88, 89, 90, 91, 92, 93, 94, 95, +		102, 103, 104, 105 +		}, +	.oobfree	= { +		{2, 4}, {18, 4}, {34, 4}, {50, 4}, +		{66, 4}, {82, 4}, {98, 4}, {114, 4} +	} +}; + +/** + * onenand_oob_64 - oob info for large (2KB) page + */ +static struct nand_ecclayout onenand_oob_64 = { +	.eccbytes	= 20, +	.eccpos		= { +		8, 9, 10, 11, 12, +		24, 25, 26, 27, 28, +		40, 41, 42, 43, 44, +		56, 57, 58, 59, 60, +		}, +	.oobfree	= { +		{2, 3}, {14, 2}, {18, 3}, {30, 2}, +		{34, 3}, {46, 2}, {50, 3}, {62, 2} +	} +}; + +/** + * onenand_oob_32 - oob info for middle (1KB) page + */ +static struct nand_ecclayout onenand_oob_32 = { +	.eccbytes	= 10, +	.eccpos		= { +		8, 9, 10, 11, 12, +		24, 25, 26, 27, 28, +		}, +	.oobfree	= { {2, 3}, {14, 2}, {18, 3}, {30, 2} } +}; + +/* + * Warning! This array is used with the memcpy_16() function, thus + * it must be aligned to 2 bytes. GCC can make this array unaligned + * as the array is made of unsigned char, which memcpy16() doesn't + * like and will cause unaligned access. 
+ */ +static const unsigned char __aligned(2) ffchars[] = { +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 16 */ +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 32 */ +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 48 */ +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 64 */ +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 80 */ +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 96 */ +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 112 */ +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 128 */ +}; + +/** + * onenand_readw - [OneNAND Interface] Read OneNAND register + * @param addr		address to read + * + * Read OneNAND register + */ +static unsigned short onenand_readw(void __iomem * addr) +{ +	return readw(addr); +} + +/** + * onenand_writew - [OneNAND Interface] Write OneNAND register with value + * @param value		value to write + * @param addr		address to write + * + * Write OneNAND register with value + */ +static void onenand_writew(unsigned short value, void __iomem * addr) +{ +	writew(value, addr); +} + +/** + * onenand_block_address - [DEFAULT] Get block address + * @param device	the device id + * @param block		the block + * @return		translated block address if DDP, otherwise same + * + * Setup Start Address 1 Register (F100h) + */ +static int onenand_block_address(struct onenand_chip *this, int block) +{ +	/* Device Flash Core select, NAND Flash Block Address */ +	if (block & this->density_mask) +		return ONENAND_DDP_CHIP1 | (block ^ this->density_mask); + +	return block; +} + +/** + * onenand_bufferram_address - [DEFAULT] Get 
bufferram address + * @param device	the device id + * @param block		the block + * @return		set DBS value if DDP, otherwise 0 + * + * Setup Start Address 2 Register (F101h) for DDP + */ +static int onenand_bufferram_address(struct onenand_chip *this, int block) +{ +	/* Device BufferRAM Select */ +	if (block & this->density_mask) +		return ONENAND_DDP_CHIP1; + +	return ONENAND_DDP_CHIP0; +} + +/** + * onenand_page_address - [DEFAULT] Get page address + * @param page		the page address + * @param sector	the sector address + * @return		combined page and sector address + * + * Setup Start Address 8 Register (F107h) + */ +static int onenand_page_address(int page, int sector) +{ +	/* Flash Page Address, Flash Sector Address */ +	int fpa, fsa; + +	fpa = page & ONENAND_FPA_MASK; +	fsa = sector & ONENAND_FSA_MASK; + +	return ((fpa << ONENAND_FPA_SHIFT) | fsa); +} + +/** + * onenand_buffer_address - [DEFAULT] Get buffer address + * @param dataram1	DataRAM index + * @param sectors	the sector address + * @param count		the number of sectors + * @return		the start buffer value + * + * Setup Start Buffer Register (F200h) + */ +static int onenand_buffer_address(int dataram1, int sectors, int count) +{ +	int bsa, bsc; + +	/* BufferRAM Sector Address */ +	bsa = sectors & ONENAND_BSA_MASK; + +	if (dataram1) +		bsa |= ONENAND_BSA_DATARAM1;	/* DataRAM1 */ +	else +		bsa |= ONENAND_BSA_DATARAM0;	/* DataRAM0 */ + +	/* BufferRAM Sector Count */ +	bsc = count & ONENAND_BSC_MASK; + +	return ((bsa << ONENAND_BSA_SHIFT) | bsc); +} + +/** + * flexonenand_block - Return block number for flash address + * @param this		- OneNAND device structure + * @param addr		- Address for which block number is needed + */ +static unsigned int flexonenand_block(struct onenand_chip *this, loff_t addr) +{ +	unsigned int boundary, blk, die = 0; + +	if (ONENAND_IS_DDP(this) && addr >= this->diesize[0]) { +		die = 1; +		addr -= this->diesize[0]; +	} + +	boundary = this->boundary[die]; + +	blk = addr >> 
(this->erase_shift - 1); +	if (blk > boundary) +		blk = (blk + boundary + 1) >> 1; + +	blk += die ? this->density_mask : 0; +	return blk; +} + +unsigned int onenand_block(struct onenand_chip *this, loff_t addr) +{ +	if (!FLEXONENAND(this)) +		return addr >> this->erase_shift; +	return flexonenand_block(this, addr); +} + +/** + * flexonenand_addr - Return address of the block + * @this:		OneNAND device structure + * @block:		Block number on Flex-OneNAND + * + * Return address of the block + */ +static loff_t flexonenand_addr(struct onenand_chip *this, int block) +{ +	loff_t ofs = 0; +	int die = 0, boundary; + +	if (ONENAND_IS_DDP(this) && block >= this->density_mask) { +		block -= this->density_mask; +		die = 1; +		ofs = this->diesize[0]; +	} + +	boundary = this->boundary[die]; +	ofs += (loff_t) block << (this->erase_shift - 1); +	if (block > (boundary + 1)) +		ofs += (loff_t) (block - boundary - 1) +			<< (this->erase_shift - 1); +	return ofs; +} + +loff_t onenand_addr(struct onenand_chip *this, int block) +{ +	if (!FLEXONENAND(this)) +		return (loff_t) block << this->erase_shift; +	return flexonenand_addr(this, block); +} + +/** + * flexonenand_region - [Flex-OneNAND] Return erase region of addr + * @param mtd		MTD device structure + * @param addr		address whose erase region needs to be identified + */ +int flexonenand_region(struct mtd_info *mtd, loff_t addr) +{ +	int i; + +	for (i = 0; i < mtd->numeraseregions; i++) +		if (addr < mtd->eraseregions[i].offset) +			break; +	return i - 1; +} + +/** + * onenand_get_density - [DEFAULT] Get OneNAND density + * @param dev_id        OneNAND device ID + * + * Get OneNAND density from device ID + */ +static inline int onenand_get_density(int dev_id) +{ +	int density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT; +	return (density & ONENAND_DEVICE_DENSITY_MASK); +} + +/** + * onenand_command - [DEFAULT] Send command to OneNAND device + * @param mtd		MTD device structure + * @param cmd		the command to be sent + * @param addr		
offset to read from or write to + * @param len		number of bytes to read or write + * + * Send command to OneNAND device. This function is used for middle/large page + * devices (1KB/2KB Bytes per page) + */ +static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, +			   size_t len) +{ +	struct onenand_chip *this = mtd->priv; +	int value; +	int block, page; + +	/* Now we use page size operation */ +	int sectors = 0, count = 0; + +	/* Address translation */ +	switch (cmd) { +	case ONENAND_CMD_UNLOCK: +	case ONENAND_CMD_LOCK: +	case ONENAND_CMD_LOCK_TIGHT: +	case ONENAND_CMD_UNLOCK_ALL: +		block = -1; +		page = -1; +		break; + +	case FLEXONENAND_CMD_PI_ACCESS: +		/* addr contains die index */ +		block = addr * this->density_mask; +		page = -1; +		break; + +	case ONENAND_CMD_ERASE: +	case ONENAND_CMD_BUFFERRAM: +		block = onenand_block(this, addr); +		page = -1; +		break; + +	case FLEXONENAND_CMD_READ_PI: +		cmd = ONENAND_CMD_READ; +		block = addr * this->density_mask; +		page = 0; +		break; + +	default: +		block = onenand_block(this, addr); +		page = (int) (addr +			- onenand_addr(this, block)) >> this->page_shift; +		page &= this->page_mask; +		break; +	} + +	/* NOTE: The setting order of the registers is very important! 
*/ +	if (cmd == ONENAND_CMD_BUFFERRAM) { +		/* Select DataRAM for DDP */ +		value = onenand_bufferram_address(this, block); +		this->write_word(value, +				 this->base + ONENAND_REG_START_ADDRESS2); + +		if (ONENAND_IS_4KB_PAGE(this)) +			ONENAND_SET_BUFFERRAM0(this); +		else +			/* Switch to the next data buffer */ +			ONENAND_SET_NEXT_BUFFERRAM(this); + +		return 0; +	} + +	if (block != -1) { +		/* Write 'DFS, FBA' of Flash */ +		value = onenand_block_address(this, block); +		this->write_word(value, +				 this->base + ONENAND_REG_START_ADDRESS1); + +		/* Select DataRAM for DDP */ +		value = onenand_bufferram_address(this, block); +		this->write_word(value, +				 this->base + ONENAND_REG_START_ADDRESS2); +	} + +	if (page != -1) { +		int dataram; + +		switch (cmd) { +		case FLEXONENAND_CMD_RECOVER_LSB: +		case ONENAND_CMD_READ: +		case ONENAND_CMD_READOOB: +			if (ONENAND_IS_4KB_PAGE(this)) +				dataram = ONENAND_SET_BUFFERRAM0(this); +			else +				dataram = ONENAND_SET_NEXT_BUFFERRAM(this); + +			break; + +		default: +			dataram = ONENAND_CURRENT_BUFFERRAM(this); +			break; +		} + +		/* Write 'FPA, FSA' of Flash */ +		value = onenand_page_address(page, sectors); +		this->write_word(value, +				 this->base + ONENAND_REG_START_ADDRESS8); + +		/* Write 'BSA, BSC' of DataRAM */ +		value = onenand_buffer_address(dataram, sectors, count); +		this->write_word(value, this->base + ONENAND_REG_START_BUFFER); +	} + +	/* Interrupt clear */ +	this->write_word(ONENAND_INT_CLEAR, this->base + ONENAND_REG_INTERRUPT); +	/* Write command */ +	this->write_word(cmd, this->base + ONENAND_REG_COMMAND); + +	return 0; +} + +/** + * onenand_read_ecc - return ecc status + * @param this		onenand chip structure + */ +static int onenand_read_ecc(struct onenand_chip *this) +{ +	int ecc, i; + +	if (!FLEXONENAND(this)) +		return this->read_word(this->base + ONENAND_REG_ECC_STATUS); + +	for (i = 0; i < 4; i++) { +		ecc = this->read_word(this->base +				+ ((ONENAND_REG_ECC_STATUS + i) << 1)); +		
if (likely(!ecc)) +			continue; +		if (ecc & FLEXONENAND_UNCORRECTABLE_ERROR) +			return ONENAND_ECC_2BIT_ALL; +	} + +	return 0; +} + +/** + * onenand_wait - [DEFAULT] wait until the command is done + * @param mtd		MTD device structure + * @param state		state to select the max. timeout value + * + * Wait for command done. This applies to all OneNAND command + * Read can take up to 30us, erase up to 2ms and program up to 350us + * according to general OneNAND specs + */ +static int onenand_wait(struct mtd_info *mtd, int state) +{ +	struct onenand_chip *this = mtd->priv; +	unsigned int flags = ONENAND_INT_MASTER; +	unsigned int interrupt = 0; +	unsigned int ctrl; + +	while (1) { +		interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); +		if (interrupt & flags) +			break; +	} + +	ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); + +	if (interrupt & ONENAND_INT_READ) { +		int ecc = onenand_read_ecc(this); +		if (ecc & ONENAND_ECC_2BIT_ALL) { +			printk("onenand_wait: ECC error = 0x%04x\n", ecc); +			return -EBADMSG; +		} +	} + +	if (ctrl & ONENAND_CTRL_ERROR) { +		printk("onenand_wait: controller error = 0x%04x\n", ctrl); +		if (ctrl & ONENAND_CTRL_LOCK) +			printk("onenand_wait: it's locked error = 0x%04x\n", +				ctrl); + +		return -EIO; +	} + + +	return 0; +} + +/** + * onenand_bufferram_offset - [DEFAULT] BufferRAM offset + * @param mtd		MTD data structure + * @param area		BufferRAM area + * @return		offset given area + * + * Return BufferRAM offset given area + */ +static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area) +{ +	struct onenand_chip *this = mtd->priv; + +	if (ONENAND_CURRENT_BUFFERRAM(this)) { +		if (area == ONENAND_DATARAM) +			return mtd->writesize; +		if (area == ONENAND_SPARERAM) +			return mtd->oobsize; +	} + +	return 0; +} + +/** + * onenand_read_bufferram - [OneNAND Interface] Read the bufferram area + * @param mtd		MTD data structure + * @param area		BufferRAM area + * @param buffer	the databuffer to 
put/get data + * @param offset	offset to read from or write to + * @param count		number of bytes to read/write + * + * Read the BufferRAM area + */ +static int onenand_read_bufferram(struct mtd_info *mtd, loff_t addr, int area, +				  unsigned char *buffer, int offset, +				  size_t count) +{ +	struct onenand_chip *this = mtd->priv; +	void __iomem *bufferram; + +	bufferram = this->base + area; +	bufferram += onenand_bufferram_offset(mtd, area); + +	memcpy_16(buffer, bufferram + offset, count); + +	return 0; +} + +/** + * onenand_sync_read_bufferram - [OneNAND Interface] Read the bufferram area with Sync. Burst mode + * @param mtd		MTD data structure + * @param area		BufferRAM area + * @param buffer	the databuffer to put/get data + * @param offset	offset to read from or write to + * @param count		number of bytes to read/write + * + * Read the BufferRAM area with Sync. Burst Mode + */ +static int onenand_sync_read_bufferram(struct mtd_info *mtd, loff_t addr, int area, +				       unsigned char *buffer, int offset, +				       size_t count) +{ +	struct onenand_chip *this = mtd->priv; +	void __iomem *bufferram; + +	bufferram = this->base + area; +	bufferram += onenand_bufferram_offset(mtd, area); + +	this->mmcontrol(mtd, ONENAND_SYS_CFG1_SYNC_READ); + +	memcpy_16(buffer, bufferram + offset, count); + +	this->mmcontrol(mtd, 0); + +	return 0; +} + +/** + * onenand_write_bufferram - [OneNAND Interface] Write the bufferram area + * @param mtd		MTD data structure + * @param area		BufferRAM area + * @param buffer	the databuffer to put/get data + * @param offset	offset to read from or write to + * @param count		number of bytes to read/write + * + * Write the BufferRAM area + */ +static int onenand_write_bufferram(struct mtd_info *mtd, loff_t addr, int area, +				   const unsigned char *buffer, int offset, +				   size_t count) +{ +	struct onenand_chip *this = mtd->priv; +	void __iomem *bufferram; + +	bufferram = this->base + area; +	bufferram += onenand_bufferram_offset(mtd, 
area); + +	memcpy_16(bufferram + offset, buffer, count); + +	return 0; +} + +/** + * onenand_get_2x_blockpage - [GENERIC] Get blockpage at 2x program mode + * @param mtd		MTD data structure + * @param addr		address to check + * @return		blockpage address + * + * Get blockpage address at 2x program mode + */ +static int onenand_get_2x_blockpage(struct mtd_info *mtd, loff_t addr) +{ +	struct onenand_chip *this = mtd->priv; +	int blockpage, block, page; + +	/* Calculate the even block number */ +	block = (int) (addr >> this->erase_shift) & ~1; +	/* Is it the odd plane? */ +	if (addr & this->writesize) +		block++; +	page = (int) (addr >> (this->page_shift + 1)) & this->page_mask; +	blockpage = (block << 7) | page; + +	return blockpage; +} + +/** + * onenand_check_bufferram - [GENERIC] Check BufferRAM information + * @param mtd		MTD data structure + * @param addr		address to check + * @return		1 if there are valid data, otherwise 0 + * + * Check bufferram if there is data we required + */ +static int onenand_check_bufferram(struct mtd_info *mtd, loff_t addr) +{ +	struct onenand_chip *this = mtd->priv; +	int blockpage, found = 0; +	unsigned int i; + +	if (ONENAND_IS_2PLANE(this)) +		blockpage = onenand_get_2x_blockpage(mtd, addr); +	else +		blockpage = (int) (addr >> this->page_shift); + +	/* Is there valid data? 
*/ +	i = ONENAND_CURRENT_BUFFERRAM(this); +	if (this->bufferram[i].blockpage == blockpage) +		found = 1; +	else { +		/* Check another BufferRAM */ +		i = ONENAND_NEXT_BUFFERRAM(this); +		if (this->bufferram[i].blockpage == blockpage) { +			ONENAND_SET_NEXT_BUFFERRAM(this); +			found = 1; +		} +	} + +	if (found && ONENAND_IS_DDP(this)) { +		/* Select DataRAM for DDP */ +		int block = onenand_block(this, addr); +		int value = onenand_bufferram_address(this, block); +		this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); +	} + +	return found; +} + +/** + * onenand_update_bufferram - [GENERIC] Update BufferRAM information + * @param mtd		MTD data structure + * @param addr		address to update + * @param valid		valid flag + * + * Update BufferRAM information + */ +static int onenand_update_bufferram(struct mtd_info *mtd, loff_t addr, +				    int valid) +{ +	struct onenand_chip *this = mtd->priv; +	int blockpage; +	unsigned int i; + +	if (ONENAND_IS_2PLANE(this)) +		blockpage = onenand_get_2x_blockpage(mtd, addr); +	else +		blockpage = (int)(addr >> this->page_shift); + +	/* Invalidate another BufferRAM */ +	i = ONENAND_NEXT_BUFFERRAM(this); +	if (this->bufferram[i].blockpage == blockpage) +		this->bufferram[i].blockpage = -1; + +	/* Update BufferRAM */ +	i = ONENAND_CURRENT_BUFFERRAM(this); +	if (valid) +		this->bufferram[i].blockpage = blockpage; +	else +		this->bufferram[i].blockpage = -1; + +	return 0; +} + +/** + * onenand_invalidate_bufferram - [GENERIC] Invalidate BufferRAM information + * @param mtd           MTD data structure + * @param addr          start address to invalidate + * @param len           length to invalidate + * + * Invalidate BufferRAM information + */ +static void onenand_invalidate_bufferram(struct mtd_info *mtd, loff_t addr, +					 unsigned int len) +{ +	struct onenand_chip *this = mtd->priv; +	int i; +	loff_t end_addr = addr + len; + +	/* Invalidate BufferRAM */ +	for (i = 0; i < MAX_BUFFERRAM; i++) { +		loff_t buf_addr = 
this->bufferram[i].blockpage << this->page_shift; + +		if (buf_addr >= addr && buf_addr < end_addr) +			this->bufferram[i].blockpage = -1; +	} +} + +/** + * onenand_get_device - [GENERIC] Get chip for selected access + * @param mtd		MTD device structure + * @param new_state	the state which is requested + * + * Get the device and lock it for exclusive access + */ +static void onenand_get_device(struct mtd_info *mtd, int new_state) +{ +	/* Do nothing */ +} + +/** + * onenand_release_device - [GENERIC] release chip + * @param mtd		MTD device structure + * + * Deselect, release chip lock and wake up anyone waiting on the device + */ +static void onenand_release_device(struct mtd_info *mtd) +{ +	/* Do nothing */ +} + +/** + * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer + * @param mtd		MTD device structure + * @param buf		destination address + * @param column	oob offset to read from + * @param thislen	oob length to read + */ +static int onenand_transfer_auto_oob(struct mtd_info *mtd, uint8_t *buf, +					int column, int thislen) +{ +	struct onenand_chip *this = mtd->priv; +	struct nand_oobfree *free; +	int readcol = column; +	int readend = column + thislen; +	int lastgap = 0; +	unsigned int i; +	uint8_t *oob_buf = this->oob_buf; + +	free = this->ecclayout->oobfree; +	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE && free->length; +	     i++, free++) { +		if (readcol >= lastgap) +			readcol += free->offset - lastgap; +		if (readend >= lastgap) +			readend += free->offset - lastgap; +		lastgap = free->offset + free->length; +	} +	this->read_bufferram(mtd, 0, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize); +	free = this->ecclayout->oobfree; +	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE && free->length; +	     i++, free++) { +		int free_end = free->offset + free->length; +		if (free->offset < readend && free_end > readcol) { +			int st = max_t(int,free->offset,readcol); +			int ed = min_t(int,free_end,readend); +			int n = ed - st; +			memcpy(buf, oob_buf + 
st, n); +			buf += n; +		} else if (column == 0) +			break; +	} +	return 0; +} + +/** + * onenand_recover_lsb - [Flex-OneNAND] Recover LSB page data + * @param mtd		MTD device structure + * @param addr		address to recover + * @param status	return value from onenand_wait + * + * MLC NAND Flash cell has paired pages - LSB page and MSB page. LSB page has + * lower page address and MSB page has higher page address in paired pages. + * If power off occurs during MSB page program, the paired LSB page data can + * become corrupt. LSB page recovery read is a way to read LSB page though page + * data are corrupted. When uncorrectable error occurs as a result of LSB page + * read after power up, issue LSB page recovery read. + */ +static int onenand_recover_lsb(struct mtd_info *mtd, loff_t addr, int status) +{ +	struct onenand_chip *this = mtd->priv; +	int i; + +	/* Recovery is only for Flex-OneNAND */ +	if (!FLEXONENAND(this)) +		return status; + +	/* check if we failed due to uncorrectable error */ +	if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR) +		return status; + +	/* check if address lies in MLC region */ +	i = flexonenand_region(mtd, addr); +	if (mtd->eraseregions[i].erasesize < (1 << this->erase_shift)) +		return status; + +	printk("onenand_recover_lsb:" +		"Attempting to recover from uncorrectable read\n"); + +	/* Issue the LSB page recovery command */ +	this->command(mtd, FLEXONENAND_CMD_RECOVER_LSB, addr, this->writesize); +	return this->wait(mtd, FL_READING); +} + +/** + * onenand_read_ops_nolock - [OneNAND Interface] OneNAND read main and/or out-of-band + * @param mtd		MTD device structure + * @param from		offset to read from + * @param ops		oob operation description structure + * + * OneNAND read main and/or out-of-band data + */ +static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, +		struct mtd_oob_ops *ops) +{ +	struct onenand_chip *this = mtd->priv; +	struct mtd_ecc_stats stats; +	size_t len = ops->len; +	size_t ooblen 
= ops->ooblen; +	u_char *buf = ops->datbuf; +	u_char *oobbuf = ops->oobbuf; +	int read = 0, column, thislen; +	int oobread = 0, oobcolumn, thisooblen, oobsize; +	int ret = 0, boundary = 0; +	int writesize = this->writesize; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_read_ops_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); + +	if (ops->mode == MTD_OPS_AUTO_OOB) +		oobsize = this->ecclayout->oobavail; +	else +		oobsize = mtd->oobsize; + +	oobcolumn = from & (mtd->oobsize - 1); + +	/* Do not allow reads past end of device */ +	if ((from + len) > mtd->size) { +		printk(KERN_ERR "onenand_read_ops_nolock: Attempt read beyond end of device\n"); +		ops->retlen = 0; +		ops->oobretlen = 0; +		return -EINVAL; +	} + +	stats = mtd->ecc_stats; + +	/* Read-while-load method */ +	/* Note: We can't use this feature in MLC */ + +	/* Do first load to bufferRAM */ +	if (read < len) { +		if (!onenand_check_bufferram(mtd, from)) { +			this->main_buf = buf; +			this->command(mtd, ONENAND_CMD_READ, from, writesize); +			ret = this->wait(mtd, FL_READING); +			if (unlikely(ret)) +				ret = onenand_recover_lsb(mtd, from, ret); +			onenand_update_bufferram(mtd, from, !ret); +			if (ret == -EBADMSG) +				ret = 0; +		} +	} + +	thislen = min_t(int, writesize, len - read); +	column = from & (writesize - 1); +	if (column + thislen > writesize) +		thislen = writesize - column; + +	while (!ret) { +		/* If there is more to load then start next load */ +		from += thislen; +		if (!ONENAND_IS_4KB_PAGE(this) && read + thislen < len) { +			this->main_buf = buf + thislen; +			this->command(mtd, ONENAND_CMD_READ, from, writesize); +			/* +			 * Chip boundary handling in DDP +			 * Now we issued chip 1 read and pointed chip 1 +			 * bufferam so we have to point chip 0 bufferam. 
+			 */ +			if (ONENAND_IS_DDP(this) && +					unlikely(from == (this->chipsize >> 1))) { +				this->write_word(ONENAND_DDP_CHIP0, this->base + ONENAND_REG_START_ADDRESS2); +				boundary = 1; +			} else +				boundary = 0; +			ONENAND_SET_PREV_BUFFERRAM(this); +		} + +		/* While load is going, read from last bufferRAM */ +		this->read_bufferram(mtd, from - thislen, ONENAND_DATARAM, buf, column, thislen); + +		/* Read oob area if needed */ +		if (oobbuf) { +			thisooblen = oobsize - oobcolumn; +			thisooblen = min_t(int, thisooblen, ooblen - oobread); + +			if (ops->mode == MTD_OPS_AUTO_OOB) +				onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen); +			else +				this->read_bufferram(mtd, 0, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen); +			oobread += thisooblen; +			oobbuf += thisooblen; +			oobcolumn = 0; +		} + +		if (ONENAND_IS_4KB_PAGE(this) && (read + thislen < len)) { +			this->command(mtd, ONENAND_CMD_READ, from, writesize); +			ret = this->wait(mtd, FL_READING); +			if (unlikely(ret)) +				ret = onenand_recover_lsb(mtd, from, ret); +			onenand_update_bufferram(mtd, from, !ret); +			if (mtd_is_eccerr(ret)) +				ret = 0; +		} + +		/* See if we are done */ +		read += thislen; +		if (read == len) +			break; +		/* Set up for next read from bufferRAM */ +		if (unlikely(boundary)) +			this->write_word(ONENAND_DDP_CHIP1, this->base + ONENAND_REG_START_ADDRESS2); +		if (!ONENAND_IS_4KB_PAGE(this)) +			ONENAND_SET_NEXT_BUFFERRAM(this); +		buf += thislen; +		thislen = min_t(int, writesize, len - read); +		column = 0; + +		if (!ONENAND_IS_4KB_PAGE(this)) { +			/* Now wait for load */ +			ret = this->wait(mtd, FL_READING); +			onenand_update_bufferram(mtd, from, !ret); +			if (mtd_is_eccerr(ret)) +				ret = 0; +		} +	} + +	/* +	 * Return success, if no ECC failures, else -EBADMSG +	 * fs driver will take care of that, because +	 * retlen == desired len and result == -EBADMSG +	 */ +	ops->retlen = read; +	ops->oobretlen = oobread; + +	if (ret) +		return ret; + 
+	if (mtd->ecc_stats.failed - stats.failed) +		return -EBADMSG; + +	/* return max bitflips per ecc step; ONENANDs correct 1 bit only */ +	return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0; +} + +/** + * onenand_read_oob_nolock - [MTD Interface] OneNAND read out-of-band + * @param mtd		MTD device structure + * @param from		offset to read from + * @param ops		oob operation description structure + * + * OneNAND read out-of-band data from the spare area + */ +static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from, +		struct mtd_oob_ops *ops) +{ +	struct onenand_chip *this = mtd->priv; +	struct mtd_ecc_stats stats; +	int read = 0, thislen, column, oobsize; +	size_t len = ops->ooblen; +	unsigned int mode = ops->mode; +	u_char *buf = ops->oobbuf; +	int ret = 0, readcmd; + +	from += ops->ooboffs; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_read_oob_nolock: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); + +	/* Initialize return length value */ +	ops->oobretlen = 0; + +	if (mode == MTD_OPS_AUTO_OOB) +		oobsize = this->ecclayout->oobavail; +	else +		oobsize = mtd->oobsize; + +	column = from & (mtd->oobsize - 1); + +	if (unlikely(column >= oobsize)) { +		printk(KERN_ERR "onenand_read_oob_nolock: Attempted to start read outside oob\n"); +		return -EINVAL; +	} + +	/* Do not allow reads past end of device */ +	if (unlikely(from >= mtd->size || +		column + len > ((mtd->size >> this->page_shift) - +				(from >> this->page_shift)) * oobsize)) { +		printk(KERN_ERR "onenand_read_oob_nolock: Attempted to read beyond end of device\n"); +		return -EINVAL; +	} + +	stats = mtd->ecc_stats; + +	readcmd = ONENAND_IS_4KB_PAGE(this) ? 
+		ONENAND_CMD_READ : ONENAND_CMD_READOOB; + +	while (read < len) { +		thislen = oobsize - column; +		thislen = min_t(int, thislen, len); + +		this->spare_buf = buf; +		this->command(mtd, readcmd, from, mtd->oobsize); + +		onenand_update_bufferram(mtd, from, 0); + +		ret = this->wait(mtd, FL_READING); +		if (unlikely(ret)) +			ret = onenand_recover_lsb(mtd, from, ret); + +		if (ret && ret != -EBADMSG) { +			printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret); +			break; +		} + +		if (mode == MTD_OPS_AUTO_OOB) +			onenand_transfer_auto_oob(mtd, buf, column, thislen); +		else +			this->read_bufferram(mtd, 0, ONENAND_SPARERAM, buf, column, thislen); + +		read += thislen; + +		if (read == len) +			break; + +		buf += thislen; + +		/* Read more? */ +		if (read < len) { +			/* Page size */ +			from += mtd->writesize; +			column = 0; +		} +	} + +	ops->oobretlen = read; + +	if (ret) +		return ret; + +	if (mtd->ecc_stats.failed - stats.failed) +		return -EBADMSG; + +	return 0; +} + +/** + * onenand_read - [MTD Interface] MTD compability function for onenand_read_ecc + * @param mtd		MTD device structure + * @param from		offset to read from + * @param len		number of bytes to read + * @param retlen	pointer to variable to store the number of read bytes + * @param buf		the databuffer to put data + * + * This function simply calls onenand_read_ecc with oob buffer and oobsel = NULL +*/ +int onenand_read(struct mtd_info *mtd, loff_t from, size_t len, +		 size_t * retlen, u_char * buf) +{ +	struct mtd_oob_ops ops = { +		.len    = len, +		.ooblen = 0, +		.datbuf = buf, +		.oobbuf = NULL, +	}; +	int ret; + +	onenand_get_device(mtd, FL_READING); +	ret = onenand_read_ops_nolock(mtd, from, &ops); +	onenand_release_device(mtd); + +	*retlen = ops.retlen; +	return ret; +} + +/** + * onenand_read_oob - [MTD Interface] OneNAND read out-of-band + * @param mtd		MTD device structure + * @param from		offset to read from + * @param ops		oob operations description structure + * + * 
OneNAND main and/or out-of-band + */ +int onenand_read_oob(struct mtd_info *mtd, loff_t from, +			struct mtd_oob_ops *ops) +{ +	int ret; + +	switch (ops->mode) { +	case MTD_OPS_PLACE_OOB: +	case MTD_OPS_AUTO_OOB: +		break; +	case MTD_OPS_RAW: +		/* Not implemented yet */ +	default: +		return -EINVAL; +	} + +	onenand_get_device(mtd, FL_READING); +	if (ops->datbuf) +		ret = onenand_read_ops_nolock(mtd, from, ops); +	else +		ret = onenand_read_oob_nolock(mtd, from, ops); +	onenand_release_device(mtd); + +	return ret; +} + +/** + * onenand_bbt_wait - [DEFAULT] wait until the command is done + * @param mtd		MTD device structure + * @param state		state to select the max. timeout value + * + * Wait for command done. + */ +static int onenand_bbt_wait(struct mtd_info *mtd, int state) +{ +	struct onenand_chip *this = mtd->priv; +	unsigned int flags = ONENAND_INT_MASTER; +	unsigned int interrupt; +	unsigned int ctrl; + +	while (1) { +		interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); +		if (interrupt & flags) +			break; +	} + +	/* To get correct interrupt status in timeout case */ +	interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); +	ctrl = this->read_word(this->base + ONENAND_REG_CTRL_STATUS); + +	if (interrupt & ONENAND_INT_READ) { +		int ecc = onenand_read_ecc(this); +		if (ecc & ONENAND_ECC_2BIT_ALL) { +			printk(KERN_INFO "onenand_bbt_wait: ecc error = 0x%04x" +				", controller = 0x%04x\n", ecc, ctrl); +			return ONENAND_BBT_READ_ERROR; +		} +	} else { +		printk(KERN_ERR "onenand_bbt_wait: read timeout!" 
+				"ctrl=0x%04x intr=0x%04x\n", ctrl, interrupt); +		return ONENAND_BBT_READ_FATAL_ERROR; +	} + +	/* Initial bad block case: 0x2400 or 0x0400 */ +	if (ctrl & ONENAND_CTRL_ERROR) { +		printk(KERN_DEBUG "onenand_bbt_wait: controller error = 0x%04x\n", ctrl); +		return ONENAND_BBT_READ_ERROR; +	} + +	return 0; +} + +/** + * onenand_bbt_read_oob - [MTD Interface] OneNAND read out-of-band for bbt scan + * @param mtd		MTD device structure + * @param from		offset to read from + * @param ops		oob operation description structure + * + * OneNAND read out-of-band data from the spare area for bbt scan + */ +int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from, +		struct mtd_oob_ops *ops) +{ +	struct onenand_chip *this = mtd->priv; +	int read = 0, thislen, column; +	int ret = 0, readcmd; +	size_t len = ops->ooblen; +	u_char *buf = ops->oobbuf; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_bbt_read_oob: from = 0x%08x, len = %zi\n", (unsigned int) from, len); + +	readcmd = ONENAND_IS_4KB_PAGE(this) ? +		ONENAND_CMD_READ : ONENAND_CMD_READOOB; + +	/* Initialize return value */ +	ops->oobretlen = 0; + +	/* Do not allow reads past end of device */ +	if (unlikely((from + len) > mtd->size)) { +		printk(KERN_ERR "onenand_bbt_read_oob: Attempt read beyond end of device\n"); +		return ONENAND_BBT_READ_FATAL_ERROR; +	} + +	/* Grab the lock and see if the device is available */ +	onenand_get_device(mtd, FL_READING); + +	column = from & (mtd->oobsize - 1); + +	while (read < len) { + +		thislen = mtd->oobsize - column; +		thislen = min_t(int, thislen, len); + +		this->spare_buf = buf; +		this->command(mtd, readcmd, from, mtd->oobsize); + +		onenand_update_bufferram(mtd, from, 0); + +		ret = this->bbt_wait(mtd, FL_READING); +		if (unlikely(ret)) +			ret = onenand_recover_lsb(mtd, from, ret); + +		if (ret) +			break; + +		this->read_bufferram(mtd, 0, ONENAND_SPARERAM, buf, column, thislen); +		read += thislen; +		if (read == len) +			break; + +		buf += thislen; + +		/* Read more? 
*/ +		if (read < len) { +			/* Update Page size */ +			from += this->writesize; +			column = 0; +		} +	} + +	/* Deselect and wake up anyone waiting on the device */ +	onenand_release_device(mtd); + +	ops->oobretlen = read; +	return ret; +} + + +#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE +/** + * onenand_verify_oob - [GENERIC] verify the oob contents after a write + * @param mtd           MTD device structure + * @param buf           the databuffer to verify + * @param to            offset to read from + */ +static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to) +{ +	struct onenand_chip *this = mtd->priv; +	u_char *oob_buf = this->oob_buf; +	int status, i, readcmd; + +	readcmd = ONENAND_IS_4KB_PAGE(this) ? +		ONENAND_CMD_READ : ONENAND_CMD_READOOB; + +	this->command(mtd, readcmd, to, mtd->oobsize); +	onenand_update_bufferram(mtd, to, 0); +	status = this->wait(mtd, FL_READING); +	if (status) +		return status; + +	this->read_bufferram(mtd, 0, ONENAND_SPARERAM, oob_buf, 0, mtd->oobsize); +	for (i = 0; i < mtd->oobsize; i++) +		if (buf[i] != 0xFF && buf[i] != oob_buf[i]) +			return -EBADMSG; + +	return 0; +} + +/** + * onenand_verify - [GENERIC] verify the chip contents after a write + * @param mtd          MTD device structure + * @param buf          the databuffer to verify + * @param addr         offset to read from + * @param len          number of bytes to read and compare + */ +static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, size_t len) +{ +	struct onenand_chip *this = mtd->priv; +	void __iomem *dataram; +	int ret = 0; +	int thislen, column; + +	while (len != 0) { +		thislen = min_t(int, this->writesize, len); +		column = addr & (this->writesize - 1); +		if (column + thislen > this->writesize) +			thislen = this->writesize - column; + +		this->command(mtd, ONENAND_CMD_READ, addr, this->writesize); + +		onenand_update_bufferram(mtd, addr, 0); + +		ret = this->wait(mtd, FL_READING); +		if (ret) +			return ret; + 
+		onenand_update_bufferram(mtd, addr, 1); + +		dataram = this->base + ONENAND_DATARAM; +		dataram += onenand_bufferram_offset(mtd, ONENAND_DATARAM); + +		if (memcmp(buf, dataram + column, thislen)) +			return -EBADMSG; + +		len -= thislen; +		buf += thislen; +		addr += thislen; +	} + +	return 0; +} +#else +#define onenand_verify(...)             (0) +#define onenand_verify_oob(...)         (0) +#endif + +#define NOTALIGNED(x)	((x & (this->subpagesize - 1)) != 0) + +/** + * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer + * @param mtd           MTD device structure + * @param oob_buf       oob buffer + * @param buf           source address + * @param column        oob offset to write to + * @param thislen       oob length to write + */ +static int onenand_fill_auto_oob(struct mtd_info *mtd, u_char *oob_buf, +		const u_char *buf, int column, int thislen) +{ +	struct onenand_chip *this = mtd->priv; +	struct nand_oobfree *free; +	int writecol = column; +	int writeend = column + thislen; +	int lastgap = 0; +	unsigned int i; + +	free = this->ecclayout->oobfree; +	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE && free->length; +	     i++, free++) { +		if (writecol >= lastgap) +			writecol += free->offset - lastgap; +		if (writeend >= lastgap) +			writeend += free->offset - lastgap; +		lastgap = free->offset + free->length; +	} +	free = this->ecclayout->oobfree; +	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE && free->length; +	     i++, free++) { +		int free_end = free->offset + free->length; +		if (free->offset < writeend && free_end > writecol) { +			int st = max_t(int,free->offset,writecol); +			int ed = min_t(int,free_end,writeend); +			int n = ed - st; +			memcpy(oob_buf + st, buf, n); +			buf += n; +		} else if (column == 0) +			break; +	} +	return 0; +} + +/** + * onenand_write_ops_nolock - [OneNAND Interface] write main and/or out-of-band + * @param mtd           MTD device structure + * @param to            offset to write to + * @param ops          
 oob operation description structure + * + * Write main and/or oob with ECC + */ +static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, +		struct mtd_oob_ops *ops) +{ +	struct onenand_chip *this = mtd->priv; +	int written = 0, column, thislen, subpage; +	int oobwritten = 0, oobcolumn, thisooblen, oobsize; +	size_t len = ops->len; +	size_t ooblen = ops->ooblen; +	const u_char *buf = ops->datbuf; +	const u_char *oob = ops->oobbuf; +	u_char *oobbuf; +	int ret = 0; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_write_ops_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); + +	/* Initialize retlen, in case of early exit */ +	ops->retlen = 0; +	ops->oobretlen = 0; + +	/* Reject writes, which are not page aligned */ +	if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { +		printk(KERN_ERR "onenand_write_ops_nolock: Attempt to write not page aligned data\n"); +		return -EINVAL; +	} + +	if (ops->mode == MTD_OPS_AUTO_OOB) +		oobsize = this->ecclayout->oobavail; +	else +		oobsize = mtd->oobsize; + +	oobcolumn = to & (mtd->oobsize - 1); + +	column = to & (mtd->writesize - 1); + +	/* Loop until all data write */ +	while (written < len) { +		u_char *wbuf = (u_char *) buf; + +		thislen = min_t(int, mtd->writesize - column, len - written); +		thisooblen = min_t(int, oobsize - oobcolumn, ooblen - oobwritten); + +		this->command(mtd, ONENAND_CMD_BUFFERRAM, to, thislen); + +		/* Partial page write */ +		subpage = thislen < mtd->writesize; +		if (subpage) { +			memset(this->page_buf, 0xff, mtd->writesize); +			memcpy(this->page_buf + column, buf, thislen); +			wbuf = this->page_buf; +		} + +		this->write_bufferram(mtd, to, ONENAND_DATARAM, wbuf, 0, mtd->writesize); + +		if (oob) { +			oobbuf = this->oob_buf; + +			/* We send data to spare ram with oobsize +			 *                          * to prevent byte access */ +			memset(oobbuf, 0xff, mtd->oobsize); +			if (ops->mode == MTD_OPS_AUTO_OOB) +				onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen); +			
else +				memcpy(oobbuf + oobcolumn, oob, thisooblen); + +			oobwritten += thisooblen; +			oob += thisooblen; +			oobcolumn = 0; +		} else +			oobbuf = (u_char *) ffchars; + +		this->write_bufferram(mtd, 0, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize); + +		this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize); + +		ret = this->wait(mtd, FL_WRITING); + +		/* In partial page write we don't update bufferram */ +		onenand_update_bufferram(mtd, to, !ret && !subpage); +		if (ONENAND_IS_2PLANE(this)) { +			ONENAND_SET_BUFFERRAM1(this); +			onenand_update_bufferram(mtd, to + this->writesize, !ret && !subpage); +		} + +		if (ret) { +			printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret); +			break; +		} + +		/* Only check verify write turn on */ +		ret = onenand_verify(mtd, buf, to, thislen); +		if (ret) { +			printk(KERN_ERR "onenand_write_ops_nolock: verify failed %d\n", ret); +			break; +		} + +		written += thislen; + +		if (written == len) +			break; + +		column = 0; +		to += thislen; +		buf += thislen; +	} + +	ops->retlen = written; + +	return ret; +} + +/** + * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band + * @param mtd           MTD device structure + * @param to            offset to write to + * @param len           number of bytes to write + * @param retlen        pointer to variable to store the number of written bytes + * @param buf           the data to write + * @param mode          operation mode + * + * OneNAND write out-of-band + */ +static int onenand_write_oob_nolock(struct mtd_info *mtd, loff_t to, +		struct mtd_oob_ops *ops) +{ +	struct onenand_chip *this = mtd->priv; +	int column, ret = 0, oobsize; +	int written = 0, oobcmd; +	u_char *oobbuf; +	size_t len = ops->ooblen; +	const u_char *buf = ops->oobbuf; +	unsigned int mode = ops->mode; + +	to += ops->ooboffs; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_write_oob_nolock: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); + +	/* Initialize retlen, in case of early 
exit */ +	ops->oobretlen = 0; + +	if (mode == MTD_OPS_AUTO_OOB) +		oobsize = this->ecclayout->oobavail; +	else +		oobsize = mtd->oobsize; + +	column = to & (mtd->oobsize - 1); + +	if (unlikely(column >= oobsize)) { +		printk(KERN_ERR "onenand_write_oob_nolock: Attempted to start write outside oob\n"); +		return -EINVAL; +	} + +	/* For compatibility with NAND: Do not allow write past end of page */ +	if (unlikely(column + len > oobsize)) { +		printk(KERN_ERR "onenand_write_oob_nolock: " +				"Attempt to write past end of page\n"); +		return -EINVAL; +	} + +	/* Do not allow reads past end of device */ +	if (unlikely(to >= mtd->size || +				column + len > ((mtd->size >> this->page_shift) - +					(to >> this->page_shift)) * oobsize)) { +		printk(KERN_ERR "onenand_write_oob_nolock: Attempted to write past end of device\n"); +		return -EINVAL; +	} + +	oobbuf = this->oob_buf; + +	oobcmd = ONENAND_IS_4KB_PAGE(this) ? +		ONENAND_CMD_PROG : ONENAND_CMD_PROGOOB; + +	/* Loop until all data write */ +	while (written < len) { +		int thislen = min_t(int, oobsize, len - written); + +		this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobsize); + +		/* We send data to spare ram with oobsize +		 * to prevent byte access */ +		memset(oobbuf, 0xff, mtd->oobsize); +		if (mode == MTD_OPS_AUTO_OOB) +			onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen); +		else +			memcpy(oobbuf + column, buf, thislen); +		this->write_bufferram(mtd, 0, ONENAND_SPARERAM, oobbuf, 0, mtd->oobsize); + +		if (ONENAND_IS_4KB_PAGE(this)) { +			/* Set main area of DataRAM to 0xff*/ +			memset(this->page_buf, 0xff, mtd->writesize); +			this->write_bufferram(mtd, 0, ONENAND_DATARAM, +				this->page_buf,	0, mtd->writesize); +		} + +		this->command(mtd, oobcmd, to, mtd->oobsize); + +		onenand_update_bufferram(mtd, to, 0); +		if (ONENAND_IS_2PLANE(this)) { +			ONENAND_SET_BUFFERRAM1(this); +			onenand_update_bufferram(mtd, to + this->writesize, 0); +		} + +		ret = this->wait(mtd, FL_WRITING); +		if (ret) { +			
printk(KERN_ERR "onenand_write_oob_nolock: write failed %d\n", ret); +			break; +		} + +		ret = onenand_verify_oob(mtd, oobbuf, to); +		if (ret) { +			printk(KERN_ERR "onenand_write_oob_nolock: verify failed %d\n", ret); +			break; +		} + +		written += thislen; +		if (written == len) +			break; + +		to += mtd->writesize; +		buf += thislen; +		column = 0; +	} + +	ops->oobretlen = written; + +	return ret; +} + +/** + * onenand_write - [MTD Interface] compability function for onenand_write_ecc + * @param mtd		MTD device structure + * @param to		offset to write to + * @param len		number of bytes to write + * @param retlen	pointer to variable to store the number of written bytes + * @param buf		the data to write + * + * Write with ECC + */ +int onenand_write(struct mtd_info *mtd, loff_t to, size_t len, +		  size_t * retlen, const u_char * buf) +{ +	struct mtd_oob_ops ops = { +		.len    = len, +		.ooblen = 0, +		.datbuf = (u_char *) buf, +		.oobbuf = NULL, +	}; +	int ret; + +	onenand_get_device(mtd, FL_WRITING); +	ret = onenand_write_ops_nolock(mtd, to, &ops); +	onenand_release_device(mtd); + +	*retlen = ops.retlen; +	return ret; +} + +/** + * onenand_write_oob - [MTD Interface] OneNAND write out-of-band + * @param mtd		MTD device structure + * @param to		offset to write to + * @param ops		oob operation description structure + * + * OneNAND write main and/or out-of-band + */ +int onenand_write_oob(struct mtd_info *mtd, loff_t to, +			struct mtd_oob_ops *ops) +{ +	int ret; + +	switch (ops->mode) { +	case MTD_OPS_PLACE_OOB: +	case MTD_OPS_AUTO_OOB: +		break; +	case MTD_OPS_RAW: +		/* Not implemented yet */ +	default: +		return -EINVAL; +	} + +	onenand_get_device(mtd, FL_WRITING); +	if (ops->datbuf) +		ret = onenand_write_ops_nolock(mtd, to, ops); +	else +		ret = onenand_write_oob_nolock(mtd, to, ops); +	onenand_release_device(mtd); + +	return ret; + +} + +/** + * onenand_block_isbad_nolock - [GENERIC] Check if a block is marked bad + * @param mtd		MTD device structure + * 
@param ofs		offset from device start + * @param allowbbt	1, if its allowed to access the bbt area + * + * Check, if the block is bad, Either by reading the bad block table or + * calling of the scan function. + */ +static int onenand_block_isbad_nolock(struct mtd_info *mtd, loff_t ofs, int allowbbt) +{ +	struct onenand_chip *this = mtd->priv; +	struct bbm_info *bbm = this->bbm; + +	/* Return info from the table */ +	return bbm->isbad_bbt(mtd, ofs, allowbbt); +} + + +/** + * onenand_erase - [MTD Interface] erase block(s) + * @param mtd		MTD device structure + * @param instr		erase instruction + * + * Erase one ore more blocks + */ +int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) +{ +	struct onenand_chip *this = mtd->priv; +	unsigned int block_size; +	loff_t addr = instr->addr; +	unsigned int len = instr->len; +	int ret = 0, i; +	struct mtd_erase_region_info *region = NULL; +	unsigned int region_end = 0; + +	MTDDEBUG(MTD_DEBUG_LEVEL3, "onenand_erase: start = 0x%08x, len = %i\n", +			(unsigned int) addr, len); + +	if (FLEXONENAND(this)) { +		/* Find the eraseregion of this address */ +		i = flexonenand_region(mtd, addr); +		region = &mtd->eraseregions[i]; + +		block_size = region->erasesize; +		region_end = region->offset +			+ region->erasesize * region->numblocks; + +		/* Start address within region must align on block boundary. +		 * Erase region's start offset is always block start address. 
+		 */ +		if (unlikely((addr - region->offset) & (block_size - 1))) { +			MTDDEBUG(MTD_DEBUG_LEVEL0, "onenand_erase:" +				" Unaligned address\n"); +			return -EINVAL; +		} +	} else { +		block_size = 1 << this->erase_shift; + +		/* Start address must align on block boundary */ +		if (unlikely(addr & (block_size - 1))) { +			MTDDEBUG(MTD_DEBUG_LEVEL0, "onenand_erase:" +						"Unaligned address\n"); +			return -EINVAL; +		} +	} + +	/* Length must align on block boundary */ +	if (unlikely(len & (block_size - 1))) { +		MTDDEBUG (MTD_DEBUG_LEVEL0, +			 "onenand_erase: Length not block aligned\n"); +		return -EINVAL; +	} + +	/* Grab the lock and see if the device is available */ +	onenand_get_device(mtd, FL_ERASING); + +	/* Loop throught the pages */ +	instr->state = MTD_ERASING; + +	while (len) { + +		/* Check if we have a bad block, we do not erase bad blocks */ +		if (instr->priv == 0 && onenand_block_isbad_nolock(mtd, addr, 0)) { +			printk(KERN_WARNING "onenand_erase: attempt to erase" +				" a bad block at addr 0x%08x\n", +				(unsigned int) addr); +			instr->state = MTD_ERASE_FAILED; +			goto erase_exit; +		} + +		this->command(mtd, ONENAND_CMD_ERASE, addr, block_size); + +		onenand_invalidate_bufferram(mtd, addr, block_size); + +		ret = this->wait(mtd, FL_ERASING); +		/* Check, if it is write protected */ +		if (ret) { +			if (ret == -EPERM) +				MTDDEBUG (MTD_DEBUG_LEVEL0, "onenand_erase: " +					  "Device is write protected!!!\n"); +			else +				MTDDEBUG (MTD_DEBUG_LEVEL0, "onenand_erase: " +					  "Failed erase, block %d\n", +					onenand_block(this, addr)); +			instr->state = MTD_ERASE_FAILED; +			instr->fail_addr = addr; + +			goto erase_exit; +		} + +		len -= block_size; +		addr += block_size; + +		if (addr == region_end) { +			if (!len) +				break; +			region++; + +			block_size = region->erasesize; +			region_end = region->offset +				+ region->erasesize * region->numblocks; + +			if (len & (block_size - 1)) { +				/* This has been checked at MTD +				 * 
partitioning level. */ +				printk("onenand_erase: Unaligned address\n"); +				goto erase_exit; +			} +		} +	} + +	instr->state = MTD_ERASE_DONE; + +erase_exit: + +	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO; +	/* Do call back function */ +	if (!ret) +		mtd_erase_callback(instr); + +	/* Deselect and wake up anyone waiting on the device */ +	onenand_release_device(mtd); + +	return ret; +} + +/** + * onenand_sync - [MTD Interface] sync + * @param mtd		MTD device structure + * + * Sync is actually a wait for chip ready function + */ +void onenand_sync(struct mtd_info *mtd) +{ +	MTDDEBUG (MTD_DEBUG_LEVEL3, "onenand_sync: called\n"); + +	/* Grab the lock and see if the device is available */ +	onenand_get_device(mtd, FL_SYNCING); + +	/* Release it and go back */ +	onenand_release_device(mtd); +} + +/** + * onenand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad + * @param mtd		MTD device structure + * @param ofs		offset relative to mtd start + * + * Check whether the block is bad + */ +int onenand_block_isbad(struct mtd_info *mtd, loff_t ofs) +{ +	int ret; + +	/* Check for invalid offset */ +	if (ofs > mtd->size) +		return -EINVAL; + +	onenand_get_device(mtd, FL_READING); +	ret = onenand_block_isbad_nolock(mtd,ofs, 0); +	onenand_release_device(mtd); +	return ret; +} + +/** + * onenand_default_block_markbad - [DEFAULT] mark a block bad + * @param mtd           MTD device structure + * @param ofs           offset from device start + * + * This is the default implementation, which can be overridden by + * a hardware specific driver. 
+ */ +static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ +	struct onenand_chip *this = mtd->priv; +	struct bbm_info *bbm = this->bbm; +	u_char buf[2] = {0, 0}; +	struct mtd_oob_ops ops = { +		.mode = MTD_OPS_PLACE_OOB, +		.ooblen = 2, +		.oobbuf = buf, +		.ooboffs = 0, +	}; +	int block; + +	/* Get block number */ +	block = onenand_block(this, ofs); +	if (bbm->bbt) +		bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); + +	/* We write two bytes, so we dont have to mess with 16 bit access */ +	ofs += mtd->oobsize + (bbm->badblockpos & ~0x01); +	return onenand_write_oob_nolock(mtd, ofs, &ops); +} + +/** + * onenand_block_markbad - [MTD Interface] Mark the block at the given offset as bad + * @param mtd		MTD device structure + * @param ofs		offset relative to mtd start + * + * Mark the block as bad + */ +int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs) +{ +	int ret; + +	ret = onenand_block_isbad(mtd, ofs); +	if (ret) { +		/* If it was bad already, return success and do nothing */ +		if (ret > 0) +			return 0; +		return ret; +	} + +	ret = mtd_block_markbad(mtd, ofs); +	return ret; +} + +/** + * onenand_do_lock_cmd - [OneNAND Interface] Lock or unlock block(s) + * @param mtd           MTD device structure + * @param ofs           offset relative to mtd start + * @param len           number of bytes to lock or unlock + * @param cmd           lock or unlock command + * + * Lock or unlock one or more blocks + */ +static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int cmd) +{ +	struct onenand_chip *this = mtd->priv; +	int start, end, block, value, status; + +	start = onenand_block(this, ofs); +	end = onenand_block(this, ofs + len); + +	/* Continuous lock scheme */ +	if (this->options & ONENAND_HAS_CONT_LOCK) { +		/* Set start block address */ +		this->write_word(start, +				 this->base + ONENAND_REG_START_BLOCK_ADDRESS); +		/* Set end block address */ +		this->write_word(end - 1, +				 this->base + 
ONENAND_REG_END_BLOCK_ADDRESS); +		/* Write unlock command */ +		this->command(mtd, cmd, 0, 0); + +		/* There's no return value */ +		this->wait(mtd, FL_UNLOCKING); + +		/* Sanity check */ +		while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS) +		       & ONENAND_CTRL_ONGO) +			continue; + +		/* Check lock status */ +		status = this->read_word(this->base + ONENAND_REG_WP_STATUS); +		if (!(status & ONENAND_WP_US)) +			printk(KERN_ERR "wp status = 0x%x\n", status); + +		return 0; +	} + +	/* Block lock scheme */ +	for (block = start; block < end; block++) { +		/* Set block address */ +		value = onenand_block_address(this, block); +		this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1); +		/* Select DataRAM for DDP */ +		value = onenand_bufferram_address(this, block); +		this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); + +		/* Set start block address */ +		this->write_word(block, +				 this->base + ONENAND_REG_START_BLOCK_ADDRESS); +		/* Write unlock command */ +		this->command(mtd, ONENAND_CMD_UNLOCK, 0, 0); + +		/* There's no return value */ +		this->wait(mtd, FL_UNLOCKING); + +		/* Sanity check */ +		while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS) +		       & ONENAND_CTRL_ONGO) +			continue; + +		/* Check lock status */ +		status = this->read_word(this->base + ONENAND_REG_WP_STATUS); +		if (!(status & ONENAND_WP_US)) +			printk(KERN_ERR "block = %d, wp status = 0x%x\n", +			       block, status); +	} + +	return 0; +} + +#ifdef ONENAND_LINUX +/** + * onenand_lock - [MTD Interface] Lock block(s) + * @param mtd           MTD device structure + * @param ofs           offset relative to mtd start + * @param len           number of bytes to unlock + * + * Lock one or more blocks + */ +static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len) +{ +	int ret; + +	onenand_get_device(mtd, FL_LOCKING); +	ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_LOCK); +	onenand_release_device(mtd); +	return ret; +} + +/** 
+ * onenand_unlock - [MTD Interface] Unlock block(s) + * @param mtd           MTD device structure + * @param ofs           offset relative to mtd start + * @param len           number of bytes to unlock + * + * Unlock one or more blocks + */ +static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len) +{ +	int ret; + +	onenand_get_device(mtd, FL_LOCKING); +	ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK); +	onenand_release_device(mtd); +	return ret; +} +#endif + +/** + * onenand_check_lock_status - [OneNAND Interface] Check lock status + * @param this          onenand chip data structure + * + * Check lock status + */ +static int onenand_check_lock_status(struct onenand_chip *this) +{ +	unsigned int value, block, status; +	unsigned int end; + +	end = this->chipsize >> this->erase_shift; +	for (block = 0; block < end; block++) { +		/* Set block address */ +		value = onenand_block_address(this, block); +		this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1); +		/* Select DataRAM for DDP */ +		value = onenand_bufferram_address(this, block); +		this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2); +		/* Set start block address */ +		this->write_word(block, this->base + ONENAND_REG_START_BLOCK_ADDRESS); + +		/* Check lock status */ +		status = this->read_word(this->base + ONENAND_REG_WP_STATUS); +		if (!(status & ONENAND_WP_US)) { +			printk(KERN_ERR "block = %d, wp status = 0x%x\n", block, status); +			return 0; +		} +	} + +	return 1; +} + +/** + * onenand_unlock_all - [OneNAND Interface] unlock all blocks + * @param mtd           MTD device structure + * + * Unlock all blocks + */ +static void onenand_unlock_all(struct mtd_info *mtd) +{ +	struct onenand_chip *this = mtd->priv; +	loff_t ofs = 0; +	size_t len = mtd->size; + +	if (this->options & ONENAND_HAS_UNLOCK_ALL) { +		/* Set start block address */ +		this->write_word(0, this->base + ONENAND_REG_START_BLOCK_ADDRESS); +		/* Write unlock command */ +		
this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0); + +		/* There's no return value */ +		this->wait(mtd, FL_LOCKING); + +		/* Sanity check */ +		while (this->read_word(this->base + ONENAND_REG_CTRL_STATUS) +				& ONENAND_CTRL_ONGO) +			continue; + +		/* Check lock status */ +		if (onenand_check_lock_status(this)) +			return; + +		/* Workaround for all block unlock in DDP */ +		if (ONENAND_IS_DDP(this) && !FLEXONENAND(this)) { +			/* All blocks on another chip */ +			ofs = this->chipsize >> 1; +			len = this->chipsize >> 1; +		} +	} + +	onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK); +} + + +/** + * onenand_check_features - Check and set OneNAND features + * @param mtd           MTD data structure + * + * Check and set OneNAND features + * - lock scheme + * - two plane + */ +static void onenand_check_features(struct mtd_info *mtd) +{ +	struct onenand_chip *this = mtd->priv; +	unsigned int density, process; + +	/* Lock scheme depends on density and process */ +	density = onenand_get_density(this->device_id); +	process = this->version_id >> ONENAND_VERSION_PROCESS_SHIFT; + +	/* Lock scheme */ +	switch (density) { +	case ONENAND_DEVICE_DENSITY_4Gb: +		if (ONENAND_IS_DDP(this)) +			this->options |= ONENAND_HAS_2PLANE; +		else +			this->options |= ONENAND_HAS_4KB_PAGE; + +	case ONENAND_DEVICE_DENSITY_2Gb: +		/* 2Gb DDP don't have 2 plane */ +		if (!ONENAND_IS_DDP(this)) +			this->options |= ONENAND_HAS_2PLANE; +		this->options |= ONENAND_HAS_UNLOCK_ALL; + +	case ONENAND_DEVICE_DENSITY_1Gb: +		/* A-Die has all block unlock */ +		if (process) +			this->options |= ONENAND_HAS_UNLOCK_ALL; +		break; + +	default: +		/* Some OneNAND has continuous lock scheme */ +		if (!process) +			this->options |= ONENAND_HAS_CONT_LOCK; +		break; +	} + +	if (ONENAND_IS_MLC(this)) +		this->options |= ONENAND_HAS_4KB_PAGE; + +	if (ONENAND_IS_4KB_PAGE(this)) +		this->options &= ~ONENAND_HAS_2PLANE; + +	if (FLEXONENAND(this)) { +		this->options &= ~ONENAND_HAS_CONT_LOCK; +		this->options 
|= ONENAND_HAS_UNLOCK_ALL; +	} + +	if (this->options & ONENAND_HAS_CONT_LOCK) +		printk(KERN_DEBUG "Lock scheme is Continuous Lock\n"); +	if (this->options & ONENAND_HAS_UNLOCK_ALL) +		printk(KERN_DEBUG "Chip support all block unlock\n"); +	if (this->options & ONENAND_HAS_2PLANE) +		printk(KERN_DEBUG "Chip has 2 plane\n"); +	if (this->options & ONENAND_HAS_4KB_PAGE) +		printk(KERN_DEBUG "Chip has 4KiB pagesize\n"); + +} + +/** + * onenand_print_device_info - Print device ID + * @param device        device ID + * + * Print device ID + */ +char *onenand_print_device_info(int device, int version) +{ +	int vcc, demuxed, ddp, density, flexonenand; +	char *dev_info = malloc(80); +	char *p = dev_info; + +	vcc = device & ONENAND_DEVICE_VCC_MASK; +	demuxed = device & ONENAND_DEVICE_IS_DEMUX; +	ddp = device & ONENAND_DEVICE_IS_DDP; +	density = onenand_get_density(device); +	flexonenand = device & DEVICE_IS_FLEXONENAND; +	p += sprintf(dev_info, "%s%sOneNAND%s %dMB %sV 16-bit (0x%02x)", +	       demuxed ? "" : "Muxed ", +	       flexonenand ? "Flex-" : "", +	       ddp ? "(DDP)" : "", +	       (16 << density), vcc ? 
"2.65/3.3" : "1.8", device); + +	sprintf(p, "\nOneNAND version = 0x%04x", version); +	printk("%s\n", dev_info); + +	return dev_info; +} + +static const struct onenand_manufacturers onenand_manuf_ids[] = { +	{ONENAND_MFR_NUMONYX, "Numonyx"}, +	{ONENAND_MFR_SAMSUNG, "Samsung"}, +}; + +/** + * onenand_check_maf - Check manufacturer ID + * @param manuf         manufacturer ID + * + * Check manufacturer ID + */ +static int onenand_check_maf(int manuf) +{ +	int size = ARRAY_SIZE(onenand_manuf_ids); +	int i; +#ifdef ONENAND_DEBUG +	char *name; +#endif + +	for (i = 0; i < size; i++) +		if (manuf == onenand_manuf_ids[i].id) +			break; + +#ifdef ONENAND_DEBUG +	if (i < size) +		name = onenand_manuf_ids[i].name; +	else +		name = "Unknown"; + +	printk(KERN_DEBUG "OneNAND Manufacturer: %s (0x%0x)\n", name, manuf); +#endif + +	return i == size; +} + +/** +* flexonenand_get_boundary	- Reads the SLC boundary +* @param onenand_info		- onenand info structure +* +* Fill up boundary[] field in onenand_chip +**/ +static int flexonenand_get_boundary(struct mtd_info *mtd) +{ +	struct onenand_chip *this = mtd->priv; +	unsigned int die, bdry; +	int syscfg, locked; + +	/* Disable ECC */ +	syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1); +	this->write_word((syscfg | 0x0100), this->base + ONENAND_REG_SYS_CFG1); + +	for (die = 0; die < this->dies; die++) { +		this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0); +		this->wait(mtd, FL_SYNCING); + +		this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0); +		this->wait(mtd, FL_READING); + +		bdry = this->read_word(this->base + ONENAND_DATARAM); +		if ((bdry >> FLEXONENAND_PI_UNLOCK_SHIFT) == 3) +			locked = 0; +		else +			locked = 1; +		this->boundary[die] = bdry & FLEXONENAND_PI_MASK; + +		this->command(mtd, ONENAND_CMD_RESET, 0, 0); +		this->wait(mtd, FL_RESETING); + +		printk(KERN_INFO "Die %d boundary: %d%s\n", die, +		       this->boundary[die], locked ? 
"(Locked)" : "(Unlocked)"); +	} + +	/* Enable ECC */ +	this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1); +	return 0; +} + +/** + * flexonenand_get_size - Fill up fields in onenand_chip and mtd_info + * 			  boundary[], diesize[], mtd->size, mtd->erasesize, + * 			  mtd->eraseregions + * @param mtd		- MTD device structure + */ +static void flexonenand_get_size(struct mtd_info *mtd) +{ +	struct onenand_chip *this = mtd->priv; +	int die, i, eraseshift, density; +	int blksperdie, maxbdry; +	loff_t ofs; + +	density = onenand_get_density(this->device_id); +	blksperdie = ((loff_t)(16 << density) << 20) >> (this->erase_shift); +	blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0; +	maxbdry = blksperdie - 1; +	eraseshift = this->erase_shift - 1; + +	mtd->numeraseregions = this->dies << 1; + +	/* This fills up the device boundary */ +	flexonenand_get_boundary(mtd); +	die = 0; +	ofs = 0; +	i = -1; +	for (; die < this->dies; die++) { +		if (!die || this->boundary[die-1] != maxbdry) { +			i++; +			mtd->eraseregions[i].offset = ofs; +			mtd->eraseregions[i].erasesize = 1 << eraseshift; +			mtd->eraseregions[i].numblocks = +							this->boundary[die] + 1; +			ofs += mtd->eraseregions[i].numblocks << eraseshift; +			eraseshift++; +		} else { +			mtd->numeraseregions -= 1; +			mtd->eraseregions[i].numblocks += +							this->boundary[die] + 1; +			ofs += (this->boundary[die] + 1) << (eraseshift - 1); +		} +		if (this->boundary[die] != maxbdry) { +			i++; +			mtd->eraseregions[i].offset = ofs; +			mtd->eraseregions[i].erasesize = 1 << eraseshift; +			mtd->eraseregions[i].numblocks = maxbdry ^ +							 this->boundary[die]; +			ofs += mtd->eraseregions[i].numblocks << eraseshift; +			eraseshift--; +		} else +			mtd->numeraseregions -= 1; +	} + +	/* Expose MLC erase size except when all blocks are SLC */ +	mtd->erasesize = 1 << this->erase_shift; +	if (mtd->numeraseregions == 1) +		mtd->erasesize >>= 1; + +	printk(KERN_INFO "Device has %d eraseregions\n", mtd->numeraseregions); +	
for (i = 0; i < mtd->numeraseregions; i++) +		printk(KERN_INFO "[offset: 0x%08llx, erasesize: 0x%05x," +			" numblocks: %04u]\n", mtd->eraseregions[i].offset, +			mtd->eraseregions[i].erasesize, +			mtd->eraseregions[i].numblocks); + +	for (die = 0, mtd->size = 0; die < this->dies; die++) { +		this->diesize[die] = (loff_t) (blksperdie << this->erase_shift); +		this->diesize[die] -= (loff_t) (this->boundary[die] + 1) +						 << (this->erase_shift - 1); +		mtd->size += this->diesize[die]; +	} +} + +/** + * flexonenand_check_blocks_erased - Check if blocks are erased + * @param mtd_info	- mtd info structure + * @param start		- first erase block to check + * @param end		- last erase block to check + * + * Converting an unerased block from MLC to SLC + * causes byte values to change. Since both data and its ECC + * have changed, reads on the block give uncorrectable error. + * This might lead to the block being detected as bad. + * + * Avoid this by ensuring that the block to be converted is + * erased. + */ +static int flexonenand_check_blocks_erased(struct mtd_info *mtd, +					int start, int end) +{ +	struct onenand_chip *this = mtd->priv; +	int i, ret; +	int block; +	struct mtd_oob_ops ops = { +		.mode = MTD_OPS_PLACE_OOB, +		.ooboffs = 0, +		.ooblen	= mtd->oobsize, +		.datbuf	= NULL, +		.oobbuf	= this->oob_buf, +	}; +	loff_t addr; + +	printk(KERN_DEBUG "Check blocks from %d to %d\n", start, end); + +	for (block = start; block <= end; block++) { +		addr = flexonenand_addr(this, block); +		if (onenand_block_isbad_nolock(mtd, addr, 0)) +			continue; + +		/* +		 * Since main area write results in ECC write to spare, +		 * it is sufficient to check only ECC bytes for change. 
+		 */ +		ret = onenand_read_oob_nolock(mtd, addr, &ops); +		if (ret) +			return ret; + +		for (i = 0; i < mtd->oobsize; i++) +			if (this->oob_buf[i] != 0xff) +				break; + +		if (i != mtd->oobsize) { +			printk(KERN_WARNING "Block %d not erased.\n", block); +			return 1; +		} +	} + +	return 0; +} + +/** + * flexonenand_set_boundary	- Writes the SLC boundary + * @param mtd			- mtd info structure + */ +int flexonenand_set_boundary(struct mtd_info *mtd, int die, +				    int boundary, int lock) +{ +	struct onenand_chip *this = mtd->priv; +	int ret, density, blksperdie, old, new, thisboundary; +	loff_t addr; + +	if (die >= this->dies) +		return -EINVAL; + +	if (boundary == this->boundary[die]) +		return 0; + +	density = onenand_get_density(this->device_id); +	blksperdie = ((16 << density) << 20) >> this->erase_shift; +	blksperdie >>= ONENAND_IS_DDP(this) ? 1 : 0; + +	if (boundary >= blksperdie) { +		printk("flexonenand_set_boundary:" +			"Invalid boundary value. " +			"Boundary not changed.\n"); +		return -EINVAL; +	} + +	/* Check if converting blocks are erased */ +	old = this->boundary[die] + (die * this->density_mask); +	new = boundary + (die * this->density_mask); +	ret = flexonenand_check_blocks_erased(mtd, min(old, new) +						+ 1, max(old, new)); +	if (ret) { +		printk(KERN_ERR "flexonenand_set_boundary: Please erase blocks before boundary change\n"); +		return ret; +	} + +	this->command(mtd, FLEXONENAND_CMD_PI_ACCESS, die, 0); +	this->wait(mtd, FL_SYNCING); + +	/* Check is boundary is locked */ +	this->command(mtd, FLEXONENAND_CMD_READ_PI, die, 0); +	ret = this->wait(mtd, FL_READING); + +	thisboundary = this->read_word(this->base + ONENAND_DATARAM); +	if ((thisboundary >> FLEXONENAND_PI_UNLOCK_SHIFT) != 3) { +		printk(KERN_ERR "flexonenand_set_boundary: boundary locked\n"); +		goto out; +	} + +	printk(KERN_INFO "flexonenand_set_boundary: Changing die %d boundary: %d%s\n", +			die, boundary, lock ? 
"(Locked)" : "(Unlocked)"); + +	boundary &= FLEXONENAND_PI_MASK; +	boundary |= lock ? 0 : (3 << FLEXONENAND_PI_UNLOCK_SHIFT); + +	addr = die ? this->diesize[0] : 0; +	this->command(mtd, ONENAND_CMD_ERASE, addr, 0); +	ret = this->wait(mtd, FL_ERASING); +	if (ret) { +		printk("flexonenand_set_boundary:" +			"Failed PI erase for Die %d\n", die); +		goto out; +	} + +	this->write_word(boundary, this->base + ONENAND_DATARAM); +	this->command(mtd, ONENAND_CMD_PROG, addr, 0); +	ret = this->wait(mtd, FL_WRITING); +	if (ret) { +		printk("flexonenand_set_boundary:" +			"Failed PI write for Die %d\n", die); +		goto out; +	} + +	this->command(mtd, FLEXONENAND_CMD_PI_UPDATE, die, 0); +	ret = this->wait(mtd, FL_WRITING); +out: +	this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_REG_COMMAND); +	this->wait(mtd, FL_RESETING); +	if (!ret) +		/* Recalculate device size on boundary change*/ +		flexonenand_get_size(mtd); + +	return ret; +} + +/** + * onenand_chip_probe - [OneNAND Interface] Probe the OneNAND chip + * @param mtd		MTD device structure + * + * OneNAND detection method: + *   Compare the the values from command with ones from register + */ +static int onenand_chip_probe(struct mtd_info *mtd) +{ +	struct onenand_chip *this = mtd->priv; +	int bram_maf_id, bram_dev_id, maf_id, dev_id; +	int syscfg; + +	/* Save system configuration 1 */ +	syscfg = this->read_word(this->base + ONENAND_REG_SYS_CFG1); + +	/* Clear Sync. 
Burst Read mode to read BootRAM */ +	this->write_word((syscfg & ~ONENAND_SYS_CFG1_SYNC_READ), +			 this->base + ONENAND_REG_SYS_CFG1); + +	/* Send the command for reading device ID from BootRAM */ +	this->write_word(ONENAND_CMD_READID, this->base + ONENAND_BOOTRAM); + +	/* Read manufacturer and device IDs from BootRAM */ +	bram_maf_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x0); +	bram_dev_id = this->read_word(this->base + ONENAND_BOOTRAM + 0x2); + +	/* Reset OneNAND to read default register values */ +	this->write_word(ONENAND_CMD_RESET, this->base + ONENAND_BOOTRAM); + +	/* Wait reset */ +	this->wait(mtd, FL_RESETING); + +	/* Restore system configuration 1 */ +	this->write_word(syscfg, this->base + ONENAND_REG_SYS_CFG1); + +	/* Check manufacturer ID */ +	if (onenand_check_maf(bram_maf_id)) +		return -ENXIO; + +	/* Read manufacturer and device IDs from Register */ +	maf_id = this->read_word(this->base + ONENAND_REG_MANUFACTURER_ID); +	dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID); + +	/* Check OneNAND device */ +	if (maf_id != bram_maf_id || dev_id != bram_dev_id) +		return -ENXIO; + +	return 0; +} + +/** + * onenand_probe - [OneNAND Interface] Probe the OneNAND device + * @param mtd		MTD device structure + * + * OneNAND detection method: + *   Compare the the values from command with ones from register + */ +int onenand_probe(struct mtd_info *mtd) +{ +	struct onenand_chip *this = mtd->priv; +	int dev_id, ver_id; +	int density; +	int ret; + +	ret = this->chip_probe(mtd); +	if (ret) +		return ret; + +	/* Read device IDs from Register */ +	dev_id = this->read_word(this->base + ONENAND_REG_DEVICE_ID); +	ver_id = this->read_word(this->base + ONENAND_REG_VERSION_ID); +	this->technology = this->read_word(this->base + ONENAND_REG_TECHNOLOGY); + +	/* Flash device information */ +	mtd->name = onenand_print_device_info(dev_id, ver_id); +	this->device_id = dev_id; +	this->version_id = ver_id; + +	/* Check OneNAND features */ +	
onenand_check_features(mtd); + +	density = onenand_get_density(dev_id); +	if (FLEXONENAND(this)) { +		this->dies = ONENAND_IS_DDP(this) ? 2 : 1; +		/* Maximum possible erase regions */ +		mtd->numeraseregions = this->dies << 1; +		mtd->eraseregions = malloc(sizeof(struct mtd_erase_region_info) +					* (this->dies << 1)); +		if (!mtd->eraseregions) +			return -ENOMEM; +	} + +	/* +	 * For Flex-OneNAND, chipsize represents maximum possible device size. +	 * mtd->size represents the actual device size. +	 */ +	this->chipsize = (16 << density) << 20; + +	/* OneNAND page size & block size */ +	/* The data buffer size is equal to page size */ +	mtd->writesize = +	    this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE); +	/* We use the full BufferRAM */ +	if (ONENAND_IS_4KB_PAGE(this)) +		mtd->writesize <<= 1; + +	mtd->oobsize = mtd->writesize >> 5; +	/* Pagers per block is always 64 in OneNAND */ +	mtd->erasesize = mtd->writesize << 6; +	/* +	 * Flex-OneNAND SLC area has 64 pages per block. +	 * Flex-OneNAND MLC area has 128 pages per block. +	 * Expose MLC erase size to find erase_shift and page_mask. +	 */ +	if (FLEXONENAND(this)) +		mtd->erasesize <<= 1; + +	this->erase_shift = ffs(mtd->erasesize) - 1; +	this->page_shift = ffs(mtd->writesize) - 1; +	this->ppb_shift = (this->erase_shift - this->page_shift); +	this->page_mask = (mtd->erasesize / mtd->writesize) - 1; +	/* Set density mask. 
it is used for DDP */ +	if (ONENAND_IS_DDP(this)) +		this->density_mask = this->chipsize >> (this->erase_shift + 1); +	/* It's real page size */ +	this->writesize = mtd->writesize; + +	/* REVIST: Multichip handling */ + +	if (FLEXONENAND(this)) +		flexonenand_get_size(mtd); +	else +		mtd->size = this->chipsize; + +	mtd->flags = MTD_CAP_NANDFLASH; +	mtd->_erase = onenand_erase; +	mtd->_read = onenand_read; +	mtd->_write = onenand_write; +	mtd->_read_oob = onenand_read_oob; +	mtd->_write_oob = onenand_write_oob; +	mtd->_sync = onenand_sync; +	mtd->_block_isbad = onenand_block_isbad; +	mtd->_block_markbad = onenand_block_markbad; + +	return 0; +} + +/** + * onenand_scan - [OneNAND Interface] Scan for the OneNAND device + * @param mtd		MTD device structure + * @param maxchips	Number of chips to scan for + * + * This fills out all the not initialized function pointers + * with the defaults. + * The flash ID is read and the mtd/chip structures are + * filled with the appropriate values. + */ +int onenand_scan(struct mtd_info *mtd, int maxchips) +{ +	int i; +	struct onenand_chip *this = mtd->priv; + +	if (!this->read_word) +		this->read_word = onenand_readw; +	if (!this->write_word) +		this->write_word = onenand_writew; + +	if (!this->command) +		this->command = onenand_command; +	if (!this->wait) +		this->wait = onenand_wait; +	if (!this->bbt_wait) +		this->bbt_wait = onenand_bbt_wait; + +	if (!this->read_bufferram) +		this->read_bufferram = onenand_read_bufferram; +	if (!this->write_bufferram) +		this->write_bufferram = onenand_write_bufferram; + +	if (!this->chip_probe) +		this->chip_probe = onenand_chip_probe; + +	if (!this->block_markbad) +		this->block_markbad = onenand_default_block_markbad; +	if (!this->scan_bbt) +		this->scan_bbt = onenand_default_bbt; + +	if (onenand_probe(mtd)) +		return -ENXIO; + +	/* Set Sync. Burst Read after probing */ +	if (this->mmcontrol) { +		printk(KERN_INFO "OneNAND Sync. 
Burst Read support\n"); +		this->read_bufferram = onenand_sync_read_bufferram; +	} + +	/* Allocate buffers, if necessary */ +	if (!this->page_buf) { +		this->page_buf = kzalloc(mtd->writesize, GFP_KERNEL); +		if (!this->page_buf) { +			printk(KERN_ERR "onenand_scan(): Can't allocate page_buf\n"); +			return -ENOMEM; +		} +		this->options |= ONENAND_PAGEBUF_ALLOC; +	} +	if (!this->oob_buf) { +		this->oob_buf = kzalloc(mtd->oobsize, GFP_KERNEL); +		if (!this->oob_buf) { +			printk(KERN_ERR "onenand_scan: Can't allocate oob_buf\n"); +			if (this->options & ONENAND_PAGEBUF_ALLOC) { +				this->options &= ~ONENAND_PAGEBUF_ALLOC; +				kfree(this->page_buf); +			} +			return -ENOMEM; +		} +		this->options |= ONENAND_OOBBUF_ALLOC; +	} + +	this->state = FL_READY; + +	/* +	 * Allow subpage writes up to oobsize. +	 */ +	switch (mtd->oobsize) { +	case 128: +		this->ecclayout = &onenand_oob_128; +		mtd->subpage_sft = 0; +		break; + +	case 64: +		this->ecclayout = &onenand_oob_64; +		mtd->subpage_sft = 2; +		break; + +	case 32: +		this->ecclayout = &onenand_oob_32; +		mtd->subpage_sft = 1; +		break; + +	default: +		printk(KERN_WARNING "No OOB scheme defined for oobsize %d\n", +			mtd->oobsize); +		mtd->subpage_sft = 0; +		/* To prevent kernel oops */ +		this->ecclayout = &onenand_oob_32; +		break; +	} + +	this->subpagesize = mtd->writesize >> mtd->subpage_sft; + +	/* +	 * The number of bytes available for a client to place data into +	 * the out of band area +	 */ +	this->ecclayout->oobavail = 0; + +	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE && +	    this->ecclayout->oobfree[i].length; i++) +		this->ecclayout->oobavail += +			this->ecclayout->oobfree[i].length; +	mtd->oobavail = this->ecclayout->oobavail; + +	mtd->ecclayout = this->ecclayout; + +	/* Unlock whole block */ +	onenand_unlock_all(mtd); + +	return this->scan_bbt(mtd); +} + +/** + * onenand_release - [OneNAND Interface] Free resources held by the OneNAND device + * @param mtd		MTD device structure + */ +void 
onenand_release(struct mtd_info *mtd) +{ +} diff --git a/roms/u-boot/drivers/mtd/onenand/onenand_bbt.c b/roms/u-boot/drivers/mtd/onenand/onenand_bbt.c new file mode 100644 index 00000000..0267c2c5 --- /dev/null +++ b/roms/u-boot/drivers/mtd/onenand/onenand_bbt.c @@ -0,0 +1,266 @@ +/* + *  linux/drivers/mtd/onenand/onenand_bbt.c + * + *  Bad Block Table support for the OneNAND driver + * + *  Copyright(c) 2005-2008 Samsung Electronics + *  Kyungmin Park <kyungmin.park@samsung.com> + * + *  TODO: + *    Split BBT core and chip specific BBT. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <common.h> +#include <linux/compat.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/onenand.h> +#include <malloc.h> + +#include <asm/errno.h> + +/** + * check_short_pattern - [GENERIC] check if a pattern is in the buffer + * @param buf		the buffer to search + * @param len		the length of buffer to search + * @param paglen	the pagelength + * @param td		search pattern descriptor + * + * Check for a pattern at the given place. Used to search bad block + * tables and good / bad block identifiers. Same as check_pattern, but + * no optional empty check and the pattern is expected to start + * at offset 0. + */ +static int check_short_pattern(uint8_t * buf, int len, int paglen, +			       struct nand_bbt_descr *td) +{ +	int i; +	uint8_t *p = buf; + +	/* Compare the pattern */ +	for (i = 0; i < td->len; i++) { +		if (p[i] != td->pattern[i]) +			return -1; +	} +	return 0; +} + +/** + * create_bbt - [GENERIC] Create a bad block table by scanning the device + * @param mtd		MTD device structure + * @param buf		temporary buffer + * @param bd		descriptor for the good/bad block search pattern + * @param chip		create the table for a specific chip, -1 read all chips. 
+ *              Applies only if NAND_BBT_PERCHIP option is set + * + * Create a bad block table by scanning the device + * for the given good/bad block identify pattern + */ +static int create_bbt(struct mtd_info *mtd, uint8_t * buf, +		      struct nand_bbt_descr *bd, int chip) +{ +	struct onenand_chip *this = mtd->priv; +	struct bbm_info *bbm = this->bbm; +	int i, j, numblocks, len, scanlen; +	int startblock; +	loff_t from; +	size_t readlen, ooblen; +	struct mtd_oob_ops ops; +	int rgn; + +	printk(KERN_INFO "Scanning device for bad blocks\n"); + +	len = 1; + +	/* We need only read few bytes from the OOB area */ +	scanlen = ooblen = 0; +	readlen = bd->len; + +	/* chip == -1 case only */ +	/* Note that numblocks is 2 * (real numblocks) here; +	 * see i += 2 below as it makses shifting and masking less painful +	 */ +	numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1); +	startblock = 0; +	from = 0; + +	ops.mode = MTD_OPS_PLACE_OOB; +	ops.ooblen = readlen; +	ops.oobbuf = buf; +	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0; + +	for (i = startblock; i < numblocks;) { +		int ret; + +		for (j = 0; j < len; j++) { +			/* No need to read pages fully, +			 * just read required OOB bytes */ +			ret = onenand_bbt_read_oob(mtd, +					     from + j * mtd->writesize + +					     bd->offs, &ops); + +			/* If it is a initial bad block, just ignore it */ +			if (ret == ONENAND_BBT_READ_FATAL_ERROR) +				return -EIO; + +			if (ret || check_short_pattern +			    (&buf[j * scanlen], scanlen, mtd->writesize, bd)) { +				bbm->bbt[i >> 3] |= 0x03 << (i & 0x6); +				printk(KERN_WARNING +				       "Bad eraseblock %d at 0x%08x\n", i >> 1, +				       (unsigned int)from); +				break; +			} +		} +		i += 2; + +		if (FLEXONENAND(this)) { +			rgn = flexonenand_region(mtd, from); +			from += mtd->eraseregions[rgn].erasesize; +		} else +			from += (1 << bbm->bbt_erase_shift); +	} + +	return 0; +} + +/** + * onenand_memory_bbt - [GENERIC] create a memory based bad block table + * 
@param mtd		MTD device structure + * @param bd		descriptor for the good/bad block search pattern + * + * The function creates a memory based bbt by scanning the device + * for manufacturer / software marked good / bad blocks + */ +static inline int onenand_memory_bbt(struct mtd_info *mtd, +				     struct nand_bbt_descr *bd) +{ +	unsigned char data_buf[MAX_ONENAND_PAGESIZE]; + +	bd->options &= ~NAND_BBT_SCANEMPTY; +	return create_bbt(mtd, data_buf, bd, -1); +} + +/** + * onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad + * @param mtd		MTD device structure + * @param offs		offset in the device + * @param allowbbt	allow access to bad block table region + */ +static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt) +{ +	struct onenand_chip *this = mtd->priv; +	struct bbm_info *bbm = this->bbm; +	int block; +	uint8_t res; + +	/* Get block number * 2 */ +	block = (int) (onenand_block(this, offs) << 1); +	res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03; + +	MTDDEBUG (MTD_DEBUG_LEVEL2, +		"onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n", +		(unsigned int)offs, block >> 1, res); + +	switch ((int)res) { +	case 0x00: +		return 0; +	case 0x01: +		return 1; +	case 0x02: +		return allowbbt ? 0 : 1; +	} + +	return 1; +} + +/** + * onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s) + * @param mtd		MTD device structure + * @param bd		descriptor for the good/bad block search pattern + * + * The function checks, if a bad block table(s) is/are already + * available. If not it scans the device for manufacturer + * marked good / bad blocks and writes the bad block table(s) to + * the selected place. + * + * The bad block table memory is allocated here. It must be freed + * by calling the onenand_free_bbt function. 
+ * + */ +int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd) +{ +	struct onenand_chip *this = mtd->priv; +	struct bbm_info *bbm = this->bbm; +	int len, ret = 0; + +	len = this->chipsize >> (this->erase_shift + 2); +	/* Allocate memory (2bit per block) */ +	bbm->bbt = malloc(len); +	if (!bbm->bbt) +		return -ENOMEM; +	/* Clear the memory bad block table */ +	memset(bbm->bbt, 0x00, len); + +	/* Set the bad block position */ +	bbm->badblockpos = ONENAND_BADBLOCK_POS; + +	/* Set erase shift */ +	bbm->bbt_erase_shift = this->erase_shift; + +	if (!bbm->isbad_bbt) +		bbm->isbad_bbt = onenand_isbad_bbt; + +	/* Scan the device to build a memory based bad block table */ +	if ((ret = onenand_memory_bbt(mtd, bd))) { +		printk(KERN_ERR +		       "onenand_scan_bbt: Can't scan flash and build the RAM-based BBT\n"); +		free(bbm->bbt); +		bbm->bbt = NULL; +	} + +	return ret; +} + +/* + * Define some generic bad / good block scan pattern which are used + * while scanning a device for factory marked good / bad blocks. 
+ */ +static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; + +static struct nand_bbt_descr largepage_memorybased = { +	.options = 0, +	.offs = 0, +	.len = 2, +	.pattern = scan_ff_pattern, +}; + +/** + * onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device + * @param mtd		MTD device structure + * + * This function selects the default bad block table + * support for the device and calls the onenand_scan_bbt function + */ +int onenand_default_bbt(struct mtd_info *mtd) +{ +	struct onenand_chip *this = mtd->priv; +	struct bbm_info *bbm; + +	this->bbm = malloc(sizeof(struct bbm_info)); +	if (!this->bbm) +		return -ENOMEM; + +	bbm = this->bbm; + +	memset(bbm, 0, sizeof(struct bbm_info)); + +	/* 1KB page has same configuration as 2KB page */ +	if (!bbm->badblock_pattern) +		bbm->badblock_pattern = &largepage_memorybased; + +	return onenand_scan_bbt(mtd, bbm->badblock_pattern); +} diff --git a/roms/u-boot/drivers/mtd/onenand/onenand_spl.c b/roms/u-boot/drivers/mtd/onenand/onenand_spl.c new file mode 100644 index 00000000..fe6b7d92 --- /dev/null +++ b/roms/u-boot/drivers/mtd/onenand/onenand_spl.c @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com> + * + * Based on code: + *	Copyright (C) 2005-2009 Samsung Electronics + *	Kyungmin Park <kyungmin.park@samsung.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <asm/io.h> +#include <linux/mtd/onenand_regs.h> +#include <onenand_uboot.h> + +/* + * Device geometry: + * - 2048b page, 128k erase block. + * - 4096b page, 256k erase block. 
+ */ +enum onenand_spl_pagesize { +	PAGE_2K = 2048, +	PAGE_4K = 4096, +}; + +#define ONENAND_PAGES_PER_BLOCK			64 +#define onenand_block_address(block)		(block) +#define onenand_sector_address(page)		(page << 2) +#define onenand_buffer_address()		((1 << 3) << 8) +#define onenand_bufferram_address(block)	(0) + +static inline uint16_t onenand_readw(uint32_t addr) +{ +	return readw(CONFIG_SYS_ONENAND_BASE + addr); +} + +static inline void onenand_writew(uint16_t value, uint32_t addr) +{ +	writew(value, CONFIG_SYS_ONENAND_BASE + addr); +} + +static enum onenand_spl_pagesize onenand_spl_get_geometry(void) +{ +	uint32_t dev_id, density; + +	if (!onenand_readw(ONENAND_REG_TECHNOLOGY)) { +		dev_id = onenand_readw(ONENAND_REG_DEVICE_ID); +		density = dev_id >> ONENAND_DEVICE_DENSITY_SHIFT; +		density &= ONENAND_DEVICE_DENSITY_MASK; + +		if (density < ONENAND_DEVICE_DENSITY_4Gb) +			return PAGE_2K; + +		if (dev_id & ONENAND_DEVICE_IS_DDP) +			return PAGE_2K; +	} + +	return PAGE_4K; +} + +static int onenand_spl_read_page(uint32_t block, uint32_t page, uint32_t *buf, +					enum onenand_spl_pagesize pagesize) +{ +	const uint32_t addr = CONFIG_SYS_ONENAND_BASE + ONENAND_DATARAM; +	uint32_t offset; + +	onenand_writew(onenand_block_address(block), +			ONENAND_REG_START_ADDRESS1); + +	onenand_writew(onenand_bufferram_address(block), +			ONENAND_REG_START_ADDRESS2); + +	onenand_writew(onenand_sector_address(page), +			ONENAND_REG_START_ADDRESS8); + +	onenand_writew(onenand_buffer_address(), +			ONENAND_REG_START_BUFFER); + +	onenand_writew(ONENAND_INT_CLEAR, ONENAND_REG_INTERRUPT); + +	onenand_writew(ONENAND_CMD_READ, ONENAND_REG_COMMAND); + +	while (!(onenand_readw(ONENAND_REG_INTERRUPT) & ONENAND_INT_READ)) +		continue; + +	/* Check for invalid block mark */ +	if (page < 2 && (onenand_readw(ONENAND_SPARERAM) != 0xffff)) +		return 1; + +	for (offset = 0; offset < pagesize; offset += 4) +		buf[offset / 4] = readl(addr + offset); + +	return 0; +} + +void 
onenand_spl_load_image(uint32_t offs, uint32_t size, void *dst) +{ +	uint32_t *addr = (uint32_t *)dst; +	uint32_t to_page; +	uint32_t block; +	uint32_t page, rpage; +	enum onenand_spl_pagesize pagesize; +	int ret; + +	pagesize = onenand_spl_get_geometry(); + +	/* +	 * The page can be either 2k or 4k, avoid using DIV_ROUND_UP to avoid +	 * pulling further unwanted functions into the SPL. +	 */ +	if (pagesize == 2048) { +		page = offs / 2048; +		to_page = page + DIV_ROUND_UP(size, 2048); +	} else { +		page = offs / 4096; +		to_page = page + DIV_ROUND_UP(size, 4096); +	} + +	for (; page <= to_page; page++) { +		block = page / ONENAND_PAGES_PER_BLOCK; +		rpage = page & (ONENAND_PAGES_PER_BLOCK - 1); +		ret = onenand_spl_read_page(block, rpage, addr, pagesize); +		if (ret) +			page += ONENAND_PAGES_PER_BLOCK - 1; +		else +			addr += pagesize / 4; +	} +} diff --git a/roms/u-boot/drivers/mtd/onenand/onenand_uboot.c b/roms/u-boot/drivers/mtd/onenand/onenand_uboot.c new file mode 100644 index 00000000..ae60c3bb --- /dev/null +++ b/roms/u-boot/drivers/mtd/onenand/onenand_uboot.c @@ -0,0 +1,56 @@ +/* + *  drivers/mtd/onenand/onenand_uboot.c + * + *  Copyright (C) 2005-2008 Samsung Electronics + *  Kyungmin Park <kyungmin.park@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* + * OneNAND initialization at U-Boot + */ + +#include <common.h> +#include <linux/compat.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/onenand.h> + +struct mtd_info onenand_mtd; +struct onenand_chip onenand_chip; +static __attribute__((unused)) char dev_name[] = "onenand0"; + +void onenand_init(void) +{ +	memset(&onenand_mtd, 0, sizeof(struct mtd_info)); +	memset(&onenand_chip, 0, sizeof(struct onenand_chip)); + +	onenand_mtd.priv = &onenand_chip; + +#ifdef CONFIG_USE_ONENAND_BOARD_INIT +	/* +	 * It's used for some board init required +	 */ +	onenand_board_init(&onenand_mtd); +#else +	onenand_chip.base = (void *) CONFIG_SYS_ONENAND_BASE; +#endif + +	onenand_scan(&onenand_mtd, 1); + +	if (onenand_chip.device_id & DEVICE_IS_FLEXONENAND) +		puts("Flex-"); +	puts("OneNAND: "); +	print_size(onenand_chip.chipsize, "\n"); + +#ifdef CONFIG_MTD_DEVICE +	/* +	 * Add MTD device so that we can reference it later +	 * via the mtdcore infrastructure (e.g. ubi). +	 */ +	onenand_mtd.name = dev_name; +	add_mtd_device(&onenand_mtd); +#endif +} diff --git a/roms/u-boot/drivers/mtd/onenand/samsung.c b/roms/u-boot/drivers/mtd/onenand/samsung.c new file mode 100644 index 00000000..df04c2bb --- /dev/null +++ b/roms/u-boot/drivers/mtd/onenand/samsung.c @@ -0,0 +1,577 @@ +/* + * S5PC100 OneNAND driver at U-Boot + * + * Copyright (C) 2008-2009 Samsung Electronics + * Kyungmin Park <kyungmin.park@samsung.com> + * + * Implementation: + *	Emulate the pseudo BufferRAM + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <malloc.h> +#include <linux/compat.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/onenand.h> +#include <linux/mtd/samsung_onenand.h> + +#include <asm/io.h> +#include <asm/errno.h> + +#ifdef ONENAND_DEBUG +#define DPRINTK(format, args...)					\ +do {									\ +	printf("%s[%d]: " format "\n", __func__, __LINE__, ##args);	\ +} while (0) +#else +#define DPRINTK(...)			
do { } while (0) +#endif + +#define ONENAND_ERASE_STATUS		0x00 +#define ONENAND_MULTI_ERASE_SET		0x01 +#define ONENAND_ERASE_START		0x03 +#define ONENAND_UNLOCK_START		0x08 +#define ONENAND_UNLOCK_END		0x09 +#define ONENAND_LOCK_START		0x0A +#define ONENAND_LOCK_END		0x0B +#define ONENAND_LOCK_TIGHT_START	0x0C +#define ONENAND_LOCK_TIGHT_END		0x0D +#define ONENAND_UNLOCK_ALL		0x0E +#define ONENAND_OTP_ACCESS		0x12 +#define ONENAND_SPARE_ACCESS_ONLY	0x13 +#define ONENAND_MAIN_ACCESS_ONLY	0x14 +#define ONENAND_ERASE_VERIFY		0x15 +#define ONENAND_MAIN_SPARE_ACCESS	0x16 +#define ONENAND_PIPELINE_READ		0x4000 + +#if defined(CONFIG_S5P) +#define MAP_00				(0x0 << 26) +#define MAP_01				(0x1 << 26) +#define MAP_10				(0x2 << 26) +#define MAP_11				(0x3 << 26) +#endif + +/* read/write of XIP buffer */ +#define CMD_MAP_00(mem_addr)		(MAP_00 | ((mem_addr) << 1)) +/* read/write to the memory device */ +#define CMD_MAP_01(mem_addr)		(MAP_01 | (mem_addr)) +/* control special functions of the memory device */ +#define CMD_MAP_10(mem_addr)		(MAP_10 | (mem_addr)) +/* direct interface(direct access) with the memory device */ +#define CMD_MAP_11(mem_addr)		(MAP_11 | ((mem_addr) << 2)) + +struct s3c_onenand { +	struct mtd_info	*mtd; +	void __iomem	*base; +	void __iomem	*ahb_addr; +	int		bootram_command; +	void __iomem	*page_buf; +	void __iomem	*oob_buf; +	unsigned int	(*mem_addr)(int fba, int fpa, int fsa); +	struct samsung_onenand *reg; +}; + +static struct s3c_onenand *onenand; + +static int s3c_read_cmd(unsigned int cmd) +{ +	return readl(onenand->ahb_addr + cmd); +} + +static void s3c_write_cmd(int value, unsigned int cmd) +{ +	writel(value, onenand->ahb_addr + cmd); +} + +/* + * MEM_ADDR + * + * fba: flash block address + * fpa: flash page address + * fsa: flash sector address + * + * return the buffer address on the memory device + * It will be combined with CMD_MAP_XX + */ +#if defined(CONFIG_S5P) +static unsigned int s3c_mem_addr(int fba, int fpa, int fsa) +{ +	return (fba << 
13) | (fpa << 7) | (fsa << 5); +} +#endif + +static void s3c_onenand_reset(void) +{ +	unsigned long timeout = 0x10000; +	int stat; + +	writel(ONENAND_MEM_RESET_COLD, &onenand->reg->mem_reset); +	while (timeout--) { +		stat = readl(&onenand->reg->int_err_stat); +		if (stat & RST_CMP) +			break; +	} +	stat = readl(&onenand->reg->int_err_stat); +	writel(stat, &onenand->reg->int_err_ack); + +	/* Clear interrupt */ +	writel(0x0, &onenand->reg->int_err_ack); +	/* Clear the ECC status */ +	writel(0x0, &onenand->reg->ecc_err_stat); +} + +static unsigned short s3c_onenand_readw(void __iomem *addr) +{ +	struct onenand_chip *this = onenand->mtd->priv; +	int reg = addr - this->base; +	int word_addr = reg >> 1; +	int value; + +	/* It's used for probing time */ +	switch (reg) { +	case ONENAND_REG_MANUFACTURER_ID: +		return readl(&onenand->reg->manufact_id); +	case ONENAND_REG_DEVICE_ID: +		return readl(&onenand->reg->device_id); +	case ONENAND_REG_VERSION_ID: +		return readl(&onenand->reg->flash_ver_id); +	case ONENAND_REG_DATA_BUFFER_SIZE: +		return readl(&onenand->reg->data_buf_size); +	case ONENAND_REG_TECHNOLOGY: +		return readl(&onenand->reg->tech); +	case ONENAND_REG_SYS_CFG1: +		return readl(&onenand->reg->mem_cfg); + +	/* Used at unlock all status */ +	case ONENAND_REG_CTRL_STATUS: +		return 0; + +	case ONENAND_REG_WP_STATUS: +		return ONENAND_WP_US; + +	default: +		break; +	} + +	/* BootRAM access control */ +	if (reg < ONENAND_DATARAM && onenand->bootram_command) { +		if (word_addr == 0) +			return readl(&onenand->reg->manufact_id); +		if (word_addr == 1) +			return readl(&onenand->reg->device_id); +		if (word_addr == 2) +			return readl(&onenand->reg->flash_ver_id); +	} + +	value = s3c_read_cmd(CMD_MAP_11(word_addr)) & 0xffff; +	printk(KERN_INFO "s3c_onenand_readw:  Illegal access" +		" at reg 0x%x, value 0x%x\n", word_addr, value); +	return value; +} + +static void s3c_onenand_writew(unsigned short value, void __iomem *addr) +{ +	struct onenand_chip *this = 
onenand->mtd->priv; +	int reg = addr - this->base; +	int word_addr = reg >> 1; + +	/* It's used for probing time */ +	switch (reg) { +	case ONENAND_REG_SYS_CFG1: +		writel(value, &onenand->reg->mem_cfg); +		return; + +	case ONENAND_REG_START_ADDRESS1: +	case ONENAND_REG_START_ADDRESS2: +		return; + +	/* Lock/lock-tight/unlock/unlock_all */ +	case ONENAND_REG_START_BLOCK_ADDRESS: +		return; + +	default: +		break; +	} + +	/* BootRAM access control */ +	if (reg < ONENAND_DATARAM) { +		if (value == ONENAND_CMD_READID) { +			onenand->bootram_command = 1; +			return; +		} +		if (value == ONENAND_CMD_RESET) { +			writel(ONENAND_MEM_RESET_COLD, +					&onenand->reg->mem_reset); +			onenand->bootram_command = 0; +			return; +		} +	} + +	printk(KERN_INFO "s3c_onenand_writew: Illegal access" +		" at reg 0x%x, value 0x%x\n", word_addr, value); + +	s3c_write_cmd(value, CMD_MAP_11(word_addr)); +} + +static int s3c_onenand_wait(struct mtd_info *mtd, int state) +{ +	unsigned int flags = INT_ACT; +	unsigned int stat, ecc; +	unsigned long timeout = 0x100000; + +	switch (state) { +	case FL_READING: +		flags |= BLK_RW_CMP | LOAD_CMP; +		break; +	case FL_WRITING: +		flags |= BLK_RW_CMP | PGM_CMP; +		break; +	case FL_ERASING: +		flags |= BLK_RW_CMP | ERS_CMP; +		break; +	case FL_LOCKING: +		flags |= BLK_RW_CMP; +		break; +	default: +		break; +	} + +	while (timeout--) { +		stat = readl(&onenand->reg->int_err_stat); +		if (stat & flags) +			break; +	} + +	/* To get correct interrupt status in timeout case */ +	stat = readl(&onenand->reg->int_err_stat); +	writel(stat, &onenand->reg->int_err_ack); + +	/* +	 * In the Spec. 
it checks the controller status first +	 * However if you get the correct information in case of +	 * power off recovery (POR) test, it should read ECC status first +	 */ +	if (stat & LOAD_CMP) { +		ecc = readl(&onenand->reg->ecc_err_stat); +		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) { +			printk(KERN_INFO "%s: ECC error = 0x%04x\n", +					__func__, ecc); +			mtd->ecc_stats.failed++; +			return -EBADMSG; +		} +	} + +	if (stat & (LOCKED_BLK | ERS_FAIL | PGM_FAIL | LD_FAIL_ECC_ERR)) { +		printk(KERN_INFO "%s: controller error = 0x%04x\n", +				__func__, stat); +		if (stat & LOCKED_BLK) +			printk(KERN_INFO "%s: it's locked error = 0x%04x\n", +					__func__, stat); + +		return -EIO; +	} + +	return 0; +} + +static int s3c_onenand_command(struct mtd_info *mtd, int cmd, +		loff_t addr, size_t len) +{ +	struct onenand_chip *this = mtd->priv; +	unsigned int *m, *s; +	int fba, fpa, fsa = 0; +	unsigned int mem_addr; +	int i, mcount, scount; +	int index; + +	fba = (int) (addr >> this->erase_shift); +	fpa = (int) (addr >> this->page_shift); +	fpa &= this->page_mask; + +	mem_addr = onenand->mem_addr(fba, fpa, fsa); + +	switch (cmd) { +	case ONENAND_CMD_READ: +	case ONENAND_CMD_READOOB: +	case ONENAND_CMD_BUFFERRAM: +		ONENAND_SET_NEXT_BUFFERRAM(this); +	default: +		break; +	} + +	index = ONENAND_CURRENT_BUFFERRAM(this); + +	/* +	 * Emulate Two BufferRAMs and access with 4 bytes pointer +	 */ +	m = (unsigned int *) onenand->page_buf; +	s = (unsigned int *) onenand->oob_buf; + +	if (index) { +		m += (this->writesize >> 2); +		s += (mtd->oobsize >> 2); +	} + +	mcount = mtd->writesize >> 2; +	scount = mtd->oobsize >> 2; + +	switch (cmd) { +	case ONENAND_CMD_READ: +		/* Main */ +		for (i = 0; i < mcount; i++) +			*m++ = s3c_read_cmd(CMD_MAP_01(mem_addr)); +		return 0; + +	case ONENAND_CMD_READOOB: +		writel(TSRF, &onenand->reg->trans_spare); +		/* Main */ +		for (i = 0; i < mcount; i++) +			*m++ = s3c_read_cmd(CMD_MAP_01(mem_addr)); + +		/* Spare */ +		for (i = 0; i < scount; i++) 
+			*s++ = s3c_read_cmd(CMD_MAP_01(mem_addr)); + +		writel(0, &onenand->reg->trans_spare); +		return 0; + +	case ONENAND_CMD_PROG: +		/* Main */ +		for (i = 0; i < mcount; i++) +			s3c_write_cmd(*m++, CMD_MAP_01(mem_addr)); +		return 0; + +	case ONENAND_CMD_PROGOOB: +		writel(TSRF, &onenand->reg->trans_spare); + +		/* Main - dummy write */ +		for (i = 0; i < mcount; i++) +			s3c_write_cmd(0xffffffff, CMD_MAP_01(mem_addr)); + +		/* Spare */ +		for (i = 0; i < scount; i++) +			s3c_write_cmd(*s++, CMD_MAP_01(mem_addr)); + +		writel(0, &onenand->reg->trans_spare); +		return 0; + +	case ONENAND_CMD_UNLOCK_ALL: +		s3c_write_cmd(ONENAND_UNLOCK_ALL, CMD_MAP_10(mem_addr)); +		return 0; + +	case ONENAND_CMD_ERASE: +		s3c_write_cmd(ONENAND_ERASE_START, CMD_MAP_10(mem_addr)); +		return 0; + +	case ONENAND_CMD_MULTIBLOCK_ERASE: +		s3c_write_cmd(ONENAND_MULTI_ERASE_SET, CMD_MAP_10(mem_addr)); +		return 0; + +	case ONENAND_CMD_ERASE_VERIFY: +		s3c_write_cmd(ONENAND_ERASE_VERIFY, CMD_MAP_10(mem_addr)); +		return 0; + +	default: +		break; +	} + +	return 0; +} + +static unsigned char *s3c_get_bufferram(struct mtd_info *mtd, int area) +{ +	struct onenand_chip *this = mtd->priv; +	int index = ONENAND_CURRENT_BUFFERRAM(this); +	unsigned char *p; + +	if (area == ONENAND_DATARAM) { +		p = (unsigned char *) onenand->page_buf; +		if (index == 1) +			p += this->writesize; +	} else { +		p = (unsigned char *) onenand->oob_buf; +		if (index == 1) +			p += mtd->oobsize; +	} + +	return p; +} + +static int onenand_read_bufferram(struct mtd_info *mtd, loff_t addr, int area, +				  unsigned char *buffer, int offset, +				  size_t count) +{ +	unsigned char *p; + +	p = s3c_get_bufferram(mtd, area); +	memcpy(buffer, p + offset, count); +	return 0; +} + +static int onenand_write_bufferram(struct mtd_info *mtd, loff_t addr, int area, +				   const unsigned char *buffer, int offset, +				   size_t count) +{ +	unsigned char *p; + +	p = s3c_get_bufferram(mtd, area); +	memcpy(p + offset, buffer, count); +	
return 0; +} + +static int s3c_onenand_bbt_wait(struct mtd_info *mtd, int state) +{ +	struct samsung_onenand *reg = (struct samsung_onenand *)onenand->base; +	unsigned int flags = INT_ACT | LOAD_CMP; +	unsigned int stat; +	unsigned long timeout = 0x10000; + +	while (timeout--) { +		stat = readl(®->int_err_stat); +		if (stat & flags) +			break; +	} +	/* To get correct interrupt status in timeout case */ +	stat = readl(&onenand->reg->int_err_stat); +	writel(stat, &onenand->reg->int_err_ack); + +	if (stat & LD_FAIL_ECC_ERR) { +		s3c_onenand_reset(); +		return ONENAND_BBT_READ_ERROR; +	} + +	if (stat & LOAD_CMP) { +		int ecc = readl(&onenand->reg->ecc_err_stat); +		if (ecc & ONENAND_ECC_4BIT_UNCORRECTABLE) { +			s3c_onenand_reset(); +			return ONENAND_BBT_READ_ERROR; +		} +	} + +	return 0; +} + +static void s3c_onenand_check_lock_status(struct mtd_info *mtd) +{ +	struct onenand_chip *this = mtd->priv; +	unsigned int block, end; + +	end = this->chipsize >> this->erase_shift; + +	for (block = 0; block < end; block++) { +		s3c_read_cmd(CMD_MAP_01(onenand->mem_addr(block, 0, 0))); + +		if (readl(&onenand->reg->int_err_stat) & LOCKED_BLK) { +			printf("block %d is write-protected!\n", block); +			writel(LOCKED_BLK, &onenand->reg->int_err_ack); +		} +	} +} + +static void s3c_onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, +		size_t len, int cmd) +{ +	struct onenand_chip *this = mtd->priv; +	int start, end, start_mem_addr, end_mem_addr; + +	start = ofs >> this->erase_shift; +	start_mem_addr = onenand->mem_addr(start, 0, 0); +	end = start + (len >> this->erase_shift) - 1; +	end_mem_addr = onenand->mem_addr(end, 0, 0); + +	if (cmd == ONENAND_CMD_LOCK) { +		s3c_write_cmd(ONENAND_LOCK_START, CMD_MAP_10(start_mem_addr)); +		s3c_write_cmd(ONENAND_LOCK_END, CMD_MAP_10(end_mem_addr)); +	} else { +		s3c_write_cmd(ONENAND_UNLOCK_START, CMD_MAP_10(start_mem_addr)); +		s3c_write_cmd(ONENAND_UNLOCK_END, CMD_MAP_10(end_mem_addr)); +	} + +	this->wait(mtd, FL_LOCKING); +} + +static 
void s3c_onenand_unlock_all(struct mtd_info *mtd) +{ +	struct onenand_chip *this = mtd->priv; +	loff_t ofs = 0; +	size_t len = this->chipsize; + +	/* FIXME workaround */ +	this->subpagesize = mtd->writesize; +	mtd->subpage_sft = 0; + +	if (this->options & ONENAND_HAS_UNLOCK_ALL) { +		/* Write unlock command */ +		this->command(mtd, ONENAND_CMD_UNLOCK_ALL, 0, 0); + +		/* No need to check return value */ +		this->wait(mtd, FL_LOCKING); + +		/* Workaround for all block unlock in DDP */ +		if (!ONENAND_IS_DDP(this)) { +			s3c_onenand_check_lock_status(mtd); +			return; +		} + +		/* All blocks on another chip */ +		ofs = this->chipsize >> 1; +		len = this->chipsize >> 1; +	} + +	s3c_onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK); +	s3c_onenand_check_lock_status(mtd); +} + +int s5pc110_chip_probe(struct mtd_info *mtd) +{ +	return 0; +} + +int s5pc210_chip_probe(struct mtd_info *mtd) +{ +	return 0; +} + +void s3c_onenand_init(struct mtd_info *mtd) +{ +	struct onenand_chip *this = mtd->priv; +	u32 size = (4 << 10);	/* 4 KiB */ + +	onenand = malloc(sizeof(struct s3c_onenand)); +	if (!onenand) +		return; + +	onenand->page_buf = malloc(size * sizeof(char)); +	if (!onenand->page_buf) +		return; +	memset(onenand->page_buf, 0xff, size); + +	onenand->oob_buf = malloc(128 * sizeof(char)); +	if (!onenand->oob_buf) +		return; +	memset(onenand->oob_buf, 0xff, 128); + +	onenand->mtd = mtd; + +#if defined(CONFIG_S5P) +	onenand->base = (void *)0xE7100000; +	onenand->ahb_addr = (void *)0xB0000000; +#endif +	onenand->mem_addr = s3c_mem_addr; +	onenand->reg = (struct samsung_onenand *)onenand->base; + +	this->read_word = s3c_onenand_readw; +	this->write_word = s3c_onenand_writew; + +	this->wait = s3c_onenand_wait; +	this->bbt_wait = s3c_onenand_bbt_wait; +	this->unlock_all = s3c_onenand_unlock_all; +	this->command = s3c_onenand_command; + +	this->read_bufferram = onenand_read_bufferram; +	this->write_bufferram = onenand_write_bufferram; + +	this->options |= 
ONENAND_RUNTIME_BADBLOCK_CHECK; +} diff --git a/roms/u-boot/drivers/mtd/spi/Makefile b/roms/u-boot/drivers/mtd/spi/Makefile new file mode 100644 index 00000000..9e18fb41 --- /dev/null +++ b/roms/u-boot/drivers/mtd/spi/Makefile @@ -0,0 +1,17 @@ +# +# (C) Copyright 2006 +# Wolfgang Denk, DENX Software Engineering, wd@denx.de. +# +# SPDX-License-Identifier:	GPL-2.0+ +# + +ifdef CONFIG_SPL_BUILD +obj-$(CONFIG_SPL_SPI_LOAD)	+= spi_spl_load.o +obj-$(CONFIG_SPL_SPI_BOOT)	+= fsl_espi_spl.o +endif + +obj-$(CONFIG_CMD_SF) += sf.o +obj-$(CONFIG_SPI_FLASH) += sf_params.o sf_probe.o sf_ops.o +obj-$(CONFIG_SPI_FRAM_RAMTRON) += ramtron.o +obj-$(CONFIG_SPI_FLASH_SANDBOX) += sandbox.o +obj-$(CONFIG_SPI_M95XXX) += eeprom_m95xxx.o diff --git a/roms/u-boot/drivers/mtd/spi/eeprom_m95xxx.c b/roms/u-boot/drivers/mtd/spi/eeprom_m95xxx.c new file mode 100644 index 00000000..a019939b --- /dev/null +++ b/roms/u-boot/drivers/mtd/spi/eeprom_m95xxx.c @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2009 + * Albin Tonnerre, Free Electrons <albin.tonnerre@free-electrons.com> + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <spi.h> + +#define SPI_EEPROM_WREN		0x06 +#define SPI_EEPROM_RDSR		0x05 +#define SPI_EEPROM_READ		0x03 +#define SPI_EEPROM_WRITE	0x02 + +#ifndef CONFIG_DEFAULT_SPI_BUS +#define CONFIG_DEFAULT_SPI_BUS 0 +#endif + +#ifndef CONFIG_DEFAULT_SPI_MODE +#define CONFIG_DEFAULT_SPI_MODE SPI_MODE_0 +#endif + +#ifndef CONFIG_SYS_SPI_WRITE_TOUT +#define CONFIG_SYS_SPI_WRITE_TOUT (5 * CONFIG_SYS_HZ) +#endif + +ssize_t spi_read(uchar *addr, int alen, uchar *buffer, int len) +{ +	struct spi_slave *slave; +	u8 cmd = SPI_EEPROM_READ; + +	slave = spi_setup_slave(CONFIG_DEFAULT_SPI_BUS, 1, 1000000, +			CONFIG_DEFAULT_SPI_MODE); +	if (!slave) +		return 0; + +	spi_claim_bus(slave); + +	/* command */ +	if (spi_xfer(slave, 8, &cmd, NULL, SPI_XFER_BEGIN)) +		return -1; + +	/* +	 * if alen == 3, addr[0] is the block number, we never use it here. 
+	 * All we need are the lower 16 bits. +	 */ +	if (alen == 3) +		addr++; + +	/* address, and data */ +	if (spi_xfer(slave, 16, addr, NULL, 0)) +		return -1; +	if (spi_xfer(slave, 8 * len, NULL, buffer, SPI_XFER_END)) +		return -1; + +	spi_release_bus(slave); +	spi_free_slave(slave); +	return len; +} + +ssize_t spi_write(uchar *addr, int alen, uchar *buffer, int len) +{ +	struct spi_slave *slave; +	char buf[3]; +	ulong start; + +	slave = spi_setup_slave(CONFIG_DEFAULT_SPI_BUS, 1, 1000000, +			CONFIG_DEFAULT_SPI_MODE); +	if (!slave) +		return 0; + +	spi_claim_bus(slave); + +	buf[0] = SPI_EEPROM_WREN; +	if (spi_xfer(slave, 8, buf, NULL, SPI_XFER_BEGIN | SPI_XFER_END)) +		return -1; + +	buf[0] = SPI_EEPROM_WRITE; + +	/* As for reading, drop addr[0] if alen is 3 */ +	if (alen == 3) { +		alen--; +		addr++; +	} + +	memcpy(buf + 1, addr, alen); +	/* command + addr, then data */ +	if (spi_xfer(slave, 24, buf, NULL, SPI_XFER_BEGIN)) +		return -1; +	if (spi_xfer(slave, len * 8, buffer, NULL, SPI_XFER_END)) +		return -1; + +	start = get_timer(0); +	do { +		buf[0] = SPI_EEPROM_RDSR; +		buf[1] = 0; +		spi_xfer(slave, 16, buf, buf, SPI_XFER_BEGIN | SPI_XFER_END); + +		if (!(buf[1] & 1)) +			break; + +	} while (get_timer(start) < CONFIG_SYS_SPI_WRITE_TOUT); + +	if (buf[1] & 1) +		printf("*** spi_write: Timeout while writing!\n"); + +	spi_release_bus(slave); +	spi_free_slave(slave); +	return len; +} diff --git a/roms/u-boot/drivers/mtd/spi/fsl_espi_spl.c b/roms/u-boot/drivers/mtd/spi/fsl_espi_spl.c new file mode 100644 index 00000000..b915469b --- /dev/null +++ b/roms/u-boot/drivers/mtd/spi/fsl_espi_spl.c @@ -0,0 +1,90 @@ +/* + * Copyright 2013 Freescale Semiconductor, Inc. 
+ * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <spi_flash.h> +#include <malloc.h> + +#define ESPI_BOOT_IMAGE_SIZE	0x48 +#define ESPI_BOOT_IMAGE_ADDR	0x50 +#define CONFIG_CFG_DATA_SECTOR	0 + +void spi_spl_load_image(uint32_t offs, unsigned int size, void *vdst) +{ +	struct spi_flash *flash; + +	flash = spi_flash_probe(CONFIG_ENV_SPI_BUS, CONFIG_ENV_SPI_CS, +			CONFIG_ENV_SPI_MAX_HZ, CONFIG_ENV_SPI_MODE); +	if (flash == NULL) { +		puts("\nspi_flash_probe failed"); +		hang(); +	} + +	spi_flash_read(flash, offs, size, vdst); +} + +/* + * The main entry for SPI booting. It's necessary that SDRAM is already + * configured and available since this code loads the main U-Boot image + * from SPI into SDRAM and starts it from there. + */ +void spi_boot(void) +{ +	void (*uboot)(void) __noreturn; +	u32 offset, code_len, copy_len = 0; +#ifndef CONFIG_FSL_CORENET +	unsigned char *buf = NULL; +#endif +	struct spi_flash *flash; + +	flash = spi_flash_probe(CONFIG_ENV_SPI_BUS, CONFIG_ENV_SPI_CS, +			CONFIG_ENV_SPI_MAX_HZ, CONFIG_ENV_SPI_MODE); +	if (flash == NULL) { +		puts("\nspi_flash_probe failed"); +		hang(); +	} + +#ifdef CONFIG_FSL_CORENET +	offset = CONFIG_SYS_SPI_FLASH_U_BOOT_OFFS; +	code_len = CONFIG_SYS_SPI_FLASH_U_BOOT_SIZE; +#else +	/* +	* Load U-Boot image from SPI flash into RAM +	*/ +	buf = malloc(flash->page_size); +	if (buf == NULL) { +		puts("\nmalloc failed"); +		hang(); +	} +	memset(buf, 0, flash->page_size); + +	spi_flash_read(flash, CONFIG_CFG_DATA_SECTOR, +		       flash->page_size, (void *)buf); +	offset = *(u32 *)(buf + ESPI_BOOT_IMAGE_ADDR); +	/* Skip spl code */ +	offset += CONFIG_SYS_SPI_FLASH_U_BOOT_OFFS; +	/* Get the code size from offset 0x48 */ +	code_len = *(u32 *)(buf + ESPI_BOOT_IMAGE_SIZE); +	/* Skip spl code */ +	code_len = code_len - CONFIG_SPL_MAX_SIZE; +#endif +	/* copy code to DDR */ +	printf("Loading second stage boot loader "); +	while (copy_len <= code_len) { +		spi_flash_read(flash, offset + copy_len, 
0x2000, +			       (void *)(CONFIG_SYS_SPI_FLASH_U_BOOT_DST +			       + copy_len)); +		copy_len = copy_len + 0x2000; +		putc('.'); +	} + +	/* +	* Jump to U-Boot image +	*/ +	flush_cache(CONFIG_SYS_SPI_FLASH_U_BOOT_DST, code_len); +	uboot = (void *)CONFIG_SYS_SPI_FLASH_U_BOOT_START; +	(*uboot)(); +} diff --git a/roms/u-boot/drivers/mtd/spi/ramtron.c b/roms/u-boot/drivers/mtd/spi/ramtron.c new file mode 100644 index 00000000..d50da37c --- /dev/null +++ b/roms/u-boot/drivers/mtd/spi/ramtron.c @@ -0,0 +1,403 @@ +/* + * (C) Copyright 2010 + * Reinhard Meyer, EMK Elektronik, reinhard.meyer@emk-elektronik.de + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +/* + * Note: RAMTRON SPI FRAMs are ferroelectric, nonvolatile RAMs + * with an interface identical to SPI flash devices. + * However since they behave like RAM there are no delays or + * busy polls required. They can sustain read or write at the + * allowed SPI bus speed, which can be 40 MHz for some devices. + * + * Unfortunately some RAMTRON devices do not have a means of + * identifying them. They will leave the SO line undriven when + * the READ-ID command is issued. It is therefore mandatory + * that the MISO line has a proper pull-up, so that READ-ID + * will return a row of 0xff. This 0xff pseudo-id will cause + * probes by all vendor specific functions that are designed + * to handle it. If the MISO line is not pulled up, READ-ID + * could return any random noise, even mimicking another + * device. + * + * We use CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC + * to define which device will be assumed after a simple status + * register verify. This method is prone to false positive + * detection and should therefore be the last to be tried. + * Enter it in the last position in the table in spi_flash.c! + * + * The define CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC both activates + * compilation of the special handler and defines the device + * to assume. 
+ */ + +#include <common.h> +#include <malloc.h> +#include <spi_flash.h> +#include "sf_internal.h" + +/* + * Properties of supported FRAMs + * Note: speed is currently not used because we have no method to deliver that + * value to the upper layers + */ +struct ramtron_spi_fram_params { +	u32	size;		/* size in bytes */ +	u8	addr_len;	/* number of address bytes */ +	u8	merge_cmd;	/* some address bits are in the command byte */ +	u8	id1;		/* device ID 1 (family, density) */ +	u8	id2;		/* device ID 2 (sub, rev, rsvd) */ +	u32	speed;		/* max. SPI clock in Hz */ +	const char *name;	/* name for display and/or matching */ +}; + +struct ramtron_spi_fram { +	struct spi_flash flash; +	const struct ramtron_spi_fram_params *params; +}; + +static inline struct ramtron_spi_fram *to_ramtron_spi_fram(struct spi_flash +							     *flash) +{ +	return container_of(flash, struct ramtron_spi_fram, flash); +} + +/* + * table describing supported FRAM chips: + * chips without RDID command must have the values 0xff for id1 and id2 + */ +static const struct ramtron_spi_fram_params ramtron_spi_fram_table[] = { +	{ +		.size = 32*1024, +		.addr_len = 2, +		.merge_cmd = 0, +		.id1 = 0x22, +		.id2 = 0x00, +		.speed = 40000000, +		.name = "FM25V02", +	}, +	{ +		.size = 32*1024, +		.addr_len = 2, +		.merge_cmd = 0, +		.id1 = 0x22, +		.id2 = 0x01, +		.speed = 40000000, +		.name = "FM25VN02", +	}, +	{ +		.size = 64*1024, +		.addr_len = 2, +		.merge_cmd = 0, +		.id1 = 0x23, +		.id2 = 0x00, +		.speed = 40000000, +		.name = "FM25V05", +	}, +	{ +		.size = 64*1024, +		.addr_len = 2, +		.merge_cmd = 0, +		.id1 = 0x23, +		.id2 = 0x01, +		.speed = 40000000, +		.name = "FM25VN05", +	}, +	{ +		.size = 128*1024, +		.addr_len = 3, +		.merge_cmd = 0, +		.id1 = 0x24, +		.id2 = 0x00, +		.speed = 40000000, +		.name = "FM25V10", +	}, +	{ +		.size = 128*1024, +		.addr_len = 3, +		.merge_cmd = 0, +		.id1 = 0x24, +		.id2 = 0x01, +		.speed = 40000000, +		.name = "FM25VN10", +	}, +#ifdef 
CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC +	{ +		.size = 256*1024, +		.addr_len = 3, +		.merge_cmd = 0, +		.id1 = 0xff, +		.id2 = 0xff, +		.speed = 40000000, +		.name = "FM25H20", +	}, +#endif +}; + +static int ramtron_common(struct spi_flash *flash, +		u32 offset, size_t len, void *buf, u8 command) +{ +	struct ramtron_spi_fram *sn = to_ramtron_spi_fram(flash); +	u8 cmd[4]; +	int cmd_len; +	int ret; + +	if (sn->params->addr_len == 3 && sn->params->merge_cmd == 0) { +		cmd[0] = command; +		cmd[1] = offset >> 16; +		cmd[2] = offset >> 8; +		cmd[3] = offset; +		cmd_len = 4; +	} else if (sn->params->addr_len == 2 && sn->params->merge_cmd == 0) { +		cmd[0] = command; +		cmd[1] = offset >> 8; +		cmd[2] = offset; +		cmd_len = 3; +	} else { +		printf("SF: unsupported addr_len or merge_cmd\n"); +		return -1; +	} + +	/* claim the bus */ +	ret = spi_claim_bus(flash->spi); +	if (ret) { +		debug("SF: Unable to claim SPI bus\n"); +		return ret; +	} + +	if (command == CMD_PAGE_PROGRAM) { +		/* send WREN */ +		ret = spi_flash_cmd_write_enable(flash); +		if (ret < 0) { +			debug("SF: Enabling Write failed\n"); +			goto releasebus; +		} +	} + +	/* do the transaction */ +	if (command == CMD_PAGE_PROGRAM) +		ret = spi_flash_cmd_write(flash->spi, cmd, cmd_len, buf, len); +	else +		ret = spi_flash_cmd_read(flash->spi, cmd, cmd_len, buf, len); +	if (ret < 0) +		debug("SF: Transaction failed\n"); + +releasebus: +	/* release the bus */ +	spi_release_bus(flash->spi); +	return ret; +} + +static int ramtron_read(struct spi_flash *flash, +		u32 offset, size_t len, void *buf) +{ +	return ramtron_common(flash, offset, len, buf, +		CMD_READ_ARRAY_SLOW); +} + +static int ramtron_write(struct spi_flash *flash, +		u32 offset, size_t len, const void *buf) +{ +	return ramtron_common(flash, offset, len, (void *)buf, +		CMD_PAGE_PROGRAM); +} + +static int ramtron_erase(struct spi_flash *flash, u32 offset, size_t len) +{ +	debug("SF: Erase of RAMTRON FRAMs is pointless\n"); +	return -1; +} + +/* + * nore: we are 
called here with idcode pointing to the first non-0x7f byte + * already! + */ +static struct spi_flash *spi_fram_probe_ramtron(struct spi_slave *spi, +		u8 *idcode) +{ +	const struct ramtron_spi_fram_params *params; +	struct ramtron_spi_fram *sn; +	unsigned int i; +#ifdef CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC +	int ret; +	u8 sr; +#endif + +	/* NOTE: the bus has been claimed before this function is called! */ +	switch (idcode[0]) { +	case 0xc2: +		/* JEDEC conformant RAMTRON id */ +		for (i = 0; i < ARRAY_SIZE(ramtron_spi_fram_table); i++) { +			params = &ramtron_spi_fram_table[i]; +			if (idcode[1] == params->id1 && +			    idcode[2] == params->id2) +				goto found; +		} +		break; +#ifdef CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC +	case 0xff: +		/* +		 * probably open MISO line, pulled up. +		 * We COULD have a non JEDEC conformant FRAM here, +		 * read the status register to verify +		 */ +		ret = spi_flash_cmd(spi, CMD_READ_STATUS, &sr, 1); +		if (ret) +			return NULL; + +		/* Bits 5,4,0 are fixed 0 for all devices */ +		if ((sr & 0x31) != 0x00) +			return NULL; +		/* now find the device */ +		for (i = 0; i < ARRAY_SIZE(ramtron_spi_fram_table); i++) { +			params = &ramtron_spi_fram_table[i]; +			if (!strcmp(params->name, +				    CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC)) +				goto found; +		} +		debug("SF: Unsupported non-JEDEC RAMTRON device " +			CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC "\n"); +		break; +#endif +	default: +		break; +	} + +	/* arriving here means no method has found a device we can handle */ +	debug("SF/ramtron: unsupported device id0=%02x id1=%02x id2=%02x\n", +	      idcode[0], idcode[1], idcode[2]); +	return NULL; + +found: +	sn = malloc(sizeof(*sn)); +	if (!sn) { +		debug("SF: Failed to allocate memory\n"); +		return NULL; +	} + +	sn->params = params; + +	sn->flash.write = ramtron_write; +	sn->flash.read = ramtron_read; +	sn->flash.erase = ramtron_erase; +	sn->flash.size = params->size; + +	return &sn->flash; +} + +/* + * The following table holds all device probe 
functions + * (All flashes are removed and implemented a common probe at + *  spi_flash_probe.c) + * + * shift:  number of continuation bytes before the ID + * idcode: the expected IDCODE or 0xff for non JEDEC devices + * probe:  the function to call + * + * Non JEDEC devices should be ordered in the table such that + * the probe functions with best detection algorithms come first. + * + * Several matching entries are permitted, they will be tried + * in sequence until a probe function returns non NULL. + * + * IDCODE_CONT_LEN may be redefined if a device needs to declare a + * larger "shift" value.  IDCODE_PART_LEN generally shouldn't be + * changed.  This is the max number of bytes probe functions may + * examine when looking up part-specific identification info. + * + * Probe functions will be given the idcode buffer starting at their + * manu id byte (the "idcode" in the table below).  In other words, + * all of the continuation bytes will be skipped (the "shift" below). + */ +#define IDCODE_CONT_LEN 0 +#define IDCODE_PART_LEN 5 +static const struct { +	const u8 shift; +	const u8 idcode; +	struct spi_flash *(*probe) (struct spi_slave *spi, u8 *idcode); +} flashes[] = { +	/* Keep it sorted by define name */ +#ifdef CONFIG_SPI_FRAM_RAMTRON +	{ 6, 0xc2, spi_fram_probe_ramtron, }, +# undef IDCODE_CONT_LEN +# define IDCODE_CONT_LEN 6 +#endif +#ifdef CONFIG_SPI_FRAM_RAMTRON_NON_JEDEC +	{ 0, 0xff, spi_fram_probe_ramtron, }, +#endif +}; +#define IDCODE_LEN (IDCODE_CONT_LEN + IDCODE_PART_LEN) + +struct spi_flash *spi_flash_probe(unsigned int bus, unsigned int cs, +		unsigned int max_hz, unsigned int spi_mode) +{ +	struct spi_slave *spi; +	struct spi_flash *flash = NULL; +	int ret, i, shift; +	u8 idcode[IDCODE_LEN], *idp; + +	spi = spi_setup_slave(bus, cs, max_hz, spi_mode); +	if (!spi) { +		printf("SF: Failed to set up slave\n"); +		return NULL; +	} + +	ret = spi_claim_bus(spi); +	if (ret) { +		debug("SF: Failed to claim SPI bus: %d\n", ret); +		goto err_claim_bus; +	} 
+ +	/* Read the ID codes */ +	ret = spi_flash_cmd(spi, CMD_READ_ID, idcode, sizeof(idcode)); +	if (ret) +		goto err_read_id; + +#ifdef DEBUG +	printf("SF: Got idcodes\n"); +	print_buffer(0, idcode, 1, sizeof(idcode), 0); +#endif + +	/* count the number of continuation bytes */ +	for (shift = 0, idp = idcode; +	     shift < IDCODE_CONT_LEN && *idp == 0x7f; +	     ++shift, ++idp) +		continue; + +	/* search the table for matches in shift and id */ +	for (i = 0; i < ARRAY_SIZE(flashes); ++i) +		if (flashes[i].shift == shift && flashes[i].idcode == *idp) { +			/* we have a match, call probe */ +			flash = flashes[i].probe(spi, idp); +			if (flash) +				break; +		} + +	if (!flash) { +		printf("SF: Unsupported manufacturer %02x\n", *idp); +		goto err_manufacturer_probe; +	} + +	printf("SF: Detected %s with total size ", flash->name); +	print_size(flash->size, ""); +	puts("\n"); + +	spi_release_bus(spi); + +	return flash; + +err_manufacturer_probe: +err_read_id: +	spi_release_bus(spi); +err_claim_bus: +	spi_free_slave(spi); +	return NULL; +} + +void spi_flash_free(struct spi_flash *flash) +{ +	spi_free_slave(flash->spi); +	free(flash); +} diff --git a/roms/u-boot/drivers/mtd/spi/sandbox.c b/roms/u-boot/drivers/mtd/spi/sandbox.c new file mode 100644 index 00000000..a62ef4cb --- /dev/null +++ b/roms/u-boot/drivers/mtd/spi/sandbox.c @@ -0,0 +1,483 @@ +/* + * Simulate a SPI flash + * + * Copyright (c) 2011-2013 The Chromium OS Authors. + * See file CREDITS for list of people who contributed to this + * project. + * + * Licensed under the GPL-2 or later. + */ + +#include <common.h> +#include <malloc.h> +#include <spi.h> +#include <os.h> + +#include <spi_flash.h> +#include "sf_internal.h" + +#include <asm/getopt.h> +#include <asm/spi.h> +#include <asm/state.h> + +/* + * The different states that our SPI flash transitions between. + * We need to keep track of this across multiple xfer calls since + * the SPI bus could possibly call down into us multiple times. 
+ */ +enum sandbox_sf_state { +	SF_CMD,   /* default state -- we're awaiting a command */ +	SF_ID,    /* read the flash's (jedec) ID code */ +	SF_ADDR,  /* processing the offset in the flash to read/etc... */ +	SF_READ,  /* reading data from the flash */ +	SF_WRITE, /* writing data to the flash, i.e. page programming */ +	SF_ERASE, /* erase the flash */ +	SF_READ_STATUS, /* read the flash's status register */ +	SF_READ_STATUS1, /* read the flash's status register upper 8 bits*/ +}; + +static const char *sandbox_sf_state_name(enum sandbox_sf_state state) +{ +	static const char * const states[] = { +		"CMD", "ID", "ADDR", "READ", "WRITE", "ERASE", "READ_STATUS", +	}; +	return states[state]; +} + +/* Bits for the status register */ +#define STAT_WIP	(1 << 0) +#define STAT_WEL	(1 << 1) + +/* Assume all SPI flashes have 3 byte addresses since they do atm */ +#define SF_ADDR_LEN	3 + +struct sandbox_spi_flash_erase_commands { +	u8 cmd; +	u32 size; +}; +#define IDCODE_LEN 5 +#define MAX_ERASE_CMDS 3 +struct sandbox_spi_flash_data { +	const char *name; +	u8 idcode[IDCODE_LEN]; +	u32 size; +	const struct sandbox_spi_flash_erase_commands +						erase_cmds[MAX_ERASE_CMDS]; +}; + +/* Structure describing all the flashes we know how to emulate */ +static const struct sandbox_spi_flash_data sandbox_sf_flashes[] = { +	{ +		"M25P16", { 0x20, 0x20, 0x15 }, (2 << 20), +		{	/* erase commands */ +			{ 0xd8, (64 << 10), }, /* sector */ +			{ 0xc7, (2 << 20), }, /* bulk */ +		}, +	}, +	{ +		"W25Q32", { 0xef, 0x40, 0x16 }, (4 << 20), +		{	/* erase commands */ +			{ 0x20, (4 << 10), }, /* 4KB */ +			{ 0xd8, (64 << 10), }, /* sector */ +			{ 0xc7, (4 << 20), }, /* bulk */ +		}, +	}, +	{ +		"W25Q128", { 0xef, 0x40, 0x18 }, (16 << 20), +		{	/* erase commands */ +			{ 0x20, (4 << 10), }, /* 4KB */ +			{ 0xd8, (64 << 10), }, /* sector */ +			{ 0xc7, (16 << 20), }, /* bulk */ +		}, +	}, +}; + +/* Used to quickly bulk erase backing store */ +static u8 sandbox_sf_0xff[0x1000]; + +/* Internal state 
data for each SPI flash */ +struct sandbox_spi_flash { +	/* +	 * As we receive data over the SPI bus, our flash transitions +	 * between states.  For example, we start off in the SF_CMD +	 * state where the first byte tells us what operation to perform +	 * (such as read or write the flash).  But the operation itself +	 * can go through a few states such as first reading in the +	 * offset in the flash to perform the requested operation. +	 * Thus "state" stores the exact state that our machine is in +	 * while "cmd" stores the overall command we're processing. +	 */ +	enum sandbox_sf_state state; +	uint cmd; +	const void *cmd_data; +	/* Current position in the flash; used when reading/writing/etc... */ +	uint off; +	/* How many address bytes we've consumed */ +	uint addr_bytes, pad_addr_bytes; +	/* The current flash status (see STAT_XXX defines above) */ +	u16 status; +	/* Data describing the flash we're emulating */ +	const struct sandbox_spi_flash_data *data; +	/* The file on disk to serv up data from */ +	int fd; +}; + +static int sandbox_sf_setup(void **priv, const char *spec) +{ +	/* spec = idcode:file */ +	struct sandbox_spi_flash *sbsf; +	const char *file; +	size_t i, len, idname_len; +	const struct sandbox_spi_flash_data *data; + +	file = strchr(spec, ':'); +	if (!file) { +		printf("sandbox_sf: unable to parse file\n"); +		goto error; +	} +	idname_len = file - spec; +	++file; + +	for (i = 0; i < ARRAY_SIZE(sandbox_sf_flashes); ++i) { +		data = &sandbox_sf_flashes[i]; +		len = strlen(data->name); +		if (idname_len != len) +			continue; +		if (!memcmp(spec, data->name, len)) +			break; +	} +	if (i == ARRAY_SIZE(sandbox_sf_flashes)) { +		printf("sandbox_sf: unknown flash '%*s'\n", (int)idname_len, +		       spec); +		goto error; +	} + +	if (sandbox_sf_0xff[0] == 0x00) +		memset(sandbox_sf_0xff, 0xff, sizeof(sandbox_sf_0xff)); + +	sbsf = calloc(sizeof(*sbsf), 1); +	if (!sbsf) { +		printf("sandbox_sf: out of memory\n"); +		goto error; +	} + +	sbsf->fd = 
os_open(file, 02); +	if (sbsf->fd == -1) { +		free(sbsf); +		printf("sandbox_sf: unable to open file '%s'\n", file); +		goto error; +	} + +	sbsf->data = data; + +	*priv = sbsf; +	return 0; + + error: +	return 1; +} + +static void sandbox_sf_free(void *priv) +{ +	struct sandbox_spi_flash *sbsf = priv; + +	os_close(sbsf->fd); +	free(sbsf); +} + +static void sandbox_sf_cs_activate(void *priv) +{ +	struct sandbox_spi_flash *sbsf = priv; + +	debug("sandbox_sf: CS activated; state is fresh!\n"); + +	/* CS is asserted, so reset state */ +	sbsf->off = 0; +	sbsf->addr_bytes = 0; +	sbsf->pad_addr_bytes = 0; +	sbsf->state = SF_CMD; +	sbsf->cmd = SF_CMD; +} + +static void sandbox_sf_cs_deactivate(void *priv) +{ +	debug("sandbox_sf: CS deactivated; cmd done processing!\n"); +} + +/* Figure out what command this stream is telling us to do */ +static int sandbox_sf_process_cmd(struct sandbox_spi_flash *sbsf, const u8 *rx, +				  u8 *tx) +{ +	enum sandbox_sf_state oldstate = sbsf->state; + +	/* We need to output a byte for the cmd byte we just ate */ +	sandbox_spi_tristate(tx, 1); + +	sbsf->cmd = rx[0]; +	switch (sbsf->cmd) { +	case CMD_READ_ID: +		sbsf->state = SF_ID; +		sbsf->cmd = SF_ID; +		break; +	case CMD_READ_ARRAY_FAST: +		sbsf->pad_addr_bytes = 1; +	case CMD_READ_ARRAY_SLOW: +	case CMD_PAGE_PROGRAM: + state_addr: +		sbsf->state = SF_ADDR; +		break; +	case CMD_WRITE_DISABLE: +		debug(" write disabled\n"); +		sbsf->status &= ~STAT_WEL; +		break; +	case CMD_READ_STATUS: +		sbsf->state = SF_READ_STATUS; +		break; +	case CMD_READ_STATUS1: +		sbsf->state = SF_READ_STATUS1; +		break; +	case CMD_WRITE_ENABLE: +		debug(" write enabled\n"); +		sbsf->status |= STAT_WEL; +		break; +	default: { +		size_t i; + +		/* handle erase commands first */ +		for (i = 0; i < MAX_ERASE_CMDS; ++i) { +			const struct sandbox_spi_flash_erase_commands * +				erase_cmd = &sbsf->data->erase_cmds[i]; + +			if (erase_cmd->cmd == 0x00) +				continue; +			if (sbsf->cmd != erase_cmd->cmd) +				continue; + +	
		sbsf->cmd_data = erase_cmd; +			goto state_addr; +		} + +		debug(" cmd unknown: %#x\n", sbsf->cmd); +		return 1; +	} +	} + +	if (oldstate != sbsf->state) +		debug(" cmd: transition to %s state\n", +		      sandbox_sf_state_name(sbsf->state)); + +	return 0; +} + +int sandbox_erase_part(struct sandbox_spi_flash *sbsf, int size) +{ +	int todo; +	int ret; + +	while (size > 0) { +		todo = min(size, sizeof(sandbox_sf_0xff)); +		ret = os_write(sbsf->fd, sandbox_sf_0xff, todo); +		if (ret != todo) +			return ret; +		size -= todo; +	} + +	return 0; +} + +static int sandbox_sf_xfer(void *priv, const u8 *rx, u8 *tx, +		uint bytes) +{ +	struct sandbox_spi_flash *sbsf = priv; +	uint cnt, pos = 0; +	int ret; + +	debug("sandbox_sf: state:%x(%s) bytes:%u\n", sbsf->state, +	      sandbox_sf_state_name(sbsf->state), bytes); + +	if (sbsf->state == SF_CMD) { +		/* Figure out the initial state */ +		if (sandbox_sf_process_cmd(sbsf, rx, tx)) +			return 1; +		++pos; +	} + +	/* Process the remaining data */ +	while (pos < bytes) { +		switch (sbsf->state) { +		case SF_ID: { +			u8 id; + +			debug(" id: off:%u tx:", sbsf->off); +			if (sbsf->off < IDCODE_LEN) +				id = sbsf->data->idcode[sbsf->off]; +			else +				id = 0; +			debug("%02x\n", id); +			tx[pos++] = id; +			++sbsf->off; +			break; +		} +		case SF_ADDR: +			debug(" addr: bytes:%u rx:%02x ", sbsf->addr_bytes, +			      rx[pos]); + +			if (sbsf->addr_bytes++ < SF_ADDR_LEN) +				sbsf->off = (sbsf->off << 8) | rx[pos]; +			debug("addr:%06x\n", sbsf->off); + +			sandbox_spi_tristate(&tx[pos++], 1); + +			/* See if we're done processing */ +			if (sbsf->addr_bytes < +					SF_ADDR_LEN + sbsf->pad_addr_bytes) +				break; + +			/* Next state! 
*/ +			if (os_lseek(sbsf->fd, sbsf->off, OS_SEEK_SET) < 0) { +				puts("sandbox_sf: os_lseek() failed"); +				return 1; +			} +			switch (sbsf->cmd) { +			case CMD_READ_ARRAY_FAST: +			case CMD_READ_ARRAY_SLOW: +				sbsf->state = SF_READ; +				break; +			case CMD_PAGE_PROGRAM: +				sbsf->state = SF_WRITE; +				break; +			default: +				/* assume erase state ... */ +				sbsf->state = SF_ERASE; +				goto case_sf_erase; +			} +			debug(" cmd: transition to %s state\n", +			      sandbox_sf_state_name(sbsf->state)); +			break; +		case SF_READ: +			/* +			 * XXX: need to handle exotic behavior: +			 *      - reading past end of device +			 */ + +			cnt = bytes - pos; +			debug(" tx: read(%u)\n", cnt); +			ret = os_read(sbsf->fd, tx + pos, cnt); +			if (ret < 0) { +				puts("sandbox_spi: os_read() failed\n"); +				return 1; +			} +			pos += ret; +			break; +		case SF_READ_STATUS: +			debug(" read status: %#x\n", sbsf->status); +			cnt = bytes - pos; +			memset(tx + pos, sbsf->status, cnt); +			pos += cnt; +			break; +		case SF_READ_STATUS1: +			debug(" read status: %#x\n", sbsf->status); +			cnt = bytes - pos; +			memset(tx + pos, sbsf->status >> 8, cnt); +			pos += cnt; +			break; +		case SF_WRITE: +			/* +			 * XXX: need to handle exotic behavior: +			 *      - unaligned addresses +			 *      - more than a page (256) worth of data +			 *      - reading past end of device +			 */ +			if (!(sbsf->status & STAT_WEL)) { +				puts("sandbox_sf: write enable not set before write\n"); +				goto done; +			} + +			cnt = bytes - pos; +			debug(" rx: write(%u)\n", cnt); +			sandbox_spi_tristate(&tx[pos], cnt); +			ret = os_write(sbsf->fd, rx + pos, cnt); +			if (ret < 0) { +				puts("sandbox_spi: os_write() failed\n"); +				return 1; +			} +			pos += ret; +			sbsf->status &= ~STAT_WEL; +			break; +		case SF_ERASE: + case_sf_erase: { +			const struct sandbox_spi_flash_erase_commands * +						erase_cmd = sbsf->cmd_data; + +			if (!(sbsf->status & STAT_WEL)) { +				puts("sandbox_sf: 
write enable not set before erase\n"); +				goto done; +			} + +			/* verify address is aligned */ +			if (sbsf->off & (erase_cmd->size - 1)) { +				debug(" sector erase: cmd:%#x needs align:%#x, but we got %#x\n", +				      erase_cmd->cmd, erase_cmd->size, +				      sbsf->off); +				sbsf->status &= ~STAT_WEL; +				goto done; +			} + +			debug(" sector erase addr: %u\n", sbsf->off); + +			cnt = bytes - pos; +			sandbox_spi_tristate(&tx[pos], cnt); +			pos += cnt; + +			/* +			 * TODO(vapier@gentoo.org): latch WIP in status, and +			 * delay before clearing it ? +			 */ +			ret = sandbox_erase_part(sbsf, erase_cmd->size); +			sbsf->status &= ~STAT_WEL; +			if (ret) { +				debug("sandbox_sf: Erase failed\n"); +				goto done; +			} +			goto done; +		} +		default: +			debug(" ??? no idea what to do ???\n"); +			goto done; +		} +	} + + done: +	return pos == bytes ? 0 : 1; +} + +static const struct sandbox_spi_emu_ops sandbox_sf_ops = { +	.setup         = sandbox_sf_setup, +	.free          = sandbox_sf_free, +	.cs_activate   = sandbox_sf_cs_activate, +	.cs_deactivate = sandbox_sf_cs_deactivate, +	.xfer          = sandbox_sf_xfer, +}; + +static int sandbox_cmdline_cb_spi_sf(struct sandbox_state *state, +				     const char *arg) +{ +	unsigned long bus, cs; +	const char *spec = sandbox_spi_parse_spec(arg, &bus, &cs); + +	if (!spec) +		return 1; + +	/* +	 * It is safe to not make a copy of 'spec' because it comes from the +	 * command line. +	 * +	 * TODO(sjg@chromium.org): It would be nice if we could parse the +	 * spec here, but the problem is that no U-Boot init has been done +	 * yet. Perhaps we can figure something out. 
+	 */ +	state->spi[bus][cs].ops = &sandbox_sf_ops; +	state->spi[bus][cs].spec = spec; +	return 0; +} +SANDBOX_CMDLINE_OPT(spi_sf, 1, "connect a SPI flash: <bus>:<cs>:<id>:<file>"); diff --git a/roms/u-boot/drivers/mtd/spi/sf.c b/roms/u-boot/drivers/mtd/spi/sf.c new file mode 100644 index 00000000..664e8608 --- /dev/null +++ b/roms/u-boot/drivers/mtd/spi/sf.c @@ -0,0 +1,58 @@ +/* + * SPI flash interface + * + * Copyright (C) 2008 Atmel Corporation + * Copyright (C) 2010 Reinhard Meyer, EMK Elektronik + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <spi.h> + +static int spi_flash_read_write(struct spi_slave *spi, +				const u8 *cmd, size_t cmd_len, +				const u8 *data_out, u8 *data_in, +				size_t data_len) +{ +	unsigned long flags = SPI_XFER_BEGIN; +	int ret; + +#ifdef CONFIG_SF_DUAL_FLASH +	if (spi->flags & SPI_XFER_U_PAGE) +		flags |= SPI_XFER_U_PAGE; +#endif +	if (data_len == 0) +		flags |= SPI_XFER_END; + +	ret = spi_xfer(spi, cmd_len * 8, cmd, NULL, flags); +	if (ret) { +		debug("SF: Failed to send command (%zu bytes): %d\n", +		      cmd_len, ret); +	} else if (data_len != 0) { +		ret = spi_xfer(spi, data_len * 8, data_out, data_in, +					SPI_XFER_END); +		if (ret) +			debug("SF: Failed to transfer %zu bytes of data: %d\n", +			      data_len, ret); +	} + +	return ret; +} + +int spi_flash_cmd_read(struct spi_slave *spi, const u8 *cmd, +		size_t cmd_len, void *data, size_t data_len) +{ +	return spi_flash_read_write(spi, cmd, cmd_len, NULL, data, data_len); +} + +int spi_flash_cmd(struct spi_slave *spi, u8 cmd, void *response, size_t len) +{ +	return spi_flash_cmd_read(spi, &cmd, 1, response, len); +} + +int spi_flash_cmd_write(struct spi_slave *spi, const u8 *cmd, size_t cmd_len, +		const void *data, size_t data_len) +{ +	return spi_flash_read_write(spi, cmd, cmd_len, data, NULL, data_len); +} diff --git a/roms/u-boot/drivers/mtd/spi/sf_internal.h b/roms/u-boot/drivers/mtd/spi/sf_internal.h new file mode 100644 index 
00000000..6bcd5220 --- /dev/null +++ b/roms/u-boot/drivers/mtd/spi/sf_internal.h @@ -0,0 +1,159 @@ +/* + * SPI flash internal definitions + * + * Copyright (C) 2008 Atmel Corporation + * Copyright (C) 2013 Jagannadha Sutradharudu Teki, Xilinx Inc. + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#ifndef _SF_INTERNAL_H_ +#define _SF_INTERNAL_H_ + +#define SPI_FLASH_3B_ADDR_LEN		3 +#define SPI_FLASH_CMD_LEN		(1 + SPI_FLASH_3B_ADDR_LEN) +#define SPI_FLASH_16MB_BOUN		0x1000000 + +/* CFI Manufacture ID's */ +#define SPI_FLASH_CFI_MFR_SPANSION	0x01 +#define SPI_FLASH_CFI_MFR_STMICRO	0x20 +#define SPI_FLASH_CFI_MFR_MACRONIX	0xc2 +#define SPI_FLASH_CFI_MFR_WINBOND	0xef + +/* Erase commands */ +#define CMD_ERASE_4K			0x20 +#define CMD_ERASE_32K			0x52 +#define CMD_ERASE_CHIP			0xc7 +#define CMD_ERASE_64K			0xd8 + +/* Write commands */ +#define CMD_WRITE_STATUS		0x01 +#define CMD_PAGE_PROGRAM		0x02 +#define CMD_WRITE_DISABLE		0x04 +#define CMD_READ_STATUS			0x05 +#define CMD_QUAD_PAGE_PROGRAM		0x32 +#define CMD_READ_STATUS1		0x35 +#define CMD_WRITE_ENABLE		0x06 +#define CMD_READ_CONFIG			0x35 +#define CMD_FLAG_STATUS			0x70 + +/* Read commands */ +#define CMD_READ_ARRAY_SLOW		0x03 +#define CMD_READ_ARRAY_FAST		0x0b +#define CMD_READ_DUAL_OUTPUT_FAST	0x3b +#define CMD_READ_DUAL_IO_FAST		0xbb +#define CMD_READ_QUAD_OUTPUT_FAST	0x6b +#define CMD_READ_QUAD_IO_FAST		0xeb +#define CMD_READ_ID			0x9f + +/* Bank addr access commands */ +#ifdef CONFIG_SPI_FLASH_BAR +# define CMD_BANKADDR_BRWR		0x17 +# define CMD_BANKADDR_BRRD		0x16 +# define CMD_EXTNADDR_WREAR		0xC5 +# define CMD_EXTNADDR_RDEAR		0xC8 +#endif + +/* Common status */ +#define STATUS_WIP			(1 << 0) +#define STATUS_QEB_WINSPAN		(1 << 1) +#define STATUS_QEB_MXIC			(1 << 6) +#define STATUS_PEC			(1 << 7) + +/* Flash timeout values */ +#define SPI_FLASH_PROG_TIMEOUT		(2 * CONFIG_SYS_HZ) +#define SPI_FLASH_PAGE_ERASE_TIMEOUT	(5 * CONFIG_SYS_HZ) +#define SPI_FLASH_SECTOR_ERASE_TIMEOUT	(10 * CONFIG_SYS_HZ) + +/* SST specific 
*/ +#ifdef CONFIG_SPI_FLASH_SST +# define SST_WP			0x01	/* Supports AAI word program */ +# define CMD_SST_BP		0x02    /* Byte Program */ +# define CMD_SST_AAI_WP		0xAD	/* Auto Address Incr Word Program */ + +int sst_write_wp(struct spi_flash *flash, u32 offset, size_t len, +		const void *buf); +#endif + +/* Send a single-byte command to the device and read the response */ +int spi_flash_cmd(struct spi_slave *spi, u8 cmd, void *response, size_t len); + +/* + * Send a multi-byte command to the device and read the response. Used + * for flash array reads, etc. + */ +int spi_flash_cmd_read(struct spi_slave *spi, const u8 *cmd, +		size_t cmd_len, void *data, size_t data_len); + +/* + * Send a multi-byte command to the device followed by (optional) + * data. Used for programming the flash array, etc. + */ +int spi_flash_cmd_write(struct spi_slave *spi, const u8 *cmd, size_t cmd_len, +		const void *data, size_t data_len); + + +/* Flash erase(sectors) operation, support all possible erase commands */ +int spi_flash_cmd_erase_ops(struct spi_flash *flash, u32 offset, size_t len); + +/* Read the status register */ +int spi_flash_cmd_read_status(struct spi_flash *flash, u8 *rs); + +/* Program the status register */ +int spi_flash_cmd_write_status(struct spi_flash *flash, u8 ws); + +/* Read the config register */ +int spi_flash_cmd_read_config(struct spi_flash *flash, u8 *rc); + +/* Program the config register */ +int spi_flash_cmd_write_config(struct spi_flash *flash, u8 wc); + +/* Enable writing on the SPI flash */ +static inline int spi_flash_cmd_write_enable(struct spi_flash *flash) +{ +	return spi_flash_cmd(flash->spi, CMD_WRITE_ENABLE, NULL, 0); +} + +/* Disable writing on the SPI flash */ +static inline int spi_flash_cmd_write_disable(struct spi_flash *flash) +{ +	return spi_flash_cmd(flash->spi, CMD_WRITE_DISABLE, NULL, 0); +} + +/* + * Send the read status command to the device and wait for the wip + * (write-in-progress) bit to clear itself. 
+ */ +int spi_flash_cmd_wait_ready(struct spi_flash *flash, unsigned long timeout); + +/* + * Used for spi_flash write operation + * - SPI claim + * - spi_flash_cmd_write_enable + * - spi_flash_cmd_write + * - spi_flash_cmd_wait_ready + * - SPI release + */ +int spi_flash_write_common(struct spi_flash *flash, const u8 *cmd, +		size_t cmd_len, const void *buf, size_t buf_len); + +/* + * Flash write operation, support all possible write commands. + * Write the requested data out breaking it up into multiple write + * commands as needed per the write size. + */ +int spi_flash_cmd_write_ops(struct spi_flash *flash, u32 offset, +		size_t len, const void *buf); + +/* + * Same as spi_flash_cmd_read() except it also claims/releases the SPI + * bus. Used as common part of the ->read() operation. + */ +int spi_flash_read_common(struct spi_flash *flash, const u8 *cmd, +		size_t cmd_len, void *data, size_t data_len); + +/* Flash read operation, support all possible read commands */ +int spi_flash_cmd_read_ops(struct spi_flash *flash, u32 offset, +		size_t len, void *data); + +#endif /* _SF_INTERNAL_H_ */ diff --git a/roms/u-boot/drivers/mtd/spi/sf_ops.c b/roms/u-boot/drivers/mtd/spi/sf_ops.c new file mode 100644 index 00000000..ef91b924 --- /dev/null +++ b/roms/u-boot/drivers/mtd/spi/sf_ops.c @@ -0,0 +1,518 @@ +/* + * SPI flash operations + * + * Copyright (C) 2008 Atmel Corporation + * Copyright (C) 2010 Reinhard Meyer, EMK Elektronik + * Copyright (C) 2013 Jagannadha Sutradharudu Teki, Xilinx Inc. 
+ * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <errno.h> +#include <malloc.h> +#include <spi.h> +#include <spi_flash.h> +#include <watchdog.h> + +#include "sf_internal.h" + +static void spi_flash_addr(u32 addr, u8 *cmd) +{ +	/* cmd[0] is actual command */ +	cmd[1] = addr >> 16; +	cmd[2] = addr >> 8; +	cmd[3] = addr >> 0; +} + +int spi_flash_cmd_read_status(struct spi_flash *flash, u8 *rs) +{ +	int ret; +	u8 cmd; + +	cmd = CMD_READ_STATUS; +	ret = spi_flash_read_common(flash, &cmd, 1, rs, 1); +	if (ret < 0) { +		debug("SF: fail to read status register\n"); +		return ret; +	} + +	return 0; +} + +int spi_flash_cmd_write_status(struct spi_flash *flash, u8 ws) +{ +	u8 cmd; +	int ret; + +	cmd = CMD_WRITE_STATUS; +	ret = spi_flash_write_common(flash, &cmd, 1, &ws, 1); +	if (ret < 0) { +		debug("SF: fail to write status register\n"); +		return ret; +	} + +	return 0; +} + +#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND) +int spi_flash_cmd_read_config(struct spi_flash *flash, u8 *rc) +{ +	int ret; +	u8 cmd; + +	cmd = CMD_READ_CONFIG; +	ret = spi_flash_read_common(flash, &cmd, 1, rc, 1); +	if (ret < 0) { +		debug("SF: fail to read config register\n"); +		return ret; +	} + +	return 0; +} + +int spi_flash_cmd_write_config(struct spi_flash *flash, u8 wc) +{ +	u8 data[2]; +	u8 cmd; +	int ret; + +	ret = spi_flash_cmd_read_status(flash, &data[0]); +	if (ret < 0) +		return ret; + +	cmd = CMD_WRITE_STATUS; +	data[1] = wc; +	ret = spi_flash_write_common(flash, &cmd, 1, &data, 2); +	if (ret) { +		debug("SF: fail to write config register\n"); +		return ret; +	} + +	return 0; +} +#endif + +#ifdef CONFIG_SPI_FLASH_BAR +static int spi_flash_cmd_bankaddr_write(struct spi_flash *flash, u8 bank_sel) +{ +	u8 cmd; +	int ret; + +	if (flash->bank_curr == bank_sel) { +		debug("SF: not require to enable bank%d\n", bank_sel); +		return 0; +	} + +	cmd = flash->bank_write_cmd; +	ret = spi_flash_write_common(flash, &cmd, 1, &bank_sel, 1); +	
if (ret < 0) { +		debug("SF: fail to write bank register\n"); +		return ret; +	} +	flash->bank_curr = bank_sel; + +	return 0; +} + +static int spi_flash_bank(struct spi_flash *flash, u32 offset) +{ +	u8 bank_sel; +	int ret; + +	bank_sel = offset / (SPI_FLASH_16MB_BOUN << flash->shift); + +	ret = spi_flash_cmd_bankaddr_write(flash, bank_sel); +	if (ret) { +		debug("SF: fail to set bank%d\n", bank_sel); +		return ret; +	} + +	return bank_sel; +} +#endif + +#ifdef CONFIG_SF_DUAL_FLASH +static void spi_flash_dual_flash(struct spi_flash *flash, u32 *addr) +{ +	switch (flash->dual_flash) { +	case SF_DUAL_STACKED_FLASH: +		if (*addr >= (flash->size >> 1)) { +			*addr -= flash->size >> 1; +			flash->spi->flags |= SPI_XFER_U_PAGE; +		} else { +			flash->spi->flags &= ~SPI_XFER_U_PAGE; +		} +		break; +	case SF_DUAL_PARALLEL_FLASH: +		*addr >>= flash->shift; +		break; +	default: +		debug("SF: Unsupported dual_flash=%d\n", flash->dual_flash); +		break; +	} +} +#endif + +int spi_flash_cmd_wait_ready(struct spi_flash *flash, unsigned long timeout) +{ +	struct spi_slave *spi = flash->spi; +	unsigned long timebase; +	unsigned long flags = SPI_XFER_BEGIN; +	int ret; +	u8 status; +	u8 check_status = 0x0; +	u8 poll_bit = STATUS_WIP; +	u8 cmd = flash->poll_cmd; + +	if (cmd == CMD_FLAG_STATUS) { +		poll_bit = STATUS_PEC; +		check_status = poll_bit; +	} + +#ifdef CONFIG_SF_DUAL_FLASH +	if (spi->flags & SPI_XFER_U_PAGE) +		flags |= SPI_XFER_U_PAGE; +#endif +	ret = spi_xfer(spi, 8, &cmd, NULL, flags); +	if (ret) { +		debug("SF: fail to read %s status register\n", +		      cmd == CMD_READ_STATUS ? 
"read" : "flag"); +		return ret; +	} + +	timebase = get_timer(0); +	do { +		WATCHDOG_RESET(); + +		ret = spi_xfer(spi, 8, NULL, &status, 0); +		if (ret) +			return -1; + +		if ((status & poll_bit) == check_status) +			break; + +	} while (get_timer(timebase) < timeout); + +	spi_xfer(spi, 0, NULL, NULL, SPI_XFER_END); + +	if ((status & poll_bit) == check_status) +		return 0; + +	/* Timed out */ +	debug("SF: time out!\n"); +	return -1; +} + +int spi_flash_write_common(struct spi_flash *flash, const u8 *cmd, +		size_t cmd_len, const void *buf, size_t buf_len) +{ +	struct spi_slave *spi = flash->spi; +	unsigned long timeout = SPI_FLASH_PROG_TIMEOUT; +	int ret; + +	if (buf == NULL) +		timeout = SPI_FLASH_PAGE_ERASE_TIMEOUT; + +	ret = spi_claim_bus(flash->spi); +	if (ret) { +		debug("SF: unable to claim SPI bus\n"); +		return ret; +	} + +	ret = spi_flash_cmd_write_enable(flash); +	if (ret < 0) { +		debug("SF: enabling write failed\n"); +		return ret; +	} + +	ret = spi_flash_cmd_write(spi, cmd, cmd_len, buf, buf_len); +	if (ret < 0) { +		debug("SF: write cmd failed\n"); +		return ret; +	} + +	ret = spi_flash_cmd_wait_ready(flash, timeout); +	if (ret < 0) { +		debug("SF: write %s timed out\n", +		      timeout == SPI_FLASH_PROG_TIMEOUT ? 
+			"program" : "page erase"); +		return ret; +	} + +	spi_release_bus(spi); + +	return ret; +} + +int spi_flash_cmd_erase_ops(struct spi_flash *flash, u32 offset, size_t len) +{ +	u32 erase_size, erase_addr; +	u8 cmd[SPI_FLASH_CMD_LEN]; +	int ret = -1; + +	erase_size = flash->erase_size; +	if (offset % erase_size || len % erase_size) { +		debug("SF: Erase offset/length not multiple of erase size\n"); +		return -1; +	} + +	cmd[0] = flash->erase_cmd; +	while (len) { +		erase_addr = offset; + +#ifdef CONFIG_SF_DUAL_FLASH +		if (flash->dual_flash > SF_SINGLE_FLASH) +			spi_flash_dual_flash(flash, &erase_addr); +#endif +#ifdef CONFIG_SPI_FLASH_BAR +		ret = spi_flash_bank(flash, erase_addr); +		if (ret < 0) +			return ret; +#endif +		spi_flash_addr(erase_addr, cmd); + +		debug("SF: erase %2x %2x %2x %2x (%x)\n", cmd[0], cmd[1], +		      cmd[2], cmd[3], erase_addr); + +		ret = spi_flash_write_common(flash, cmd, sizeof(cmd), NULL, 0); +		if (ret < 0) { +			debug("SF: erase failed\n"); +			break; +		} + +		offset += erase_size; +		len -= erase_size; +	} + +	return ret; +} + +int spi_flash_cmd_write_ops(struct spi_flash *flash, u32 offset, +		size_t len, const void *buf) +{ +	unsigned long byte_addr, page_size; +	u32 write_addr; +	size_t chunk_len, actual; +	u8 cmd[SPI_FLASH_CMD_LEN]; +	int ret = -1; + +	page_size = flash->page_size; + +	cmd[0] = flash->write_cmd; +	for (actual = 0; actual < len; actual += chunk_len) { +		write_addr = offset; + +#ifdef CONFIG_SF_DUAL_FLASH +		if (flash->dual_flash > SF_SINGLE_FLASH) +			spi_flash_dual_flash(flash, &write_addr); +#endif +#ifdef CONFIG_SPI_FLASH_BAR +		ret = spi_flash_bank(flash, write_addr); +		if (ret < 0) +			return ret; +#endif +		byte_addr = offset % page_size; +		chunk_len = min(len - actual, page_size - byte_addr); + +		if (flash->spi->max_write_size) +			chunk_len = min(chunk_len, flash->spi->max_write_size); + +		spi_flash_addr(write_addr, cmd); + +		debug("SF: 0x%p => cmd = { 0x%02x 0x%02x%02x%02x } chunk_len = 
%zu\n", +		      buf + actual, cmd[0], cmd[1], cmd[2], cmd[3], chunk_len); + +		ret = spi_flash_write_common(flash, cmd, sizeof(cmd), +					buf + actual, chunk_len); +		if (ret < 0) { +			debug("SF: write failed\n"); +			break; +		} + +		offset += chunk_len; +	} + +	return ret; +} + +int spi_flash_read_common(struct spi_flash *flash, const u8 *cmd, +		size_t cmd_len, void *data, size_t data_len) +{ +	struct spi_slave *spi = flash->spi; +	int ret; + +	ret = spi_claim_bus(flash->spi); +	if (ret) { +		debug("SF: unable to claim SPI bus\n"); +		return ret; +	} + +	ret = spi_flash_cmd_read(spi, cmd, cmd_len, data, data_len); +	if (ret < 0) { +		debug("SF: read cmd failed\n"); +		return ret; +	} + +	spi_release_bus(spi); + +	return ret; +} + +int spi_flash_cmd_read_ops(struct spi_flash *flash, u32 offset, +		size_t len, void *data) +{ +	u8 *cmd, cmdsz; +	u32 remain_len, read_len, read_addr; +	int bank_sel = 0; +	int ret = -1; + +	/* Handle memory-mapped SPI */ +	if (flash->memory_map) { +		ret = spi_claim_bus(flash->spi); +		if (ret) { +			debug("SF: unable to claim SPI bus\n"); +			return ret; +		} +		spi_xfer(flash->spi, 0, NULL, NULL, SPI_XFER_MMAP); +		memcpy(data, flash->memory_map + offset, len); +		spi_xfer(flash->spi, 0, NULL, NULL, SPI_XFER_MMAP_END); +		spi_release_bus(flash->spi); +		return 0; +	} + +	cmdsz = SPI_FLASH_CMD_LEN + flash->dummy_byte; +	cmd = calloc(1, cmdsz); +	if (!cmd) { +		debug("SF: Failed to allocate cmd\n"); +		return -ENOMEM; +	} + +	cmd[0] = flash->read_cmd; +	while (len) { +		read_addr = offset; + +#ifdef CONFIG_SF_DUAL_FLASH +		if (flash->dual_flash > SF_SINGLE_FLASH) +			spi_flash_dual_flash(flash, &read_addr); +#endif +#ifdef CONFIG_SPI_FLASH_BAR +		bank_sel = spi_flash_bank(flash, read_addr); +		if (bank_sel < 0) +			return ret; +#endif +		remain_len = ((SPI_FLASH_16MB_BOUN << flash->shift) * +				(bank_sel + 1)) - offset; +		if (len < remain_len) +			read_len = len; +		else +			read_len = remain_len; + +		spi_flash_addr(read_addr, 
cmd); + +		ret = spi_flash_read_common(flash, cmd, cmdsz, data, read_len); +		if (ret < 0) { +			debug("SF: read failed\n"); +			break; +		} + +		offset += read_len; +		len -= read_len; +		data += read_len; +	} + +	return ret; +} + +#ifdef CONFIG_SPI_FLASH_SST +static int sst_byte_write(struct spi_flash *flash, u32 offset, const void *buf) +{ +	int ret; +	u8 cmd[4] = { +		CMD_SST_BP, +		offset >> 16, +		offset >> 8, +		offset, +	}; + +	debug("BP[%02x]: 0x%p => cmd = { 0x%02x 0x%06x }\n", +	      spi_w8r8(flash->spi, CMD_READ_STATUS), buf, cmd[0], offset); + +	ret = spi_flash_cmd_write_enable(flash); +	if (ret) +		return ret; + +	ret = spi_flash_cmd_write(flash->spi, cmd, sizeof(cmd), buf, 1); +	if (ret) +		return ret; + +	return spi_flash_cmd_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT); +} + +int sst_write_wp(struct spi_flash *flash, u32 offset, size_t len, +		const void *buf) +{ +	size_t actual, cmd_len; +	int ret; +	u8 cmd[4]; + +	ret = spi_claim_bus(flash->spi); +	if (ret) { +		debug("SF: Unable to claim SPI bus\n"); +		return ret; +	} + +	/* If the data is not word aligned, write out leading single byte */ +	actual = offset % 2; +	if (actual) { +		ret = sst_byte_write(flash, offset, buf); +		if (ret) +			goto done; +	} +	offset += actual; + +	ret = spi_flash_cmd_write_enable(flash); +	if (ret) +		goto done; + +	cmd_len = 4; +	cmd[0] = CMD_SST_AAI_WP; +	cmd[1] = offset >> 16; +	cmd[2] = offset >> 8; +	cmd[3] = offset; + +	for (; actual < len - 1; actual += 2) { +		debug("WP[%02x]: 0x%p => cmd = { 0x%02x 0x%06x }\n", +		      spi_w8r8(flash->spi, CMD_READ_STATUS), buf + actual, +		      cmd[0], offset); + +		ret = spi_flash_cmd_write(flash->spi, cmd, cmd_len, +					buf + actual, 2); +		if (ret) { +			debug("SF: sst word program failed\n"); +			break; +		} + +		ret = spi_flash_cmd_wait_ready(flash, SPI_FLASH_PROG_TIMEOUT); +		if (ret) +			break; + +		cmd_len = 1; +		offset += 2; +	} + +	if (!ret) +		ret = spi_flash_cmd_write_disable(flash); + +	/* If there is a 
single trailing byte, write it out */ +	if (!ret && actual != len) +		ret = sst_byte_write(flash, offset, buf + actual); + + done: +	debug("SF: sst: program %s %zu bytes @ 0x%zx\n", +	      ret ? "failure" : "success", len, offset - actual); + +	spi_release_bus(flash->spi); +	return ret; +} +#endif diff --git a/roms/u-boot/drivers/mtd/spi/sf_params.c b/roms/u-boot/drivers/mtd/spi/sf_params.c new file mode 100644 index 00000000..eb372b75 --- /dev/null +++ b/roms/u-boot/drivers/mtd/spi/sf_params.c @@ -0,0 +1,131 @@ +/* + * SPI flash Params table + * + * Copyright (C) 2013 Jagannadha Sutradharudu Teki, Xilinx Inc. + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <spi_flash.h> + +#include "sf_internal.h" + +/* SPI/QSPI flash device params structure */ +const struct spi_flash_params spi_flash_params_table[] = { +#ifdef CONFIG_SPI_FLASH_ATMEL		/* ATMEL */ +	{"AT45DB011D",	   0x1f2200, 0x0,	64 * 1024,     4,	0,		    SECT_4K}, +	{"AT45DB021D",	   0x1f2300, 0x0,	64 * 1024,     8,	0,		    SECT_4K}, +	{"AT45DB041D",	   0x1f2400, 0x0,	64 * 1024,     8,	0,		    SECT_4K}, +	{"AT45DB081D",	   0x1f2500, 0x0,	64 * 1024,    16,	0,		    SECT_4K}, +	{"AT45DB161D",	   0x1f2600, 0x0,	64 * 1024,    32,	0,		    SECT_4K}, +	{"AT45DB321D",	   0x1f2700, 0x0,	64 * 1024,    64,	0,		    SECT_4K}, +	{"AT45DB641D",	   0x1f2800, 0x0,	64 * 1024,   128,	0,		    SECT_4K}, +	{"AT25DF321",      0x1f4701, 0x0,	64 * 1024,    64,	0,		    SECT_4K}, +#endif +#ifdef CONFIG_SPI_FLASH_EON		/* EON */ +	{"EN25Q32B",	   0x1c3016, 0x0,	64 * 1024,    64,	0,			  0}, +	{"EN25Q64",	   0x1c3017, 0x0,	64 * 1024,   128,	0,		    SECT_4K}, +	{"EN25Q128B",	   0x1c3018, 0x0,       64 * 1024,   256,	0,			  0}, +	{"EN25S64",	   0x1c3817, 0x0,	64 * 1024,   128,	0,			  0}, +#endif +#ifdef CONFIG_SPI_FLASH_GIGADEVICE	/* GIGADEVICE */ +	{"GD25Q64B",	   0xc84017, 0x0,	64 * 1024,   128,	0,		    SECT_4K}, +	{"GD25LQ32",	   0xc86016, 0x0,	64 * 1024,    64,	0,		    SECT_4K}, +#endif +#ifdef 
CONFIG_SPI_FLASH_MACRONIX	/* MACRONIX */ +	{"MX25L2006E",	   0xc22012, 0x0,	64 * 1024,     4,	0,			  0}, +	{"MX25L4005",	   0xc22013, 0x0,	64 * 1024,     8,	0,			  0}, +	{"MX25L8005",	   0xc22014, 0x0,	64 * 1024,    16,	0,			  0}, +	{"MX25L1605D",	   0xc22015, 0x0,	64 * 1024,    32,	0,			  0}, +	{"MX25L3205D",	   0xc22016, 0x0,	64 * 1024,    64,	0,			  0}, +	{"MX25L6405D",	   0xc22017, 0x0,	64 * 1024,   128,	0,			  0}, +	{"MX25L12805",	   0xc22018, 0x0,	64 * 1024,   256, RD_FULL,		     WR_QPP}, +	{"MX25L25635F",	   0xc22019, 0x0,	64 * 1024,   512, RD_FULL,		     WR_QPP}, +	{"MX25L51235F",	   0xc2201a, 0x0,	64 * 1024,  1024, RD_FULL,		     WR_QPP}, +	{"MX25L12855E",	   0xc22618, 0x0,	64 * 1024,   256, RD_FULL,		     WR_QPP}, +#endif +#ifdef CONFIG_SPI_FLASH_SPANSION	/* SPANSION */ +	{"S25FL008A",	   0x010213, 0x0,	64 * 1024,    16,	0,			  0}, +	{"S25FL016A",	   0x010214, 0x0,	64 * 1024,    32,	0,			  0}, +	{"S25FL032A",	   0x010215, 0x0,	64 * 1024,    64,	0,			  0}, +	{"S25FL064A",	   0x010216, 0x0,	64 * 1024,   128,	0,			  0}, +	{"S25FL128P_256K", 0x012018, 0x0300,   256 * 1024,    64, RD_FULL,		     WR_QPP}, +	{"S25FL128P_64K",  0x012018, 0x0301,    64 * 1024,   256, RD_FULL,		     WR_QPP}, +	{"S25FL032P",	   0x010215, 0x4d00,    64 * 1024,    64, RD_FULL,		     WR_QPP}, +	{"S25FL064P",	   0x010216, 0x4d00,    64 * 1024,   128, RD_FULL,		     WR_QPP}, +	{"S25FL128S_256K", 0x012018, 0x4d00,   256 * 1024,    64, RD_FULL,		     WR_QPP}, +	{"S25FL128S_64K",  0x012018, 0x4d01,    64 * 1024,   256, RD_FULL,		     WR_QPP}, +	{"S25FL256S_256K", 0x010219, 0x4d00,   256 * 1024,   128, RD_FULL,		     WR_QPP}, +	{"S25FL256S_64K",  0x010219, 0x4d01,	64 * 1024,   512, RD_FULL,		     WR_QPP}, +	{"S25FL512S_256K", 0x010220, 0x4d00,   256 * 1024,   256, RD_FULL,		     WR_QPP}, +	{"S25FL512S_64K",  0x010220, 0x4d01,    64 * 1024,  1024, RD_FULL,		     WR_QPP}, +#endif +#ifdef CONFIG_SPI_FLASH_STMICRO		/* STMICRO */ +	{"M25P10",	   0x202011, 0x0,	32 * 1024,     4,	0,			  0}, +	
{"M25P20",	   0x202012, 0x0,       64 * 1024,     4,	0,			  0}, +	{"M25P40",	   0x202013, 0x0,       64 * 1024,     8,	0,			  0}, +	{"M25P80",	   0x202014, 0x0,       64 * 1024,    16,	0,			  0}, +	{"M25P16",	   0x202015, 0x0,       64 * 1024,    32,	0,			  0}, +	{"M25P32",	   0x202016, 0x0,       64 * 1024,    64,	0,			  0}, +	{"M25P64",	   0x202017, 0x0,       64 * 1024,   128,	0,			  0}, +	{"M25P128",	   0x202018, 0x0,      256 * 1024,    64,	0,			  0}, +	{"N25Q32",	   0x20ba16, 0x0,       64 * 1024,    64, RD_FULL,	   WR_QPP | SECT_4K}, +	{"N25Q32A",	   0x20bb16, 0x0,       64 * 1024,    64, RD_FULL,	   WR_QPP | SECT_4K}, +	{"N25Q64",	   0x20ba17, 0x0,       64 * 1024,   128, RD_FULL,	   WR_QPP | SECT_4K}, +	{"N25Q64A",	   0x20bb17, 0x0,       64 * 1024,   128, RD_FULL,	   WR_QPP | SECT_4K}, +	{"N25Q128",	   0x20ba18, 0x0,       64 * 1024,   256, RD_FULL,		     WR_QPP}, +	{"N25Q128A",	   0x20bb18, 0x0,       64 * 1024,   256, RD_FULL,		     WR_QPP}, +	{"N25Q256",	   0x20ba19, 0x0,       64 * 1024,   512, RD_FULL,	   WR_QPP | SECT_4K}, +	{"N25Q256A",	   0x20bb19, 0x0,       64 * 1024,   512, RD_FULL,	   WR_QPP | SECT_4K}, +	{"N25Q512",	   0x20ba20, 0x0,       64 * 1024,  1024, RD_FULL, WR_QPP | E_FSR | SECT_4K}, +	{"N25Q512A",	   0x20bb20, 0x0,       64 * 1024,  1024, RD_FULL, WR_QPP | E_FSR | SECT_4K}, +	{"N25Q1024",	   0x20ba21, 0x0,       64 * 1024,  2048, RD_FULL, WR_QPP | E_FSR | SECT_4K}, +	{"N25Q1024A",	   0x20bb21, 0x0,       64 * 1024,  2048, RD_FULL, WR_QPP | E_FSR | SECT_4K}, +#endif +#ifdef CONFIG_SPI_FLASH_SST		/* SST */ +	{"SST25VF040B",	   0xbf258d, 0x0,	64 * 1024,     8,	0,          SECT_4K | SST_WP}, +	{"SST25VF080B",	   0xbf258e, 0x0,	64 * 1024,    16,	0,	    SECT_4K | SST_WP}, +	{"SST25VF016B",	   0xbf2541, 0x0,	64 * 1024,    32,	0,	    SECT_4K | SST_WP}, +	{"SST25VF032B",	   0xbf254a, 0x0,	64 * 1024,    64,	0,	    SECT_4K | SST_WP}, +	{"SST25VF064C",	   0xbf254b, 0x0,	64 * 1024,   128,	0,		     SECT_4K}, +	{"SST25WF512",	   0xbf2501, 0x0,	64 
* 1024,     1,	0,	    SECT_4K | SST_WP}, +	{"SST25WF010",	   0xbf2502, 0x0,	64 * 1024,     2,       0,          SECT_4K | SST_WP}, +	{"SST25WF020",	   0xbf2503, 0x0,	64 * 1024,     4,       0,	    SECT_4K | SST_WP}, +	{"SST25WF040",	   0xbf2504, 0x0,	64 * 1024,     8,       0,	    SECT_4K | SST_WP}, +	{"SST25WF080",	   0xbf2505, 0x0,	64 * 1024,    16,       0,	    SECT_4K | SST_WP}, +#endif +#ifdef CONFIG_SPI_FLASH_WINBOND		/* WINBOND */ +	{"W25P80",	   0xef2014, 0x0,	64 * 1024,    16,	0,		           0}, +	{"W25P16",	   0xef2015, 0x0,	64 * 1024,    32,	0,		           0}, +	{"W25P32",	   0xef2016, 0x0,	64 * 1024,    64,	0,		           0}, +	{"W25X40",	   0xef3013, 0x0,	64 * 1024,     8,	0,		     SECT_4K}, +	{"W25X16",	   0xef3015, 0x0,	64 * 1024,    32,	0,		     SECT_4K}, +	{"W25X32",	   0xef3016, 0x0,	64 * 1024,    64,	0,		     SECT_4K}, +	{"W25X64",	   0xef3017, 0x0,	64 * 1024,   128,	0,		     SECT_4K}, +	{"W25Q80BL",	   0xef4014, 0x0,	64 * 1024,    16, RD_FULL,	    WR_QPP | SECT_4K}, +	{"W25Q16CL",	   0xef4015, 0x0,	64 * 1024,    32, RD_FULL,	    WR_QPP | SECT_4K}, +	{"W25Q32BV",	   0xef4016, 0x0,	64 * 1024,    64, RD_FULL,	    WR_QPP | SECT_4K}, +	{"W25Q64CV",	   0xef4017, 0x0,	64 * 1024,   128, RD_FULL,	    WR_QPP | SECT_4K}, +	{"W25Q128BV",	   0xef4018, 0x0,	64 * 1024,   256, RD_FULL,	    WR_QPP | SECT_4K}, +	{"W25Q256",	   0xef4019, 0x0,	64 * 1024,   512, RD_FULL,	    WR_QPP | SECT_4K}, +	{"W25Q80BW",	   0xef5014, 0x0,	64 * 1024,    16, RD_FULL,	    WR_QPP | SECT_4K}, +	{"W25Q16DW",	   0xef6015, 0x0,	64 * 1024,    32, RD_FULL,	    WR_QPP | SECT_4K}, +	{"W25Q32DW",	   0xef6016, 0x0,	64 * 1024,    64, RD_FULL,	    WR_QPP | SECT_4K}, +	{"W25Q64DW",	   0xef6017, 0x0,	64 * 1024,   128, RD_FULL,	    WR_QPP | SECT_4K}, +	{"W25Q128FW",	   0xef6018, 0x0,	64 * 1024,   256, RD_FULL,	    WR_QPP | SECT_4K}, +#endif +	/* +	 * Note: +	 * Below paired flash devices has similar spi_flash params. 
+	 * (S25FL129P_64K, S25FL128S_64K) +	 * (W25Q80BL, W25Q80BV) +	 * (W25Q16CL, W25Q16DV) +	 * (W25Q32BV, W25Q32FV_SPI) +	 * (W25Q64CV, W25Q64FV_SPI) +	 * (W25Q128BV, W25Q128FV_SPI) +	 * (W25Q32DW, W25Q32FV_QPI) +	 * (W25Q64DW, W25Q64FV_QPI) +	 * (W25Q128FW, W25Q128FV_QPI) +	 */ +}; diff --git a/roms/u-boot/drivers/mtd/spi/sf_probe.c b/roms/u-boot/drivers/mtd/spi/sf_probe.c new file mode 100644 index 00000000..0a46fe38 --- /dev/null +++ b/roms/u-boot/drivers/mtd/spi/sf_probe.c @@ -0,0 +1,391 @@ +/* + * SPI flash probing + * + * Copyright (C) 2008 Atmel Corporation + * Copyright (C) 2010 Reinhard Meyer, EMK Elektronik + * Copyright (C) 2013 Jagannadha Sutradharudu Teki, Xilinx Inc. + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <fdtdec.h> +#include <malloc.h> +#include <spi.h> +#include <spi_flash.h> +#include <asm/io.h> + +#include "sf_internal.h" + +DECLARE_GLOBAL_DATA_PTR; + +/* Read commands array */ +static u8 spi_read_cmds_array[] = { +	CMD_READ_ARRAY_SLOW, +	CMD_READ_DUAL_OUTPUT_FAST, +	CMD_READ_DUAL_IO_FAST, +	CMD_READ_QUAD_OUTPUT_FAST, +	CMD_READ_QUAD_IO_FAST, +}; + +#ifdef CONFIG_SPI_FLASH_MACRONIX +static int spi_flash_set_qeb_mxic(struct spi_flash *flash) +{ +	u8 qeb_status; +	int ret; + +	ret = spi_flash_cmd_read_status(flash, &qeb_status); +	if (ret < 0) +		return ret; + +	if (qeb_status & STATUS_QEB_MXIC) { +		debug("SF: mxic: QEB is already set\n"); +	} else { +		ret = spi_flash_cmd_write_status(flash, STATUS_QEB_MXIC); +		if (ret < 0) +			return ret; +	} + +	return ret; +} +#endif + +#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND) +static int spi_flash_set_qeb_winspan(struct spi_flash *flash) +{ +	u8 qeb_status; +	int ret; + +	ret = spi_flash_cmd_read_config(flash, &qeb_status); +	if (ret < 0) +		return ret; + +	if (qeb_status & STATUS_QEB_WINSPAN) { +		debug("SF: winspan: QEB is already set\n"); +	} else { +		ret = spi_flash_cmd_write_config(flash, STATUS_QEB_WINSPAN); +		if (ret < 0) +			
return ret; +	} + +	return ret; +} +#endif + +static int spi_flash_set_qeb(struct spi_flash *flash, u8 idcode0) +{ +	switch (idcode0) { +#ifdef CONFIG_SPI_FLASH_MACRONIX +	case SPI_FLASH_CFI_MFR_MACRONIX: +		return spi_flash_set_qeb_mxic(flash); +#endif +#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND) +	case SPI_FLASH_CFI_MFR_SPANSION: +	case SPI_FLASH_CFI_MFR_WINBOND: +		return spi_flash_set_qeb_winspan(flash); +#endif +#ifdef CONFIG_SPI_FLASH_STMICRO +	case SPI_FLASH_CFI_MFR_STMICRO: +		debug("SF: QEB is volatile for %02x flash\n", idcode0); +		return 0; +#endif +	default: +		printf("SF: Need set QEB func for %02x flash\n", idcode0); +		return -1; +	} +} + +static struct spi_flash *spi_flash_validate_params(struct spi_slave *spi, +		u8 *idcode) +{ +	const struct spi_flash_params *params; +	struct spi_flash *flash; +	u8 cmd; +	u16 jedec = idcode[1] << 8 | idcode[2]; +	u16 ext_jedec = idcode[3] << 8 | idcode[4]; + +	params = spi_flash_params_table; +	for (; params->name != NULL; params++) { +		if ((params->jedec >> 16) == idcode[0]) { +			if ((params->jedec & 0xFFFF) == jedec) { +				if (params->ext_jedec == 0) +					break; +				else if (params->ext_jedec == ext_jedec) +					break; +			} +		} +	} + +	if (!params->name) { +		printf("SF: Unsupported flash IDs: "); +		printf("manuf %02x, jedec %04x, ext_jedec %04x\n", +		       idcode[0], jedec, ext_jedec); +		return NULL; +	} + +	flash = calloc(1, sizeof(*flash)); +	if (!flash) { +		debug("SF: Failed to allocate spi_flash\n"); +		return NULL; +	} + +	/* Assign spi data */ +	flash->spi = spi; +	flash->name = params->name; +	flash->memory_map = spi->memory_map; +	flash->dual_flash = flash->spi->option; + +	/* Assign spi_flash ops */ +	flash->write = spi_flash_cmd_write_ops; +#ifdef CONFIG_SPI_FLASH_SST +	if (params->flags & SST_WP) +		flash->write = sst_write_wp; +#endif +	flash->erase = spi_flash_cmd_erase_ops; +	flash->read = spi_flash_cmd_read_ops; + +	/* Compute the flash size */ +	
flash->shift = (flash->dual_flash & SF_DUAL_PARALLEL_FLASH) ? 1 : 0; +	/* +	 * The Spansion S25FL032P and S25FL064P have 256b pages, yet use the +	 * 0x4d00 Extended JEDEC code. The rest of the Spansion flashes with +	 * the 0x4d00 Extended JEDEC code have 512b pages. All of the others +	 * have 256b pages. +	 */ +	if (ext_jedec == 0x4d00) { +		if ((jedec == 0x0215) || (jedec == 0x216)) +			flash->page_size = 256; +		else +			flash->page_size = 512; +	} else { +		flash->page_size = 256; +	} +	flash->page_size <<= flash->shift; +	flash->sector_size = params->sector_size << flash->shift; +	flash->size = flash->sector_size * params->nr_sectors << flash->shift; +#ifdef CONFIG_SF_DUAL_FLASH +	if (flash->dual_flash & SF_DUAL_STACKED_FLASH) +		flash->size <<= 1; +#endif + +	/* Compute erase sector and command */ +	if (params->flags & SECT_4K) { +		flash->erase_cmd = CMD_ERASE_4K; +		flash->erase_size = 4096 << flash->shift; +	} else if (params->flags & SECT_32K) { +		flash->erase_cmd = CMD_ERASE_32K; +		flash->erase_size = 32768 << flash->shift; +	} else { +		flash->erase_cmd = CMD_ERASE_64K; +		flash->erase_size = flash->sector_size; +	} + +	/* Look for the fastest read cmd */ +	cmd = fls(params->e_rd_cmd & flash->spi->op_mode_rx); +	if (cmd) { +		cmd = spi_read_cmds_array[cmd - 1]; +		flash->read_cmd = cmd; +	} else { +		/* Go for default supported read cmd */ +		flash->read_cmd = CMD_READ_ARRAY_FAST; +	} + +	/* Not require to look for fastest only two write cmds yet */ +	if (params->flags & WR_QPP && flash->spi->op_mode_tx & SPI_OPM_TX_QPP) +		flash->write_cmd = CMD_QUAD_PAGE_PROGRAM; +	else +		/* Go for default supported write cmd */ +		flash->write_cmd = CMD_PAGE_PROGRAM; + +	/* Set the quad enable bit - only for quad commands */ +	if ((flash->read_cmd == CMD_READ_QUAD_OUTPUT_FAST) || +	    (flash->read_cmd == CMD_READ_QUAD_IO_FAST) || +	    (flash->write_cmd == CMD_QUAD_PAGE_PROGRAM)) { +		if (spi_flash_set_qeb(flash, idcode[0])) { +			debug("SF: Fail to set QEB for 
%02x\n", idcode[0]); +			return NULL; +		} +	} + +	/* Read dummy_byte: dummy byte is determined based on the +	 * dummy cycles of a particular command. +	 * Fast commands - dummy_byte = dummy_cycles/8 +	 * I/O commands- dummy_byte = (dummy_cycles * no.of lines)/8 +	 * For I/O commands except cmd[0] everything goes on no.of lines +	 * based on particular command but incase of fast commands except +	 * data all go on single line irrespective of command. +	 */ +	switch (flash->read_cmd) { +	case CMD_READ_QUAD_IO_FAST: +		flash->dummy_byte = 2; +		break; +	case CMD_READ_ARRAY_SLOW: +		flash->dummy_byte = 0; +		break; +	default: +		flash->dummy_byte = 1; +	} + +	/* Poll cmd selection */ +	flash->poll_cmd = CMD_READ_STATUS; +#ifdef CONFIG_SPI_FLASH_STMICRO +	if (params->flags & E_FSR) +		flash->poll_cmd = CMD_FLAG_STATUS; +#endif + +	/* Configure the BAR - discover bank cmds and read current bank */ +#ifdef CONFIG_SPI_FLASH_BAR +	u8 curr_bank = 0; +	if (flash->size > SPI_FLASH_16MB_BOUN) { +		flash->bank_read_cmd = (idcode[0] == 0x01) ? +					CMD_BANKADDR_BRRD : CMD_EXTNADDR_RDEAR; +		flash->bank_write_cmd = (idcode[0] == 0x01) ? 
+					CMD_BANKADDR_BRWR : CMD_EXTNADDR_WREAR; + +		if (spi_flash_read_common(flash, &flash->bank_read_cmd, 1, +					  &curr_bank, 1)) { +			debug("SF: fail to read bank addr register\n"); +			return NULL; +		} +		flash->bank_curr = curr_bank; +	} else { +		flash->bank_curr = curr_bank; +	} +#endif + +	/* Flash powers up read-only, so clear BP# bits */ +#if defined(CONFIG_SPI_FLASH_ATMEL) || \ +	defined(CONFIG_SPI_FLASH_MACRONIX) || \ +	defined(CONFIG_SPI_FLASH_SST) +		spi_flash_cmd_write_status(flash, 0); +#endif + +	return flash; +} + +#ifdef CONFIG_OF_CONTROL +int spi_flash_decode_fdt(const void *blob, struct spi_flash *flash) +{ +	fdt_addr_t addr; +	fdt_size_t size; +	int node; + +	/* If there is no node, do nothing */ +	node = fdtdec_next_compatible(blob, 0, COMPAT_GENERIC_SPI_FLASH); +	if (node < 0) +		return 0; + +	addr = fdtdec_get_addr_size(blob, node, "memory-map", &size); +	if (addr == FDT_ADDR_T_NONE) { +		debug("%s: Cannot decode address\n", __func__); +		return 0; +	} + +	if (flash->size != size) { +		debug("%s: Memory map must cover entire device\n", __func__); +		return -1; +	} +	flash->memory_map = map_sysmem(addr, size); + +	return 0; +} +#endif /* CONFIG_OF_CONTROL */ + +static struct spi_flash *spi_flash_probe_slave(struct spi_slave *spi) +{ +	struct spi_flash *flash = NULL; +	u8 idcode[5]; +	int ret; + +	/* Setup spi_slave */ +	if (!spi) { +		printf("SF: Failed to set up slave\n"); +		return NULL; +	} + +	/* Claim spi bus */ +	ret = spi_claim_bus(spi); +	if (ret) { +		debug("SF: Failed to claim SPI bus: %d\n", ret); +		goto err_claim_bus; +	} + +	/* Read the ID codes */ +	ret = spi_flash_cmd(spi, CMD_READ_ID, idcode, sizeof(idcode)); +	if (ret) { +		printf("SF: Failed to get idcodes\n"); +		goto err_read_id; +	} + +#ifdef DEBUG +	printf("SF: Got idcodes\n"); +	print_buffer(0, idcode, 1, sizeof(idcode), 0); +#endif + +	/* Validate params from spi_flash_params table */ +	flash = spi_flash_validate_params(spi, idcode); +	if (!flash) +		goto 
err_read_id; + +#ifdef CONFIG_OF_CONTROL +	if (spi_flash_decode_fdt(gd->fdt_blob, flash)) { +		debug("SF: FDT decode error\n"); +		goto err_read_id; +	} +#endif +#ifndef CONFIG_SPL_BUILD +	printf("SF: Detected %s with page size ", flash->name); +	print_size(flash->page_size, ", erase size "); +	print_size(flash->erase_size, ", total "); +	print_size(flash->size, ""); +	if (flash->memory_map) +		printf(", mapped at %p", flash->memory_map); +	puts("\n"); +#endif +#ifndef CONFIG_SPI_FLASH_BAR +	if (((flash->dual_flash == SF_SINGLE_FLASH) && +	     (flash->size > SPI_FLASH_16MB_BOUN)) || +	     ((flash->dual_flash > SF_SINGLE_FLASH) && +	     (flash->size > SPI_FLASH_16MB_BOUN << 1))) { +		puts("SF: Warning - Only lower 16MiB accessible,"); +		puts(" Full access #define CONFIG_SPI_FLASH_BAR\n"); +	} +#endif + +	/* Release spi bus */ +	spi_release_bus(spi); + +	return flash; + +err_read_id: +	spi_release_bus(spi); +err_claim_bus: +	spi_free_slave(spi); +	return NULL; +} + +struct spi_flash *spi_flash_probe(unsigned int bus, unsigned int cs, +		unsigned int max_hz, unsigned int spi_mode) +{ +	struct spi_slave *spi; + +	spi = spi_setup_slave(bus, cs, max_hz, spi_mode); +	return spi_flash_probe_slave(spi); +} + +#ifdef CONFIG_OF_SPI_FLASH +struct spi_flash *spi_flash_probe_fdt(const void *blob, int slave_node, +				      int spi_node) +{ +	struct spi_slave *spi; + +	spi = spi_setup_slave_fdt(blob, slave_node, spi_node); +	return spi_flash_probe_slave(spi); +} +#endif + +void spi_flash_free(struct spi_flash *flash) +{ +	spi_free_slave(flash->spi); +	free(flash); +} diff --git a/roms/u-boot/drivers/mtd/spi/spi_spl_load.c b/roms/u-boot/drivers/mtd/spi/spi_spl_load.c new file mode 100644 index 00000000..1954b7e8 --- /dev/null +++ b/roms/u-boot/drivers/mtd/spi/spi_spl_load.c @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2011 OMICRON electronics GmbH + * + * based on drivers/mtd/nand/nand_spl_load.c + * + * Copyright (C) 2011 + * Heiko Schocher, DENX Software Engineering, hs@denx.de. 
+ * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <spi_flash.h> +#include <spl.h> + +#ifdef CONFIG_SPL_OS_BOOT +/* + * Load the kernel, check for a valid header we can parse, and if found load + * the kernel and then device tree. + */ +static int spi_load_image_os(struct spi_flash *flash, +			     struct image_header *header) +{ +	/* Read for a header, parse or error out. */ +	spi_flash_read(flash, CONFIG_SYS_SPI_KERNEL_OFFS, 0x40, +		       (void *)header); + +	if (image_get_magic(header) != IH_MAGIC) +		return -1; + +	spl_parse_image_header(header); + +	spi_flash_read(flash, CONFIG_SYS_SPI_KERNEL_OFFS, +		       spl_image.size, (void *)spl_image.load_addr); + +	/* Read device tree. */ +	spi_flash_read(flash, CONFIG_SYS_SPI_ARGS_OFFS, +		       CONFIG_SYS_SPI_ARGS_SIZE, +		       (void *)CONFIG_SYS_SPL_ARGS_ADDR); + +	return 0; +} +#endif + +/* + * The main entry for SPI booting. It's necessary that SDRAM is already + * configured and available since this code loads the main U-Boot image + * from SPI into SDRAM and starts it from there. + */ +void spl_spi_load_image(void) +{ +	struct spi_flash *flash; +	struct image_header *header; + +	/* +	 * Load U-Boot image from SPI flash into RAM +	 */ + +	flash = spi_flash_probe(CONFIG_SPL_SPI_BUS, CONFIG_SPL_SPI_CS, +				CONFIG_SF_DEFAULT_SPEED, SPI_MODE_3); +	if (!flash) { +		puts("SPI probe failed.\n"); +		hang(); +	} + +	/* use CONFIG_SYS_TEXT_BASE as temporary storage area */ +	header = (struct image_header *)(CONFIG_SYS_TEXT_BASE); + +#ifdef CONFIG_SPL_OS_BOOT +	if (spl_start_uboot() || spi_load_image_os(flash, header)) +#endif +	{ +		/* Load u-boot, mkimage header is 64 bytes. 
*/ +		spi_flash_read(flash, CONFIG_SYS_SPI_U_BOOT_OFFS, 0x40, +			       (void *)header); +		spl_parse_image_header(header); +		spi_flash_read(flash, CONFIG_SYS_SPI_U_BOOT_OFFS, +			       spl_image.size, (void *)spl_image.load_addr); +	} +} diff --git a/roms/u-boot/drivers/mtd/st_smi.c b/roms/u-boot/drivers/mtd/st_smi.c new file mode 100644 index 00000000..208119c5 --- /dev/null +++ b/roms/u-boot/drivers/mtd/st_smi.c @@ -0,0 +1,565 @@ +/* + * (C) Copyright 2009 + * Vipin Kumar, ST Microelectronics, vipin.kumar@st.com. + * + * SPDX-License-Identifier:	GPL-2.0+ + */ + +#include <common.h> +#include <flash.h> +#include <linux/err.h> +#include <linux/mtd/st_smi.h> + +#include <asm/io.h> +#include <asm/arch/hardware.h> + +#if !defined(CONFIG_SYS_NO_FLASH) + +static struct smi_regs *const smicntl = +    (struct smi_regs * const)CONFIG_SYS_SMI_BASE; +static ulong bank_base[CONFIG_SYS_MAX_FLASH_BANKS] = +    CONFIG_SYS_FLASH_ADDR_BASE; +flash_info_t flash_info[CONFIG_SYS_MAX_FLASH_BANKS]; + +/* data structure to maintain flash ids from different vendors */ +struct flash_device { +	char *name; +	u8 erase_cmd; +	u32 device_id; +	u32 pagesize; +	unsigned long sectorsize; +	unsigned long size_in_bytes; +}; + +#define FLASH_ID(n, es, id, psize, ssize, size)	\ +{				\ +	.name = n,		\ +	.erase_cmd = es,	\ +	.device_id = id,	\ +	.pagesize = psize,	\ +	.sectorsize = ssize,	\ +	.size_in_bytes = size	\ +} + +/* + * List of supported flash devices. + * Currently the erase_cmd field is not used in this driver. 
+ */ +static struct flash_device flash_devices[] = { +	FLASH_ID("st m25p16"     , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000), +	FLASH_ID("st m25p32"     , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000), +	FLASH_ID("st m25p64"     , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000), +	FLASH_ID("st m25p128"    , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000), +	FLASH_ID("st m25p05"     , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000), +	FLASH_ID("st m25p10"     , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000), +	FLASH_ID("st m25p20"     , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000), +	FLASH_ID("st m25p40"     , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000), +	FLASH_ID("st m25p80"     , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000), +	FLASH_ID("st m45pe10"    , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000), +	FLASH_ID("st m45pe20"    , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000), +	FLASH_ID("st m45pe40"    , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000), +	FLASH_ID("st m45pe80"    , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000), +	FLASH_ID("sp s25fl004"   , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000), +	FLASH_ID("sp s25fl008"   , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000), +	FLASH_ID("sp s25fl016"   , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000), +	FLASH_ID("sp s25fl032"   , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000), +	FLASH_ID("sp s25fl064"   , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000), +	FLASH_ID("mac 25l512"    , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000), +	FLASH_ID("mac 25l1005"   , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000), +	FLASH_ID("mac 25l2005"   , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000), +	FLASH_ID("mac 25l4005"   , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000), +	FLASH_ID("mac 25l4005a"  , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000), +	FLASH_ID("mac 25l8005"   , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000), +	FLASH_ID("mac 25l1605"   , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000), +	FLASH_ID("mac 25l1605a"  , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000), +	FLASH_ID("mac 25l3205"   , 0xd8, 
0x001620C2, 0x100, 0x10000, 0x400000), +	FLASH_ID("mac 25l3205a"  , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000), +	FLASH_ID("mac 25l6405"   , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000), +	FLASH_ID("wbd w25q128" , 0xd8, 0x001840EF, 0x100, 0x10000, 0x1000000), +}; + +/* + * smi_wait_xfer_finish - Wait until TFF is set in status register + * @timeout:	 timeout in milliseconds + * + * Wait until TFF is set in status register + */ +static int smi_wait_xfer_finish(int timeout) +{ +	ulong start = get_timer(0); + +	while (get_timer(start) < timeout) { +		if (readl(&smicntl->smi_sr) & TFF) +			return 0; + +		/* Try after 10 ms */ +		udelay(10); +	}; + +	return -1; +} + +/* + * smi_read_id - Read flash id + * @info:	 flash_info structure pointer + * @banknum:	 bank number + * + * Read the flash id present at bank #banknum + */ +static unsigned int smi_read_id(flash_info_t *info, int banknum) +{ +	unsigned int value; + +	writel(readl(&smicntl->smi_cr1) | SW_MODE, &smicntl->smi_cr1); +	writel(READ_ID, &smicntl->smi_tr); +	writel((banknum << BANKSEL_SHIFT) | SEND | TX_LEN_1 | RX_LEN_3, +	       &smicntl->smi_cr2); + +	if (smi_wait_xfer_finish(XFER_FINISH_TOUT)) +		return -EIO; + +	value = (readl(&smicntl->smi_rr) & 0x00FFFFFF); + +	writel(readl(&smicntl->smi_sr) & ~TFF, &smicntl->smi_sr); +	writel(readl(&smicntl->smi_cr1) & ~SW_MODE, &smicntl->smi_cr1); + +	return value; +} + +/* + * flash_get_size - Detect the SMI flash by reading the ID. + * @base:	 Base address of the flash area bank #banknum + * @banknum:	 Bank number + * + * Detect the SMI flash by reading the ID. Initializes the flash_info structure + * with size, sector count etc. 
+ */ +static ulong flash_get_size(ulong base, int banknum) +{ +	flash_info_t *info = &flash_info[banknum]; +	int value; +	int i; + +	value = smi_read_id(info, banknum); + +	if (value < 0) { +		printf("Flash id could not be read\n"); +		return 0; +	} + +	/* Matches chip-id to entire list of 'serial-nor flash' ids */ +	for (i = 0; i < ARRAY_SIZE(flash_devices); i++) { +		if (flash_devices[i].device_id == value) { +			info->size = flash_devices[i].size_in_bytes; +			info->flash_id = value; +			info->start[0] = base; +			info->sector_count = +					info->size/flash_devices[i].sectorsize; + +			return info->size; +		} +	} + +	return 0; +} + +/* + * smi_read_sr - Read status register of SMI + * @bank:	 bank number + * + * This routine will get the status register of the flash chip present at the + * given bank + */ +static int smi_read_sr(int bank) +{ +	u32 ctrlreg1, val; + +	/* store the CTRL REG1 state */ +	ctrlreg1 = readl(&smicntl->smi_cr1); + +	/* Program SMI in HW Mode */ +	writel(readl(&smicntl->smi_cr1) & ~(SW_MODE | WB_MODE), +	       &smicntl->smi_cr1); + +	/* Performing a RSR instruction in HW mode */ +	writel((bank << BANKSEL_SHIFT) | RD_STATUS_REG, &smicntl->smi_cr2); + +	if (smi_wait_xfer_finish(XFER_FINISH_TOUT)) +		return -1; + +	val = readl(&smicntl->smi_sr); + +	/* Restore the CTRL REG1 state */ +	writel(ctrlreg1, &smicntl->smi_cr1); + +	return val; +} + +/* + * smi_wait_till_ready - Wait till last operation is over. + * @bank:	 bank number shifted. + * @timeout:	 timeout in milliseconds. + * + * This routine checks for WIP(write in progress)bit in Status register(SMSR-b0) + * The routine checks for #timeout loops, each at interval of 1 milli-second. + * If successful the routine returns 0. + */ +static int smi_wait_till_ready(int bank, int timeout) +{ +	int sr; +	ulong start = get_timer(0); + +	/* One chip guarantees max 5 msec wait here after page writes, +	   but potentially three seconds (!) after page erase. 
*/ +	while (get_timer(start) < timeout) { +		sr = smi_read_sr(bank); +		if ((sr >= 0) && (!(sr & WIP_BIT))) +			return 0; + +		/* Try again after 10 usec */ +		udelay(10); +	} while (timeout--); + +	printf("SMI controller is still in wait, timeout=%d\n", timeout); +	return -EIO; +} + +/* + * smi_write_enable - Enable the flash to do write operation + * @bank:	 bank number + * + * Set write enable latch with Write Enable command. + * Returns negative if error occurred. + */ +static int smi_write_enable(int bank) +{ +	u32 ctrlreg1; +	u32 start; +	int timeout = WMODE_TOUT; +	int sr; + +	/* Store the CTRL REG1 state */ +	ctrlreg1 = readl(&smicntl->smi_cr1); + +	/* Program SMI in H/W Mode */ +	writel(readl(&smicntl->smi_cr1) & ~SW_MODE, &smicntl->smi_cr1); + +	/* Give the Flash, Write Enable command */ +	writel((bank << BANKSEL_SHIFT) | WE, &smicntl->smi_cr2); + +	if (smi_wait_xfer_finish(XFER_FINISH_TOUT)) +		return -1; + +	/* Restore the CTRL REG1 state */ +	writel(ctrlreg1, &smicntl->smi_cr1); + +	start = get_timer(0); +	while (get_timer(start) < timeout) { +		sr = smi_read_sr(bank); +		if ((sr >= 0) && (sr & (1 << (bank + WM_SHIFT)))) +			return 0; + +		/* Try again after 10 usec */ +		udelay(10); +	}; + +	return -1; +} + +/* + * smi_init - SMI initialization routine + * + * SMI initialization routine. Sets SMI control register1. + */ +void smi_init(void) +{ +	/* Setting the fast mode values. SMI working at 166/4 = 41.5 MHz */ +	writel(HOLD1 | FAST_MODE | BANK_EN | DSEL_TIME | PRESCAL4, +	       &smicntl->smi_cr1); +} + +/* + * smi_sector_erase - Erase flash sector + * @info:	 flash_info structure pointer + * @sector:	 sector number + * + * Set write enable latch with Write Enable command. + * Returns negative if error occurred. 
+ */ +static int smi_sector_erase(flash_info_t *info, unsigned int sector) +{ +	int bank; +	unsigned int sect_add; +	unsigned int instruction; + +	switch (info->start[0]) { +	case SMIBANK0_BASE: +		bank = BANK0; +		break; +	case SMIBANK1_BASE: +		bank = BANK1; +		break; +	case SMIBANK2_BASE: +		bank = BANK2; +		break; +	case SMIBANK3_BASE: +		bank = BANK3; +		break; +	default: +		return -1; +	} + +	sect_add = sector * (info->size / info->sector_count); +	instruction = ((sect_add >> 8) & 0x0000FF00) | SECTOR_ERASE; + +	writel(readl(&smicntl->smi_sr) & ~(ERF1 | ERF2), &smicntl->smi_sr); + +	/* Wait until finished previous write command. */ +	if (smi_wait_till_ready(bank, CONFIG_SYS_FLASH_ERASE_TOUT)) +		return -EBUSY; + +	/* Send write enable, before erase commands. */ +	if (smi_write_enable(bank)) +		return -EIO; + +	/* Put SMI in SW mode */ +	writel(readl(&smicntl->smi_cr1) | SW_MODE, &smicntl->smi_cr1); + +	/* Send Sector Erase command in SW Mode */ +	writel(instruction, &smicntl->smi_tr); +	writel((bank << BANKSEL_SHIFT) | SEND | TX_LEN_4, +		       &smicntl->smi_cr2); +	if (smi_wait_xfer_finish(XFER_FINISH_TOUT)) +		return -EIO; + +	if (smi_wait_till_ready(bank, CONFIG_SYS_FLASH_ERASE_TOUT)) +		return -EBUSY; + +	/* Put SMI in HW mode */ +	writel(readl(&smicntl->smi_cr1) & ~SW_MODE, +		       &smicntl->smi_cr1); + +	return 0; +} + +/* + * smi_write - Write to SMI flash + * @src_addr:	 source buffer + * @dst_addr:	 destination buffer + * @length:	 length to write in bytes + * @bank:	 bank base address + * + * Write to SMI flash + */ +static int smi_write(unsigned int *src_addr, unsigned int *dst_addr, +		     unsigned int length, ulong bank_addr) +{ +	u8 *src_addr8 = (u8 *)src_addr; +	u8 *dst_addr8 = (u8 *)dst_addr; +	int banknum; +	int i; + +	switch (bank_addr) { +	case SMIBANK0_BASE: +		banknum = BANK0; +		break; +	case SMIBANK1_BASE: +		banknum = BANK1; +		break; +	case SMIBANK2_BASE: +		banknum = BANK2; +		break; +	case SMIBANK3_BASE: +		banknum = BANK3; +		
break; +	default: +		return -1; +	} + +	if (smi_wait_till_ready(banknum, CONFIG_SYS_FLASH_WRITE_TOUT)) +		return -EBUSY; + +	/* Set SMI in Hardware Mode */ +	writel(readl(&smicntl->smi_cr1) & ~SW_MODE, &smicntl->smi_cr1); + +	if (smi_write_enable(banknum)) +		return -EIO; + +	/* Perform the write command */ +	for (i = 0; i < length; i += 4) { +		if (((ulong) (dst_addr) % SFLASH_PAGE_SIZE) == 0) { +			if (smi_wait_till_ready(banknum, +						CONFIG_SYS_FLASH_WRITE_TOUT)) +				return -EBUSY; + +			if (smi_write_enable(banknum)) +				return -EIO; +		} + +		if (length < 4) { +			int k; + +			/* +			 * Handle special case, where length < 4 (redundant env) +			 */ +			for (k = 0; k < length; k++) +				*dst_addr8++ = *src_addr8++; +		} else { +			/* Normal 32bit write */ +			*dst_addr++ = *src_addr++; +		} + +		if ((readl(&smicntl->smi_sr) & (ERF1 | ERF2))) +			return -EIO; +	} + +	if (smi_wait_till_ready(banknum, CONFIG_SYS_FLASH_WRITE_TOUT)) +		return -EBUSY; + +	writel(readl(&smicntl->smi_sr) & ~(WCF), &smicntl->smi_sr); + +	return 0; +} + +/* + * write_buff - Write to SMI flash + * @info:	 flash info structure + * @src:	 source buffer + * @dest_addr:	 destination buffer + * @length:	 length to write in words + * + * Write to SMI flash + */ +int write_buff(flash_info_t *info, uchar *src, ulong dest_addr, ulong length) +{ +	return smi_write((unsigned int *)src, (unsigned int *)dest_addr, +			 length, info->start[0]); +} + +/* + * flash_init - SMI flash initialization + * + * SMI flash initialization + */ +unsigned long flash_init(void) +{ +	unsigned long size = 0; +	int i, j; + +	smi_init(); + +	for (i = 0; i < CONFIG_SYS_MAX_FLASH_BANKS; i++) { +		flash_info[i].flash_id = FLASH_UNKNOWN; +		size += flash_info[i].size = flash_get_size(bank_base[i], i); +	} + +	for (j = 0; j < CONFIG_SYS_MAX_FLASH_BANKS; j++) { +		for (i = 1; i < flash_info[j].sector_count; i++) +			flash_info[j].start[i] = +			    flash_info[j].start[i - 1] + +			    flash_info->size / 
flash_info->sector_count; + +	} + +	return size; +} + +/* + * flash_print_info - Print SMI flash information + * + * Print SMI flash information + */ +void flash_print_info(flash_info_t *info) +{ +	int i; +	if (info->flash_id == FLASH_UNKNOWN) { +		puts("missing or unknown FLASH type\n"); +		return; +	} + +	if (info->size >= 0x100000) +		printf("  Size: %ld MB in %d Sectors\n", +		       info->size >> 20, info->sector_count); +	else +		printf("  Size: %ld KB in %d Sectors\n", +		       info->size >> 10, info->sector_count); + +	puts("  Sector Start Addresses:"); +	for (i = 0; i < info->sector_count; ++i) { +#ifdef CONFIG_SYS_FLASH_EMPTY_INFO +		int size; +		int erased; +		u32 *flash; + +		/* +		 * Check if whole sector is erased +		 */ +		size = (info->size) / (info->sector_count); +		flash = (u32 *) info->start[i]; +		size = size / sizeof(int); + +		while ((size--) && (*flash++ == ~0)) +			; + +		size++; +		if (size) +			erased = 0; +		else +			erased = 1; + +		if ((i % 5) == 0) +			printf("\n"); + +		printf(" %08lX%s%s", +		       info->start[i], +		       erased ? " E" : "  ", info->protect[i] ? "RO " : "   "); +#else +		if ((i % 5) == 0) +			printf("\n   "); +		printf(" %08lX%s", +		       info->start[i], info->protect[i] ? 
" (RO)  " : "     "); +#endif +	} +	putc('\n'); +	return; +} + +/* + * flash_erase - Erase SMI flash + * + * Erase SMI flash + */ +int flash_erase(flash_info_t *info, int s_first, int s_last) +{ +	int rcode = 0; +	int prot = 0; +	flash_sect_t sect; + +	if ((s_first < 0) || (s_first > s_last)) { +		puts("- no sectors to erase\n"); +		return 1; +	} + +	for (sect = s_first; sect <= s_last; ++sect) { +		if (info->protect[sect]) +			prot++; +	} +	if (prot) { +		printf("- Warning: %d protected sectors will not be erased!\n", +		       prot); +	} else { +		putc('\n'); +	} + +	for (sect = s_first; sect <= s_last; sect++) { +		if (info->protect[sect] == 0) { +			if (smi_sector_erase(info, sect)) +				rcode = 1; +			else +				putc('.'); +		} +	} +	puts(" done\n"); +	return rcode; +} +#endif diff --git a/roms/u-boot/drivers/mtd/ubi/Makefile b/roms/u-boot/drivers/mtd/ubi/Makefile new file mode 100644 index 00000000..56c28234 --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/Makefile @@ -0,0 +1,10 @@ +# +# (C) Copyright 2006 +# Wolfgang Denk, DENX Software Engineering, wd@denx.de. +# +# SPDX-License-Identifier:	GPL-2.0+ +# + +obj-y += build.o vtbl.o vmt.o upd.o kapi.o eba.o io.o wl.o scan.o crc32.o +obj-y += misc.o +obj-y += debug.o diff --git a/roms/u-boot/drivers/mtd/ubi/build.c b/roms/u-boot/drivers/mtd/ubi/build.c new file mode 100644 index 00000000..6d86c0b6 --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/build.c @@ -0,0 +1,1181 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * Copyright (c) Nokia Corporation, 2007 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём), + *         Frank Haverkamp + */ + +/* + * This file includes UBI initialization and building of UBI devices. + * + * When UBI is initialized, it attaches all the MTD devices specified as the + * module load parameters or the kernel boot parameters. 
If MTD devices were + * specified, UBI does not attach any MTD device, but it is possible to do + * later using the "UBI control device". + * + * At the moment we only attach UBI devices by scanning, which will become a + * bottleneck when flashes reach certain large size. Then one may improve UBI + * and add other methods, although it does not seem to be easy to do. + */ + +#ifdef UBI_LINUX +#include <linux/err.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/stringify.h> +#include <linux/stat.h> +#include <linux/miscdevice.h> +#include <linux/log2.h> +#include <linux/kthread.h> +#endif +#include <ubi_uboot.h> +#include "ubi.h" + +#if (CONFIG_SYS_MALLOC_LEN < (512 << 10)) +#error Malloc area too small for UBI, increase CONFIG_SYS_MALLOC_LEN to >= 512k +#endif + +/* Maximum length of the 'mtd=' parameter */ +#define MTD_PARAM_LEN_MAX 64 + +/** + * struct mtd_dev_param - MTD device parameter description data structure. + * @name: MTD device name or number string + * @vid_hdr_offs: VID header offset + */ +struct mtd_dev_param +{ +	char name[MTD_PARAM_LEN_MAX]; +	int vid_hdr_offs; +}; + +/* Numbers of elements set in the @mtd_dev_param array */ +static int mtd_devs = 0; + +/* MTD devices specification parameters */ +static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; + +/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ +struct class *ubi_class; + +#ifdef UBI_LINUX +/* Slab cache for wear-leveling entries */ +struct kmem_cache *ubi_wl_entry_slab; + +/* UBI control character device */ +static struct miscdevice ubi_ctrl_cdev = { +	.minor = MISC_DYNAMIC_MINOR, +	.name = "ubi_ctrl", +	.fops = &ubi_ctrl_cdev_operations, +}; +#endif + +/* All UBI devices in system */ +struct ubi_device *ubi_devices[UBI_MAX_DEVICES]; + +#ifdef UBI_LINUX +/* Serializes UBI devices creations and removals */ +DEFINE_MUTEX(ubi_devices_mutex); + +/* Protects @ubi_devices and @ubi->ref_count */ +static DEFINE_SPINLOCK(ubi_devices_lock); + +/* 
"Show" method for files in '/<sysfs>/class/ubi/' */ +static ssize_t ubi_version_show(struct class *class, char *buf) +{ +	return sprintf(buf, "%d\n", UBI_VERSION); +} + +/* UBI version attribute ('/<sysfs>/class/ubi/version') */ +static struct class_attribute ubi_version = +	__ATTR(version, S_IRUGO, ubi_version_show, NULL); + +static ssize_t dev_attribute_show(struct device *dev, +				  struct device_attribute *attr, char *buf); + +/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */ +static struct device_attribute dev_eraseblock_size = +	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_avail_eraseblocks = +	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_total_eraseblocks = +	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_volumes_count = +	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_max_ec = +	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_reserved_for_bad = +	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_bad_peb_count = +	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_max_vol_count = +	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_min_io_size = +	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_bgt_enabled = +	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL); +static struct device_attribute dev_mtd_num = +	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); +#endif + +/** + * ubi_get_device - get UBI device. + * @ubi_num: UBI device number + * + * This function returns UBI device description object for UBI device number + * @ubi_num, or %NULL if the device does not exist. 
This function increases the + * device reference count to prevent removal of the device. In other words, the + * device cannot be removed if its reference count is not zero. + */ +struct ubi_device *ubi_get_device(int ubi_num) +{ +	struct ubi_device *ubi; + +	spin_lock(&ubi_devices_lock); +	ubi = ubi_devices[ubi_num]; +	if (ubi) { +		ubi_assert(ubi->ref_count >= 0); +		ubi->ref_count += 1; +		get_device(&ubi->dev); +	} +	spin_unlock(&ubi_devices_lock); + +	return ubi; +} + +/** + * ubi_put_device - drop an UBI device reference. + * @ubi: UBI device description object + */ +void ubi_put_device(struct ubi_device *ubi) +{ +	spin_lock(&ubi_devices_lock); +	ubi->ref_count -= 1; +	put_device(&ubi->dev); +	spin_unlock(&ubi_devices_lock); +} + +/** + * ubi_get_by_major - get UBI device description object by character device + *                    major number. + * @major: major number + * + * This function is similar to 'ubi_get_device()', but it searches the device + * by its major number. + */ +struct ubi_device *ubi_get_by_major(int major) +{ +	int i; +	struct ubi_device *ubi; + +	spin_lock(&ubi_devices_lock); +	for (i = 0; i < UBI_MAX_DEVICES; i++) { +		ubi = ubi_devices[i]; +		if (ubi && MAJOR(ubi->cdev.dev) == major) { +			ubi_assert(ubi->ref_count >= 0); +			ubi->ref_count += 1; +			get_device(&ubi->dev); +			spin_unlock(&ubi_devices_lock); +			return ubi; +		} +	} +	spin_unlock(&ubi_devices_lock); + +	return NULL; +} + +/** + * ubi_major2num - get UBI device number by character device major number. + * @major: major number + * + * This function searches UBI device number object by its major number. If UBI + * device was not found, this function returns -ENODEV, otherwise the UBI device + * number is returned. 
+ */
+int ubi_major2num(int major)
+{
+	int i, ubi_num = -ENODEV;
+
+	/*
+	 * NOTE: unlike ubi_get_device(), this lookup does not bump
+	 * ref_count or take a device reference — it only reads the
+	 * number under the lock.
+	 */
+	spin_lock(&ubi_devices_lock);
+	for (i = 0; i < UBI_MAX_DEVICES; i++) {
+		struct ubi_device *ubi = ubi_devices[i];
+
+		if (ubi && MAJOR(ubi->cdev.dev) == major) {
+			ubi_num = ubi->ubi_num;
+			break;
+		}
+	}
+	spin_unlock(&ubi_devices_lock);
+
+	return ubi_num;
+}
+
+#ifdef UBI_LINUX
+/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
+static ssize_t dev_attribute_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	struct ubi_device *ubi;
+
+	/*
+	 * The below code looks weird, but it actually makes sense. We get the
+	 * UBI device reference from the contained 'struct ubi_device'. But it
+	 * is unclear if the device was removed or not yet. Indeed, if the
+	 * device was removed before we increased its reference count,
+	 * 'ubi_get_device()' will return -ENODEV and we fail.
+	 *
+	 * Remember, 'struct ubi_device' is freed in the release function, so
+	 * we still can use 'ubi->ubi_num'.
+	 */ +	ubi = container_of(dev, struct ubi_device, dev); +	ubi = ubi_get_device(ubi->ubi_num); +	if (!ubi) +		return -ENODEV; + +	if (attr == &dev_eraseblock_size) +		ret = sprintf(buf, "%d\n", ubi->leb_size); +	else if (attr == &dev_avail_eraseblocks) +		ret = sprintf(buf, "%d\n", ubi->avail_pebs); +	else if (attr == &dev_total_eraseblocks) +		ret = sprintf(buf, "%d\n", ubi->good_peb_count); +	else if (attr == &dev_volumes_count) +		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT); +	else if (attr == &dev_max_ec) +		ret = sprintf(buf, "%d\n", ubi->max_ec); +	else if (attr == &dev_reserved_for_bad) +		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); +	else if (attr == &dev_bad_peb_count) +		ret = sprintf(buf, "%d\n", ubi->bad_peb_count); +	else if (attr == &dev_max_vol_count) +		ret = sprintf(buf, "%d\n", ubi->vtbl_slots); +	else if (attr == &dev_min_io_size) +		ret = sprintf(buf, "%d\n", ubi->min_io_size); +	else if (attr == &dev_bgt_enabled) +		ret = sprintf(buf, "%d\n", ubi->thread_enabled); +	else if (attr == &dev_mtd_num) +		ret = sprintf(buf, "%d\n", ubi->mtd->index); +	else +		ret = -EINVAL; + +	ubi_put_device(ubi); +	return ret; +} + +/* Fake "release" method for UBI devices */ +static void dev_release(struct device *dev) { } + +/** + * ubi_sysfs_init - initialize sysfs for an UBI device. + * @ubi: UBI device description object + * + * This function returns zero in case of success and a negative error code in + * case of failure. 
+ */ +static int ubi_sysfs_init(struct ubi_device *ubi) +{ +	int err; + +	ubi->dev.release = dev_release; +	ubi->dev.devt = ubi->cdev.dev; +	ubi->dev.class = ubi_class; +	sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num); +	err = device_register(&ubi->dev); +	if (err) +		return err; + +	err = device_create_file(&ubi->dev, &dev_eraseblock_size); +	if (err) +		return err; +	err = device_create_file(&ubi->dev, &dev_avail_eraseblocks); +	if (err) +		return err; +	err = device_create_file(&ubi->dev, &dev_total_eraseblocks); +	if (err) +		return err; +	err = device_create_file(&ubi->dev, &dev_volumes_count); +	if (err) +		return err; +	err = device_create_file(&ubi->dev, &dev_max_ec); +	if (err) +		return err; +	err = device_create_file(&ubi->dev, &dev_reserved_for_bad); +	if (err) +		return err; +	err = device_create_file(&ubi->dev, &dev_bad_peb_count); +	if (err) +		return err; +	err = device_create_file(&ubi->dev, &dev_max_vol_count); +	if (err) +		return err; +	err = device_create_file(&ubi->dev, &dev_min_io_size); +	if (err) +		return err; +	err = device_create_file(&ubi->dev, &dev_bgt_enabled); +	if (err) +		return err; +	err = device_create_file(&ubi->dev, &dev_mtd_num); +	return err; +} + +/** + * ubi_sysfs_close - close sysfs for an UBI device. 
+ * @ubi: UBI device description object + */ +static void ubi_sysfs_close(struct ubi_device *ubi) +{ +	device_remove_file(&ubi->dev, &dev_mtd_num); +	device_remove_file(&ubi->dev, &dev_bgt_enabled); +	device_remove_file(&ubi->dev, &dev_min_io_size); +	device_remove_file(&ubi->dev, &dev_max_vol_count); +	device_remove_file(&ubi->dev, &dev_bad_peb_count); +	device_remove_file(&ubi->dev, &dev_reserved_for_bad); +	device_remove_file(&ubi->dev, &dev_max_ec); +	device_remove_file(&ubi->dev, &dev_volumes_count); +	device_remove_file(&ubi->dev, &dev_total_eraseblocks); +	device_remove_file(&ubi->dev, &dev_avail_eraseblocks); +	device_remove_file(&ubi->dev, &dev_eraseblock_size); +	device_unregister(&ubi->dev); +} +#endif + +/** + * kill_volumes - destroy all volumes. + * @ubi: UBI device description object + */ +static void kill_volumes(struct ubi_device *ubi) +{ +	int i; + +	for (i = 0; i < ubi->vtbl_slots; i++) +		if (ubi->volumes[i]) +			ubi_free_volume(ubi, ubi->volumes[i]); +} + +/** + * uif_init - initialize user interfaces for an UBI device. + * @ubi: UBI device description object + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +static int uif_init(struct ubi_device *ubi) +{ +	int i, err; +#ifdef UBI_LINUX +	dev_t dev; +#endif + +	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); + +	/* +	 * Major numbers for the UBI character devices are allocated +	 * dynamically. Major numbers of volume character devices are +	 * equivalent to ones of the corresponding UBI character device. Minor +	 * numbers of UBI character devices are 0, while minor numbers of +	 * volume character devices start from 1. Thus, we allocate one major +	 * number and ubi->vtbl_slots + 1 minor numbers. 
+	 */ +	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name); +	if (err) { +		ubi_err("cannot register UBI character devices"); +		return err; +	} + +	ubi_assert(MINOR(dev) == 0); +	cdev_init(&ubi->cdev, &ubi_cdev_operations); +	dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev)); +	ubi->cdev.owner = THIS_MODULE; + +	err = cdev_add(&ubi->cdev, dev, 1); +	if (err) { +		ubi_err("cannot add character device"); +		goto out_unreg; +	} + +	err = ubi_sysfs_init(ubi); +	if (err) +		goto out_sysfs; + +	for (i = 0; i < ubi->vtbl_slots; i++) +		if (ubi->volumes[i]) { +			err = ubi_add_volume(ubi, ubi->volumes[i]); +			if (err) { +				ubi_err("cannot add volume %d", i); +				goto out_volumes; +			} +		} + +	return 0; + +out_volumes: +	kill_volumes(ubi); +out_sysfs: +	ubi_sysfs_close(ubi); +	cdev_del(&ubi->cdev); +out_unreg: +	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); +	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); +	return err; +} + +/** + * uif_close - close user interfaces for an UBI device. + * @ubi: UBI device description object + */ +static void uif_close(struct ubi_device *ubi) +{ +	kill_volumes(ubi); +	ubi_sysfs_close(ubi); +	cdev_del(&ubi->cdev); +	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); +} + +/** + * attach_by_scanning - attach an MTD device using scanning method. + * @ubi: UBI device descriptor + * + * This function returns zero in case of success and a negative error code in + * case of failure. + * + * Note, currently this is the only method to attach UBI devices. Hopefully in + * the future we'll have more scalable attaching methods and avoid full media + * scanning. But even in this case scanning will be needed as a fall-back + * attaching method if there are some on-flash table corruptions. 
+ */
+static int attach_by_scanning(struct ubi_device *ubi)
+{
+	int err;
+	struct ubi_scan_info *si;
+
+	si = ubi_scan(ubi);
+	if (IS_ERR(si))
+		return PTR_ERR(si);
+
+	ubi->bad_peb_count = si->bad_peb_count;
+	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
+	ubi->max_ec = si->max_ec;
+	ubi->mean_ec = si->mean_ec;
+
+	err = ubi_read_volume_table(ubi, si);
+	if (err)
+		goto out_si;
+
+	err = ubi_eba_init_scan(ubi, si);
+	if (err)
+		goto out_vtbl;
+
+	err = ubi_wl_init_scan(ubi, si);
+	if (err)
+		goto out_eba;
+
+	/* Scan info is consumed on success as well as on failure */
+	ubi_scan_destroy_si(si);
+	return 0;
+
+	/* Error unwinding: undo in reverse order of initialization */
+out_eba:
+	ubi_eba_close(ubi);
+out_vtbl:
+	vfree(ubi->vtbl);
+out_si:
+	ubi_scan_destroy_si(si);
+	return err;
+}
+
+/**
+ * io_init - initialize I/O unit for a given UBI device.
+ * @ubi: UBI device description object
+ *
+ * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
+ * assumed:
+ *   o EC header is always at offset zero - this cannot be changed;
+ *   o VID header starts just after the EC header at the closest address
+ *     aligned to @io->hdrs_min_io_size;
+ *   o data starts just after the VID header at the closest address aligned to
+ *     @io->min_io_size
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+static int io_init(struct ubi_device *ubi)
+{
+	if (ubi->mtd->numeraseregions != 0) {
+		/*
+		 * Some flashes have several erase regions. Different regions
+		 * may have different eraseblock size and other
+		 * characteristics. It looks like mostly multi-region flashes
+		 * have one "main" region and one or more small regions to
+		 * store boot loader code or boot parameters or whatever. I
+		 * guess we should just pick the largest region. But this is
+		 * not implemented.
+		 */ +		ubi_err("multiple regions, not implemented"); +		return -EINVAL; +	} + +	if (ubi->vid_hdr_offset < 0) +		return -EINVAL; + +	/* +	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF +	 * physical eraseblocks maximum. +	 */ + +	ubi->peb_size   = ubi->mtd->erasesize; +	ubi->peb_count  = mtd_div_by_eb(ubi->mtd->size, ubi->mtd); +	ubi->flash_size = ubi->mtd->size; + +	if (mtd_can_have_bb(ubi->mtd)) +		ubi->bad_allowed = 1; + +	ubi->min_io_size = ubi->mtd->writesize; +	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; + +	/* +	 * Make sure minimal I/O unit is power of 2. Note, there is no +	 * fundamental reason for this assumption. It is just an optimization +	 * which allows us to avoid costly division operations. +	 */ +	if (!is_power_of_2(ubi->min_io_size)) { +		ubi_err("min. I/O unit (%d) is not power of 2", +			ubi->min_io_size); +		return -EINVAL; +	} + +	ubi_assert(ubi->hdrs_min_io_size > 0); +	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size); +	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0); + +	/* Calculate default aligned sizes of EC and VID headers */ +	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); +	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); + +	dbg_msg("min_io_size      %d", ubi->min_io_size); +	dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size); +	dbg_msg("ec_hdr_alsize    %d", ubi->ec_hdr_alsize); +	dbg_msg("vid_hdr_alsize   %d", ubi->vid_hdr_alsize); + +	if (ubi->vid_hdr_offset == 0) +		/* Default offset */ +		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset = +				      ubi->ec_hdr_alsize; +	else { +		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset & +						~(ubi->hdrs_min_io_size - 1); +		ubi->vid_hdr_shift = ubi->vid_hdr_offset - +						ubi->vid_hdr_aloffset; +	} + +	/* Similar for the data offset */ +	ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE; +	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); + +	dbg_msg("vid_hdr_offset  
 %d", ubi->vid_hdr_offset); +	dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); +	dbg_msg("vid_hdr_shift    %d", ubi->vid_hdr_shift); +	dbg_msg("leb_start        %d", ubi->leb_start); + +	/* The shift must be aligned to 32-bit boundary */ +	if (ubi->vid_hdr_shift % 4) { +		ubi_err("unaligned VID header shift %d", +			ubi->vid_hdr_shift); +		return -EINVAL; +	} + +	/* Check sanity */ +	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || +	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || +	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || +	    ubi->leb_start & (ubi->min_io_size - 1)) { +		ubi_err("bad VID header (%d) or data offsets (%d)", +			ubi->vid_hdr_offset, ubi->leb_start); +		return -EINVAL; +	} + +	/* +	 * It may happen that EC and VID headers are situated in one minimal +	 * I/O unit. In this case we can only accept this UBI image in +	 * read-only mode. +	 */ +	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { +		ubi_warn("EC and VID headers are in the same minimal I/O unit, " +			 "switch to read-only mode"); +		ubi->ro_mode = 1; +	} + +	ubi->leb_size = ubi->peb_size - ubi->leb_start; + +	if (!(ubi->mtd->flags & MTD_WRITEABLE)) { +		ubi_msg("MTD device %d is write-protected, attach in " +			"read-only mode", ubi->mtd->index); +		ubi->ro_mode = 1; +	} + +	ubi_msg("physical eraseblock size:   %d bytes (%d KiB)", +		ubi->peb_size, ubi->peb_size >> 10); +	ubi_msg("logical eraseblock size:    %d bytes", ubi->leb_size); +	ubi_msg("smallest flash I/O unit:    %d", ubi->min_io_size); +	if (ubi->hdrs_min_io_size != ubi->min_io_size) +		ubi_msg("sub-page size:              %d", +			ubi->hdrs_min_io_size); +	ubi_msg("VID header offset:          %d (aligned %d)", +		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset); +	ubi_msg("data offset:                %d", ubi->leb_start); + +	/* +	 * Note, ideally, we have to initialize ubi->bad_peb_count here. But +	 * unfortunately, MTD does not provide this information. 
We should loop +	 * over all physical eraseblocks and invoke mtd->block_is_bad() for +	 * each physical eraseblock. So, we skip ubi->bad_peb_count +	 * uninitialized and initialize it after scanning. +	 */ + +	return 0; +} + +/** + * autoresize - re-size the volume which has the "auto-resize" flag set. + * @ubi: UBI device description object + * @vol_id: ID of the volume to re-size + * + * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in + * the volume table to the largest possible size. See comments in ubi-header.h + * for more description of the flag. Returns zero in case of success and a + * negative error code in case of failure. + */ +static int autoresize(struct ubi_device *ubi, int vol_id) +{ +	struct ubi_volume_desc desc; +	struct ubi_volume *vol = ubi->volumes[vol_id]; +	int err, old_reserved_pebs = vol->reserved_pebs; + +	/* +	 * Clear the auto-resize flag in the volume in-memory copy of the +	 * volume table, and 'ubi_resize_volume()' will propogate this change +	 * to the flash. +	 */ +	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; + +	if (ubi->avail_pebs == 0) { +		struct ubi_vtbl_record vtbl_rec; + +		/* +		 * No avalilable PEBs to re-size the volume, clear the flag on +		 * flash and exit. +		 */ +		memcpy(&vtbl_rec, &ubi->vtbl[vol_id], +		       sizeof(struct ubi_vtbl_record)); +		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); +		if (err) +			ubi_err("cannot clean auto-resize flag for volume %d", +				vol_id); +	} else { +		desc.vol = vol; +		err = ubi_resize_volume(&desc, +					old_reserved_pebs + ubi->avail_pebs); +		if (err) +			ubi_err("cannot auto-resize volume %d", vol_id); +	} + +	if (err) +		return err; + +	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id, +		vol->name, old_reserved_pebs, vol->reserved_pebs); +	return 0; +} + +/** + * ubi_attach_mtd_dev - attach an MTD device. 
+ * @mtd_dev: MTD device description object + * @ubi_num: number to assign to the new UBI device + * @vid_hdr_offset: VID header offset + * + * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number + * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in + * which case this function finds a vacant device nubert and assings it + * automatically. Returns the new UBI device number in case of success and a + * negative error code in case of failure. + * + * Note, the invocations of this function has to be serialized by the + * @ubi_devices_mutex. + */ +int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) +{ +	struct ubi_device *ubi; +	int i, err; + +	/* +	 * Check if we already have the same MTD device attached. +	 * +	 * Note, this function assumes that UBI devices creations and deletions +	 * are serialized, so it does not take the &ubi_devices_lock. +	 */ +	for (i = 0; i < UBI_MAX_DEVICES; i++) { +		ubi = ubi_devices[i]; +		if (ubi && mtd->index == ubi->mtd->index) { +			dbg_err("mtd%d is already attached to ubi%d", +				mtd->index, i); +			return -EEXIST; +		} +	} + +	/* +	 * Make sure this MTD device is not emulated on top of an UBI volume +	 * already. Well, generally this recursion works fine, but there are +	 * different problems like the UBI module takes a reference to itself +	 * by attaching (and thus, opening) the emulated MTD device. This +	 * results in inability to unload the module. And in general it makes +	 * no sense to attach emulated MTD devices, so we prohibit this. 
+	 */ +	if (mtd->type == MTD_UBIVOLUME) { +		ubi_err("refuse attaching mtd%d - it is already emulated on " +			"top of UBI", mtd->index); +		return -EINVAL; +	} + +	if (ubi_num == UBI_DEV_NUM_AUTO) { +		/* Search for an empty slot in the @ubi_devices array */ +		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) +			if (!ubi_devices[ubi_num]) +				break; +		if (ubi_num == UBI_MAX_DEVICES) { +			dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES); +			return -ENFILE; +		} +	} else { +		if (ubi_num >= UBI_MAX_DEVICES) +			return -EINVAL; + +		/* Make sure ubi_num is not busy */ +		if (ubi_devices[ubi_num]) { +			dbg_err("ubi%d already exists", ubi_num); +			return -EEXIST; +		} +	} + +	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL); +	if (!ubi) +		return -ENOMEM; + +	ubi->mtd = mtd; +	ubi->ubi_num = ubi_num; +	ubi->vid_hdr_offset = vid_hdr_offset; +	ubi->autoresize_vol_id = -1; + +	mutex_init(&ubi->buf_mutex); +	mutex_init(&ubi->ckvol_mutex); +	mutex_init(&ubi->volumes_mutex); +	spin_lock_init(&ubi->volumes_lock); + +	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); + +	err = io_init(ubi); +	if (err) +		goto out_free; + +	err = -ENOMEM; +	ubi->peb_buf1 = vmalloc(ubi->peb_size); +	if (!ubi->peb_buf1) +		goto out_free; + +	ubi->peb_buf2 = vmalloc(ubi->peb_size); +	if (!ubi->peb_buf2) +		goto out_free; + +#ifdef CONFIG_MTD_UBI_DEBUG +	mutex_init(&ubi->dbg_buf_mutex); +	ubi->dbg_peb_buf = vmalloc(ubi->peb_size); +	if (!ubi->dbg_peb_buf) +		goto out_free; +#endif + +	err = attach_by_scanning(ubi); +	if (err) { +		dbg_err("failed to attach by scanning, error %d", err); +		goto out_free; +	} + +	if (ubi->autoresize_vol_id != -1) { +		err = autoresize(ubi, ubi->autoresize_vol_id); +		if (err) +			goto out_detach; +	} + +	err = uif_init(ubi); +	if (err) +		goto out_detach; + +	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); +	if (IS_ERR(ubi->bgt_thread)) { +		err = PTR_ERR(ubi->bgt_thread); +		ubi_err("cannot spawn \"%s\", 
error %d", ubi->bgt_name, +			err); +		goto out_uif; +	} + +	ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num); +	ubi_msg("MTD device name:            \"%s\"", mtd->name); +	ubi_msg("MTD device size:            %llu MiB", ubi->flash_size >> 20); +	ubi_msg("number of good PEBs:        %d", ubi->good_peb_count); +	ubi_msg("number of bad PEBs:         %d", ubi->bad_peb_count); +	ubi_msg("max. allowed volumes:       %d", ubi->vtbl_slots); +	ubi_msg("wear-leveling threshold:    %d", CONFIG_MTD_UBI_WL_THRESHOLD); +	ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT); +	ubi_msg("number of user volumes:     %d", +		ubi->vol_count - UBI_INT_VOL_COUNT); +	ubi_msg("available PEBs:             %d", ubi->avail_pebs); +	ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs); +	ubi_msg("number of PEBs reserved for bad PEB handling: %d", +		ubi->beb_rsvd_pebs); +	ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); + +	/* Enable the background thread */ +	if (!DBG_DISABLE_BGT) { +		ubi->thread_enabled = 1; +		wake_up_process(ubi->bgt_thread); +	} + +	ubi_devices[ubi_num] = ubi; +	return ubi_num; + +out_uif: +	uif_close(ubi); +out_detach: +	ubi_eba_close(ubi); +	ubi_wl_close(ubi); +	vfree(ubi->vtbl); +out_free: +	vfree(ubi->peb_buf1); +	vfree(ubi->peb_buf2); +#ifdef CONFIG_MTD_UBI_DEBUG +	vfree(ubi->dbg_peb_buf); +#endif +	kfree(ubi); +	return err; +} + +/** + * ubi_detach_mtd_dev - detach an MTD device. + * @ubi_num: UBI device number to detach from + * @anyway: detach MTD even if device reference count is not zero + * + * This function destroys an UBI device number @ubi_num and detaches the + * underlying MTD device. Returns zero in case of success and %-EBUSY if the + * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not + * exist. + * + * Note, the invocations of this function has to be serialized by the + * @ubi_devices_mutex. 
+ */ +int ubi_detach_mtd_dev(int ubi_num, int anyway) +{ +	struct ubi_device *ubi; + +	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) +		return -EINVAL; + +	spin_lock(&ubi_devices_lock); +	ubi = ubi_devices[ubi_num]; +	if (!ubi) { +		spin_unlock(&ubi_devices_lock); +		return -EINVAL; +	} + +	if (ubi->ref_count) { +		if (!anyway) { +			spin_unlock(&ubi_devices_lock); +			return -EBUSY; +		} +		/* This may only happen if there is a bug */ +		ubi_err("%s reference count %d, destroy anyway", +			ubi->ubi_name, ubi->ref_count); +	} +	ubi_devices[ubi_num] = NULL; +	spin_unlock(&ubi_devices_lock); + +	ubi_assert(ubi_num == ubi->ubi_num); +	dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); + +	/* +	 * Before freeing anything, we have to stop the background thread to +	 * prevent it from doing anything on this device while we are freeing. +	 */ +	if (ubi->bgt_thread) +		kthread_stop(ubi->bgt_thread); + +	uif_close(ubi); +	ubi_eba_close(ubi); +	ubi_wl_close(ubi); +	vfree(ubi->vtbl); +	put_mtd_device(ubi->mtd); +	vfree(ubi->peb_buf1); +	vfree(ubi->peb_buf2); +#ifdef CONFIG_MTD_UBI_DEBUG +	vfree(ubi->dbg_peb_buf); +#endif +	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); +	kfree(ubi); +	return 0; +} + +/** + * find_mtd_device - open an MTD device by its name or number. + * @mtd_dev: name or number of the device + * + * This function tries to open and MTD device described by @mtd_dev string, + * which is first treated as an ASCII number, and if it is not true, it is + * treated as MTD device name. Returns MTD device description object in case of + * success and a negative error code in case of failure. + */ +static struct mtd_info * __init open_mtd_device(const char *mtd_dev) +{ +	struct mtd_info *mtd; +	int mtd_num; +	char *endp; + +	mtd_num = simple_strtoul(mtd_dev, &endp, 0); +	if (*endp != '\0' || mtd_dev == endp) { +		/* +		 * This does not look like an ASCII integer, probably this is +		 * MTD device name. 
+		 */ +		mtd = get_mtd_device_nm(mtd_dev); +	} else +		mtd = get_mtd_device(NULL, mtd_num); + +	return mtd; +} + +int __init ubi_init(void) +{ +	int err, i, k; + +	/* Ensure that EC and VID headers have correct size */ +	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64); +	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); + +	if (mtd_devs > UBI_MAX_DEVICES) { +		ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES); +		return -EINVAL; +	} + +	/* Create base sysfs directory and sysfs files */ +	ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); +	if (IS_ERR(ubi_class)) { +		err = PTR_ERR(ubi_class); +		ubi_err("cannot create UBI class"); +		goto out; +	} + +	err = class_create_file(ubi_class, &ubi_version); +	if (err) { +		ubi_err("cannot create sysfs file"); +		goto out_class; +	} + +	err = misc_register(&ubi_ctrl_cdev); +	if (err) { +		ubi_err("cannot register device"); +		goto out_version; +	} + +#ifdef UBI_LINUX +	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab", +					      sizeof(struct ubi_wl_entry), +					      0, 0, NULL); +	if (!ubi_wl_entry_slab) +		goto out_dev_unreg; +#endif + +	/* Attach MTD devices */ +	for (i = 0; i < mtd_devs; i++) { +		struct mtd_dev_param *p = &mtd_dev_param[i]; +		struct mtd_info *mtd; + +		cond_resched(); + +		mtd = open_mtd_device(p->name); +		if (IS_ERR(mtd)) { +			err = PTR_ERR(mtd); +			goto out_detach; +		} + +		mutex_lock(&ubi_devices_mutex); +		err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, +					 p->vid_hdr_offs); +		mutex_unlock(&ubi_devices_mutex); +		if (err < 0) { +			put_mtd_device(mtd); +			ubi_err("cannot attach mtd%d", mtd->index); +			goto out_detach; +		} +	} + +	return 0; + +out_detach: +	for (k = 0; k < i; k++) +		if (ubi_devices[k]) { +			mutex_lock(&ubi_devices_mutex); +			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1); +			mutex_unlock(&ubi_devices_mutex); +		} +#ifdef UBI_LINUX +	kmem_cache_destroy(ubi_wl_entry_slab); +out_dev_unreg: +#endif +	misc_deregister(&ubi_ctrl_cdev); 
+out_version: +	class_remove_file(ubi_class, &ubi_version); +out_class: +	class_destroy(ubi_class); +out: +	mtd_devs = 0; +	ubi_err("UBI error: cannot initialize UBI, error %d", err); +	return err; +} +module_init(ubi_init); + +void __exit ubi_exit(void) +{ +	int i; + +	for (i = 0; i < UBI_MAX_DEVICES; i++) +		if (ubi_devices[i]) { +			mutex_lock(&ubi_devices_mutex); +			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1); +			mutex_unlock(&ubi_devices_mutex); +		} +	kmem_cache_destroy(ubi_wl_entry_slab); +	misc_deregister(&ubi_ctrl_cdev); +	class_remove_file(ubi_class, &ubi_version); +	class_destroy(ubi_class); +	mtd_devs = 0; +} +module_exit(ubi_exit); + +/** + * bytes_str_to_int - convert a string representing number of bytes to an + * integer. + * @str: the string to convert + * + * This function returns positive resulting integer in case of success and a + * negative error code in case of failure. + */ +static int __init bytes_str_to_int(const char *str) +{ +	char *endp; +	unsigned long result; + +	result = simple_strtoul(str, &endp, 0); +	if (str == endp || result < 0) { +		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", +		       str); +		return -EINVAL; +	} + +	switch (*endp) { +	case 'G': +		result *= 1024; +	case 'M': +		result *= 1024; +	case 'K': +		result *= 1024; +		if (endp[1] == 'i' && endp[2] == 'B') +			endp += 2; +	case '\0': +		break; +	default: +		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", +		       str); +		return -EINVAL; +	} + +	return result; +} + +/** + * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter. + * @val: the parameter value to parse + * @kp: not used + * + * This function returns zero in case of success and a negative error code in + * case of error. 
+ */ +int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp) +{ +	int i, len; +	struct mtd_dev_param *p; +	char buf[MTD_PARAM_LEN_MAX]; +	char *pbuf = &buf[0]; +	char *tokens[2] = {NULL, NULL}; + +	if (!val) +		return -EINVAL; + +	if (mtd_devs == UBI_MAX_DEVICES) { +		printk(KERN_ERR "UBI error: too many parameters, max. is %d\n", +		       UBI_MAX_DEVICES); +		return -EINVAL; +	} + +	len = strnlen(val, MTD_PARAM_LEN_MAX); +	if (len == MTD_PARAM_LEN_MAX) { +		printk(KERN_ERR "UBI error: parameter \"%s\" is too long, " +		       "max. is %d\n", val, MTD_PARAM_LEN_MAX); +		return -EINVAL; +	} + +	if (len == 0) { +		printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - " +		       "ignored\n"); +		return 0; +	} + +	strcpy(buf, val); + +	/* Get rid of the final newline */ +	if (buf[len - 1] == '\n') +		buf[len - 1] = '\0'; + +	for (i = 0; i < 2; i++) +		tokens[i] = strsep(&pbuf, ","); + +	if (pbuf) { +		printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n", +		       val); +		return -EINVAL; +	} + +	p = &mtd_dev_param[mtd_devs]; +	strcpy(&p->name[0], tokens[0]); + +	if (tokens[1]) +		p->vid_hdr_offs = bytes_str_to_int(tokens[1]); + +	if (p->vid_hdr_offs < 0) +		return p->vid_hdr_offs; + +	mtd_devs += 1; +	return 0; +} + +module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); +MODULE_PARM_DESC(mtd, "MTD devices to attach. 
Parameter format: " +		      "mtd=<name|num>[,<vid_hdr_offs>].\n" +		      "Multiple \"mtd\" parameters may be specified.\n" +		      "MTD devices may be specified by their number or name.\n" +		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID " +		      "header position and data starting position to be used " +		      "by UBI.\n" +		      "Example: mtd=content,1984 mtd=4 - attach MTD device" +		      "with name \"content\" using VID header offset 1984, and " +		      "MTD device number 4 with default VID header offset."); + +MODULE_VERSION(__stringify(UBI_VERSION)); +MODULE_DESCRIPTION("UBI - Unsorted Block Images"); +MODULE_AUTHOR("Artem Bityutskiy"); +MODULE_LICENSE("GPL"); diff --git a/roms/u-boot/drivers/mtd/ubi/crc32.c b/roms/u-boot/drivers/mtd/ubi/crc32.c new file mode 100644 index 00000000..f1bebf58 --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/crc32.c @@ -0,0 +1,510 @@ +/* + * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com> + * Nicer crc32 functions/docs submitted by linux@horizon.com.  Thanks! + * Code was from the public domain, copyright abandoned.  Code was + * subsequently included in the kernel, thus was re-licensed under the + * GNU GPL v2. + * + * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com> + * Same crc32 function was used in 5 other places in the kernel. + * I made one version, and deleted the others. + * There are various incantations of crc32().  Some use a seed of 0 or ~0. + * Some xor at the end with ~0.  The generic crc32() function takes + * seed as an argument, and doesn't xor at the end.  Then individual + * users can do whatever they need. + *   drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0. + *   fs/jffs2 uses seed 0, doesn't xor with ~0. + *   fs/partitions/efi.c uses seed ~0, xor's with ~0. + * + * This source code is licensed under the GNU General Public License, + * Version 2.  See the file COPYING for more details. 
+ */ + +#ifdef UBI_LINUX +#include <linux/crc32.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/compiler.h> +#endif +#include <linux/types.h> + +#include <asm/byteorder.h> + +#ifdef UBI_LINUX +#include <linux/slab.h> +#include <linux/init.h> +#include <asm/atomic.h> +#endif +#include "crc32defs.h" +#define CRC_LE_BITS 8 + +#if CRC_LE_BITS == 8 +#define tole(x) cpu_to_le32(x) +#define tobe(x) cpu_to_be32(x) +#else +#define tole(x) (x) +#define tobe(x) (x) +#endif +#include "crc32table.h" +#ifdef UBI_LINUX +MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); +MODULE_DESCRIPTION("Ethernet CRC32 calculations"); +MODULE_LICENSE("GPL"); +#endif +/** + * crc32_le() - Calculate bitwise little-endian Ethernet AUTODIN II CRC32 + * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for + *	other uses, or the previous crc32 value if computing incrementally. + * @p: pointer to buffer over which CRC is run + * @len: length of buffer @p + */ +u32  crc32_le(u32 crc, unsigned char const *p, size_t len); + +#if CRC_LE_BITS == 1 +/* + * In fact, the table-based code will work in this case, but it can be + * simplified by inlining the table in ?: form. + */ + +u32 crc32_le(u32 crc, unsigned char const *p, size_t len) +{ +	int i; +	while (len--) { +		crc ^= *p++; +		for (i = 0; i < 8; i++) +			crc = (crc >> 1) ^ ((crc & 1) ? 
CRCPOLY_LE : 0); +	} +	return crc; +} +#else				/* Table-based approach */ + +u32 crc32_le(u32 crc, unsigned char const *p, size_t len) +{ +# if CRC_LE_BITS == 8 +	const u32      *b =(u32 *)p; +	const u32      *tab = crc32table_le; + +# ifdef __LITTLE_ENDIAN +#  define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) +# else +#  define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) +# endif +	/* printf("Crc32_le crc=%x\n",crc); */ +	crc = __cpu_to_le32(crc); +	/* Align it */ +	if((((long)b)&3 && len)){ +		do { +			u8 *p = (u8 *)b; +			DO_CRC(*p++); +			b = (void *)p; +		} while ((--len) && ((long)b)&3 ); +	} +	if((len >= 4)){ +		/* load data 32 bits wide, xor data 32 bits wide. */ +		size_t save_len = len & 3; +		len = len >> 2; +		--b; /* use pre increment below(*++b) for speed */ +		do { +			crc ^= *++b; +			DO_CRC(0); +			DO_CRC(0); +			DO_CRC(0); +			DO_CRC(0); +		} while (--len); +		b++; /* point to next byte(s) */ +		len = save_len; +	} +	/* And the last few bytes */ +	if(len){ +		do { +			u8 *p = (u8 *)b; +			DO_CRC(*p++); +			b = (void *)p; +		} while (--len); +	} + +	return __le32_to_cpu(crc); +#undef ENDIAN_SHIFT +#undef DO_CRC + +# elif CRC_LE_BITS == 4 +	while (len--) { +		crc ^= *p++; +		crc = (crc >> 4) ^ crc32table_le[crc & 15]; +		crc = (crc >> 4) ^ crc32table_le[crc & 15]; +	} +	return crc; +# elif CRC_LE_BITS == 2 +	while (len--) { +		crc ^= *p++; +		crc = (crc >> 2) ^ crc32table_le[crc & 3]; +		crc = (crc >> 2) ^ crc32table_le[crc & 3]; +		crc = (crc >> 2) ^ crc32table_le[crc & 3]; +		crc = (crc >> 2) ^ crc32table_le[crc & 3]; +	} +	return crc; +# endif +} +#endif +#ifdef UBI_LINUX +/** + * crc32_be() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 + * @crc: seed value for computation.  ~0 for Ethernet, sometimes 0 for + *	other uses, or the previous crc32 value if computing incrementally. 
+ * @p: pointer to buffer over which CRC is run + * @len: length of buffer @p + */ +u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len); + +#if CRC_BE_BITS == 1 +/* + * In fact, the table-based code will work in this case, but it can be + * simplified by inlining the table in ?: form. + */ + +u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len) +{ +	int i; +	while (len--) { +		crc ^= *p++ << 24; +		for (i = 0; i < 8; i++) +			crc = +			    (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : +					  0); +	} +	return crc; +} + +#else				/* Table-based approach */ +u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len) +{ +# if CRC_BE_BITS == 8 +	const u32      *b =(u32 *)p; +	const u32      *tab = crc32table_be; + +# ifdef __LITTLE_ENDIAN +#  define DO_CRC(x) crc = tab[ (crc ^ (x)) & 255 ] ^ (crc>>8) +# else +#  define DO_CRC(x) crc = tab[ ((crc >> 24) ^ (x)) & 255] ^ (crc<<8) +# endif + +	crc = __cpu_to_be32(crc); +	/* Align it */ +	if(unlikely(((long)b)&3 && len)){ +		do { +			u8 *p = (u8 *)b; +			DO_CRC(*p++); +			b = (u32 *)p; +		} while ((--len) && ((long)b)&3 ); +	} +	if(likely(len >= 4)){ +		/* load data 32 bits wide, xor data 32 bits wide. 
*/ +		size_t save_len = len & 3; +		len = len >> 2; +		--b; /* use pre increment below(*++b) for speed */ +		do { +			crc ^= *++b; +			DO_CRC(0); +			DO_CRC(0); +			DO_CRC(0); +			DO_CRC(0); +		} while (--len); +		b++; /* point to next byte(s) */ +		len = save_len; +	} +	/* And the last few bytes */ +	if(len){ +		do { +			u8 *p = (u8 *)b; +			DO_CRC(*p++); +			b = (void *)p; +		} while (--len); +	} +	return __be32_to_cpu(crc); +#undef ENDIAN_SHIFT +#undef DO_CRC + +# elif CRC_BE_BITS == 4 +	while (len--) { +		crc ^= *p++ << 24; +		crc = (crc << 4) ^ crc32table_be[crc >> 28]; +		crc = (crc << 4) ^ crc32table_be[crc >> 28]; +	} +	return crc; +# elif CRC_BE_BITS == 2 +	while (len--) { +		crc ^= *p++ << 24; +		crc = (crc << 2) ^ crc32table_be[crc >> 30]; +		crc = (crc << 2) ^ crc32table_be[crc >> 30]; +		crc = (crc << 2) ^ crc32table_be[crc >> 30]; +		crc = (crc << 2) ^ crc32table_be[crc >> 30]; +	} +	return crc; +# endif +} +#endif + +EXPORT_SYMBOL(crc32_le); +EXPORT_SYMBOL(crc32_be); +#endif +/* + * A brief CRC tutorial. + * + * A CRC is a long-division remainder.  You add the CRC to the message, + * and the whole thing (message+CRC) is a multiple of the given + * CRC polynomial.  To check the CRC, you can either check that the + * CRC matches the recomputed value, *or* you can check that the + * remainder computed on the message+CRC is 0.  This latter approach + * is used by a lot of hardware implementations, and is why so many + * protocols put the end-of-frame flag after the CRC. + * + * It's actually the same long division you learned in school, except that + * - We're working in binary, so the digits are only 0 and 1, and + * - When dividing polynomials, there are no carries.  Rather than add and + *   subtract, we just xor.  Thus, we tend to get a bit sloppy about + *   the difference between adding and subtracting. + * + * A 32-bit CRC polynomial is actually 33 bits long.  
But since it's + * 33 bits long, bit 32 is always going to be set, so usually the CRC + * is written in hex with the most significant bit omitted.  (If you're + * familiar with the IEEE 754 floating-point format, it's the same idea.) + * + * Note that a CRC is computed over a string of *bits*, so you have + * to decide on the endianness of the bits within each byte.  To get + * the best error-detecting properties, this should correspond to the + * order they're actually sent.  For example, standard RS-232 serial is + * little-endian; the most significant bit (sometimes used for parity) + * is sent last.  And when appending a CRC word to a message, you should + * do it in the right order, matching the endianness. + * + * Just like with ordinary division, the remainder is always smaller than + * the divisor (the CRC polynomial) you're dividing by.  Each step of the + * division, you take one more digit (bit) of the dividend and append it + * to the current remainder.  Then you figure out the appropriate multiple + * of the divisor to subtract to being the remainder back into range. + * In binary, it's easy - it has to be either 0 or 1, and to make the + * XOR cancel, it's just a copy of bit 32 of the remainder. + * + * When computing a CRC, we don't care about the quotient, so we can + * throw the quotient bit away, but subtract the appropriate multiple of + * the polynomial from the remainder and we're back to where we started, + * ready to process the next bit. + * + * A big-endian CRC written this way would be coded like: + * for (i = 0; i < input_bits; i++) { + * 	multiple = remainder & 0x80000000 ? CRCPOLY : 0; + * 	remainder = (remainder << 1 | next_input_bit()) ^ multiple; + * } + * Notice how, to get at bit 32 of the shifted remainder, we look + * at bit 31 of the remainder *before* shifting it. + * + * But also notice how the next_input_bit() bits we're shifting into + * the remainder don't actually affect any decision-making until + * 32 bits later.  
Thus, the first 32 cycles of this are pretty boring. + * Also, to add the CRC to a message, we need a 32-bit-long hole for it at + * the end, so we have to add 32 extra cycles shifting in zeros at the + * end of every message, + * + * So the standard trick is to rearrage merging in the next_input_bit() + * until the moment it's needed.  Then the first 32 cycles can be precomputed, + * and merging in the final 32 zero bits to make room for the CRC can be + * skipped entirely. + * This changes the code to: + * for (i = 0; i < input_bits; i++) { + *      remainder ^= next_input_bit() << 31; + * 	multiple = (remainder & 0x80000000) ? CRCPOLY : 0; + * 	remainder = (remainder << 1) ^ multiple; + * } + * With this optimization, the little-endian code is simpler: + * for (i = 0; i < input_bits; i++) { + *      remainder ^= next_input_bit(); + * 	multiple = (remainder & 1) ? CRCPOLY : 0; + * 	remainder = (remainder >> 1) ^ multiple; + * } + * + * Note that the other details of endianness have been hidden in CRCPOLY + * (which must be bit-reversed) and next_input_bit(). + * + * However, as long as next_input_bit is returning the bits in a sensible + * order, we can actually do the merging 8 or more bits at a time rather + * than one bit at a time: + * for (i = 0; i < input_bytes; i++) { + * 	remainder ^= next_input_byte() << 24; + * 	for (j = 0; j < 8; j++) { + * 		multiple = (remainder & 0x80000000) ? CRCPOLY : 0; + * 		remainder = (remainder << 1) ^ multiple; + * 	} + * } + * Or in little-endian: + * for (i = 0; i < input_bytes; i++) { + * 	remainder ^= next_input_byte(); + * 	for (j = 0; j < 8; j++) { + * 		multiple = (remainder & 1) ? CRCPOLY : 0; + * 		remainder = (remainder << 1) ^ multiple; + * 	} + * } + * If the input is a multiple of 32 bits, you can even XOR in a 32-bit + * word at a time and increase the inner loop count to 32. 
+ * + * You can also mix and match the two loop styles, for example doing the + * bulk of a message byte-at-a-time and adding bit-at-a-time processing + * for any fractional bytes at the end. + * + * The only remaining optimization is to the byte-at-a-time table method. + * Here, rather than just shifting one bit of the remainder to decide + * in the correct multiple to subtract, we can shift a byte at a time. + * This produces a 40-bit (rather than a 33-bit) intermediate remainder, + * but again the multiple of the polynomial to subtract depends only on + * the high bits, the high 8 bits in this case. + * + * The multile we need in that case is the low 32 bits of a 40-bit + * value whose high 8 bits are given, and which is a multiple of the + * generator polynomial.  This is simply the CRC-32 of the given + * one-byte message. + * + * Two more details: normally, appending zero bits to a message which + * is already a multiple of a polynomial produces a larger multiple of that + * polynomial.  To enable a CRC to detect this condition, it's common to + * invert the CRC before appending it.  This makes the remainder of the + * message+crc come out not as zero, but some fixed non-zero value. + * + * The same problem applies to zero bits prepended to the message, and + * a similar solution is used.  Instead of starting with a remainder of + * 0, an initial remainder of all ones is used.  As long as you start + * the same way on decoding, it doesn't make a difference. 
+ */ + +#ifdef UNITTEST + +#include <stdlib.h> +#include <stdio.h> + +#ifdef UBI_LINUX				/*Not used at present */ +static void +buf_dump(char const *prefix, unsigned char const *buf, size_t len) +{ +	fputs(prefix, stdout); +	while (len--) +		printf(" %02x", *buf++); +	putchar('\n'); + +} +#endif + +static void bytereverse(unsigned char *buf, size_t len) +{ +	while (len--) { +		unsigned char x = bitrev8(*buf); +		*buf++ = x; +	} +} + +static void random_garbage(unsigned char *buf, size_t len) +{ +	while (len--) +		*buf++ = (unsigned char) random(); +} + +#ifdef UBI_LINUX				/* Not used at present */ +static void store_le(u32 x, unsigned char *buf) +{ +	buf[0] = (unsigned char) x; +	buf[1] = (unsigned char) (x >> 8); +	buf[2] = (unsigned char) (x >> 16); +	buf[3] = (unsigned char) (x >> 24); +} +#endif + +static void store_be(u32 x, unsigned char *buf) +{ +	buf[0] = (unsigned char) (x >> 24); +	buf[1] = (unsigned char) (x >> 16); +	buf[2] = (unsigned char) (x >> 8); +	buf[3] = (unsigned char) x; +} + +/* + * This checks that CRC(buf + CRC(buf)) = 0, and that + * CRC commutes with bit-reversal.  This has the side effect + * of bytewise bit-reversing the input buffer, and returns + * the CRC of the reversed buffer. 
+ */ +static u32 test_step(u32 init, unsigned char *buf, size_t len) +{ +	u32 crc1, crc2; +	size_t i; + +	crc1 = crc32_be(init, buf, len); +	store_be(crc1, buf + len); +	crc2 = crc32_be(init, buf, len + 4); +	if (crc2) +		printf("\nCRC cancellation fail: 0x%08x should be 0\n", +		       crc2); + +	for (i = 0; i <= len + 4; i++) { +		crc2 = crc32_be(init, buf, i); +		crc2 = crc32_be(crc2, buf + i, len + 4 - i); +		if (crc2) +			printf("\nCRC split fail: 0x%08x\n", crc2); +	} + +	/* Now swap it around for the other test */ + +	bytereverse(buf, len + 4); +	init = bitrev32(init); +	crc2 = bitrev32(crc1); +	if (crc1 != bitrev32(crc2)) +		printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n", +		       crc1, crc2, bitrev32(crc2)); +	crc1 = crc32_le(init, buf, len); +	if (crc1 != crc2) +		printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1, +		       crc2); +	crc2 = crc32_le(init, buf, len + 4); +	if (crc2) +		printf("\nCRC cancellation fail: 0x%08x should be 0\n", +		       crc2); + +	for (i = 0; i <= len + 4; i++) { +		crc2 = crc32_le(init, buf, i); +		crc2 = crc32_le(crc2, buf + i, len + 4 - i); +		if (crc2) +			printf("\nCRC split fail: 0x%08x\n", crc2); +	} + +	return crc1; +} + +#define SIZE 64 +#define INIT1 0 +#define INIT2 0 + +int main(void) +{ +	unsigned char buf1[SIZE + 4]; +	unsigned char buf2[SIZE + 4]; +	unsigned char buf3[SIZE + 4]; +	int i, j; +	u32 crc1, crc2, crc3; + +	for (i = 0; i <= SIZE; i++) { +		printf("\rTesting length %d...", i); +		fflush(stdout); +		random_garbage(buf1, i); +		random_garbage(buf2, i); +		for (j = 0; j < i; j++) +			buf3[j] = buf1[j] ^ buf2[j]; + +		crc1 = test_step(INIT1, buf1, i); +		crc2 = test_step(INIT2, buf2, i); +		/* Now check that CRC(buf1 ^ buf2) = CRC(buf1) ^ CRC(buf2) */ +		crc3 = test_step(INIT1 ^ INIT2, buf3, i); +		if (crc3 != (crc1 ^ crc2)) +			printf("CRC XOR fail: 0x%08x != 0x%08x ^ 0x%08x\n", +			       crc3, crc1, crc2); +	} +	printf("\nAll test complete.  
No failures expected.\n"); +	return 0; +} + +#endif				/* UNITTEST */ diff --git a/roms/u-boot/drivers/mtd/ubi/crc32defs.h b/roms/u-boot/drivers/mtd/ubi/crc32defs.h new file mode 100644 index 00000000..f5a54017 --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/crc32defs.h @@ -0,0 +1,32 @@ +/* + * There are multiple 16-bit CRC polynomials in common use, but this is + * *the* standard CRC-32 polynomial, first popularized by Ethernet. + * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0 + */ +#define CRCPOLY_LE 0xedb88320 +#define CRCPOLY_BE 0x04c11db7 + +/* How many bits at a time to use.  Requires a table of 4<<CRC_xx_BITS bytes. */ +/* For less performance-sensitive, use 4 */ +#ifndef CRC_LE_BITS +# define CRC_LE_BITS 8 +#endif +#ifndef CRC_BE_BITS +# define CRC_BE_BITS 8 +#endif + +/* + * Little-endian CRC computation.  Used with serial bit streams sent + * lsbit-first.  Be sure to use cpu_to_le32() to append the computed CRC. + */ +#if CRC_LE_BITS > 8 || CRC_LE_BITS < 1 || CRC_LE_BITS & CRC_LE_BITS-1 +# error CRC_LE_BITS must be a power of 2 between 1 and 8 +#endif + +/* + * Big-endian CRC computation.  Used with serial bit streams sent + * msbit-first.  Be sure to use cpu_to_be32() to append the computed CRC. 
+ */ +#if CRC_BE_BITS > 8 || CRC_BE_BITS < 1 || CRC_BE_BITS & CRC_BE_BITS-1 +# error CRC_BE_BITS must be a power of 2 between 1 and 8 +#endif diff --git a/roms/u-boot/drivers/mtd/ubi/crc32table.h b/roms/u-boot/drivers/mtd/ubi/crc32table.h new file mode 100644 index 00000000..0438af43 --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/crc32table.h @@ -0,0 +1,136 @@ +/* this file is generated - do not edit */ + +static const u32 crc32table_le[] = { +tole(0x00000000L), tole(0x77073096L), tole(0xee0e612cL), tole(0x990951baL), +tole(0x076dc419L), tole(0x706af48fL), tole(0xe963a535L), tole(0x9e6495a3L), +tole(0x0edb8832L), tole(0x79dcb8a4L), tole(0xe0d5e91eL), tole(0x97d2d988L), +tole(0x09b64c2bL), tole(0x7eb17cbdL), tole(0xe7b82d07L), tole(0x90bf1d91L), +tole(0x1db71064L), tole(0x6ab020f2L), tole(0xf3b97148L), tole(0x84be41deL), +tole(0x1adad47dL), tole(0x6ddde4ebL), tole(0xf4d4b551L), tole(0x83d385c7L), +tole(0x136c9856L), tole(0x646ba8c0L), tole(0xfd62f97aL), tole(0x8a65c9ecL), +tole(0x14015c4fL), tole(0x63066cd9L), tole(0xfa0f3d63L), tole(0x8d080df5L), +tole(0x3b6e20c8L), tole(0x4c69105eL), tole(0xd56041e4L), tole(0xa2677172L), +tole(0x3c03e4d1L), tole(0x4b04d447L), tole(0xd20d85fdL), tole(0xa50ab56bL), +tole(0x35b5a8faL), tole(0x42b2986cL), tole(0xdbbbc9d6L), tole(0xacbcf940L), +tole(0x32d86ce3L), tole(0x45df5c75L), tole(0xdcd60dcfL), tole(0xabd13d59L), +tole(0x26d930acL), tole(0x51de003aL), tole(0xc8d75180L), tole(0xbfd06116L), +tole(0x21b4f4b5L), tole(0x56b3c423L), tole(0xcfba9599L), tole(0xb8bda50fL), +tole(0x2802b89eL), tole(0x5f058808L), tole(0xc60cd9b2L), tole(0xb10be924L), +tole(0x2f6f7c87L), tole(0x58684c11L), tole(0xc1611dabL), tole(0xb6662d3dL), +tole(0x76dc4190L), tole(0x01db7106L), tole(0x98d220bcL), tole(0xefd5102aL), +tole(0x71b18589L), tole(0x06b6b51fL), tole(0x9fbfe4a5L), tole(0xe8b8d433L), +tole(0x7807c9a2L), tole(0x0f00f934L), tole(0x9609a88eL), tole(0xe10e9818L), +tole(0x7f6a0dbbL), tole(0x086d3d2dL), tole(0x91646c97L), tole(0xe6635c01L), 
+tole(0x6b6b51f4L), tole(0x1c6c6162L), tole(0x856530d8L), tole(0xf262004eL), +tole(0x6c0695edL), tole(0x1b01a57bL), tole(0x8208f4c1L), tole(0xf50fc457L), +tole(0x65b0d9c6L), tole(0x12b7e950L), tole(0x8bbeb8eaL), tole(0xfcb9887cL), +tole(0x62dd1ddfL), tole(0x15da2d49L), tole(0x8cd37cf3L), tole(0xfbd44c65L), +tole(0x4db26158L), tole(0x3ab551ceL), tole(0xa3bc0074L), tole(0xd4bb30e2L), +tole(0x4adfa541L), tole(0x3dd895d7L), tole(0xa4d1c46dL), tole(0xd3d6f4fbL), +tole(0x4369e96aL), tole(0x346ed9fcL), tole(0xad678846L), tole(0xda60b8d0L), +tole(0x44042d73L), tole(0x33031de5L), tole(0xaa0a4c5fL), tole(0xdd0d7cc9L), +tole(0x5005713cL), tole(0x270241aaL), tole(0xbe0b1010L), tole(0xc90c2086L), +tole(0x5768b525L), tole(0x206f85b3L), tole(0xb966d409L), tole(0xce61e49fL), +tole(0x5edef90eL), tole(0x29d9c998L), tole(0xb0d09822L), tole(0xc7d7a8b4L), +tole(0x59b33d17L), tole(0x2eb40d81L), tole(0xb7bd5c3bL), tole(0xc0ba6cadL), +tole(0xedb88320L), tole(0x9abfb3b6L), tole(0x03b6e20cL), tole(0x74b1d29aL), +tole(0xead54739L), tole(0x9dd277afL), tole(0x04db2615L), tole(0x73dc1683L), +tole(0xe3630b12L), tole(0x94643b84L), tole(0x0d6d6a3eL), tole(0x7a6a5aa8L), +tole(0xe40ecf0bL), tole(0x9309ff9dL), tole(0x0a00ae27L), tole(0x7d079eb1L), +tole(0xf00f9344L), tole(0x8708a3d2L), tole(0x1e01f268L), tole(0x6906c2feL), +tole(0xf762575dL), tole(0x806567cbL), tole(0x196c3671L), tole(0x6e6b06e7L), +tole(0xfed41b76L), tole(0x89d32be0L), tole(0x10da7a5aL), tole(0x67dd4accL), +tole(0xf9b9df6fL), tole(0x8ebeeff9L), tole(0x17b7be43L), tole(0x60b08ed5L), +tole(0xd6d6a3e8L), tole(0xa1d1937eL), tole(0x38d8c2c4L), tole(0x4fdff252L), +tole(0xd1bb67f1L), tole(0xa6bc5767L), tole(0x3fb506ddL), tole(0x48b2364bL), +tole(0xd80d2bdaL), tole(0xaf0a1b4cL), tole(0x36034af6L), tole(0x41047a60L), +tole(0xdf60efc3L), tole(0xa867df55L), tole(0x316e8eefL), tole(0x4669be79L), +tole(0xcb61b38cL), tole(0xbc66831aL), tole(0x256fd2a0L), tole(0x5268e236L), +tole(0xcc0c7795L), tole(0xbb0b4703L), tole(0x220216b9L), 
tole(0x5505262fL), +tole(0xc5ba3bbeL), tole(0xb2bd0b28L), tole(0x2bb45a92L), tole(0x5cb36a04L), +tole(0xc2d7ffa7L), tole(0xb5d0cf31L), tole(0x2cd99e8bL), tole(0x5bdeae1dL), +tole(0x9b64c2b0L), tole(0xec63f226L), tole(0x756aa39cL), tole(0x026d930aL), +tole(0x9c0906a9L), tole(0xeb0e363fL), tole(0x72076785L), tole(0x05005713L), +tole(0x95bf4a82L), tole(0xe2b87a14L), tole(0x7bb12baeL), tole(0x0cb61b38L), +tole(0x92d28e9bL), tole(0xe5d5be0dL), tole(0x7cdcefb7L), tole(0x0bdbdf21L), +tole(0x86d3d2d4L), tole(0xf1d4e242L), tole(0x68ddb3f8L), tole(0x1fda836eL), +tole(0x81be16cdL), tole(0xf6b9265bL), tole(0x6fb077e1L), tole(0x18b74777L), +tole(0x88085ae6L), tole(0xff0f6a70L), tole(0x66063bcaL), tole(0x11010b5cL), +tole(0x8f659effL), tole(0xf862ae69L), tole(0x616bffd3L), tole(0x166ccf45L), +tole(0xa00ae278L), tole(0xd70dd2eeL), tole(0x4e048354L), tole(0x3903b3c2L), +tole(0xa7672661L), tole(0xd06016f7L), tole(0x4969474dL), tole(0x3e6e77dbL), +tole(0xaed16a4aL), tole(0xd9d65adcL), tole(0x40df0b66L), tole(0x37d83bf0L), +tole(0xa9bcae53L), tole(0xdebb9ec5L), tole(0x47b2cf7fL), tole(0x30b5ffe9L), +tole(0xbdbdf21cL), tole(0xcabac28aL), tole(0x53b39330L), tole(0x24b4a3a6L), +tole(0xbad03605L), tole(0xcdd70693L), tole(0x54de5729L), tole(0x23d967bfL), +tole(0xb3667a2eL), tole(0xc4614ab8L), tole(0x5d681b02L), tole(0x2a6f2b94L), +tole(0xb40bbe37L), tole(0xc30c8ea1L), tole(0x5a05df1bL), tole(0x2d02ef8dL) +}; +#ifdef UBI_LINUX +static const u32 crc32table_be[] = { +tobe(0x00000000L), tobe(0x04c11db7L), tobe(0x09823b6eL), tobe(0x0d4326d9L), +tobe(0x130476dcL), tobe(0x17c56b6bL), tobe(0x1a864db2L), tobe(0x1e475005L), +tobe(0x2608edb8L), tobe(0x22c9f00fL), tobe(0x2f8ad6d6L), tobe(0x2b4bcb61L), +tobe(0x350c9b64L), tobe(0x31cd86d3L), tobe(0x3c8ea00aL), tobe(0x384fbdbdL), +tobe(0x4c11db70L), tobe(0x48d0c6c7L), tobe(0x4593e01eL), tobe(0x4152fda9L), +tobe(0x5f15adacL), tobe(0x5bd4b01bL), tobe(0x569796c2L), tobe(0x52568b75L), +tobe(0x6a1936c8L), tobe(0x6ed82b7fL), tobe(0x639b0da6L), 
tobe(0x675a1011L), +tobe(0x791d4014L), tobe(0x7ddc5da3L), tobe(0x709f7b7aL), tobe(0x745e66cdL), +tobe(0x9823b6e0L), tobe(0x9ce2ab57L), tobe(0x91a18d8eL), tobe(0x95609039L), +tobe(0x8b27c03cL), tobe(0x8fe6dd8bL), tobe(0x82a5fb52L), tobe(0x8664e6e5L), +tobe(0xbe2b5b58L), tobe(0xbaea46efL), tobe(0xb7a96036L), tobe(0xb3687d81L), +tobe(0xad2f2d84L), tobe(0xa9ee3033L), tobe(0xa4ad16eaL), tobe(0xa06c0b5dL), +tobe(0xd4326d90L), tobe(0xd0f37027L), tobe(0xddb056feL), tobe(0xd9714b49L), +tobe(0xc7361b4cL), tobe(0xc3f706fbL), tobe(0xceb42022L), tobe(0xca753d95L), +tobe(0xf23a8028L), tobe(0xf6fb9d9fL), tobe(0xfbb8bb46L), tobe(0xff79a6f1L), +tobe(0xe13ef6f4L), tobe(0xe5ffeb43L), tobe(0xe8bccd9aL), tobe(0xec7dd02dL), +tobe(0x34867077L), tobe(0x30476dc0L), tobe(0x3d044b19L), tobe(0x39c556aeL), +tobe(0x278206abL), tobe(0x23431b1cL), tobe(0x2e003dc5L), tobe(0x2ac12072L), +tobe(0x128e9dcfL), tobe(0x164f8078L), tobe(0x1b0ca6a1L), tobe(0x1fcdbb16L), +tobe(0x018aeb13L), tobe(0x054bf6a4L), tobe(0x0808d07dL), tobe(0x0cc9cdcaL), +tobe(0x7897ab07L), tobe(0x7c56b6b0L), tobe(0x71159069L), tobe(0x75d48ddeL), +tobe(0x6b93dddbL), tobe(0x6f52c06cL), tobe(0x6211e6b5L), tobe(0x66d0fb02L), +tobe(0x5e9f46bfL), tobe(0x5a5e5b08L), tobe(0x571d7dd1L), tobe(0x53dc6066L), +tobe(0x4d9b3063L), tobe(0x495a2dd4L), tobe(0x44190b0dL), tobe(0x40d816baL), +tobe(0xaca5c697L), tobe(0xa864db20L), tobe(0xa527fdf9L), tobe(0xa1e6e04eL), +tobe(0xbfa1b04bL), tobe(0xbb60adfcL), tobe(0xb6238b25L), tobe(0xb2e29692L), +tobe(0x8aad2b2fL), tobe(0x8e6c3698L), tobe(0x832f1041L), tobe(0x87ee0df6L), +tobe(0x99a95df3L), tobe(0x9d684044L), tobe(0x902b669dL), tobe(0x94ea7b2aL), +tobe(0xe0b41de7L), tobe(0xe4750050L), tobe(0xe9362689L), tobe(0xedf73b3eL), +tobe(0xf3b06b3bL), tobe(0xf771768cL), tobe(0xfa325055L), tobe(0xfef34de2L), +tobe(0xc6bcf05fL), tobe(0xc27dede8L), tobe(0xcf3ecb31L), tobe(0xcbffd686L), +tobe(0xd5b88683L), tobe(0xd1799b34L), tobe(0xdc3abdedL), tobe(0xd8fba05aL), +tobe(0x690ce0eeL), tobe(0x6dcdfd59L), 
tobe(0x608edb80L), tobe(0x644fc637L), +tobe(0x7a089632L), tobe(0x7ec98b85L), tobe(0x738aad5cL), tobe(0x774bb0ebL), +tobe(0x4f040d56L), tobe(0x4bc510e1L), tobe(0x46863638L), tobe(0x42472b8fL), +tobe(0x5c007b8aL), tobe(0x58c1663dL), tobe(0x558240e4L), tobe(0x51435d53L), +tobe(0x251d3b9eL), tobe(0x21dc2629L), tobe(0x2c9f00f0L), tobe(0x285e1d47L), +tobe(0x36194d42L), tobe(0x32d850f5L), tobe(0x3f9b762cL), tobe(0x3b5a6b9bL), +tobe(0x0315d626L), tobe(0x07d4cb91L), tobe(0x0a97ed48L), tobe(0x0e56f0ffL), +tobe(0x1011a0faL), tobe(0x14d0bd4dL), tobe(0x19939b94L), tobe(0x1d528623L), +tobe(0xf12f560eL), tobe(0xf5ee4bb9L), tobe(0xf8ad6d60L), tobe(0xfc6c70d7L), +tobe(0xe22b20d2L), tobe(0xe6ea3d65L), tobe(0xeba91bbcL), tobe(0xef68060bL), +tobe(0xd727bbb6L), tobe(0xd3e6a601L), tobe(0xdea580d8L), tobe(0xda649d6fL), +tobe(0xc423cd6aL), tobe(0xc0e2d0ddL), tobe(0xcda1f604L), tobe(0xc960ebb3L), +tobe(0xbd3e8d7eL), tobe(0xb9ff90c9L), tobe(0xb4bcb610L), tobe(0xb07daba7L), +tobe(0xae3afba2L), tobe(0xaafbe615L), tobe(0xa7b8c0ccL), tobe(0xa379dd7bL), +tobe(0x9b3660c6L), tobe(0x9ff77d71L), tobe(0x92b45ba8L), tobe(0x9675461fL), +tobe(0x8832161aL), tobe(0x8cf30badL), tobe(0x81b02d74L), tobe(0x857130c3L), +tobe(0x5d8a9099L), tobe(0x594b8d2eL), tobe(0x5408abf7L), tobe(0x50c9b640L), +tobe(0x4e8ee645L), tobe(0x4a4ffbf2L), tobe(0x470cdd2bL), tobe(0x43cdc09cL), +tobe(0x7b827d21L), tobe(0x7f436096L), tobe(0x7200464fL), tobe(0x76c15bf8L), +tobe(0x68860bfdL), tobe(0x6c47164aL), tobe(0x61043093L), tobe(0x65c52d24L), +tobe(0x119b4be9L), tobe(0x155a565eL), tobe(0x18197087L), tobe(0x1cd86d30L), +tobe(0x029f3d35L), tobe(0x065e2082L), tobe(0x0b1d065bL), tobe(0x0fdc1becL), +tobe(0x3793a651L), tobe(0x3352bbe6L), tobe(0x3e119d3fL), tobe(0x3ad08088L), +tobe(0x2497d08dL), tobe(0x2056cd3aL), tobe(0x2d15ebe3L), tobe(0x29d4f654L), +tobe(0xc5a92679L), tobe(0xc1683bceL), tobe(0xcc2b1d17L), tobe(0xc8ea00a0L), +tobe(0xd6ad50a5L), tobe(0xd26c4d12L), tobe(0xdf2f6bcbL), tobe(0xdbee767cL), +tobe(0xe3a1cbc1L), 
tobe(0xe760d676L), tobe(0xea23f0afL), tobe(0xeee2ed18L), +tobe(0xf0a5bd1dL), tobe(0xf464a0aaL), tobe(0xf9278673L), tobe(0xfde69bc4L), +tobe(0x89b8fd09L), tobe(0x8d79e0beL), tobe(0x803ac667L), tobe(0x84fbdbd0L), +tobe(0x9abc8bd5L), tobe(0x9e7d9662L), tobe(0x933eb0bbL), tobe(0x97ffad0cL), +tobe(0xafb010b1L), tobe(0xab710d06L), tobe(0xa6322bdfL), tobe(0xa2f33668L), +tobe(0xbcb4666dL), tobe(0xb8757bdaL), tobe(0xb5365d03L), tobe(0xb1f740b4L) +}; +#endif diff --git a/roms/u-boot/drivers/mtd/ubi/debug.c b/roms/u-boot/drivers/mtd/ubi/debug.c new file mode 100644 index 00000000..6c22301d --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/debug.c @@ -0,0 +1,180 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * Here we keep all the UBI debugging stuff which should normally be disabled + * and compiled-out, but it is extremely helpful when hunting bugs or doing big + * changes. + */ +#include <ubi_uboot.h> + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG + +#include "ubi.h" + +/** + * ubi_dbg_dump_ec_hdr - dump an erase counter header. + * @ec_hdr: the erase counter header to dump + */ +void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) +{ +	dbg_msg("erase counter header dump:"); +	dbg_msg("magic          %#08x", be32_to_cpu(ec_hdr->magic)); +	dbg_msg("version        %d",    (int)ec_hdr->version); +	dbg_msg("ec             %llu",  (long long)be64_to_cpu(ec_hdr->ec)); +	dbg_msg("vid_hdr_offset %d",    be32_to_cpu(ec_hdr->vid_hdr_offset)); +	dbg_msg("data_offset    %d",    be32_to_cpu(ec_hdr->data_offset)); +	dbg_msg("hdr_crc        %#08x", be32_to_cpu(ec_hdr->hdr_crc)); +	dbg_msg("erase counter header hexdump:"); +	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, +		       ec_hdr, UBI_EC_HDR_SIZE, 1); +} + +/** + * ubi_dbg_dump_vid_hdr - dump a volume identifier header. 
+ * @vid_hdr: the volume identifier header to dump + */ +void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) +{ +	dbg_msg("volume identifier header dump:"); +	dbg_msg("magic     %08x", be32_to_cpu(vid_hdr->magic)); +	dbg_msg("version   %d",   (int)vid_hdr->version); +	dbg_msg("vol_type  %d",   (int)vid_hdr->vol_type); +	dbg_msg("copy_flag %d",   (int)vid_hdr->copy_flag); +	dbg_msg("compat    %d",   (int)vid_hdr->compat); +	dbg_msg("vol_id    %d",   be32_to_cpu(vid_hdr->vol_id)); +	dbg_msg("lnum      %d",   be32_to_cpu(vid_hdr->lnum)); +	dbg_msg("leb_ver   %u",   be32_to_cpu(vid_hdr->leb_ver)); +	dbg_msg("data_size %d",   be32_to_cpu(vid_hdr->data_size)); +	dbg_msg("used_ebs  %d",   be32_to_cpu(vid_hdr->used_ebs)); +	dbg_msg("data_pad  %d",   be32_to_cpu(vid_hdr->data_pad)); +	dbg_msg("sqnum     %llu", +		(unsigned long long)be64_to_cpu(vid_hdr->sqnum)); +	dbg_msg("hdr_crc   %08x", be32_to_cpu(vid_hdr->hdr_crc)); +	dbg_msg("volume identifier header hexdump:"); +} + +/** + * ubi_dbg_dump_vol_info- dump volume information. 
+ * @vol: UBI volume description object + */ +void ubi_dbg_dump_vol_info(const struct ubi_volume *vol) +{ +	dbg_msg("volume information dump:"); +	dbg_msg("vol_id          %d", vol->vol_id); +	dbg_msg("reserved_pebs   %d", vol->reserved_pebs); +	dbg_msg("alignment       %d", vol->alignment); +	dbg_msg("data_pad        %d", vol->data_pad); +	dbg_msg("vol_type        %d", vol->vol_type); +	dbg_msg("name_len        %d", vol->name_len); +	dbg_msg("usable_leb_size %d", vol->usable_leb_size); +	dbg_msg("used_ebs        %d", vol->used_ebs); +	dbg_msg("used_bytes      %lld", vol->used_bytes); +	dbg_msg("last_eb_bytes   %d", vol->last_eb_bytes); +	dbg_msg("corrupted       %d", vol->corrupted); +	dbg_msg("upd_marker      %d", vol->upd_marker); + +	if (vol->name_len <= UBI_VOL_NAME_MAX && +	    strnlen(vol->name, vol->name_len + 1) == vol->name_len) { +		dbg_msg("name            %s", vol->name); +	} else { +		dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c", +			vol->name[0], vol->name[1], vol->name[2], +			vol->name[3], vol->name[4]); +	} +} + +/** + * ubi_dbg_dump_vtbl_record - dump a &struct ubi_vtbl_record object. 
+ * @r: the object to dump + * @idx: volume table index + */ +void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) +{ +	int name_len = be16_to_cpu(r->name_len); + +	dbg_msg("volume table record %d dump:", idx); +	dbg_msg("reserved_pebs   %d", be32_to_cpu(r->reserved_pebs)); +	dbg_msg("alignment       %d", be32_to_cpu(r->alignment)); +	dbg_msg("data_pad        %d", be32_to_cpu(r->data_pad)); +	dbg_msg("vol_type        %d", (int)r->vol_type); +	dbg_msg("upd_marker      %d", (int)r->upd_marker); +	dbg_msg("name_len        %d", name_len); + +	if (r->name[0] == '\0') { +		dbg_msg("name            NULL"); +		return; +	} + +	if (name_len <= UBI_VOL_NAME_MAX && +	    strnlen(&r->name[0], name_len + 1) == name_len) { +		dbg_msg("name            %s", &r->name[0]); +	} else { +		dbg_msg("1st 5 characters of the name: %c%c%c%c%c", +			r->name[0], r->name[1], r->name[2], r->name[3], +			r->name[4]); +	} +	dbg_msg("crc             %#08x", be32_to_cpu(r->crc)); +} + +/** + * ubi_dbg_dump_sv - dump a &struct ubi_scan_volume object. + * @sv: the object to dump + */ +void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) +{ +	dbg_msg("volume scanning information dump:"); +	dbg_msg("vol_id         %d", sv->vol_id); +	dbg_msg("highest_lnum   %d", sv->highest_lnum); +	dbg_msg("leb_count      %d", sv->leb_count); +	dbg_msg("compat         %d", sv->compat); +	dbg_msg("vol_type       %d", sv->vol_type); +	dbg_msg("used_ebs       %d", sv->used_ebs); +	dbg_msg("last_data_size %d", sv->last_data_size); +	dbg_msg("data_pad       %d", sv->data_pad); +} + +/** + * ubi_dbg_dump_seb - dump a &struct ubi_scan_leb object. 
+ * @seb: the object to dump + * @type: object type: 0 - not corrupted, 1 - corrupted + */ +void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type) +{ +	dbg_msg("eraseblock scanning information dump:"); +	dbg_msg("ec       %d", seb->ec); +	dbg_msg("pnum     %d", seb->pnum); +	if (type == 0) { +		dbg_msg("lnum     %d", seb->lnum); +		dbg_msg("scrub    %d", seb->scrub); +		dbg_msg("sqnum    %llu", seb->sqnum); +		dbg_msg("leb_ver  %u", seb->leb_ver); +	} +} + +/** + * ubi_dbg_dump_mkvol_req - dump a &struct ubi_mkvol_req object. + * @req: the object to dump + */ +void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) +{ +	char nm[17]; + +	dbg_msg("volume creation request dump:"); +	dbg_msg("vol_id    %d",   req->vol_id); +	dbg_msg("alignment %d",   req->alignment); +	dbg_msg("bytes     %lld", (long long)req->bytes); +	dbg_msg("vol_type  %d",   req->vol_type); +	dbg_msg("name_len  %d",   req->name_len); + +	memcpy(nm, req->name, 16); +	nm[16] = 0; +	dbg_msg("the 1st 16 characters of the name: %s", nm); +} + +#endif /* CONFIG_MTD_UBI_DEBUG_MSG */ diff --git a/roms/u-boot/drivers/mtd/ubi/debug.h b/roms/u-boot/drivers/mtd/ubi/debug.h new file mode 100644 index 00000000..222b2b8a --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/debug.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +#ifndef __UBI_DEBUG_H__ +#define __UBI_DEBUG_H__ + +#ifdef CONFIG_MTD_UBI_DEBUG +#ifdef UBI_LINUX +#include <linux/random.h> +#endif + +#define ubi_assert(expr)  BUG_ON(!(expr)) +#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) +#else +#define ubi_assert(expr)  ({}) +#define dbg_err(fmt, ...) ({}) +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT +#define DBG_DISABLE_BGT 1 +#else +#define DBG_DISABLE_BGT 0 +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG +/* Generic debugging message */ +#define dbg_msg(fmt, ...)                                    
\ +	printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", \ +	       __FUNCTION__, ##__VA_ARGS__) + +#define ubi_dbg_dump_stack() dump_stack() + +struct ubi_ec_hdr; +struct ubi_vid_hdr; +struct ubi_volume; +struct ubi_vtbl_record; +struct ubi_scan_volume; +struct ubi_scan_leb; +struct ubi_mkvol_req; + +void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr); +void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr); +void ubi_dbg_dump_vol_info(const struct ubi_volume *vol); +void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx); +void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv); +void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); +void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); + +#else + +#define dbg_msg(fmt, ...)    ({}) +#define ubi_dbg_dump_stack() ({}) +#define ubi_dbg_dump_ec_hdr(ec_hdr)      ({}) +#define ubi_dbg_dump_vid_hdr(vid_hdr)    ({}) +#define ubi_dbg_dump_vol_info(vol)       ({}) +#define ubi_dbg_dump_vtbl_record(r, idx) ({}) +#define ubi_dbg_dump_sv(sv)              ({}) +#define ubi_dbg_dump_seb(seb, type)      ({}) +#define ubi_dbg_dump_mkvol_req(req)      ({}) + +#endif /* CONFIG_MTD_UBI_DEBUG_MSG */ + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA +/* Messages from the eraseblock association unit */ +#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) +#else +#define dbg_eba(fmt, ...) ({}) +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL +/* Messages from the wear-leveling unit */ +#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) +#else +#define dbg_wl(fmt, ...) ({}) +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO +/* Messages from the input/output unit */ +#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) +#else +#define dbg_io(fmt, ...) ({}) +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD +/* Initialization and build messages */ +#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) +#else +#define dbg_bld(fmt, ...) 
({}) +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS +/** + * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip. + * + * Returns non-zero if a bit-flip should be emulated, otherwise returns zero. + */ +static inline int ubi_dbg_is_bitflip(void) +{ +	return !(random32() % 200); +} +#else +#define ubi_dbg_is_bitflip() 0 +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES +/** + * ubi_dbg_is_write_failure - if it is time to emulate a write failure. + * + * Returns non-zero if a write failure should be emulated, otherwise returns + * zero. + */ +static inline int ubi_dbg_is_write_failure(void) +{ +	return !(random32() % 500); +} +#else +#define ubi_dbg_is_write_failure() 0 +#endif + +#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES +/** + * ubi_dbg_is_erase_failure - if its time to emulate an erase failure. + * + * Returns non-zero if an erase failure should be emulated, otherwise returns + * zero. + */ +static inline int ubi_dbg_is_erase_failure(void) +{ +		return !(random32() % 400); +} +#else +#define ubi_dbg_is_erase_failure() 0 +#endif + +#endif /* !__UBI_DEBUG_H__ */ diff --git a/roms/u-boot/drivers/mtd/ubi/eba.c b/roms/u-boot/drivers/mtd/ubi/eba.c new file mode 100644 index 00000000..7d27edae --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/eba.c @@ -0,0 +1,1244 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * The UBI Eraseblock Association (EBA) unit. + * + * This unit is responsible for I/O to/from logical eraseblock. + * + * Although in this implementation the EBA table is fully kept and managed in + * RAM, which assumes poor scalability, it might be (partially) maintained on + * flash in future implementations. + * + * The EBA unit implements per-logical eraseblock locking. Before accessing a + * logical eraseblock it is locked for reading or writing. 
The per-logical + * eraseblock locking is implemented by means of the lock tree. The lock tree + * is an RB-tree which refers all the currently locked logical eraseblocks. The + * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by + * (@vol_id, @lnum) pairs. + * + * EBA also maintains the global sequence counter which is incremented each + * time a logical eraseblock is mapped to a physical eraseblock and it is + * stored in the volume identifier header. This means that each VID header has + * a unique sequence number. The sequence number is only increased an we assume + * 64 bits is enough to never overflow. + */ + +#ifdef UBI_LINUX +#include <linux/slab.h> +#include <linux/crc32.h> +#include <linux/err.h> +#endif + +#include <ubi_uboot.h> +#include "ubi.h" + +/* Number of physical eraseblocks reserved for atomic LEB change operation */ +#define EBA_RESERVED_PEBS 1 + +/** + * next_sqnum - get next sequence number. + * @ubi: UBI device description object + * + * This function returns next sequence number to use, which is just the current + * global sequence counter value. It also increases the global sequence + * counter. + */ +static unsigned long long next_sqnum(struct ubi_device *ubi) +{ +	unsigned long long sqnum; + +	spin_lock(&ubi->ltree_lock); +	sqnum = ubi->global_sqnum++; +	spin_unlock(&ubi->ltree_lock); + +	return sqnum; +} + +/** + * ubi_get_compat - get compatibility flags of a volume. + * @ubi: UBI device description object + * @vol_id: volume ID + * + * This function returns compatibility flags for an internal volume. User + * volumes have no compatibility flags, so %0 is returned. + */ +static int ubi_get_compat(const struct ubi_device *ubi, int vol_id) +{ +	if (vol_id == UBI_LAYOUT_VOLUME_ID) +		return UBI_LAYOUT_VOLUME_COMPAT; +	return 0; +} + +/** + * ltree_lookup - look up the lock tree. 
+ * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * + * This function returns a pointer to the corresponding &struct ubi_ltree_entry + * object if the logical eraseblock is locked and %NULL if it is not. + * @ubi->ltree_lock has to be locked. + */ +static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id, +					    int lnum) +{ +	struct rb_node *p; + +	p = ubi->ltree.rb_node; +	while (p) { +		struct ubi_ltree_entry *le; + +		le = rb_entry(p, struct ubi_ltree_entry, rb); + +		if (vol_id < le->vol_id) +			p = p->rb_left; +		else if (vol_id > le->vol_id) +			p = p->rb_right; +		else { +			if (lnum < le->lnum) +				p = p->rb_left; +			else if (lnum > le->lnum) +				p = p->rb_right; +			else +				return le; +		} +	} + +	return NULL; +} + +/** + * ltree_add_entry - add new entry to the lock tree. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * + * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the + * lock tree. If such entry is already there, its usage counter is increased. + * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation + * failed. + */ +static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi, +					       int vol_id, int lnum) +{ +	struct ubi_ltree_entry *le, *le1, *le_free; + +	le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS); +	if (!le) +		return ERR_PTR(-ENOMEM); + +	le->users = 0; +	init_rwsem(&le->mutex); +	le->vol_id = vol_id; +	le->lnum = lnum; + +	spin_lock(&ubi->ltree_lock); +	le1 = ltree_lookup(ubi, vol_id, lnum); + +	if (le1) { +		/* +		 * This logical eraseblock is already locked. The newly +		 * allocated lock entry is not needed. +		 */ +		le_free = le; +		le = le1; +	} else { +		struct rb_node **p, *parent = NULL; + +		/* +		 * No lock entry, add the newly allocated one to the +		 * @ubi->ltree RB-tree. 
+		 */ +		le_free = NULL; + +		p = &ubi->ltree.rb_node; +		while (*p) { +			parent = *p; +			le1 = rb_entry(parent, struct ubi_ltree_entry, rb); + +			if (vol_id < le1->vol_id) +				p = &(*p)->rb_left; +			else if (vol_id > le1->vol_id) +				p = &(*p)->rb_right; +			else { +				ubi_assert(lnum != le1->lnum); +				if (lnum < le1->lnum) +					p = &(*p)->rb_left; +				else +					p = &(*p)->rb_right; +			} +		} + +		rb_link_node(&le->rb, parent, p); +		rb_insert_color(&le->rb, &ubi->ltree); +	} +	le->users += 1; +	spin_unlock(&ubi->ltree_lock); + +	if (le_free) +		kfree(le_free); + +	return le; +} + +/** + * leb_read_lock - lock logical eraseblock for reading. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * + * This function locks a logical eraseblock for reading. Returns zero in case + * of success and a negative error code in case of failure. + */ +static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) +{ +	struct ubi_ltree_entry *le; + +	le = ltree_add_entry(ubi, vol_id, lnum); +	if (IS_ERR(le)) +		return PTR_ERR(le); +	down_read(&le->mutex); +	return 0; +} + +/** + * leb_read_unlock - unlock logical eraseblock. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + */ +static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) +{ +	int _free = 0; +	struct ubi_ltree_entry *le; + +	spin_lock(&ubi->ltree_lock); +	le = ltree_lookup(ubi, vol_id, lnum); +	le->users -= 1; +	ubi_assert(le->users >= 0); +	if (le->users == 0) { +		rb_erase(&le->rb, &ubi->ltree); +		_free = 1; +	} +	spin_unlock(&ubi->ltree_lock); + +	up_read(&le->mutex); +	if (_free) +		kfree(le); +} + +/** + * leb_write_lock - lock logical eraseblock for writing. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * + * This function locks a logical eraseblock for writing. 
Returns zero in case + * of success and a negative error code in case of failure. + */ +static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) +{ +	struct ubi_ltree_entry *le; + +	le = ltree_add_entry(ubi, vol_id, lnum); +	if (IS_ERR(le)) +		return PTR_ERR(le); +	down_write(&le->mutex); +	return 0; +} + +/** + * leb_write_lock - lock logical eraseblock for writing. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + * + * This function locks a logical eraseblock for writing if there is no + * contention and does nothing if there is contention. Returns %0 in case of + * success, %1 in case of contention, and and a negative error code in case of + * failure. + */ +static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) +{ +	int _free; +	struct ubi_ltree_entry *le; + +	le = ltree_add_entry(ubi, vol_id, lnum); +	if (IS_ERR(le)) +		return PTR_ERR(le); +	if (down_write_trylock(&le->mutex)) +		return 0; + +	/* Contention, cancel */ +	spin_lock(&ubi->ltree_lock); +	le->users -= 1; +	ubi_assert(le->users >= 0); +	if (le->users == 0) { +		rb_erase(&le->rb, &ubi->ltree); +		_free = 1; +	} else +		_free = 0; +	spin_unlock(&ubi->ltree_lock); +	if (_free) +		kfree(le); + +	return 1; +} + +/** + * leb_write_unlock - unlock logical eraseblock. + * @ubi: UBI device description object + * @vol_id: volume ID + * @lnum: logical eraseblock number + */ +static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) +{ +	int _free; +	struct ubi_ltree_entry *le; + +	spin_lock(&ubi->ltree_lock); +	le = ltree_lookup(ubi, vol_id, lnum); +	le->users -= 1; +	ubi_assert(le->users >= 0); +	if (le->users == 0) { +		rb_erase(&le->rb, &ubi->ltree); +		_free = 1; +	} else +		_free = 0; +	spin_unlock(&ubi->ltree_lock); + +	up_write(&le->mutex); +	if (_free) +		kfree(le); +} + +/** + * ubi_eba_unmap_leb - un-map logical eraseblock. 
+ * @ubi: UBI device description object + * @vol: volume description object + * @lnum: logical eraseblock number + * + * This function un-maps logical eraseblock @lnum and schedules corresponding + * physical eraseblock for erasure. Returns zero in case of success and a + * negative error code in case of failure. + */ +int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, +		      int lnum) +{ +	int err, pnum, vol_id = vol->vol_id; + +	if (ubi->ro_mode) +		return -EROFS; + +	err = leb_write_lock(ubi, vol_id, lnum); +	if (err) +		return err; + +	pnum = vol->eba_tbl[lnum]; +	if (pnum < 0) +		/* This logical eraseblock is already unmapped */ +		goto out_unlock; + +	dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); + +	vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED; +	err = ubi_wl_put_peb(ubi, pnum, 0); + +out_unlock: +	leb_write_unlock(ubi, vol_id, lnum); +	return err; +} + +/** + * ubi_eba_read_leb - read data. + * @ubi: UBI device description object + * @vol: volume description object + * @lnum: logical eraseblock number + * @buf: buffer to store the read data + * @offset: offset from where to read + * @len: how many bytes to read + * @check: data CRC check flag + * + * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF + * bytes. The @check flag only makes sense for static volumes and forces + * eraseblock data CRC checking. + * + * In case of success this function returns zero. In case of a static volume, + * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be + * returned for any volume type if an ECC error was detected by the MTD device + * driver. Other negative error cored may be returned in case of other errors. 
+ */ +int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, +		     void *buf, int offset, int len, int check) +{ +	int err, pnum, scrub = 0, vol_id = vol->vol_id; +	struct ubi_vid_hdr *vid_hdr; +	uint32_t uninitialized_var(crc); + +	err = leb_read_lock(ubi, vol_id, lnum); +	if (err) +		return err; + +	pnum = vol->eba_tbl[lnum]; +	if (pnum < 0) { +		/* +		 * The logical eraseblock is not mapped, fill the whole buffer +		 * with 0xFF bytes. The exception is static volumes for which +		 * it is an error to read unmapped logical eraseblocks. +		 */ +		dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)", +			len, offset, vol_id, lnum); +		leb_read_unlock(ubi, vol_id, lnum); +		ubi_assert(vol->vol_type != UBI_STATIC_VOLUME); +		memset(buf, 0xFF, len); +		return 0; +	} + +	dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d", +		len, offset, vol_id, lnum, pnum); + +	if (vol->vol_type == UBI_DYNAMIC_VOLUME) +		check = 0; + +retry: +	if (check) { +		vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); +		if (!vid_hdr) { +			err = -ENOMEM; +			goto out_unlock; +		} + +		err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); +		if (err && err != UBI_IO_BITFLIPS) { +			if (err > 0) { +				/* +				 * The header is either absent or corrupted. +				 * The former case means there is a bug - +				 * switch to read-only mode just in case. +				 * The latter case means a real corruption - we +				 * may try to recover data. FIXME: but this is +				 * not implemented. 
+				 */ +				if (err == UBI_IO_BAD_VID_HDR) { +					ubi_warn("bad VID header at PEB %d, LEB" +						 "%d:%d", pnum, vol_id, lnum); +					err = -EBADMSG; +				} else +					ubi_ro_mode(ubi); +			} +			goto out_free; +		} else if (err == UBI_IO_BITFLIPS) +			scrub = 1; + +		ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs)); +		ubi_assert(len == be32_to_cpu(vid_hdr->data_size)); + +		crc = be32_to_cpu(vid_hdr->data_crc); +		ubi_free_vid_hdr(ubi, vid_hdr); +	} + +	err = ubi_io_read_data(ubi, buf, pnum, offset, len); +	if (err) { +		if (err == UBI_IO_BITFLIPS) { +			scrub = 1; +			err = 0; +		} else if (mtd_is_eccerr(err)) { +			if (vol->vol_type == UBI_DYNAMIC_VOLUME) +				goto out_unlock; +			scrub = 1; +			if (!check) { +				ubi_msg("force data checking"); +				check = 1; +				goto retry; +			} +		} else +			goto out_unlock; +	} + +	if (check) { +		uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len); +		if (crc1 != crc) { +			ubi_warn("CRC error: calculated %#08x, must be %#08x", +				 crc1, crc); +			err = -EBADMSG; +			goto out_unlock; +		} +	} + +	if (scrub) +		err = ubi_wl_scrub_peb(ubi, pnum); + +	leb_read_unlock(ubi, vol_id, lnum); +	return err; + +out_free: +	ubi_free_vid_hdr(ubi, vid_hdr); +out_unlock: +	leb_read_unlock(ubi, vol_id, lnum); +	return err; +} + +/** + * recover_peb - recover from write failure. + * @ubi: UBI device description object + * @pnum: the physical eraseblock to recover + * @vol_id: volume ID + * @lnum: logical eraseblock number + * @buf: data which was not written because of the write failure + * @offset: offset of the failed write + * @len: how many bytes should have been written + * + * This function is called in case of a write failure and moves all good data + * from the potentially bad physical eraseblock to a good physical eraseblock. + * This function also writes the data which was not written due to the failure. + * Returns new physical eraseblock number in case of success, and a negative + * error code in case of failure. 
+ */ +static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, +		       const void *buf, int offset, int len) +{ +	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; +	struct ubi_volume *vol = ubi->volumes[idx]; +	struct ubi_vid_hdr *vid_hdr; + +	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); +	if (!vid_hdr) { +		return -ENOMEM; +	} + +	mutex_lock(&ubi->buf_mutex); + +retry: +	new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN); +	if (new_pnum < 0) { +		mutex_unlock(&ubi->buf_mutex); +		ubi_free_vid_hdr(ubi, vid_hdr); +		return new_pnum; +	} + +	ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum); + +	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); +	if (err && err != UBI_IO_BITFLIPS) { +		if (err > 0) +			err = -EIO; +		goto out_put; +	} + +	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); +	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); +	if (err) +		goto write_error; + +	data_size = offset + len; +	memset(ubi->peb_buf1 + offset, 0xFF, len); + +	/* Read everything before the area where the write failure happened */ +	if (offset > 0) { +		err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset); +		if (err && err != UBI_IO_BITFLIPS) +			goto out_put; +	} + +	memcpy(ubi->peb_buf1 + offset, buf, len); + +	err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size); +	if (err) +		goto write_error; + +	mutex_unlock(&ubi->buf_mutex); +	ubi_free_vid_hdr(ubi, vid_hdr); + +	vol->eba_tbl[lnum] = new_pnum; +	ubi_wl_put_peb(ubi, pnum, 1); + +	ubi_msg("data was successfully recovered"); +	return 0; + +out_put: +	mutex_unlock(&ubi->buf_mutex); +	ubi_wl_put_peb(ubi, new_pnum, 1); +	ubi_free_vid_hdr(ubi, vid_hdr); +	return err; + +write_error: +	/* +	 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to +	 * get another one. 
+	 */ +	ubi_warn("failed to write to PEB %d", new_pnum); +	ubi_wl_put_peb(ubi, new_pnum, 1); +	if (++tries > UBI_IO_RETRIES) { +		mutex_unlock(&ubi->buf_mutex); +		ubi_free_vid_hdr(ubi, vid_hdr); +		return err; +	} +	ubi_msg("try again"); +	goto retry; +} + +/** + * ubi_eba_write_leb - write data to dynamic volume. + * @ubi: UBI device description object + * @vol: volume description object + * @lnum: logical eraseblock number + * @buf: the data to write + * @offset: offset within the logical eraseblock where to write + * @len: how many bytes to write + * @dtype: data type + * + * This function writes data to logical eraseblock @lnum of a dynamic volume + * @vol. Returns zero in case of success and a negative error code in case + * of failure. In case of error, it is possible that something was still + * written to the flash media, but may be some garbage. + */ +int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, +		      const void *buf, int offset, int len, int dtype) +{ +	int err, pnum, tries = 0, vol_id = vol->vol_id; +	struct ubi_vid_hdr *vid_hdr; + +	if (ubi->ro_mode) +		return -EROFS; + +	err = leb_write_lock(ubi, vol_id, lnum); +	if (err) +		return err; + +	pnum = vol->eba_tbl[lnum]; +	if (pnum >= 0) { +		dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d", +			len, offset, vol_id, lnum, pnum); + +		err = ubi_io_write_data(ubi, buf, pnum, offset, len); +		if (err) { +			ubi_warn("failed to write data to PEB %d", pnum); +			if (err == -EIO && ubi->bad_allowed) +				err = recover_peb(ubi, pnum, vol_id, lnum, buf, +						  offset, len); +			if (err) +				ubi_ro_mode(ubi); +		} +		leb_write_unlock(ubi, vol_id, lnum); +		return err; +	} + +	/* +	 * The logical eraseblock is not mapped. We have to get a free physical +	 * eraseblock and write the volume identifier header there first. 
+	 */ +	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); +	if (!vid_hdr) { +		leb_write_unlock(ubi, vol_id, lnum); +		return -ENOMEM; +	} + +	vid_hdr->vol_type = UBI_VID_DYNAMIC; +	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); +	vid_hdr->vol_id = cpu_to_be32(vol_id); +	vid_hdr->lnum = cpu_to_be32(lnum); +	vid_hdr->compat = ubi_get_compat(ubi, vol_id); +	vid_hdr->data_pad = cpu_to_be32(vol->data_pad); + +retry: +	pnum = ubi_wl_get_peb(ubi, dtype); +	if (pnum < 0) { +		ubi_free_vid_hdr(ubi, vid_hdr); +		leb_write_unlock(ubi, vol_id, lnum); +		return pnum; +	} + +	dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d", +		len, offset, vol_id, lnum, pnum); + +	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); +	if (err) { +		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", +			 vol_id, lnum, pnum); +		goto write_error; +	} + +	if (len) { +		err = ubi_io_write_data(ubi, buf, pnum, offset, len); +		if (err) { +			ubi_warn("failed to write %d bytes at offset %d of " +				 "LEB %d:%d, PEB %d", len, offset, vol_id, +				 lnum, pnum); +			goto write_error; +		} +	} + +	vol->eba_tbl[lnum] = pnum; + +	leb_write_unlock(ubi, vol_id, lnum); +	ubi_free_vid_hdr(ubi, vid_hdr); +	return 0; + +write_error: +	if (err != -EIO || !ubi->bad_allowed) { +		ubi_ro_mode(ubi); +		leb_write_unlock(ubi, vol_id, lnum); +		ubi_free_vid_hdr(ubi, vid_hdr); +		return err; +	} + +	/* +	 * Fortunately, this is the first write operation to this physical +	 * eraseblock, so just put it and request a new one. We assume that if +	 * this physical eraseblock went bad, the erase code will handle that. +	 */ +	err = ubi_wl_put_peb(ubi, pnum, 1); +	if (err || ++tries > UBI_IO_RETRIES) { +		ubi_ro_mode(ubi); +		leb_write_unlock(ubi, vol_id, lnum); +		ubi_free_vid_hdr(ubi, vid_hdr); +		return err; +	} + +	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); +	ubi_msg("try another PEB"); +	goto retry; +} + +/** + * ubi_eba_write_leb_st - write data to static volume. 
+ * @ubi: UBI device description object + * @vol: volume description object + * @lnum: logical eraseblock number + * @buf: data to write + * @len: how many bytes to write + * @dtype: data type + * @used_ebs: how many logical eraseblocks will this volume contain + * + * This function writes data to logical eraseblock @lnum of static volume + * @vol. The @used_ebs argument should contain total number of logical + * eraseblock in this static volume. + * + * When writing to the last logical eraseblock, the @len argument doesn't have + * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent + * to the real data size, although the @buf buffer has to contain the + * alignment. In all other cases, @len has to be aligned. + * + * It is prohibited to write more then once to logical eraseblocks of static + * volumes. This function returns zero in case of success and a negative error + * code in case of failure. + */ +int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, +			 int lnum, const void *buf, int len, int dtype, +			 int used_ebs) +{ +	int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id; +	struct ubi_vid_hdr *vid_hdr; +	uint32_t crc; + +	if (ubi->ro_mode) +		return -EROFS; + +	if (lnum == used_ebs - 1) +		/* If this is the last LEB @len may be unaligned */ +		len = ALIGN(data_size, ubi->min_io_size); +	else +		ubi_assert(!(len & (ubi->min_io_size - 1))); + +	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); +	if (!vid_hdr) +		return -ENOMEM; + +	err = leb_write_lock(ubi, vol_id, lnum); +	if (err) { +		ubi_free_vid_hdr(ubi, vid_hdr); +		return err; +	} + +	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); +	vid_hdr->vol_id = cpu_to_be32(vol_id); +	vid_hdr->lnum = cpu_to_be32(lnum); +	vid_hdr->compat = ubi_get_compat(ubi, vol_id); +	vid_hdr->data_pad = cpu_to_be32(vol->data_pad); + +	crc = crc32(UBI_CRC32_INIT, buf, data_size); +	vid_hdr->vol_type = UBI_VID_STATIC; +	vid_hdr->data_size = cpu_to_be32(data_size); +	
vid_hdr->used_ebs = cpu_to_be32(used_ebs); +	vid_hdr->data_crc = cpu_to_be32(crc); + +retry: +	pnum = ubi_wl_get_peb(ubi, dtype); +	if (pnum < 0) { +		ubi_free_vid_hdr(ubi, vid_hdr); +		leb_write_unlock(ubi, vol_id, lnum); +		return pnum; +	} + +	dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d", +		len, vol_id, lnum, pnum, used_ebs); + +	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); +	if (err) { +		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", +			 vol_id, lnum, pnum); +		goto write_error; +	} + +	err = ubi_io_write_data(ubi, buf, pnum, 0, len); +	if (err) { +		ubi_warn("failed to write %d bytes of data to PEB %d", +			 len, pnum); +		goto write_error; +	} + +	ubi_assert(vol->eba_tbl[lnum] < 0); +	vol->eba_tbl[lnum] = pnum; + +	leb_write_unlock(ubi, vol_id, lnum); +	ubi_free_vid_hdr(ubi, vid_hdr); +	return 0; + +write_error: +	if (err != -EIO || !ubi->bad_allowed) { +		/* +		 * This flash device does not admit of bad eraseblocks or +		 * something nasty and unexpected happened. Switch to read-only +		 * mode just in case. +		 */ +		ubi_ro_mode(ubi); +		leb_write_unlock(ubi, vol_id, lnum); +		ubi_free_vid_hdr(ubi, vid_hdr); +		return err; +	} + +	err = ubi_wl_put_peb(ubi, pnum, 1); +	if (err || ++tries > UBI_IO_RETRIES) { +		ubi_ro_mode(ubi); +		leb_write_unlock(ubi, vol_id, lnum); +		ubi_free_vid_hdr(ubi, vid_hdr); +		return err; +	} + +	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); +	ubi_msg("try another PEB"); +	goto retry; +} + +/* + * ubi_eba_atomic_leb_change - change logical eraseblock atomically. + * @ubi: UBI device description object + * @vol: volume description object + * @lnum: logical eraseblock number + * @buf: data to write + * @len: how many bytes to write + * @dtype: data type + * + * This function changes the contents of a logical eraseblock atomically. @buf + * has to contain new logical eraseblock data, and @len - the length of the + * data, which has to be aligned. 
This function guarantees that in case of an
 * unclean reboot the old contents is preserved. Returns zero in case of
 * success and a negative error code in case of failure.
 *
 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
 */
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
			      int lnum, const void *buf, int len, int dtype)
{
	int err, pnum, tries = 0, vol_id = vol->vol_id;
	struct ubi_vid_hdr *vid_hdr;
	uint32_t crc;

	if (ubi->ro_mode)
		return -EROFS;

	if (len == 0) {
		/*
		 * Special case when data length is zero. In this case the LEB
		 * has to be unmapped and mapped somewhere else.
		 */
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			return err;
		return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
	}

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	/* Serializes all atomic LEB changes device-wide (one reserved PEB) */
	mutex_lock(&ubi->alc_mutex);
	err = leb_write_lock(ubi, vol_id, lnum);
	if (err)
		goto out_mutex;

	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	vid_hdr->vol_id = cpu_to_be32(vol_id);
	vid_hdr->lnum = cpu_to_be32(lnum);
	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

	crc = crc32(UBI_CRC32_INIT, buf, len);
	vid_hdr->vol_type = UBI_VID_DYNAMIC;
	vid_hdr->data_size = cpu_to_be32(len);
	/* copy_flag marks @data_size/@data_crc as valid for the scanner */
	vid_hdr->copy_flag = 1;
	vid_hdr->data_crc = cpu_to_be32(crc);

retry:
	pnum = ubi_wl_get_peb(ubi, dtype);
	if (pnum < 0) {
		err = pnum;
		goto out_leb_unlock;
	}

	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
		vol_id, lnum, vol->eba_tbl[lnum], pnum);

	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
	if (err) {
		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		goto write_error;
	}

	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
	if (err) {
		ubi_warn("failed to write %d bytes of data to PEB %d",
			 len, pnum);
		goto write_error;
	}

	/*
	 * Only after the new PEB is fully written is the old one returned to
	 * wear-leveling - this is what makes the change atomic across reboots.
	 */
	if (vol->eba_tbl[lnum] >= 0) {
		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
		if (err)
			goto out_leb_unlock;
	}

	vol->eba_tbl[lnum] = pnum;

out_leb_unlock:
	leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
	mutex_unlock(&ubi->alc_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;

write_error:
	if (err != -EIO || !ubi->bad_allowed) {
		/*
		 * This flash device does not admit of bad eraseblocks or
		 * something nasty and unexpected happened. Switch to read-only
		 * mode just in case.
		 */
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	err = ubi_wl_put_peb(ubi, pnum, 1);
	if (err || ++tries > UBI_IO_RETRIES) {
		ubi_ro_mode(ubi);
		goto out_leb_unlock;
	}

	/* A new PEB is about to be used - it needs a fresh sequence number */
	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
	ubi_msg("try another PEB");
	goto retry;
}

/**
 * ubi_eba_copy_leb - copy logical eraseblock.
 * @ubi: UBI device description object
 * @from: physical eraseblock number from where to copy
 * @to: physical eraseblock number where to copy
 * @vid_hdr: VID header of the @from physical eraseblock
 *
 * This function copies logical eraseblock from physical eraseblock @from to
 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
 * function. Returns:
 *   o %0  in case of success;
 *   o %1 if the operation was canceled and should be tried later (e.g.,
 *     because a bit-flip was detected at the target PEB);
 *   o %2 if the volume is being deleted and this LEB should not be moved.
+ */ +int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, +		     struct ubi_vid_hdr *vid_hdr) +{ +	int err, vol_id, lnum, data_size, aldata_size, idx; +	struct ubi_volume *vol; +	uint32_t crc; + +	vol_id = be32_to_cpu(vid_hdr->vol_id); +	lnum = be32_to_cpu(vid_hdr->lnum); + +	dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); + +	if (vid_hdr->vol_type == UBI_VID_STATIC) { +		data_size = be32_to_cpu(vid_hdr->data_size); +		aldata_size = ALIGN(data_size, ubi->min_io_size); +	} else +		data_size = aldata_size = +			    ubi->leb_size - be32_to_cpu(vid_hdr->data_pad); + +	idx = vol_id2idx(ubi, vol_id); +	spin_lock(&ubi->volumes_lock); +	/* +	 * Note, we may race with volume deletion, which means that the volume +	 * this logical eraseblock belongs to might be being deleted. Since the +	 * volume deletion unmaps all the volume's logical eraseblocks, it will +	 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. +	 */ +	vol = ubi->volumes[idx]; +	if (!vol) { +		/* No need to do further work, cancel */ +		dbg_eba("volume %d is being removed, cancel", vol_id); +		spin_unlock(&ubi->volumes_lock); +		return 2; +	} +	spin_unlock(&ubi->volumes_lock); + +	/* +	 * We do not want anybody to write to this logical eraseblock while we +	 * are moving it, so lock it. +	 * +	 * Note, we are using non-waiting locking here, because we cannot sleep +	 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is +	 * unmapping the LEB which is mapped to the PEB we are going to move +	 * (@from). This task locks the LEB and goes sleep in the +	 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are +	 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the +	 * LEB is already locked, we just do not move it and return %1. 
+	 */ +	err = leb_write_trylock(ubi, vol_id, lnum); +	if (err) { +		dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum); +		return err; +	} + +	/* +	 * The LEB might have been put meanwhile, and the task which put it is +	 * probably waiting on @ubi->move_mutex. No need to continue the work, +	 * cancel it. +	 */ +	if (vol->eba_tbl[lnum] != from) { +		dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " +			"PEB %d, cancel", vol_id, lnum, from, +			vol->eba_tbl[lnum]); +		err = 1; +		goto out_unlock_leb; +	} + +	/* +	 * OK, now the LEB is locked and we can safely start moving iy. Since +	 * this function utilizes thie @ubi->peb1_buf buffer which is shared +	 * with some other functions, so lock the buffer by taking the +	 * @ubi->buf_mutex. +	 */ +	mutex_lock(&ubi->buf_mutex); +	dbg_eba("read %d bytes of data", aldata_size); +	err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); +	if (err && err != UBI_IO_BITFLIPS) { +		ubi_warn("error %d while reading data from PEB %d", +			 err, from); +		goto out_unlock_buf; +	} + +	/* +	 * Now we have got to calculate how much data we have to to copy. In +	 * case of a static volume it is fairly easy - the VID header contains +	 * the data size. In case of a dynamic volume it is more difficult - we +	 * have to read the contents, cut 0xFF bytes from the end and copy only +	 * the first part. We must do this to avoid writing 0xFF bytes as it +	 * may have some side-effects. And not only this. It is important not +	 * to include those 0xFFs to CRC because later the they may be filled +	 * by data. +	 */ +	if (vid_hdr->vol_type == UBI_VID_DYNAMIC) +		aldata_size = data_size = +			ubi_calc_data_len(ubi, ubi->peb_buf1, data_size); + +	cond_resched(); +	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size); +	cond_resched(); + +	/* +	 * It may turn out to me that the whole @from physical eraseblock +	 * contains only 0xFF bytes. Then we have to only write the VID header +	 * and do not write any data. 
This also means we should not set +	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc. +	 */ +	if (data_size > 0) { +		vid_hdr->copy_flag = 1; +		vid_hdr->data_size = cpu_to_be32(data_size); +		vid_hdr->data_crc = cpu_to_be32(crc); +	} +	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); + +	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); +	if (err) +		goto out_unlock_buf; + +	cond_resched(); + +	/* Read the VID header back and check if it was written correctly */ +	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); +	if (err) { +		if (err != UBI_IO_BITFLIPS) +			ubi_warn("cannot read VID header back from PEB %d", to); +		else +			err = 1; +		goto out_unlock_buf; +	} + +	if (data_size > 0) { +		err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); +		if (err) +			goto out_unlock_buf; + +		cond_resched(); + +		/* +		 * We've written the data and are going to read it back to make +		 * sure it was written correctly. +		 */ + +		err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size); +		if (err) { +			if (err != UBI_IO_BITFLIPS) +				ubi_warn("cannot read data back from PEB %d", +					 to); +			else +				err = 1; +			goto out_unlock_buf; +		} + +		cond_resched(); + +		if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { +			ubi_warn("read data back from PEB %d - it is different", +				 to); +			goto out_unlock_buf; +		} +	} + +	ubi_assert(vol->eba_tbl[lnum] == from); +	vol->eba_tbl[lnum] = to; + +out_unlock_buf: +	mutex_unlock(&ubi->buf_mutex); +out_unlock_leb: +	leb_write_unlock(ubi, vol_id, lnum); +	return err; +} + +/** + * ubi_eba_init_scan - initialize the EBA unit using scanning information. + * @ubi: UBI device description object + * @si: scanning information + * + * This function returns zero in case of success and a negative error code in + * case of failure. 
+ */ +int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) +{ +	int i, j, err, num_volumes; +	struct ubi_scan_volume *sv; +	struct ubi_volume *vol; +	struct ubi_scan_leb *seb; +	struct rb_node *rb; + +	dbg_eba("initialize EBA unit"); + +	spin_lock_init(&ubi->ltree_lock); +	mutex_init(&ubi->alc_mutex); +	ubi->ltree = RB_ROOT; + +	ubi->global_sqnum = si->max_sqnum + 1; +	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; + +	for (i = 0; i < num_volumes; i++) { +		vol = ubi->volumes[i]; +		if (!vol) +			continue; + +		cond_resched(); + +		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), +				       GFP_KERNEL); +		if (!vol->eba_tbl) { +			err = -ENOMEM; +			goto out_free; +		} + +		for (j = 0; j < vol->reserved_pebs; j++) +			vol->eba_tbl[j] = UBI_LEB_UNMAPPED; + +		sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i)); +		if (!sv) +			continue; + +		ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) { +			if (seb->lnum >= vol->reserved_pebs) +				/* +				 * This may happen in case of an unclean reboot +				 * during re-size. 
+				 */ +				ubi_scan_move_to_list(sv, seb, &si->erase); +			vol->eba_tbl[seb->lnum] = seb->pnum; +		} +	} + +	if (ubi->avail_pebs < EBA_RESERVED_PEBS) { +		ubi_err("no enough physical eraseblocks (%d, need %d)", +			ubi->avail_pebs, EBA_RESERVED_PEBS); +		err = -ENOSPC; +		goto out_free; +	} +	ubi->avail_pebs -= EBA_RESERVED_PEBS; +	ubi->rsvd_pebs += EBA_RESERVED_PEBS; + +	if (ubi->bad_allowed) { +		ubi_calculate_reserved(ubi); + +		if (ubi->avail_pebs < ubi->beb_rsvd_level) { +			/* No enough free physical eraseblocks */ +			ubi->beb_rsvd_pebs = ubi->avail_pebs; +			ubi_warn("cannot reserve enough PEBs for bad PEB " +				 "handling, reserved %d, need %d", +				 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); +		} else +			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level; + +		ubi->avail_pebs -= ubi->beb_rsvd_pebs; +		ubi->rsvd_pebs  += ubi->beb_rsvd_pebs; +	} + +	dbg_eba("EBA unit is initialized"); +	return 0; + +out_free: +	for (i = 0; i < num_volumes; i++) { +		if (!ubi->volumes[i]) +			continue; +		kfree(ubi->volumes[i]->eba_tbl); +	} +	return err; +} + +/** + * ubi_eba_close - close EBA unit. + * @ubi: UBI device description object + */ +void ubi_eba_close(const struct ubi_device *ubi) +{ +	int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; + +	dbg_eba("close EBA unit"); + +	for (i = 0; i < num_volumes; i++) { +		if (!ubi->volumes[i]) +			continue; +		kfree(ubi->volumes[i]->eba_tbl); +	} +} diff --git a/roms/u-boot/drivers/mtd/ubi/io.c b/roms/u-boot/drivers/mtd/ubi/io.c new file mode 100644 index 00000000..960befc6 --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/io.c @@ -0,0 +1,1262 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * Copyright (c) Nokia Corporation, 2006, 2007 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * UBI input/output unit. + * + * This unit provides a uniform way to work with all kinds of the underlying + * MTD devices. 
It also implements handy functions for reading and writing UBI + * headers. + * + * We are trying to have a paranoid mindset and not to trust to what we read + * from the flash media in order to be more secure and robust. So this unit + * validates every single header it reads from the flash media. + * + * Some words about how the eraseblock headers are stored. + * + * The erase counter header is always stored at offset zero. By default, the + * VID header is stored after the EC header at the closest aligned offset + * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID + * header at the closest aligned offset. But this default layout may be + * changed. For example, for different reasons (e.g., optimization) UBI may be + * asked to put the VID header at further offset, and even at an unaligned + * offset. Of course, if the offset of the VID header is unaligned, UBI adds + * proper padding in front of it. Data offset may also be changed but it has to + * be aligned. + * + * About minimal I/O units. In general, UBI assumes flash device model where + * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1, + * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the + * @ubi->mtd->writesize field. But as an exception, UBI admits of using another + * (smaller) minimal I/O unit size for EC and VID headers to make it possible + * to do different optimizations. + * + * This is extremely useful in case of NAND flashes which admit of several + * write operations to one NAND page. In this case UBI can fit EC and VID + * headers at one NAND page. Thus, UBI may use "sub-page" size as the minimal + * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still + * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI + * users. 
 *
 * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so
 * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID
 * headers.
 *
 * Q: why not just treat a sub-page as the minimal I/O unit of this flash
 * device, e.g., make @ubi->min_io_size = 512 in the example above?
 *
 * A: because when writing a sub-page, MTD still writes a full 2K page but the
 * bytes which are not relevant to the sub-page are 0xFF. So, basically,
 * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
 * Thus, we prefer to use sub-pages only for EC and VID headers.
 *
 * As it was noted above, the VID header may start at a non-aligned offset.
 * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
 * the VID header may reside at offset 1984 which is the last 64 bytes of the
 * last sub-page (EC header is always at offset zero). This causes some
 * difficulties when reading and writing VID headers.
 *
 * Suppose we have a 64-byte buffer and we read a VID header at it. We change
 * the data and want to write this VID header out. As we can only write in
 * 512-byte chunks, we have to allocate one more buffer and copy our VID header
 * to offset 448 of this buffer.
 *
 * The I/O unit does the following trick in order to avoid this extra copy.
 * It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID header
 * and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. When the
 * VID header is being written out, it shifts the VID header pointer back and
 * writes the whole sub-page.
+ */ + +#ifdef UBI_LINUX +#include <linux/crc32.h> +#include <linux/err.h> +#endif + +#include <ubi_uboot.h> +#include "ubi.h" + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum); +static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum); +static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum, +				 const struct ubi_ec_hdr *ec_hdr); +static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum); +static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, +				  const struct ubi_vid_hdr *vid_hdr); +static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, +				 int len); +#else +#define paranoid_check_not_bad(ubi, pnum) 0 +#define paranoid_check_peb_ec_hdr(ubi, pnum)  0 +#define paranoid_check_ec_hdr(ubi, pnum, ec_hdr)  0 +#define paranoid_check_peb_vid_hdr(ubi, pnum) 0 +#define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0 +#define paranoid_check_all_ff(ubi, pnum, offset, len) 0 +#endif + +/** + * ubi_io_read - read data from a physical eraseblock. + * @ubi: UBI device description object + * @buf: buffer where to store the read data + * @pnum: physical eraseblock number to read from + * @offset: offset within the physical eraseblock from where to read + * @len: how many bytes to read + * + * This function reads data from offset @offset of physical eraseblock @pnum + * and stores the read data in the @buf buffer. 
The following return codes are
 * possible:
 *
 * o %0 if all the requested data were successfully read;
 * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
 *   correctable bit-flips were detected; this is harmless but may indicate
 *   that this eraseblock may become bad soon (but do not have to);
 * o %-EBADMSG if the MTD subsystem reported about data integrity problems, for
 *   example it can be an ECC error in case of NAND; this most probably means
 *   that the data is corrupted;
 * o %-EIO if some I/O error occurred;
 * o other negative error codes in case of other errors.
 */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
		int len)
{
	int err, retries = 0;
	size_t read;
	loff_t addr;

	dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	ubi_assert(len > 0);

	err = paranoid_check_not_bad(ubi, pnum);
	if (err)
		return err > 0 ? -EINVAL : err;

	addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
	err = mtd_read(ubi->mtd, addr, len, &read, buf);
	if (err) {
		if (err == -EUCLEAN) {
			/*
			 * -EUCLEAN is reported if there was a bit-flip which
			 * was corrected, so this is harmless.
			 */
			ubi_msg("fixable bit-flip detected at PEB %d", pnum);
			ubi_assert(len == read);
			return UBI_IO_BITFLIPS;
		}

		/* Short reads are retried a few times before giving up */
		if (read != len && retries++ < UBI_IO_RETRIES) {
			dbg_io("error %d while reading %d bytes from PEB %d:%d, "
			       "read only %zd bytes, retry",
			       err, len, pnum, offset, read);
			yield();
			goto retry;
		}

		ubi_err("error %d while reading %d bytes from PEB %d:%d, "
			"read %zd bytes", err, len, pnum, offset, read);
		ubi_dbg_dump_stack();

		/*
		 * The driver should never return -EBADMSG if it failed to read
		 * all the requested data. But some buggy drivers might do
		 * this, so we change it to -EIO.
		 */
		if (read != len && err == -EBADMSG) {
			/*
			 * NOTE(review): upstream Linux converts this case to
			 * -EIO; here the conversion is commented out and
			 * replaced by an unconditional printk plus
			 * ubi_assert(0) - looks like leftover debug code.
			 * Confirm intent before changing behavior.
			 */
			ubi_assert(0);
			printk("%s[%d] not here\n", __func__, __LINE__);
/*			err = -EIO; */
		}
	} else {
		ubi_assert(len == read);

		/* Debug facility: randomly report emulated bit-flips */
		if (ubi_dbg_is_bitflip()) {
			dbg_msg("bit-flip (emulated)");
			err = UBI_IO_BITFLIPS;
		}
	}

	return err;
}

/**
 * ubi_io_write - write data to a physical eraseblock.
 * @ubi: UBI device description object
 * @buf: buffer with the data to write
 * @pnum: physical eraseblock number to write to
 * @offset: offset within the physical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes @len bytes of data from buffer @buf to offset @offset
 * of physical eraseblock @pnum. If all the data were successfully written,
 * zero is returned. If an error occurred, this function returns a negative
 * error code. If %-EIO is returned, the physical eraseblock most probably went
 * bad.
 *
 * Note, in case of an error, it is possible that something was still written
 * to the flash media, but may be some garbage.
 */
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
		 int len)
{
	int err;
	size_t written;
	loff_t addr;

	dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
	/* Writes must be aligned to the (sub-page) header I/O unit */
	ubi_assert(offset % ubi->hdrs_min_io_size == 0);
	ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

	/* The below has to be compiled out if paranoid checks are disabled */

	err = paranoid_check_not_bad(ubi, pnum);
	if (err)
		return err > 0 ? -EINVAL : err;

	/* The area we are writing to has to contain all 0xFF bytes */
	err = paranoid_check_all_ff(ubi, pnum, offset, len);
	if (err)
		return err > 0 ? -EINVAL : err;

	if (offset >= ubi->leb_start) {
		/*
		 * We write to the data area of the physical eraseblock. Make
		 * sure it has valid EC and VID headers.
		 */
		err = paranoid_check_peb_ec_hdr(ubi, pnum);
		if (err)
			return err > 0 ? -EINVAL : err;
		err = paranoid_check_peb_vid_hdr(ubi, pnum);
		if (err)
			return err > 0 ? -EINVAL : err;
	}

	/* Debug facility: emulate a write failure */
	if (ubi_dbg_is_write_failure()) {
		dbg_err("cannot write %d bytes to PEB %d:%d "
			"(emulated)", len, pnum, offset);
		ubi_dbg_dump_stack();
		return -EIO;
	}

	addr = (loff_t)pnum * ubi->peb_size + offset;
	err = mtd_write(ubi->mtd, addr, len, &written, buf);
	if (err) {
		ubi_err("error %d while writing %d bytes to PEB %d:%d, written"
			" %zd bytes", err, len, pnum, offset, written);
		ubi_dbg_dump_stack();
	} else
		ubi_assert(written == len);

	return err;
}

/**
 * erase_callback - MTD erasure call-back.
 * @ei: MTD erase information object.
 *
 * Note, even though MTD erase interface is asynchronous, all the current
 * implementations are synchronous anyway.
 */
static void erase_callback(struct erase_info *ei)
{
	/* @priv carries the waitqueue do_sync_erase() sleeps on */
	wake_up_interruptible((wait_queue_head_t *)ei->priv);
}

/**
 * do_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to erase
 *
 * This function synchronously erases physical eraseblock @pnum and returns
 * zero in case of success and a negative error code in case of failure. If
 * %-EIO is returned, the physical eraseblock most probably went bad.
 */
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
	int err, retries = 0;
	struct erase_info ei;
	wait_queue_head_t wq;

	dbg_io("erase PEB %d", pnum);

retry:
	init_waitqueue_head(&wq);
	memset(&ei, 0, sizeof(struct erase_info));

	ei.mtd      = ubi->mtd;
	ei.addr     = (loff_t)pnum * ubi->peb_size;
	ei.len      = ubi->peb_size;
	ei.callback = erase_callback;
	/* The callback wakes us up through this waitqueue */
	ei.priv     = (unsigned long)&wq;

	err = mtd_erase(ubi->mtd, &ei);
	if (err) {
		if (retries++ < UBI_IO_RETRIES) {
			dbg_io("error %d while erasing PEB %d, retry",
			       err, pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d, error %d", pnum, err);
		ubi_dbg_dump_stack();
		return err;
	}

	/* Block until the (possibly asynchronous) erase completes or fails */
	err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
					   ei.state == MTD_ERASE_FAILED);
	if (err) {
		ubi_err("interrupted PEB %d erasure", pnum);
		return -EINTR;
	}

	if (ei.state == MTD_ERASE_FAILED) {
		if (retries++ < UBI_IO_RETRIES) {
			dbg_io("error while erasing PEB %d, retry", pnum);
			yield();
			goto retry;
		}
		ubi_err("cannot erase PEB %d", pnum);
		ubi_dbg_dump_stack();
		return -EIO;
	}

	/* After a successful erase the whole PEB must read back as 0xFF */
	err = paranoid_check_all_ff(ubi, pnum, 0, ubi->peb_size);
	if (err)
		return err > 0 ? -EINVAL : err;

	/* Debug facility: emulate an erase failure */
	if (ubi_dbg_is_erase_failure() && !err) {
		dbg_err("cannot erase PEB %d (emulated)", pnum);
		return -EIO;
	}

	return 0;
}

/**
 * check_pattern - check if buffer contains only a certain byte pattern.
 * @buf: buffer to check
 * @patt: the pattern to check
 * @size: buffer size in bytes
 *
 * This function returns %1 if there are only @patt bytes in @buf, and %0 if
 * something else was also found.
 */
static int check_pattern(const void *buf, uint8_t patt, int size)
{
	int i;

	for (i = 0; i < size; i++)
		if (((const uint8_t *)buf)[i] != patt)
			return 0;
	return 1;
}

/* Patterns to write to a physical eraseblock when torturing it */
static uint8_t patterns[] = {0xa5, 0x5a, 0x0};

/**
 * torture_peb - test a supposedly bad physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to test
 *
 * This function returns %-EIO if the physical eraseblock did not pass the
 * test, a positive number of erase operations done if the test was
 * successfully passed, and other negative error codes in case of other errors.
 */
static int torture_peb(struct ubi_device *ubi, int pnum)
{
	int err, i, patt_count;

	patt_count = ARRAY_SIZE(patterns);
	ubi_assert(patt_count > 0);

	/* @ubi->peb_buf1 is shared, so it must be held for the whole test */
	mutex_lock(&ubi->buf_mutex);
	for (i = 0; i < patt_count; i++) {
		err = do_sync_erase(ubi, pnum);
		if (err)
			goto out;

		/* Make sure the PEB contains only 0xFF bytes */
		err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size);
		if (err == 0) {
			ubi_err("erased PEB %d, but a non-0xFF byte found",
				pnum);
			err = -EIO;
			goto out;
		}

		/* Write a pattern and check it */
		memset(ubi->peb_buf1, patterns[i], ubi->peb_size);
		err = ubi_io_write(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		/* Scramble the buffer first so the read-back is meaningful */
		memset(ubi->peb_buf1, ~patterns[i], ubi->peb_size);
		err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
		if (err)
			goto out;

		err = check_pattern(ubi->peb_buf1, patterns[i], ubi->peb_size);
		if (err == 0) {
			ubi_err("pattern %x checking failed for PEB %d",
				patterns[i], pnum);
			err = -EIO;
			goto out;
		}
	}

	/* Test passed - report how many erasures it cost */
	err = patt_count;

out:
	mutex_unlock(&ubi->buf_mutex);
	if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
		/*
		 * If a bit-flip or data integrity error was detected, the test
		 * has not passed because it happened on a freshly erased
		 * physical eraseblock which means something is wrong with it.
		 */
		ubi_err("read problems on freshly erased PEB %d, must be bad",
			pnum);
		err = -EIO;
	}
	return err;
}

/**
 * ubi_io_sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock number to erase
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function synchronously erases physical eraseblock @pnum. If @torture
 * flag is not zero, the physical eraseblock is checked by means of writing
 * different patterns to it and reading them back. If the torturing is enabled,
 * the physical eraseblock is erased more than once.
 *
 * This function returns the number of erasures made in case of success, %-EIO
 * if the erasure failed or the torturing test failed, and other negative error
 * codes in case of other errors. Note, %-EIO means that the physical
 * eraseblock is bad.
 */
int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
{
	int err, ret = 0;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	err = paranoid_check_not_bad(ubi, pnum);
	if (err != 0)
		return err > 0 ? -EINVAL : err;

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

	if (torture) {
		ret = torture_peb(ubi, pnum);
		if (ret < 0)
			return ret;
	}

	err = do_sync_erase(ubi, pnum);
	if (err)
		return err;

	/* +1 accounts for the final erase done above */
	return ret + 1;
}

/**
 * ubi_io_is_bad - check if a physical eraseblock is bad.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 *
 * This function returns a positive number if the physical eraseblock is bad,
 * zero if not, and a negative error code if an error occurred.
 */
int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
{
	struct mtd_info *mtd = ubi->mtd;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	/* On media with no bad-block support every PEB is treated as good */
	if (ubi->bad_allowed) {
		int ret;

		ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
		if (ret < 0)
			ubi_err("error %d while checking if PEB %d is bad",
				ret, pnum);
		else if (ret)
			dbg_io("PEB %d is bad", pnum);
		return ret;
	}

	return 0;
}

/**
 * ubi_io_mark_bad - mark a physical eraseblock as bad.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to mark
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
{
	int err;
	struct mtd_info *mtd = ubi->mtd;

	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);

	if (ubi->ro_mode) {
		ubi_err("read-only mode");
		return -EROFS;
	}

	/* Nothing to do if the flash does not support bad-block marking */
	if (!ubi->bad_allowed)
		return 0;

	err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
	if (err)
		ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
	return err;
}

/**
 * validate_ec_hdr - validate an erase counter header.
 * @ubi: UBI device description object
 * @ec_hdr: the erase counter header to check
 *
 * This function returns zero if the erase counter header is OK, and %1 if
 * not.
 */
static int validate_ec_hdr(const struct ubi_device *ubi,
			   const struct ubi_ec_hdr *ec_hdr)
{
	long long ec;
	int vid_hdr_offset, leb_start;

	ec = be64_to_cpu(ec_hdr->ec);
	vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
	leb_start = be32_to_cpu(ec_hdr->data_offset);

	if (ec_hdr->version != UBI_VERSION) {
		ubi_err("node with incompatible UBI version found: "
			"this UBI version is %d, image version is %d",
			UBI_VERSION, (int)ec_hdr->version);
		goto bad;
	}

	/* The on-flash layout must match what this UBI instance expects */
	if (vid_hdr_offset != ubi->vid_hdr_offset) {
		ubi_err("bad VID header offset %d, expected %d",
			vid_hdr_offset, ubi->vid_hdr_offset);
		goto bad;
	}

	if (leb_start != ubi->leb_start) {
		ubi_err("bad data offset %d, expected %d",
			leb_start, ubi->leb_start);
		goto bad;
	}

	if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
		ubi_err("bad erase counter %lld", ec);
		goto bad;
	}

	return 0;

bad:
	ubi_err("bad EC header");
	ubi_dbg_dump_ec_hdr(ec_hdr);
	ubi_dbg_dump_stack();
	return 1;
}

/**
 * ubi_io_read_ec_hdr - read and check an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to read from
 * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter
 * header
 * @verbose: be verbose if the header is corrupted or was not found
 *
 * This function reads erase counter header from physical eraseblock @pnum and
 * stores it in @ec_hdr. This function also checks CRC checksum of the read
 * erase counter header.
The following codes may be returned:
 *
 * o %0 if the CRC checksum is correct and the header was successfully read;
 * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
 *   and corrected by the flash driver; this is harmless but may indicate that
 *   this eraseblock may become bad soon (but may be not);
 * o %UBI_IO_BAD_EC_HDR if the erase counter header is corrupted (a CRC error);
 * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty;
 * o a negative error code in case of failure.
 */
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
		       struct ubi_ec_hdr *ec_hdr, int verbose)
{
	int err, read_err = 0;
	uint32_t crc, magic, hdr_crc;

	dbg_io("read EC header from PEB %d", pnum);
	ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
	if (UBI_IO_DEBUG)
		verbose = 1;

	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
	if (err) {
		if (err != UBI_IO_BITFLIPS && err != -EBADMSG)
			return err;

		/*
		 * We read all the data, but either a correctable bit-flip
		 * occurred, or MTD reported about some data integrity error,
		 * like an ECC error in case of NAND. The former is harmless,
		 * the latter may mean that the read data is corrupted. But we
		 * have a CRC check-sum and we will detect this. If the EC
		 * header is still OK, we just report this as there was a
		 * bit-flip.
		 */
		read_err = err;
	}

	magic = be32_to_cpu(ec_hdr->magic);
	if (magic != UBI_EC_HDR_MAGIC) {
		/*
		 * The magic field is wrong. Let's check if we have read all
		 * 0xFF. If yes, this physical eraseblock is assumed to be
		 * empty.
		 *
		 * But if there was a read error, we do not test it for all
		 * 0xFFs. Even if it does contain all 0xFFs, this error
		 * indicates that something is still wrong with this physical
		 * eraseblock and we anyway cannot treat it as empty.
		 */
		if (read_err != -EBADMSG &&
		    check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
			/* The physical eraseblock is supposedly empty */

			/*
			 * The below is just a paranoid check, it has to be
			 * compiled out if paranoid checks are disabled.
			 */
			err = paranoid_check_all_ff(ubi, pnum, 0,
						    ubi->peb_size);
			if (err)
				return err > 0 ? UBI_IO_BAD_EC_HDR : err;

			if (verbose)
				ubi_warn("no EC header found at PEB %d, "
					 "only 0xFF bytes", pnum);
			return UBI_IO_PEB_EMPTY;
		}

		/*
		 * This is not a valid erase counter header, and these are not
		 * 0xFF bytes. Report that the header is corrupted.
		 */
		if (verbose) {
			ubi_warn("bad magic number at PEB %d: %08x instead of "
				 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
			ubi_dbg_dump_ec_hdr(ec_hdr);
		}
		return UBI_IO_BAD_EC_HDR;
	}

	/* The CRC is computed over the header minus the CRC field itself */
	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);

	if (hdr_crc != crc) {
		if (verbose) {
			ubi_warn("bad EC header CRC at PEB %d, calculated %#08x,"
				 " read %#08x", pnum, crc, hdr_crc);
			ubi_dbg_dump_ec_hdr(ec_hdr);
		}
		return UBI_IO_BAD_EC_HDR;
	}

	/* And of course validate what has just been read from the media */
	err = validate_ec_hdr(ubi, ec_hdr);
	if (err) {
		ubi_err("validation failed for PEB %d", pnum);
		return -EINVAL;
	}

	/* CRC was good, so a prior -EBADMSG/bit-flip is only a bit-flip */
	return read_err ? UBI_IO_BITFLIPS : 0;
}

/**
 * ubi_io_write_ec_hdr - write an erase counter header.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to write to
 * @ec_hdr: the erase counter header to write
 *
 * This function writes erase counter header described by @ec_hdr to physical
 * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so
 * the caller does not have to fill them. Callers must only fill the
 * @ec_hdr->ec field.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
If %-EIO is returned, the physical eraseblock most probably + * went bad. + */ +int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, +			struct ubi_ec_hdr *ec_hdr) +{ +	int err; +	uint32_t crc; + +	dbg_io("write EC header to PEB %d", pnum); +	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count); + +	ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC); +	ec_hdr->version = UBI_VERSION; +	ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset); +	ec_hdr->data_offset = cpu_to_be32(ubi->leb_start); +	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); +	ec_hdr->hdr_crc = cpu_to_be32(crc); + +	err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr); +	if (err) +		return -EINVAL; + +	err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize); +	return err; +} + +/** + * validate_vid_hdr - validate a volume identifier header. + * @ubi: UBI device description object + * @vid_hdr: the volume identifier header to check + * + * This function checks that data stored in the volume identifier header + * @vid_hdr. Returns zero if the VID header is OK and %1 if not. 
+ */ +static int validate_vid_hdr(const struct ubi_device *ubi, +			    const struct ubi_vid_hdr *vid_hdr) +{ +	int vol_type = vid_hdr->vol_type; +	int copy_flag = vid_hdr->copy_flag; +	int vol_id = be32_to_cpu(vid_hdr->vol_id); +	int lnum = be32_to_cpu(vid_hdr->lnum); +	int compat = vid_hdr->compat; +	int data_size = be32_to_cpu(vid_hdr->data_size); +	int used_ebs = be32_to_cpu(vid_hdr->used_ebs); +	int data_pad = be32_to_cpu(vid_hdr->data_pad); +	int data_crc = be32_to_cpu(vid_hdr->data_crc); +	int usable_leb_size = ubi->leb_size - data_pad; + +	if (copy_flag != 0 && copy_flag != 1) { +		dbg_err("bad copy_flag"); +		goto bad; +	} + +	if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 || +	    data_pad < 0) { +		dbg_err("negative values"); +		goto bad; +	} + +	if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) { +		dbg_err("bad vol_id"); +		goto bad; +	} + +	if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) { +		dbg_err("bad compat"); +		goto bad; +	} + +	if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE && +	    compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE && +	    compat != UBI_COMPAT_REJECT) { +		dbg_err("bad compat"); +		goto bad; +	} + +	if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { +		dbg_err("bad vol_type"); +		goto bad; +	} + +	if (data_pad >= ubi->leb_size / 2) { +		dbg_err("bad data_pad"); +		goto bad; +	} + +	if (vol_type == UBI_VID_STATIC) { +		/* +		 * Although from high-level point of view static volumes may +		 * contain zero bytes of data, but no VID headers can contain +		 * zero at these fields, because they empty volumes do not have +		 * mapped logical eraseblocks. 
+		 */ +		if (used_ebs == 0) { +			dbg_err("zero used_ebs"); +			goto bad; +		} +		if (data_size == 0) { +			dbg_err("zero data_size"); +			goto bad; +		} +		if (lnum < used_ebs - 1) { +			if (data_size != usable_leb_size) { +				dbg_err("bad data_size"); +				goto bad; +			} +		} else if (lnum == used_ebs - 1) { +			if (data_size == 0) { +				dbg_err("bad data_size at last LEB"); +				goto bad; +			} +		} else { +			dbg_err("too high lnum"); +			goto bad; +		} +	} else { +		if (copy_flag == 0) { +			if (data_crc != 0) { +				dbg_err("non-zero data CRC"); +				goto bad; +			} +			if (data_size != 0) { +				dbg_err("non-zero data_size"); +				goto bad; +			} +		} else { +			if (data_size == 0) { +				dbg_err("zero data_size of copy"); +				goto bad; +			} +		} +		if (used_ebs != 0) { +			dbg_err("bad used_ebs"); +			goto bad; +		} +	} + +	return 0; + +bad: +	ubi_err("bad VID header"); +	ubi_dbg_dump_vid_hdr(vid_hdr); +	ubi_dbg_dump_stack(); +	return 1; +} + +/** + * ubi_io_read_vid_hdr - read and check a volume identifier header. + * @ubi: UBI device description object + * @pnum: physical eraseblock number to read from + * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume + * identifier header + * @verbose: be verbose if the header is corrupted or wasn't found + * + * This function reads the volume identifier header from physical eraseblock + * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read + * volume identifier header. 
The following codes may be returned: + * + * o %0 if the CRC checksum is correct and the header was successfully read; + * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected + *   and corrected by the flash driver; this is harmless but may indicate that + *   this eraseblock may become bad soon; + * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC + *   error detected); + * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID + *   header there); + * o a negative error code in case of failure. + */ +int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, +			struct ubi_vid_hdr *vid_hdr, int verbose) +{ +	int err, read_err = 0; +	uint32_t crc, magic, hdr_crc; +	void *p; + +	dbg_io("read VID header from PEB %d", pnum); +	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count); +	if (UBI_IO_DEBUG) +		verbose = 1; + +	p = (char *)vid_hdr - ubi->vid_hdr_shift; +	err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, +			  ubi->vid_hdr_alsize); +	if (err) { +		if (err != UBI_IO_BITFLIPS && err != -EBADMSG) +			return err; + +		/* +		 * We read all the data, but either a correctable bit-flip +		 * occurred, or MTD reported about some data integrity error, +		 * like an ECC error in case of NAND. The former is harmless, +		 * the later may mean the read data is corrupted. But we have a +		 * CRC check-sum and we will identify this. If the VID header is +		 * still OK, we just report this as there was a bit-flip. +		 */ +		read_err = err; +	} + +	magic = be32_to_cpu(vid_hdr->magic); +	if (magic != UBI_VID_HDR_MAGIC) { +		/* +		 * If we have read all 0xFF bytes, the VID header probably does +		 * not exist and the physical eraseblock is assumed to be free. +		 * +		 * But if there was a read error, we do not test the data for +		 * 0xFFs. Even if it does contain all 0xFFs, this error +		 * indicates that something is still wrong with this physical +		 * eraseblock and it cannot be regarded as free. 
+		 */ +		if (read_err != -EBADMSG && +		    check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { +			/* The physical eraseblock is supposedly free */ + +			/* +			 * The below is just a paranoid check, it has to be +			 * compiled out if paranoid checks are disabled. +			 */ +			err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start, +						    ubi->leb_size); +			if (err) +				return err > 0 ? UBI_IO_BAD_VID_HDR : err; + +			if (verbose) +				ubi_warn("no VID header found at PEB %d, " +					 "only 0xFF bytes", pnum); +			return UBI_IO_PEB_FREE; +		} + +		/* +		 * This is not a valid VID header, and these are not 0xFF +		 * bytes. Report that the header is corrupted. +		 */ +		if (verbose) { +			ubi_warn("bad magic number at PEB %d: %08x instead of " +				 "%08x", pnum, magic, UBI_VID_HDR_MAGIC); +			ubi_dbg_dump_vid_hdr(vid_hdr); +		} +		return UBI_IO_BAD_VID_HDR; +	} + +	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); +	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); + +	if (hdr_crc != crc) { +		if (verbose) { +			ubi_warn("bad CRC at PEB %d, calculated %#08x, " +				 "read %#08x", pnum, crc, hdr_crc); +			ubi_dbg_dump_vid_hdr(vid_hdr); +		} +		return UBI_IO_BAD_VID_HDR; +	} + +	/* Validate the VID header that we have just read */ +	err = validate_vid_hdr(ubi, vid_hdr); +	if (err) { +		ubi_err("validation failed for PEB %d", pnum); +		return -EINVAL; +	} + +	return read_err ? UBI_IO_BITFLIPS : 0; +} + +/** + * ubi_io_write_vid_hdr - write a volume identifier header. + * @ubi: UBI device description object + * @pnum: the physical eraseblock number to write to + * @vid_hdr: the volume identifier header to write + * + * This function writes the volume identifier header described by @vid_hdr to + * physical eraseblock @pnum. This function automatically fills the + * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates + * header CRC checksum and stores it at vid_hdr->hdr_crc. 
+ * + * This function returns zero in case of success and a negative error code in + * case of failure. If %-EIO is returned, the physical eraseblock probably went + * bad. + */ +int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, +			 struct ubi_vid_hdr *vid_hdr) +{ +	int err; +	uint32_t crc; +	void *p; + +	dbg_io("write VID header to PEB %d", pnum); +	ubi_assert(pnum >= 0 &&  pnum < ubi->peb_count); + +	err = paranoid_check_peb_ec_hdr(ubi, pnum); +	if (err) +		return err > 0 ? -EINVAL: err; + +	vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC); +	vid_hdr->version = UBI_VERSION; +	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC); +	vid_hdr->hdr_crc = cpu_to_be32(crc); + +	err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr); +	if (err) +		return -EINVAL; + +	p = (char *)vid_hdr - ubi->vid_hdr_shift; +	err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset, +			   ubi->vid_hdr_alsize); +	return err; +} + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID + +/** + * paranoid_check_not_bad - ensure that a physical eraseblock is not bad. + * @ubi: UBI device description object + * @pnum: physical eraseblock number to check + * + * This function returns zero if the physical eraseblock is good, a positive + * number if it is bad and a negative error code if an error occurred. + */ +static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum) +{ +	int err; + +	err = ubi_io_is_bad(ubi, pnum); +	if (!err) +		return err; + +	ubi_err("paranoid check failed for PEB %d", pnum); +	ubi_dbg_dump_stack(); +	return err; +} + +/** + * paranoid_check_ec_hdr - check if an erase counter header is all right. + * @ubi: UBI device description object + * @pnum: physical eraseblock number the erase counter header belongs to + * @ec_hdr: the erase counter header to check + * + * This function returns zero if the erase counter header contains valid + * values, and %1 if not. 
+ */ +static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum, +				 const struct ubi_ec_hdr *ec_hdr) +{ +	int err; +	uint32_t magic; + +	magic = be32_to_cpu(ec_hdr->magic); +	if (magic != UBI_EC_HDR_MAGIC) { +		ubi_err("bad magic %#08x, must be %#08x", +			magic, UBI_EC_HDR_MAGIC); +		goto fail; +	} + +	err = validate_ec_hdr(ubi, ec_hdr); +	if (err) { +		ubi_err("paranoid check failed for PEB %d", pnum); +		goto fail; +	} + +	return 0; + +fail: +	ubi_dbg_dump_ec_hdr(ec_hdr); +	ubi_dbg_dump_stack(); +	return 1; +} + +/** + * paranoid_check_peb_ec_hdr - check that the erase counter header of a + * physical eraseblock is in-place and is all right. + * @ubi: UBI device description object + * @pnum: the physical eraseblock number to check + * + * This function returns zero if the erase counter header is all right, %1 if + * not, and a negative error code if an error occurred. + */ +static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum) +{ +	int err; +	uint32_t crc, hdr_crc; +	struct ubi_ec_hdr *ec_hdr; + +	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); +	if (!ec_hdr) +		return -ENOMEM; + +	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); +	if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG) +		goto exit; + +	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); +	hdr_crc = be32_to_cpu(ec_hdr->hdr_crc); +	if (hdr_crc != crc) { +		ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc); +		ubi_err("paranoid check failed for PEB %d", pnum); +		ubi_dbg_dump_ec_hdr(ec_hdr); +		ubi_dbg_dump_stack(); +		err = 1; +		goto exit; +	} + +	err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr); + +exit: +	kfree(ec_hdr); +	return err; +} + +/** + * paranoid_check_vid_hdr - check that a volume identifier header is all right. 
+ * @ubi: UBI device description object + * @pnum: physical eraseblock number the volume identifier header belongs to + * @vid_hdr: the volume identifier header to check + * + * This function returns zero if the volume identifier header is all right, and + * %1 if not. + */ +static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, +				  const struct ubi_vid_hdr *vid_hdr) +{ +	int err; +	uint32_t magic; + +	magic = be32_to_cpu(vid_hdr->magic); +	if (magic != UBI_VID_HDR_MAGIC) { +		ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x", +			magic, pnum, UBI_VID_HDR_MAGIC); +		goto fail; +	} + +	err = validate_vid_hdr(ubi, vid_hdr); +	if (err) { +		ubi_err("paranoid check failed for PEB %d", pnum); +		goto fail; +	} + +	return err; + +fail: +	ubi_err("paranoid check failed for PEB %d", pnum); +	ubi_dbg_dump_vid_hdr(vid_hdr); +	ubi_dbg_dump_stack(); +	return 1; + +} + +/** + * paranoid_check_peb_vid_hdr - check that the volume identifier header of a + * physical eraseblock is in-place and is all right. + * @ubi: UBI device description object + * @pnum: the physical eraseblock number to check + * + * This function returns zero if the volume identifier header is all right, + * %1 if not, and a negative error code if an error occurred. 
+ */
+static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
+{
+	int err;
+	uint32_t crc, hdr_crc;
+	struct ubi_vid_hdr *vid_hdr;
+	void *p;
+
+	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+	if (!vid_hdr)
+		return -ENOMEM;
+
+	p = (char *)vid_hdr - ubi->vid_hdr_shift;
+	err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
+			  ubi->vid_hdr_alsize);
+	if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+		goto exit;
+
+	/*
+	 * Check the CRC over the VID header CRC region. This used to
+	 * (incorrectly) use UBI_EC_HDR_SIZE_CRC, which only worked because
+	 * both headers happen to be the same size (64 bytes).
+	 */
+	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
+	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
+	if (hdr_crc != crc) {
+		ubi_err("bad VID header CRC at PEB %d, calculated %#08x, "
+			"read %#08x", pnum, crc, hdr_crc);
+		ubi_err("paranoid check failed for PEB %d", pnum);
+		ubi_dbg_dump_vid_hdr(vid_hdr);
+		ubi_dbg_dump_stack();
+		err = 1;
+		goto exit;
+	}
+
+	err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
+
+exit:
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	return err;
+}
+
+/**
+ * paranoid_check_all_ff - check that a region of flash is empty.
+ * @ubi: UBI device description object
+ * @pnum: the physical eraseblock number to check
+ * @offset: the starting offset within the physical eraseblock to check
+ * @len: the length of the region to check
+ *
+ * This function returns zero if only 0xFF bytes are present at offset
+ * @offset of the physical eraseblock @pnum, %1 if not, and a negative error
+ * code if an error occurred. 
+ */ +static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, +				 int len) +{ +	size_t read; +	int err; +	loff_t addr = (loff_t)pnum * ubi->peb_size + offset; + +	mutex_lock(&ubi->dbg_buf_mutex); +	err = mtd_read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf); +	if (err && err != -EUCLEAN) { +		ubi_err("error %d while reading %d bytes from PEB %d:%d, " +			"read %zd bytes", err, len, pnum, offset, read); +		goto error; +	} + +	err = check_pattern(ubi->dbg_peb_buf, 0xFF, len); +	if (err == 0) { +		ubi_err("flash region at PEB %d:%d, length %d does not " +			"contain all 0xFF bytes", pnum, offset, len); +		goto fail; +	} +	mutex_unlock(&ubi->dbg_buf_mutex); + +	return 0; + +fail: +	ubi_err("paranoid check failed for PEB %d", pnum); +	dbg_msg("hex dump of the %d-%d region", offset, offset + len); +	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, +		       ubi->dbg_peb_buf, len, 1); +	err = 1; +error: +	ubi_dbg_dump_stack(); +	mutex_unlock(&ubi->dbg_buf_mutex); +	return err; +} + +#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ diff --git a/roms/u-boot/drivers/mtd/ubi/kapi.c b/roms/u-boot/drivers/mtd/ubi/kapi.c new file mode 100644 index 00000000..63c56c99 --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/kapi.c @@ -0,0 +1,626 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* This file mostly implements UBI kernel API functions */ + +#ifdef UBI_LINUX +#include <linux/module.h> +#include <linux/err.h> +#include <asm/div64.h> +#endif + +#include <ubi_uboot.h> +#include "ubi.h" + +/** + * ubi_get_device_info - get information about UBI device. + * @ubi_num: UBI device number + * @di: the information is stored here + * + * This function returns %0 in case of success, %-EINVAL if the UBI device + * number is invalid, and %-ENODEV if there is no such UBI device. 
+ */ +int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) +{ +	struct ubi_device *ubi; + +	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) +		return -EINVAL; + +	ubi = ubi_get_device(ubi_num); +	if (!ubi) +		return -ENODEV; + +	di->ubi_num = ubi->ubi_num; +	di->leb_size = ubi->leb_size; +	di->min_io_size = ubi->min_io_size; +	di->ro_mode = ubi->ro_mode; +	di->cdev = ubi->cdev.dev; + +	ubi_put_device(ubi); +	return 0; +} +EXPORT_SYMBOL_GPL(ubi_get_device_info); + +/** + * ubi_get_volume_info - get information about UBI volume. + * @desc: volume descriptor + * @vi: the information is stored here + */ +void ubi_get_volume_info(struct ubi_volume_desc *desc, +			 struct ubi_volume_info *vi) +{ +	const struct ubi_volume *vol = desc->vol; +	const struct ubi_device *ubi = vol->ubi; + +	vi->vol_id = vol->vol_id; +	vi->ubi_num = ubi->ubi_num; +	vi->size = vol->reserved_pebs; +	vi->used_bytes = vol->used_bytes; +	vi->vol_type = vol->vol_type; +	vi->corrupted = vol->corrupted; +	vi->upd_marker = vol->upd_marker; +	vi->alignment = vol->alignment; +	vi->usable_leb_size = vol->usable_leb_size; +	vi->name_len = vol->name_len; +	vi->name = vol->name; +	vi->cdev = vol->cdev.dev; +} +EXPORT_SYMBOL_GPL(ubi_get_volume_info); + +/** + * ubi_open_volume - open UBI volume. + * @ubi_num: UBI device number + * @vol_id: volume ID + * @mode: open mode + * + * The @mode parameter specifies if the volume should be opened in read-only + * mode, read-write mode, or exclusive mode. The exclusive mode guarantees that + * nobody else will be able to open this volume. UBI allows to have many volume + * readers and one writer at a time. + * + * If a static volume is being opened for the first time since boot, it will be + * checked by this function, which means it will be fully read and the CRC + * checksum of each logical eraseblock will be checked. + * + * This function returns volume descriptor in case of success and a negative + * error code in case of failure. 
+ */ +struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) +{ +	int err; +	struct ubi_volume_desc *desc; +	struct ubi_device *ubi; +	struct ubi_volume *vol; + +	dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); + +	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) +		return ERR_PTR(-EINVAL); + +	if (mode != UBI_READONLY && mode != UBI_READWRITE && +	    mode != UBI_EXCLUSIVE) +		return ERR_PTR(-EINVAL); + +	/* +	 * First of all, we have to get the UBI device to prevent its removal. +	 */ +	ubi = ubi_get_device(ubi_num); +	if (!ubi) +		return ERR_PTR(-ENODEV); + +	if (vol_id < 0 || vol_id >= ubi->vtbl_slots) { +		err = -EINVAL; +		goto out_put_ubi; +	} + +	desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL); +	if (!desc) { +		err = -ENOMEM; +		goto out_put_ubi; +	} + +	err = -ENODEV; +	if (!try_module_get(THIS_MODULE)) +		goto out_free; + +	spin_lock(&ubi->volumes_lock); +	vol = ubi->volumes[vol_id]; +	if (!vol) +		goto out_unlock; + +	err = -EBUSY; +	switch (mode) { +	case UBI_READONLY: +		if (vol->exclusive) +			goto out_unlock; +		vol->readers += 1; +		break; + +	case UBI_READWRITE: +		if (vol->exclusive || vol->writers > 0) +			goto out_unlock; +		vol->writers += 1; +		break; + +	case UBI_EXCLUSIVE: +		if (vol->exclusive || vol->writers || vol->readers) +			goto out_unlock; +		vol->exclusive = 1; +		break; +	} +	get_device(&vol->dev); +	vol->ref_count += 1; +	spin_unlock(&ubi->volumes_lock); + +	desc->vol = vol; +	desc->mode = mode; + +	mutex_lock(&ubi->ckvol_mutex); +	if (!vol->checked) { +		/* This is the first open - check the volume */ +		err = ubi_check_volume(ubi, vol_id); +		if (err < 0) { +			mutex_unlock(&ubi->ckvol_mutex); +			ubi_close_volume(desc); +			return ERR_PTR(err); +		} +		if (err == 1) { +			ubi_warn("volume %d on UBI device %d is corrupted", +				 vol_id, ubi->ubi_num); +			vol->corrupted = 1; +		} +		vol->checked = 1; +	} +	mutex_unlock(&ubi->ckvol_mutex); + +	return desc; + +out_unlock: +	
spin_unlock(&ubi->volumes_lock); +	module_put(THIS_MODULE); +out_free: +	kfree(desc); +out_put_ubi: +	ubi_put_device(ubi); +	return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(ubi_open_volume); + +/** + * ubi_open_volume_nm - open UBI volume by name. + * @ubi_num: UBI device number + * @name: volume name + * @mode: open mode + * + * This function is similar to 'ubi_open_volume()', but opens a volume by name. + */ +struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, +					   int mode) +{ +	int i, vol_id = -1, len; +	struct ubi_device *ubi; +	struct ubi_volume_desc *ret; + +	dbg_msg("open volume %s, mode %d", name, mode); + +	if (!name) +		return ERR_PTR(-EINVAL); + +	len = strnlen(name, UBI_VOL_NAME_MAX + 1); +	if (len > UBI_VOL_NAME_MAX) +		return ERR_PTR(-EINVAL); + +	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) +		return ERR_PTR(-EINVAL); + +	ubi = ubi_get_device(ubi_num); +	if (!ubi) +		return ERR_PTR(-ENODEV); + +	spin_lock(&ubi->volumes_lock); +	/* Walk all volumes of this UBI device */ +	for (i = 0; i < ubi->vtbl_slots; i++) { +		struct ubi_volume *vol = ubi->volumes[i]; + +		if (vol && len == vol->name_len && !strcmp(name, vol->name)) { +			vol_id = i; +			break; +		} +	} +	spin_unlock(&ubi->volumes_lock); + +	if (vol_id >= 0) +		ret = ubi_open_volume(ubi_num, vol_id, mode); +	else +		ret = ERR_PTR(-ENODEV); + +	/* +	 * We should put the UBI device even in case of success, because +	 * 'ubi_open_volume()' took a reference as well. +	 */ +	ubi_put_device(ubi); +	return ret; +} +EXPORT_SYMBOL_GPL(ubi_open_volume_nm); + +/** + * ubi_close_volume - close UBI volume. 
+ * @desc: volume descriptor + */ +void ubi_close_volume(struct ubi_volume_desc *desc) +{ +	struct ubi_volume *vol = desc->vol; +	struct ubi_device *ubi = vol->ubi; + +	dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); + +	spin_lock(&ubi->volumes_lock); +	switch (desc->mode) { +	case UBI_READONLY: +		vol->readers -= 1; +		break; +	case UBI_READWRITE: +		vol->writers -= 1; +		break; +	case UBI_EXCLUSIVE: +		vol->exclusive = 0; +	} +	vol->ref_count -= 1; +	spin_unlock(&ubi->volumes_lock); + +	kfree(desc); +	put_device(&vol->dev); +	ubi_put_device(ubi); +	module_put(THIS_MODULE); +} +EXPORT_SYMBOL_GPL(ubi_close_volume); + +/** + * ubi_leb_read - read data. + * @desc: volume descriptor + * @lnum: logical eraseblock number to read from + * @buf: buffer where to store the read data + * @offset: offset within the logical eraseblock to read from + * @len: how many bytes to read + * @check: whether UBI has to check the read data's CRC or not. + * + * This function reads data from offset @offset of logical eraseblock @lnum and + * stores the data at @buf. When reading from static volumes, @check specifies + * whether the data has to be checked or not. If yes, the whole logical + * eraseblock will be read and its CRC checksum will be checked (i.e., the CRC + * checksum is per-eraseblock). So checking may substantially slow down the + * read speed. The @check argument is ignored for dynamic volumes. + * + * In case of success, this function returns zero. In case of failure, this + * function returns a negative error code. + * + * %-EBADMSG error code is returned: + * o for both static and dynamic volumes if MTD driver has detected a data + *   integrity problem (unrecoverable ECC checksum mismatch in case of NAND); + * o for static volumes in case of data CRC mismatch. + * + * If the volume is damaged because of an interrupted update this function just + * returns immediately with %-EBADF error code. 
+ */ +int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, +		 int len, int check) +{ +	struct ubi_volume *vol = desc->vol; +	struct ubi_device *ubi = vol->ubi; +	int err, vol_id = vol->vol_id; + +	dbg_msg("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset); + +	if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 || +	    lnum >= vol->used_ebs || offset < 0 || len < 0 || +	    offset + len > vol->usable_leb_size) +		return -EINVAL; + +	if (vol->vol_type == UBI_STATIC_VOLUME) { +		if (vol->used_ebs == 0) +			/* Empty static UBI volume */ +			return 0; +		if (lnum == vol->used_ebs - 1 && +		    offset + len > vol->last_eb_bytes) +			return -EINVAL; +	} + +	if (vol->upd_marker) +		return -EBADF; +	if (len == 0) +		return 0; + +	err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check); +	if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) { +		ubi_warn("mark volume %d as corrupted", vol_id); +		vol->corrupted = 1; +	} + +	return err; +} +EXPORT_SYMBOL_GPL(ubi_leb_read); + +/** + * ubi_leb_write - write data. + * @desc: volume descriptor + * @lnum: logical eraseblock number to write to + * @buf: data to write + * @offset: offset within the logical eraseblock where to write + * @len: how many bytes to write + * @dtype: expected data type + * + * This function writes @len bytes of data from @buf to offset @offset of + * logical eraseblock @lnum. The @dtype argument describes expected lifetime of + * the data. + * + * This function takes care of physical eraseblock write failures. If write to + * the physical eraseblock write operation fails, the logical eraseblock is + * re-mapped to another physical eraseblock, the data is recovered, and the + * write finishes. UBI has a pool of reserved physical eraseblocks for this. + * + * If all the data were successfully written, zero is returned. If an error + * occurred and UBI has not been able to recover from it, this function returns + * a negative error code. 
Note, in case of an error, it is possible that + * something was still written to the flash media, but that may be some + * garbage. + * + * If the volume is damaged because of an interrupted update this function just + * returns immediately with %-EBADF code. + */ +int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, +		  int offset, int len, int dtype) +{ +	struct ubi_volume *vol = desc->vol; +	struct ubi_device *ubi = vol->ubi; +	int vol_id = vol->vol_id; + +	dbg_msg("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset); + +	if (vol_id < 0 || vol_id >= ubi->vtbl_slots) +		return -EINVAL; + +	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) +		return -EROFS; + +	if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 || +	    offset + len > vol->usable_leb_size || +	    offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1)) +		return -EINVAL; + +	if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && +	    dtype != UBI_UNKNOWN) +		return -EINVAL; + +	if (vol->upd_marker) +		return -EBADF; + +	if (len == 0) +		return 0; + +	return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype); +} +EXPORT_SYMBOL_GPL(ubi_leb_write); + +/* + * ubi_leb_change - change logical eraseblock atomically. + * @desc: volume descriptor + * @lnum: logical eraseblock number to change + * @buf: data to write + * @len: how many bytes to write + * @dtype: expected data type + * + * This function changes the contents of a logical eraseblock atomically. @buf + * has to contain new logical eraseblock data, and @len - the length of the + * data, which has to be aligned. The length may be shorter then the logical + * eraseblock size, ant the logical eraseblock may be appended to more times + * later on. This function guarantees that in case of an unclean reboot the old + * contents is preserved. Returns zero in case of success and a negative error + * code in case of failure. 
+ */ +int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, +		   int len, int dtype) +{ +	struct ubi_volume *vol = desc->vol; +	struct ubi_device *ubi = vol->ubi; +	int vol_id = vol->vol_id; + +	dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum); + +	if (vol_id < 0 || vol_id >= ubi->vtbl_slots) +		return -EINVAL; + +	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) +		return -EROFS; + +	if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 || +	    len > vol->usable_leb_size || len & (ubi->min_io_size - 1)) +		return -EINVAL; + +	if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && +	    dtype != UBI_UNKNOWN) +		return -EINVAL; + +	if (vol->upd_marker) +		return -EBADF; + +	if (len == 0) +		return 0; + +	return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype); +} +EXPORT_SYMBOL_GPL(ubi_leb_change); + +/** + * ubi_leb_erase - erase logical eraseblock. + * @desc: volume descriptor + * @lnum: logical eraseblock number + * + * This function un-maps logical eraseblock @lnum and synchronously erases the + * correspondent physical eraseblock. Returns zero in case of success and a + * negative error code in case of failure. + * + * If the volume is damaged because of an interrupted update this function just + * returns immediately with %-EBADF code. + */ +int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum) +{ +	struct ubi_volume *vol = desc->vol; +	struct ubi_device *ubi = vol->ubi; +	int err; + +	dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); + +	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) +		return -EROFS; + +	if (lnum < 0 || lnum >= vol->reserved_pebs) +		return -EINVAL; + +	if (vol->upd_marker) +		return -EBADF; + +	err = ubi_eba_unmap_leb(ubi, vol, lnum); +	if (err) +		return err; + +	return ubi_wl_flush(ubi); +} +EXPORT_SYMBOL_GPL(ubi_leb_erase); + +/** + * ubi_leb_unmap - un-map logical eraseblock. 
+ * @desc: volume descriptor + * @lnum: logical eraseblock number + * + * This function un-maps logical eraseblock @lnum and schedules the + * corresponding physical eraseblock for erasure, so that it will eventually be + * physically erased in background. This operation is much faster then the + * erase operation. + * + * Unlike erase, the un-map operation does not guarantee that the logical + * eraseblock will contain all 0xFF bytes when UBI is initialized again. For + * example, if several logical eraseblocks are un-mapped, and an unclean reboot + * happens after this, the logical eraseblocks will not necessarily be + * un-mapped again when this MTD device is attached. They may actually be + * mapped to the same physical eraseblocks again. So, this function has to be + * used with care. + * + * In other words, when un-mapping a logical eraseblock, UBI does not store + * any information about this on the flash media, it just marks the logical + * eraseblock as "un-mapped" in RAM. If UBI is detached before the physical + * eraseblock is physically erased, it will be mapped again to the same logical + * eraseblock when the MTD device is attached again. + * + * The main and obvious use-case of this function is when the contents of a + * logical eraseblock has to be re-written. Then it is much more efficient to + * first un-map it, then write new data, rather then first erase it, then write + * new data. Note, once new data has been written to the logical eraseblock, + * UBI guarantees that the old contents has gone forever. In other words, if an + * unclean reboot happens after the logical eraseblock has been un-mapped and + * then written to, it will contain the last written data. + * + * This function returns zero in case of success and a negative error code in + * case of failure. If the volume is damaged because of an interrupted update + * this function just returns immediately with %-EBADF code. 
+ */ +int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum) +{ +	struct ubi_volume *vol = desc->vol; +	struct ubi_device *ubi = vol->ubi; + +	dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum); + +	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) +		return -EROFS; + +	if (lnum < 0 || lnum >= vol->reserved_pebs) +		return -EINVAL; + +	if (vol->upd_marker) +		return -EBADF; + +	return ubi_eba_unmap_leb(ubi, vol, lnum); +} +EXPORT_SYMBOL_GPL(ubi_leb_unmap); + +/** + * ubi_leb_map - map logical erasblock to a physical eraseblock. + * @desc: volume descriptor + * @lnum: logical eraseblock number + * @dtype: expected data type + * + * This function maps an un-mapped logical eraseblock @lnum to a physical + * eraseblock. This means, that after a successfull invocation of this + * function the logical eraseblock @lnum will be empty (contain only %0xFF + * bytes) and be mapped to a physical eraseblock, even if an unclean reboot + * happens. + * + * This function returns zero in case of success, %-EBADF if the volume is + * damaged because of an interrupted update, %-EBADMSG if the logical + * eraseblock is already mapped, and other negative error codes in case of + * other failures. + */ +int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype) +{ +	struct ubi_volume *vol = desc->vol; +	struct ubi_device *ubi = vol->ubi; + +	dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum); + +	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) +		return -EROFS; + +	if (lnum < 0 || lnum >= vol->reserved_pebs) +		return -EINVAL; + +	if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && +	    dtype != UBI_UNKNOWN) +		return -EINVAL; + +	if (vol->upd_marker) +		return -EBADF; + +	if (vol->eba_tbl[lnum] >= 0) +		return -EBADMSG; + +	return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype); +} +EXPORT_SYMBOL_GPL(ubi_leb_map); + +/** + * ubi_is_mapped - check if logical eraseblock is mapped. 
+ * @desc: volume descriptor + * @lnum: logical eraseblock number + * + * This function checks if logical eraseblock @lnum is mapped to a physical + * eraseblock. If a logical eraseblock is un-mapped, this does not necessarily + * mean it will still be un-mapped after the UBI device is re-attached. The + * logical eraseblock may become mapped to the physical eraseblock it was last + * mapped to. + * + * This function returns %1 if the LEB is mapped, %0 if not, and a negative + * error code in case of failure. If the volume is damaged because of an + * interrupted update this function just returns immediately with %-EBADF error + * code. + */ +int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum) +{ +	struct ubi_volume *vol = desc->vol; + +	dbg_msg("test LEB %d:%d", vol->vol_id, lnum); + +	if (lnum < 0 || lnum >= vol->reserved_pebs) +		return -EINVAL; + +	if (vol->upd_marker) +		return -EBADF; + +	return vol->eba_tbl[lnum] >= 0; +} +EXPORT_SYMBOL_GPL(ubi_is_mapped); diff --git a/roms/u-boot/drivers/mtd/ubi/misc.c b/roms/u-boot/drivers/mtd/ubi/misc.c new file mode 100644 index 00000000..5ff55b4f --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/misc.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* Here we keep miscellaneous functions which are used all over the UBI code */ + +#include <ubi_uboot.h> +#include "ubi.h" + +/** + * calc_data_len - calculate how much real data is stored in a buffer. + * @ubi: UBI device description object + * @buf: a buffer with the contents of the physical eraseblock + * @length: the buffer length + * + * This function calculates how much "real data" is stored in @buf and returnes + * the length. Continuous 0xFF bytes at the end of the buffer are not + * considered as "real data". 
+ */ +int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, +		      int length) +{ +	int i; + +	ubi_assert(!(length & (ubi->min_io_size - 1))); + +	for (i = length - 1; i >= 0; i--) +		if (((const uint8_t *)buf)[i] != 0xFF) +			break; + +	/* The resulting length must be aligned to the minimum flash I/O size */ +	length = ALIGN(i + 1, ubi->min_io_size); +	return length; +} + +/** + * ubi_check_volume - check the contents of a static volume. + * @ubi: UBI device description object + * @vol_id: ID of the volume to check + * + * This function checks if static volume @vol_id is corrupted by fully reading + * it and checking data CRC. This function returns %0 if the volume is not + * corrupted, %1 if it is corrupted and a negative error code in case of + * failure. Dynamic volumes are not checked and zero is returned immediately. + */ +int ubi_check_volume(struct ubi_device *ubi, int vol_id) +{ +	void *buf; +	int err = 0, i; +	struct ubi_volume *vol = ubi->volumes[vol_id]; + +	if (vol->vol_type != UBI_STATIC_VOLUME) +		return 0; + +	buf = vmalloc(vol->usable_leb_size); +	if (!buf) +		return -ENOMEM; + +	for (i = 0; i < vol->used_ebs; i++) { +		int size; + +		if (i == vol->used_ebs - 1) +			size = vol->last_eb_bytes; +		else +			size = vol->usable_leb_size; + +		err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1); +		if (err) { +			if (mtd_is_eccerr(err)) +				err = 1; +			break; +		} +	} + +	vfree(buf); +	return err; +} + +/** + * ubi_calculate_rsvd_pool - calculate how many PEBs must be reserved for bad + * eraseblock handling. 
+ * @ubi: UBI device description object + */ +void ubi_calculate_reserved(struct ubi_device *ubi) +{ +	ubi->beb_rsvd_level = ubi->good_peb_count/100; +	ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE; +	if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS) +		ubi->beb_rsvd_level = MIN_RESEVED_PEBS; +} diff --git a/roms/u-boot/drivers/mtd/ubi/scan.c b/roms/u-boot/drivers/mtd/ubi/scan.c new file mode 100644 index 00000000..a6d0fbcb --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/scan.c @@ -0,0 +1,1348 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * UBI scanning unit. + * + * This unit is responsible for scanning the flash media, checking UBI + * headers and providing complete information about the UBI flash image. + * + * The scanning information is represented by a &struct ubi_scan_info' object. + * Information about found volumes is represented by &struct ubi_scan_volume + * objects which are kept in volume RB-tree with root at the @volumes field. + * The RB-tree is indexed by the volume ID. + * + * Found logical eraseblocks are represented by &struct ubi_scan_leb objects. + * These objects are kept in per-volume RB-trees with the root at the + * corresponding &struct ubi_scan_volume object. To put it differently, we keep + * an RB-tree of per-volume objects and each of these objects is the root of + * RB-tree of per-eraseblock objects. + * + * Corrupted physical eraseblocks are put to the @corr list, free physical + * eraseblocks are put to the @free list and the physical eraseblock to be + * erased are put to the @erase list. 
+ */ + +#ifdef UBI_LINUX +#include <linux/err.h> +#include <linux/crc32.h> +#include <asm/div64.h> +#endif + +#include <ubi_uboot.h> +#include "ubi.h" + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si); +#else +#define paranoid_check_si(ubi, si) 0 +#endif + +/* Temporary variables used during scanning */ +static struct ubi_ec_hdr *ech; +static struct ubi_vid_hdr *vidh; + +/** + * add_to_list - add physical eraseblock to a list. + * @si: scanning information + * @pnum: physical eraseblock number to add + * @ec: erase counter of the physical eraseblock + * @list: the list to add to + * + * This function adds physical eraseblock @pnum to free, erase, corrupted or + * alien lists. Returns zero in case of success and a negative error code in + * case of failure. + */ +static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, +		       struct list_head *list) +{ +	struct ubi_scan_leb *seb; + +	if (list == &si->free) +		dbg_bld("add to free: PEB %d, EC %d", pnum, ec); +	else if (list == &si->erase) +		dbg_bld("add to erase: PEB %d, EC %d", pnum, ec); +	else if (list == &si->corr) +		dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); +	else if (list == &si->alien) +		dbg_bld("add to alien: PEB %d, EC %d", pnum, ec); +	else +		BUG(); + +	seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL); +	if (!seb) +		return -ENOMEM; + +	seb->pnum = pnum; +	seb->ec = ec; +	list_add_tail(&seb->u.list, list); +	return 0; +} + +/** + * validate_vid_hdr - check that volume identifier header is correct and + * consistent. + * @vid_hdr: the volume identifier header to check + * @sv: information about the volume this logical eraseblock belongs to + * @pnum: physical eraseblock number the VID header came from + * + * This function checks that data stored in @vid_hdr is consistent. Returns + * non-zero if an inconsistency was found and zero if not. 
+ * + * Note, UBI does sanity check of everything it reads from the flash media. + * Most of the checks are done in the I/O unit. Here we check that the + * information in the VID header is consistent to the information in other VID + * headers of the same volume. + */ +static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr, +			    const struct ubi_scan_volume *sv, int pnum) +{ +	int vol_type = vid_hdr->vol_type; +	int vol_id = be32_to_cpu(vid_hdr->vol_id); +	int used_ebs = be32_to_cpu(vid_hdr->used_ebs); +	int data_pad = be32_to_cpu(vid_hdr->data_pad); + +	if (sv->leb_count != 0) { +		int sv_vol_type; + +		/* +		 * This is not the first logical eraseblock belonging to this +		 * volume. Ensure that the data in its VID header is consistent +		 * to the data in previous logical eraseblock headers. +		 */ + +		if (vol_id != sv->vol_id) { +			dbg_err("inconsistent vol_id"); +			goto bad; +		} + +		if (sv->vol_type == UBI_STATIC_VOLUME) +			sv_vol_type = UBI_VID_STATIC; +		else +			sv_vol_type = UBI_VID_DYNAMIC; + +		if (vol_type != sv_vol_type) { +			dbg_err("inconsistent vol_type"); +			goto bad; +		} + +		if (used_ebs != sv->used_ebs) { +			dbg_err("inconsistent used_ebs"); +			goto bad; +		} + +		if (data_pad != sv->data_pad) { +			dbg_err("inconsistent data_pad"); +			goto bad; +		} +	} + +	return 0; + +bad: +	ubi_err("inconsistent VID header at PEB %d", pnum); +	ubi_dbg_dump_vid_hdr(vid_hdr); +	ubi_dbg_dump_sv(sv); +	return -EINVAL; +} + +/** + * add_volume - add volume to the scanning information. + * @si: scanning information + * @vol_id: ID of the volume to add + * @pnum: physical eraseblock number + * @vid_hdr: volume identifier header + * + * If the volume corresponding to the @vid_hdr logical eraseblock is already + * present in the scanning information, this function does nothing. Otherwise + * it adds corresponding volume to the scanning information. 
Returns a pointer + * to the scanning volume object in case of success and a negative error code + * in case of failure. + */ +static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id, +					  int pnum, +					  const struct ubi_vid_hdr *vid_hdr) +{ +	struct ubi_scan_volume *sv; +	struct rb_node **p = &si->volumes.rb_node, *parent = NULL; + +	ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id)); + +	/* Walk the volume RB-tree to look if this volume is already present */ +	while (*p) { +		parent = *p; +		sv = rb_entry(parent, struct ubi_scan_volume, rb); + +		if (vol_id == sv->vol_id) +			return sv; + +		if (vol_id > sv->vol_id) +			p = &(*p)->rb_left; +		else +			p = &(*p)->rb_right; +	} + +	/* The volume is absent - add it */ +	sv = kmalloc(sizeof(struct ubi_scan_volume), GFP_KERNEL); +	if (!sv) +		return ERR_PTR(-ENOMEM); + +	sv->highest_lnum = sv->leb_count = 0; +	sv->vol_id = vol_id; +	sv->root = RB_ROOT; +	sv->used_ebs = be32_to_cpu(vid_hdr->used_ebs); +	sv->data_pad = be32_to_cpu(vid_hdr->data_pad); +	sv->compat = vid_hdr->compat; +	sv->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME +							    : UBI_STATIC_VOLUME; +	if (vol_id > si->highest_vol_id) +		si->highest_vol_id = vol_id; + +	rb_link_node(&sv->rb, parent, p); +	rb_insert_color(&sv->rb, &si->volumes); +	si->vols_found += 1; +	dbg_bld("added volume %d", vol_id); +	return sv; +} + +/** + * compare_lebs - find out which logical eraseblock is newer. + * @ubi: UBI device description object + * @seb: first logical eraseblock to compare + * @pnum: physical eraseblock number of the second logical eraseblock to + * compare + * @vid_hdr: volume identifier header of the second logical eraseblock + * + * This function compares 2 copies of a LEB and informs which one is newer. In + * case of success this function returns a positive value, in case of failure, a + * negative error code is returned. 
The success return codes use the following + * bits: + *     o bit 0 is cleared: the first PEB (described by @seb) is newer then the + *       second PEB (described by @pnum and @vid_hdr); + *     o bit 0 is set: the second PEB is newer; + *     o bit 1 is cleared: no bit-flips were detected in the newer LEB; + *     o bit 1 is set: bit-flips were detected in the newer LEB; + *     o bit 2 is cleared: the older LEB is not corrupted; + *     o bit 2 is set: the older LEB is corrupted. + */ +static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb, +			int pnum, const struct ubi_vid_hdr *vid_hdr) +{ +	void *buf; +	int len, err, second_is_newer, bitflips = 0, corrupted = 0; +	uint32_t data_crc, crc; +	struct ubi_vid_hdr *vh = NULL; +	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); + +	if (seb->sqnum == 0 && sqnum2 == 0) { +		long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver); + +		/* +		 * UBI constantly increases the logical eraseblock version +		 * number and it can overflow. Thus, we have to bear in mind +		 * that versions that are close to %0xFFFFFFFF are less then +		 * versions that are close to %0. +		 * +		 * The UBI WL unit guarantees that the number of pending tasks +		 * is not greater then %0x7FFFFFFF. So, if the difference +		 * between any two versions is greater or equivalent to +		 * %0x7FFFFFFF, there was an overflow and the logical +		 * eraseblock with lower version is actually newer then the one +		 * with higher version. +		 * +		 * FIXME: but this is anyway obsolete and will be removed at +		 * some point. 
+		 */ +		dbg_bld("using old crappy leb_ver stuff"); + +		if (v1 == v2) { +			ubi_err("PEB %d and PEB %d have the same version %lld", +				seb->pnum, pnum, v1); +			return -EINVAL; +		} + +		abs = v1 - v2; +		if (abs < 0) +			abs = -abs; + +		if (abs < 0x7FFFFFFF) +			/* Non-overflow situation */ +			second_is_newer = (v2 > v1); +		else +			second_is_newer = (v2 < v1); +	} else +		/* Obviously the LEB with lower sequence counter is older */ +		second_is_newer = sqnum2 > seb->sqnum; + +	/* +	 * Now we know which copy is newer. If the copy flag of the PEB with +	 * newer version is not set, then we just return, otherwise we have to +	 * check data CRC. For the second PEB we already have the VID header, +	 * for the first one - we'll need to re-read it from flash. +	 * +	 * FIXME: this may be optimized so that we wouldn't read twice. +	 */ + +	if (second_is_newer) { +		if (!vid_hdr->copy_flag) { +			/* It is not a copy, so it is newer */ +			dbg_bld("second PEB %d is newer, copy_flag is unset", +				pnum); +			return 1; +		} +	} else { +		pnum = seb->pnum; + +		vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); +		if (!vh) +			return -ENOMEM; + +		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); +		if (err) { +			if (err == UBI_IO_BITFLIPS) +				bitflips = 1; +			else { +				dbg_err("VID of PEB %d header is bad, but it " +					"was OK earlier", pnum); +				if (err > 0) +					err = -EIO; + +				goto out_free_vidh; +			} +		} + +		if (!vh->copy_flag) { +			/* It is not a copy, so it is newer */ +			dbg_bld("first PEB %d is newer, copy_flag is unset", +				pnum); +			err = bitflips << 1; +			goto out_free_vidh; +		} + +		vid_hdr = vh; +	} + +	/* Read the data of the copy and check the CRC */ + +	len = be32_to_cpu(vid_hdr->data_size); +	buf = vmalloc(len); +	if (!buf) { +		err = -ENOMEM; +		goto out_free_vidh; +	} + +	err = ubi_io_read_data(ubi, buf, pnum, 0, len); +	if (err && err != UBI_IO_BITFLIPS) +		goto out_free_buf; + +	data_crc = be32_to_cpu(vid_hdr->data_crc); +	crc = 
crc32(UBI_CRC32_INIT, buf, len); +	if (crc != data_crc) { +		dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x", +			pnum, crc, data_crc); +		corrupted = 1; +		bitflips = 0; +		second_is_newer = !second_is_newer; +	} else { +		dbg_bld("PEB %d CRC is OK", pnum); +		bitflips = !!err; +	} + +	vfree(buf); +	ubi_free_vid_hdr(ubi, vh); + +	if (second_is_newer) +		dbg_bld("second PEB %d is newer, copy_flag is set", pnum); +	else +		dbg_bld("first PEB %d is newer, copy_flag is set", pnum); + +	return second_is_newer | (bitflips << 1) | (corrupted << 2); + +out_free_buf: +	vfree(buf); +out_free_vidh: +	ubi_free_vid_hdr(ubi, vh); +	return err; +} + +/** + * ubi_scan_add_used - add information about a physical eraseblock to the + * scanning information. + * @ubi: UBI device description object + * @si: scanning information + * @pnum: the physical eraseblock number + * @ec: erase counter + * @vid_hdr: the volume identifier header + * @bitflips: if bit-flips were detected when this physical eraseblock was read + * + * This function adds information about a used physical eraseblock to the + * 'used' tree of the corresponding volume. The function is rather complex + * because it has to handle cases when this is not the first physical + * eraseblock belonging to the same logical eraseblock, and the newer one has + * to be picked, while the older one has to be dropped. This function returns + * zero in case of success and a negative error code in case of failure. 
+ */ +int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, +		      int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, +		      int bitflips) +{ +	int err, vol_id, lnum; +	uint32_t leb_ver; +	unsigned long long sqnum; +	struct ubi_scan_volume *sv; +	struct ubi_scan_leb *seb; +	struct rb_node **p, *parent = NULL; + +	vol_id = be32_to_cpu(vid_hdr->vol_id); +	lnum = be32_to_cpu(vid_hdr->lnum); +	sqnum = be64_to_cpu(vid_hdr->sqnum); +	leb_ver = be32_to_cpu(vid_hdr->leb_ver); + +	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d", +		pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips); + +	sv = add_volume(si, vol_id, pnum, vid_hdr); +	if (IS_ERR(sv) < 0) +		return PTR_ERR(sv); + +	if (si->max_sqnum < sqnum) +		si->max_sqnum = sqnum; + +	/* +	 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look +	 * if this is the first instance of this logical eraseblock or not. +	 */ +	p = &sv->root.rb_node; +	while (*p) { +		int cmp_res; + +		parent = *p; +		seb = rb_entry(parent, struct ubi_scan_leb, u.rb); +		if (lnum != seb->lnum) { +			if (lnum < seb->lnum) +				p = &(*p)->rb_left; +			else +				p = &(*p)->rb_right; +			continue; +		} + +		/* +		 * There is already a physical eraseblock describing the same +		 * logical eraseblock present. +		 */ + +		dbg_bld("this LEB already exists: PEB %d, sqnum %llu, " +			"LEB ver %u, EC %d", seb->pnum, seb->sqnum, +			seb->leb_ver, seb->ec); + +		/* +		 * Make sure that the logical eraseblocks have different +		 * versions. Otherwise the image is bad. +		 */ +		if (seb->leb_ver == leb_ver && leb_ver != 0) { +			ubi_err("two LEBs with same version %u", leb_ver); +			ubi_dbg_dump_seb(seb, 0); +			ubi_dbg_dump_vid_hdr(vid_hdr); +			return -EINVAL; +		} + +		/* +		 * Make sure that the logical eraseblocks have different +		 * sequence numbers. Otherwise the image is bad. +		 * +		 * FIXME: remove 'sqnum != 0' check when leb_ver is removed. 
+		 */ +		if (seb->sqnum == sqnum && sqnum != 0) { +			ubi_err("two LEBs with same sequence number %llu", +				sqnum); +			ubi_dbg_dump_seb(seb, 0); +			ubi_dbg_dump_vid_hdr(vid_hdr); +			return -EINVAL; +		} + +		/* +		 * Now we have to drop the older one and preserve the newer +		 * one. +		 */ +		cmp_res = compare_lebs(ubi, seb, pnum, vid_hdr); +		if (cmp_res < 0) +			return cmp_res; + +		if (cmp_res & 1) { +			/* +			 * This logical eraseblock is newer then the one +			 * found earlier. +			 */ +			err = validate_vid_hdr(vid_hdr, sv, pnum); +			if (err) +				return err; + +			if (cmp_res & 4) +				err = add_to_list(si, seb->pnum, seb->ec, +						  &si->corr); +			else +				err = add_to_list(si, seb->pnum, seb->ec, +						  &si->erase); +			if (err) +				return err; + +			seb->ec = ec; +			seb->pnum = pnum; +			seb->scrub = ((cmp_res & 2) || bitflips); +			seb->sqnum = sqnum; +			seb->leb_ver = leb_ver; + +			if (sv->highest_lnum == lnum) +				sv->last_data_size = +					be32_to_cpu(vid_hdr->data_size); + +			return 0; +		} else { +			/* +			 * This logical eraseblock is older then the one found +			 * previously. +			 */ +			if (cmp_res & 4) +				return add_to_list(si, pnum, ec, &si->corr); +			else +				return add_to_list(si, pnum, ec, &si->erase); +		} +	} + +	/* +	 * We've met this logical eraseblock for the first time, add it to the +	 * scanning information. 
+	 */ + +	err = validate_vid_hdr(vid_hdr, sv, pnum); +	if (err) +		return err; + +	seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL); +	if (!seb) +		return -ENOMEM; + +	seb->ec = ec; +	seb->pnum = pnum; +	seb->lnum = lnum; +	seb->sqnum = sqnum; +	seb->scrub = bitflips; +	seb->leb_ver = leb_ver; + +	if (sv->highest_lnum <= lnum) { +		sv->highest_lnum = lnum; +		sv->last_data_size = be32_to_cpu(vid_hdr->data_size); +	} + +	sv->leb_count += 1; +	rb_link_node(&seb->u.rb, parent, p); +	rb_insert_color(&seb->u.rb, &sv->root); +	return 0; +} + +/** + * ubi_scan_find_sv - find information about a particular volume in the + * scanning information. + * @si: scanning information + * @vol_id: the requested volume ID + * + * This function returns a pointer to the volume description or %NULL if there + * are no data about this volume in the scanning information. + */ +struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si, +					 int vol_id) +{ +	struct ubi_scan_volume *sv; +	struct rb_node *p = si->volumes.rb_node; + +	while (p) { +		sv = rb_entry(p, struct ubi_scan_volume, rb); + +		if (vol_id == sv->vol_id) +			return sv; + +		if (vol_id > sv->vol_id) +			p = p->rb_left; +		else +			p = p->rb_right; +	} + +	return NULL; +} + +/** + * ubi_scan_find_seb - find information about a particular logical + * eraseblock in the volume scanning information. + * @sv: a pointer to the volume scanning information + * @lnum: the requested logical eraseblock + * + * This function returns a pointer to the scanning logical eraseblock or %NULL + * if there are no data about it in the scanning volume information. 
+ */ +struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv, +				       int lnum) +{ +	struct ubi_scan_leb *seb; +	struct rb_node *p = sv->root.rb_node; + +	while (p) { +		seb = rb_entry(p, struct ubi_scan_leb, u.rb); + +		if (lnum == seb->lnum) +			return seb; + +		if (lnum > seb->lnum) +			p = p->rb_left; +		else +			p = p->rb_right; +	} + +	return NULL; +} + +/** + * ubi_scan_rm_volume - delete scanning information about a volume. + * @si: scanning information + * @sv: the volume scanning information to delete + */ +void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv) +{ +	struct rb_node *rb; +	struct ubi_scan_leb *seb; + +	dbg_bld("remove scanning information about volume %d", sv->vol_id); + +	while ((rb = rb_first(&sv->root))) { +		seb = rb_entry(rb, struct ubi_scan_leb, u.rb); +		rb_erase(&seb->u.rb, &sv->root); +		list_add_tail(&seb->u.list, &si->erase); +	} + +	rb_erase(&sv->rb, &si->volumes); +	kfree(sv); +	si->vols_found -= 1; +} + +/** + * ubi_scan_erase_peb - erase a physical eraseblock. + * @ubi: UBI device description object + * @si: scanning information + * @pnum: physical eraseblock number to erase; + * @ec: erase counter value to write (%UBI_SCAN_UNKNOWN_EC if it is unknown) + * + * This function erases physical eraseblock 'pnum', and writes the erase + * counter header to it. This function should only be used on UBI device + * initialization stages, when the EBA unit had not been yet initialized. This + * function returns zero in case of success and a negative error code in case + * of failure. + */ +int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si, +		       int pnum, int ec) +{ +	int err; +	struct ubi_ec_hdr *ec_hdr; + +	if ((long long)ec >= UBI_MAX_ERASECOUNTER) { +		/* +		 * Erase counter overflow. Upgrade UBI and use 64-bit +		 * erase counters internally. 
+		 */ +		ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec); +		return -EINVAL; +	} + +	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); +	if (!ec_hdr) +		return -ENOMEM; + +	ec_hdr->ec = cpu_to_be64(ec); + +	err = ubi_io_sync_erase(ubi, pnum, 0); +	if (err < 0) +		goto out_free; + +	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr); + +out_free: +	kfree(ec_hdr); +	return err; +} + +/** + * ubi_scan_get_free_peb - get a free physical eraseblock. + * @ubi: UBI device description object + * @si: scanning information + * + * This function returns a free physical eraseblock. It is supposed to be + * called on the UBI initialization stages when the wear-leveling unit is not + * initialized yet. This function picks a physical eraseblocks from one of the + * lists, writes the EC header if it is needed, and removes it from the list. + * + * This function returns scanning physical eraseblock information in case of + * success and an error code in case of failure. + */ +struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi, +					   struct ubi_scan_info *si) +{ +	int err = 0, i; +	struct ubi_scan_leb *seb; + +	if (!list_empty(&si->free)) { +		seb = list_entry(si->free.next, struct ubi_scan_leb, u.list); +		list_del(&seb->u.list); +		dbg_bld("return free PEB %d, EC %d", seb->pnum, seb->ec); +		return seb; +	} + +	for (i = 0; i < 2; i++) { +		struct list_head *head; +		struct ubi_scan_leb *tmp_seb; + +		if (i == 0) +			head = &si->erase; +		else +			head = &si->corr; + +		/* +		 * We try to erase the first physical eraseblock from the @head +		 * list and pick it if we succeed, or try to erase the +		 * next one if not. And so forth. We don't want to take care +		 * about bad eraseblocks here - they'll be handled later. 
+		 */ +		list_for_each_entry_safe(seb, tmp_seb, head, u.list) { +			if (seb->ec == UBI_SCAN_UNKNOWN_EC) +				seb->ec = si->mean_ec; + +			err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1); +			if (err) +				continue; + +			seb->ec += 1; +			list_del(&seb->u.list); +			dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec); +			return seb; +		} +	} + +	ubi_err("no eraseblocks found"); +	return ERR_PTR(-ENOSPC); +} + +/** + * process_eb - read UBI headers, check them and add corresponding data + * to the scanning information. + * @ubi: UBI device description object + * @si: scanning information + * @pnum: the physical eraseblock number + * + * This function returns a zero if the physical eraseblock was successfully + * handled and a negative error code in case of failure. + */ +static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) +{ +	long long uninitialized_var(ec); +	int err, bitflips = 0, vol_id, ec_corr = 0; + +	dbg_bld("scan PEB %d", pnum); + +	/* Skip bad physical eraseblocks */ +	err = ubi_io_is_bad(ubi, pnum); +	if (err < 0) +		return err; +	else if (err) { +		/* +		 * FIXME: this is actually duty of the I/O unit to initialize +		 * this, but MTD does not provide enough information. +		 */ +		si->bad_peb_count += 1; +		return 0; +	} + +	err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); +	if (err < 0) +		return err; +	else if (err == UBI_IO_BITFLIPS) +		bitflips = 1; +	else if (err == UBI_IO_PEB_EMPTY) +		return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, &si->erase); +	else if (err == UBI_IO_BAD_EC_HDR) { +		/* +		 * We have to also look at the VID header, possibly it is not +		 * corrupted. Set %bitflips flag in order to make this PEB be +		 * moved and EC be re-created. 
+		 */ +		ec_corr = 1; +		ec = UBI_SCAN_UNKNOWN_EC; +		bitflips = 1; +	} + +	si->is_empty = 0; + +	if (!ec_corr) { +		/* Make sure UBI version is OK */ +		if (ech->version != UBI_VERSION) { +			ubi_err("this UBI version is %d, image version is %d", +				UBI_VERSION, (int)ech->version); +			return -EINVAL; +		} + +		ec = be64_to_cpu(ech->ec); +		if (ec > UBI_MAX_ERASECOUNTER) { +			/* +			 * Erase counter overflow. The EC headers have 64 bits +			 * reserved, but we anyway make use of only 31 bit +			 * values, as this seems to be enough for any existing +			 * flash. Upgrade UBI and use 64-bit erase counters +			 * internally. +			 */ +			ubi_err("erase counter overflow, max is %d", +				UBI_MAX_ERASECOUNTER); +			ubi_dbg_dump_ec_hdr(ech); +			return -EINVAL; +		} +	} + +	/* OK, we've done with the EC header, let's look at the VID header */ + +	err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); +	if (err < 0) +		return err; +	else if (err == UBI_IO_BITFLIPS) +		bitflips = 1; +	else if (err == UBI_IO_BAD_VID_HDR || +		 (err == UBI_IO_PEB_FREE && ec_corr)) { +		/* VID header is corrupted */ +		err = add_to_list(si, pnum, ec, &si->corr); +		if (err) +			return err; +		goto adjust_mean_ec; +	} else if (err == UBI_IO_PEB_FREE) { +		/* No VID header - the physical eraseblock is free */ +		err = add_to_list(si, pnum, ec, &si->free); +		if (err) +			return err; +		goto adjust_mean_ec; +	} + +	vol_id = be32_to_cpu(vidh->vol_id); +	if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) { +		int lnum = be32_to_cpu(vidh->lnum); + +		/* Unsupported internal volume */ +		switch (vidh->compat) { +		case UBI_COMPAT_DELETE: +			ubi_msg("\"delete\" compatible internal volume %d:%d" +				" found, remove it", vol_id, lnum); +			err = add_to_list(si, pnum, ec, &si->corr); +			if (err) +				return err; +			break; + +		case UBI_COMPAT_RO: +			ubi_msg("read-only compatible internal volume %d:%d" +				" found, switch to read-only mode", +				vol_id, lnum); +			ubi->ro_mode = 1; +			
break; + +		case UBI_COMPAT_PRESERVE: +			ubi_msg("\"preserve\" compatible internal volume %d:%d" +				" found", vol_id, lnum); +			err = add_to_list(si, pnum, ec, &si->alien); +			if (err) +				return err; +			si->alien_peb_count += 1; +			return 0; + +		case UBI_COMPAT_REJECT: +			ubi_err("incompatible internal volume %d:%d found", +				vol_id, lnum); +			return -EINVAL; +		} +	} + +	/* Both UBI headers seem to be fine */ +	err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips); +	if (err) +		return err; + +adjust_mean_ec: +	if (!ec_corr) { +		si->ec_sum += ec; +		si->ec_count += 1; +		if (ec > si->max_ec) +			si->max_ec = ec; +		if (ec < si->min_ec) +			si->min_ec = ec; +	} + +	return 0; +} + +/** + * ubi_scan - scan an MTD device. + * @ubi: UBI device description object + * + * This function does full scanning of an MTD device and returns complete + * information about it. In case of failure, an error code is returned. + */ +struct ubi_scan_info *ubi_scan(struct ubi_device *ubi) +{ +	int err, pnum; +	struct rb_node *rb1, *rb2; +	struct ubi_scan_volume *sv; +	struct ubi_scan_leb *seb; +	struct ubi_scan_info *si; + +	si = kzalloc(sizeof(struct ubi_scan_info), GFP_KERNEL); +	if (!si) +		return ERR_PTR(-ENOMEM); + +	INIT_LIST_HEAD(&si->corr); +	INIT_LIST_HEAD(&si->free); +	INIT_LIST_HEAD(&si->erase); +	INIT_LIST_HEAD(&si->alien); +	si->volumes = RB_ROOT; +	si->is_empty = 1; + +	err = -ENOMEM; +	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); +	if (!ech) +		goto out_si; + +	vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); +	if (!vidh) +		goto out_ech; + +	for (pnum = 0; pnum < ubi->peb_count; pnum++) { +		cond_resched(); + +		dbg_msg("process PEB %d", pnum); +		err = process_eb(ubi, si, pnum); +		if (err < 0) +			goto out_vidh; +	} + +	dbg_msg("scanning is finished"); + +	/* Calculate mean erase counter */ +	if (si->ec_count) { +		do_div(si->ec_sum, si->ec_count); +		si->mean_ec = si->ec_sum; +	} + +	if (si->is_empty) +		ubi_msg("empty MTD device detected"); + +	/* +	 
* In case of unknown erase counter we use the mean erase counter +	 * value. +	 */ +	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { +		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) +			if (seb->ec == UBI_SCAN_UNKNOWN_EC) +				seb->ec = si->mean_ec; +	} + +	list_for_each_entry(seb, &si->free, u.list) { +		if (seb->ec == UBI_SCAN_UNKNOWN_EC) +			seb->ec = si->mean_ec; +	} + +	list_for_each_entry(seb, &si->corr, u.list) +		if (seb->ec == UBI_SCAN_UNKNOWN_EC) +			seb->ec = si->mean_ec; + +	list_for_each_entry(seb, &si->erase, u.list) +		if (seb->ec == UBI_SCAN_UNKNOWN_EC) +			seb->ec = si->mean_ec; + +	err = paranoid_check_si(ubi, si); +	if (err) { +		if (err > 0) +			err = -EINVAL; +		goto out_vidh; +	} + +	ubi_free_vid_hdr(ubi, vidh); +	kfree(ech); + +	return si; + +out_vidh: +	ubi_free_vid_hdr(ubi, vidh); +out_ech: +	kfree(ech); +out_si: +	ubi_scan_destroy_si(si); +	return ERR_PTR(err); +} + +/** + * destroy_sv - free the scanning volume information + * @sv: scanning volume information + * + * This function destroys the volume RB-tree (@sv->root) and the scanning + * volume information. + */ +static void destroy_sv(struct ubi_scan_volume *sv) +{ +	struct ubi_scan_leb *seb; +	struct rb_node *this = sv->root.rb_node; + +	while (this) { +		if (this->rb_left) +			this = this->rb_left; +		else if (this->rb_right) +			this = this->rb_right; +		else { +			seb = rb_entry(this, struct ubi_scan_leb, u.rb); +			this = rb_parent(this); +			if (this) { +				if (this->rb_left == &seb->u.rb) +					this->rb_left = NULL; +				else +					this->rb_right = NULL; +			} + +			kfree(seb); +		} +	} +	kfree(sv); +} + +/** + * ubi_scan_destroy_si - destroy scanning information. 
+ * @si: scanning information + */ +void ubi_scan_destroy_si(struct ubi_scan_info *si) +{ +	struct ubi_scan_leb *seb, *seb_tmp; +	struct ubi_scan_volume *sv; +	struct rb_node *rb; + +	list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) { +		list_del(&seb->u.list); +		kfree(seb); +	} +	list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) { +		list_del(&seb->u.list); +		kfree(seb); +	} +	list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) { +		list_del(&seb->u.list); +		kfree(seb); +	} +	list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) { +		list_del(&seb->u.list); +		kfree(seb); +	} + +	/* Destroy the volume RB-tree */ +	rb = si->volumes.rb_node; +	while (rb) { +		if (rb->rb_left) +			rb = rb->rb_left; +		else if (rb->rb_right) +			rb = rb->rb_right; +		else { +			sv = rb_entry(rb, struct ubi_scan_volume, rb); + +			rb = rb_parent(rb); +			if (rb) { +				if (rb->rb_left == &sv->rb) +					rb->rb_left = NULL; +				else +					rb->rb_right = NULL; +			} + +			destroy_sv(sv); +		} +	} + +	kfree(si); +} + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID + +/** + * paranoid_check_si - check if the scanning information is correct and + * consistent. + * @ubi: UBI device description object + * @si: scanning information + * + * This function returns zero if the scanning information is all right, %1 if + * not and a negative error code if an error occurred. + */ +static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si) +{ +	int pnum, err, vols_found = 0; +	struct rb_node *rb1, *rb2; +	struct ubi_scan_volume *sv; +	struct ubi_scan_leb *seb, *last_seb; +	uint8_t *buf; + +	/* +	 * At first, check that scanning information is OK. 
+	 */ +	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { +		int leb_count = 0; + +		cond_resched(); + +		vols_found += 1; + +		if (si->is_empty) { +			ubi_err("bad is_empty flag"); +			goto bad_sv; +		} + +		if (sv->vol_id < 0 || sv->highest_lnum < 0 || +		    sv->leb_count < 0 || sv->vol_type < 0 || sv->used_ebs < 0 || +		    sv->data_pad < 0 || sv->last_data_size < 0) { +			ubi_err("negative values"); +			goto bad_sv; +		} + +		if (sv->vol_id >= UBI_MAX_VOLUMES && +		    sv->vol_id < UBI_INTERNAL_VOL_START) { +			ubi_err("bad vol_id"); +			goto bad_sv; +		} + +		if (sv->vol_id > si->highest_vol_id) { +			ubi_err("highest_vol_id is %d, but vol_id %d is there", +				si->highest_vol_id, sv->vol_id); +			goto out; +		} + +		if (sv->vol_type != UBI_DYNAMIC_VOLUME && +		    sv->vol_type != UBI_STATIC_VOLUME) { +			ubi_err("bad vol_type"); +			goto bad_sv; +		} + +		if (sv->data_pad > ubi->leb_size / 2) { +			ubi_err("bad data_pad"); +			goto bad_sv; +		} + +		last_seb = NULL; +		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { +			cond_resched(); + +			last_seb = seb; +			leb_count += 1; + +			if (seb->pnum < 0 || seb->ec < 0) { +				ubi_err("negative values"); +				goto bad_seb; +			} + +			if (seb->ec < si->min_ec) { +				ubi_err("bad si->min_ec (%d), %d found", +					si->min_ec, seb->ec); +				goto bad_seb; +			} + +			if (seb->ec > si->max_ec) { +				ubi_err("bad si->max_ec (%d), %d found", +					si->max_ec, seb->ec); +				goto bad_seb; +			} + +			if (seb->pnum >= ubi->peb_count) { +				ubi_err("too high PEB number %d, total PEBs %d", +					seb->pnum, ubi->peb_count); +				goto bad_seb; +			} + +			if (sv->vol_type == UBI_STATIC_VOLUME) { +				if (seb->lnum >= sv->used_ebs) { +					ubi_err("bad lnum or used_ebs"); +					goto bad_seb; +				} +			} else { +				if (sv->used_ebs != 0) { +					ubi_err("non-zero used_ebs"); +					goto bad_seb; +				} +			} + +			if (seb->lnum > sv->highest_lnum) { +				ubi_err("incorrect highest_lnum or lnum"); +				goto bad_seb; +		
	} +		} + +		if (sv->leb_count != leb_count) { +			ubi_err("bad leb_count, %d objects in the tree", +				leb_count); +			goto bad_sv; +		} + +		if (!last_seb) +			continue; + +		seb = last_seb; + +		if (seb->lnum != sv->highest_lnum) { +			ubi_err("bad highest_lnum"); +			goto bad_seb; +		} +	} + +	if (vols_found != si->vols_found) { +		ubi_err("bad si->vols_found %d, should be %d", +			si->vols_found, vols_found); +		goto out; +	} + +	/* Check that scanning information is correct */ +	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { +		last_seb = NULL; +		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { +			int vol_type; + +			cond_resched(); + +			last_seb = seb; + +			err = ubi_io_read_vid_hdr(ubi, seb->pnum, vidh, 1); +			if (err && err != UBI_IO_BITFLIPS) { +				ubi_err("VID header is not OK (%d)", err); +				if (err > 0) +					err = -EIO; +				return err; +			} + +			vol_type = vidh->vol_type == UBI_VID_DYNAMIC ? +				   UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; +			if (sv->vol_type != vol_type) { +				ubi_err("bad vol_type"); +				goto bad_vid_hdr; +			} + +			if (seb->sqnum != be64_to_cpu(vidh->sqnum)) { +				ubi_err("bad sqnum %llu", seb->sqnum); +				goto bad_vid_hdr; +			} + +			if (sv->vol_id != be32_to_cpu(vidh->vol_id)) { +				ubi_err("bad vol_id %d", sv->vol_id); +				goto bad_vid_hdr; +			} + +			if (sv->compat != vidh->compat) { +				ubi_err("bad compat %d", vidh->compat); +				goto bad_vid_hdr; +			} + +			if (seb->lnum != be32_to_cpu(vidh->lnum)) { +				ubi_err("bad lnum %d", seb->lnum); +				goto bad_vid_hdr; +			} + +			if (sv->used_ebs != be32_to_cpu(vidh->used_ebs)) { +				ubi_err("bad used_ebs %d", sv->used_ebs); +				goto bad_vid_hdr; +			} + +			if (sv->data_pad != be32_to_cpu(vidh->data_pad)) { +				ubi_err("bad data_pad %d", sv->data_pad); +				goto bad_vid_hdr; +			} + +			if (seb->leb_ver != be32_to_cpu(vidh->leb_ver)) { +				ubi_err("bad leb_ver %u", seb->leb_ver); +				goto bad_vid_hdr; +			} +		} + +		if (!last_seb) +			continue; 
+ +		if (sv->highest_lnum != be32_to_cpu(vidh->lnum)) { +			ubi_err("bad highest_lnum %d", sv->highest_lnum); +			goto bad_vid_hdr; +		} + +		if (sv->last_data_size != be32_to_cpu(vidh->data_size)) { +			ubi_err("bad last_data_size %d", sv->last_data_size); +			goto bad_vid_hdr; +		} +	} + +	/* +	 * Make sure that all the physical eraseblocks are in one of the lists +	 * or trees. +	 */ +	buf = kzalloc(ubi->peb_count, GFP_KERNEL); +	if (!buf) +		return -ENOMEM; + +	for (pnum = 0; pnum < ubi->peb_count; pnum++) { +		err = ubi_io_is_bad(ubi, pnum); +		if (err < 0) { +			kfree(buf); +			return err; +		} +		else if (err) +			buf[pnum] = 1; +	} + +	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) +		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) +			buf[seb->pnum] = 1; + +	list_for_each_entry(seb, &si->free, u.list) +		buf[seb->pnum] = 1; + +	list_for_each_entry(seb, &si->corr, u.list) +		buf[seb->pnum] = 1; + +	list_for_each_entry(seb, &si->erase, u.list) +		buf[seb->pnum] = 1; + +	list_for_each_entry(seb, &si->alien, u.list) +		buf[seb->pnum] = 1; + +	err = 0; +	for (pnum = 0; pnum < ubi->peb_count; pnum++) +		if (!buf[pnum]) { +			ubi_err("PEB %d is not referred", pnum); +			err = 1; +		} + +	kfree(buf); +	if (err) +		goto out; +	return 0; + +bad_seb: +	ubi_err("bad scanning information about LEB %d", seb->lnum); +	ubi_dbg_dump_seb(seb, 0); +	ubi_dbg_dump_sv(sv); +	goto out; + +bad_sv: +	ubi_err("bad scanning information about volume %d", sv->vol_id); +	ubi_dbg_dump_sv(sv); +	goto out; + +bad_vid_hdr: +	ubi_err("bad scanning information about volume %d", sv->vol_id); +	ubi_dbg_dump_sv(sv); +	ubi_dbg_dump_vid_hdr(vidh); + +out: +	ubi_dbg_dump_stack(); +	return 1; +} + +#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ diff --git a/roms/u-boot/drivers/mtd/ubi/scan.h b/roms/u-boot/drivers/mtd/ubi/scan.h new file mode 100644 index 00000000..252b1f1e --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/scan.h @@ -0,0 +1,153 @@ +/* + * Copyright (c) International Business Machines 
Corp., 2006 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +#ifndef __UBI_SCAN_H__ +#define __UBI_SCAN_H__ + +/* The erase counter value for this physical eraseblock is unknown */ +#define UBI_SCAN_UNKNOWN_EC (-1) + +/** + * struct ubi_scan_leb - scanning information about a physical eraseblock. + * @ec: erase counter (%UBI_SCAN_UNKNOWN_EC if it is unknown) + * @pnum: physical eraseblock number + * @lnum: logical eraseblock number + * @scrub: if this physical eraseblock needs scrubbing + * @sqnum: sequence number + * @u: unions RB-tree or @list links + * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects + * @u.list: link in one of the eraseblock lists + * @leb_ver: logical eraseblock version (obsolete) + * + * One object of this type is allocated for each physical eraseblock during + * scanning. + */ +struct ubi_scan_leb { +	int ec; +	int pnum; +	int lnum; +	int scrub; +	unsigned long long sqnum; +	union { +		struct rb_node rb; +		struct list_head list; +	} u; +	uint32_t leb_ver; +}; + +/** + * struct ubi_scan_volume - scanning information about a volume. + * @vol_id: volume ID + * @highest_lnum: highest logical eraseblock number in this volume + * @leb_count: number of logical eraseblocks in this volume + * @vol_type: volume type + * @used_ebs: number of used logical eraseblocks in this volume (only for + * static volumes) + * @last_data_size: amount of data in the last logical eraseblock of this + * volume (always equivalent to the usable logical eraseblock size in case of + * dynamic volumes) + * @data_pad: how many bytes at the end of logical eraseblocks of this volume + * are not used (due to volume alignment) + * @compat: compatibility flags of this volume + * @rb: link in the volume RB-tree + * @root: root of the RB-tree containing all the eraseblock belonging to this + * volume (&struct ubi_scan_leb objects) + * + * One object of this type is allocated for each volume during scanning. 
+ */
+struct ubi_scan_volume {
+	int vol_id;
+	int highest_lnum;
+	int leb_count;
+	int vol_type;
+	int used_ebs;
+	int last_data_size;
+	int data_pad;
+	int compat;
+	struct rb_node rb;
+	struct rb_root root;
+};
+
+/**
+ * struct ubi_scan_info - UBI scanning information.
+ * @volumes: root of the volume RB-tree
+ * @corr: list of corrupted physical eraseblocks
+ * @free: list of free physical eraseblocks
+ * @erase: list of physical eraseblocks which have to be erased
+ * @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
+ * those belonging to "preserve"-compatible internal volumes)
+ * @bad_peb_count: count of bad physical eraseblocks
+ * @vols_found: number of volumes found during scanning
+ * @highest_vol_id: highest volume ID
+ * @alien_peb_count: count of physical eraseblocks in the @alien list
+ * @is_empty: flag indicating whether the MTD device is empty or not
+ * @min_ec: lowest erase counter value
+ * @max_ec: highest erase counter value
+ * @max_sqnum: highest sequence number value
+ * @mean_ec: mean erase counter value
+ * @ec_sum: a temporary variable used when calculating @mean_ec
+ * @ec_count: a temporary variable used when calculating @mean_ec
+ *
+ * This data structure contains the result of scanning and may be used by other
+ * UBI units to build final UBI data structures, further error-recovery and so
+ * on.
+ */
+struct ubi_scan_info {
+	struct rb_root volumes;
+	struct list_head corr;
+	struct list_head free;
+	struct list_head erase;
+	struct list_head alien;
+	int bad_peb_count;
+	int vols_found;
+	int highest_vol_id;
+	int alien_peb_count;
+	int is_empty;
+	int min_ec;
+	int max_ec;
+	unsigned long long max_sqnum;
+	int mean_ec;
+	uint64_t ec_sum;
+	int ec_count;
+};
+
+struct ubi_device;
+struct ubi_vid_hdr;
+
+/*
+ * ubi_scan_move_to_list - move a physical eraseblock from the volume tree to a
+ * list.
+ *
+ * @sv: volume scanning information
+ * @seb: scanning eraseblock information
+ * @list: the list to move to
+ */
+static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
+					 struct ubi_scan_leb *seb,
+					 struct list_head *list)
+{
+		rb_erase(&seb->u.rb, &sv->root);
+		list_add_tail(&seb->u.list, list);
+}
+
+int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
+		      int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
+		      int bitflips);
+struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
+					 int vol_id);
+struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
+				       int lnum);
+void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv);
+struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
+					   struct ubi_scan_info *si);
+int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
+		       int pnum, int ec);
+struct ubi_scan_info *ubi_scan(struct ubi_device *ubi);
+void ubi_scan_destroy_si(struct ubi_scan_info *si);
+
+#endif /* !__UBI_SCAN_H__ */
diff --git a/roms/u-boot/drivers/mtd/ubi/ubi-media.h b/roms/u-boot/drivers/mtd/ubi/ubi-media.h
new file mode 100644
index 00000000..9012326d
--- /dev/null
+++ b/roms/u-boot/drivers/mtd/ubi/ubi-media.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) International Business Machines Corp., 2006
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ *
+ * Authors: Artem Bityutskiy (Битюцкий Артём)
+ *          Thomas Gleixner
+ *          Frank Haverkamp
+ *          Oliver Lohmann
+ *          Andreas Arnez
+ */
+
+/*
+ * This file defines the layout of UBI headers and all the other UBI on-flash
+ * data structures.
+ */ + +#ifndef __UBI_MEDIA_H__ +#define __UBI_MEDIA_H__ + +#include <asm/byteorder.h> + +/* The version of UBI images supported by this implementation */ +#define UBI_VERSION 1 + +/* The highest erase counter value supported by this implementation */ +#define UBI_MAX_ERASECOUNTER 0x7FFFFFFF + +/* The initial CRC32 value used when calculating CRC checksums */ +#define UBI_CRC32_INIT 0xFFFFFFFFU + +/* Erase counter header magic number (ASCII "UBI#") */ +#define UBI_EC_HDR_MAGIC  0x55424923 +/* Volume identifier header magic number (ASCII "UBI!") */ +#define UBI_VID_HDR_MAGIC 0x55424921 + +/* + * Volume type constants used in the volume identifier header. + * + * @UBI_VID_DYNAMIC: dynamic volume + * @UBI_VID_STATIC: static volume + */ +enum { +	UBI_VID_DYNAMIC = 1, +	UBI_VID_STATIC  = 2 +}; + +/* + * Volume flags used in the volume table record. + * + * @UBI_VTBL_AUTORESIZE_FLG: auto-resize this volume + * + * %UBI_VTBL_AUTORESIZE_FLG flag can be set only for one volume in the volume + * table. UBI automatically re-sizes the volume which has this flag and makes + * the volume to be of largest possible size. This means that if after the + * initialization UBI finds out that there are available physical eraseblocks + * present on the device, it automatically appends all of them to the volume + * (the physical eraseblocks reserved for bad eraseblocks handling and other + * reserved physical eraseblocks are not taken). So, if there is a volume with + * the %UBI_VTBL_AUTORESIZE_FLG flag set, the amount of available logical + * eraseblocks will be zero after UBI is loaded, because all of them will be + * reserved for this volume. Note, the %UBI_VTBL_AUTORESIZE_FLG bit is cleared + * after the volume had been initialized. + * + * The auto-resize feature is useful for device production purposes. For + * example, different NAND flash chips may have different amount of initial bad + * eraseblocks, depending of particular chip instance. 
Manufacturers of NAND + * chips usually guarantee that the amount of initial bad eraseblocks does not + * exceed certain percent, e.g. 2%. When one creates an UBI image which will be + * flashed to the end devices in production, he does not know the exact amount + * of good physical eraseblocks the NAND chip on the device will have, but this + * number is required to calculate the volume sized and put them to the volume + * table of the UBI image. In this case, one of the volumes (e.g., the one + * which will store the root file system) is marked as "auto-resizable", and + * UBI will adjust its size on the first boot if needed. + * + * Note, first UBI reserves some amount of physical eraseblocks for bad + * eraseblock handling, and then re-sizes the volume, not vice-versa. This + * means that the pool of reserved physical eraseblocks will always be present. + */ +enum { +	UBI_VTBL_AUTORESIZE_FLG = 0x01, +}; + +/* + * Compatibility constants used by internal volumes. + * + * @UBI_COMPAT_DELETE: delete this internal volume before anything is written + * to the flash + * @UBI_COMPAT_RO: attach this device in read-only mode + * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its + * physical eraseblocks, don't allow the wear-leveling unit to move them + * @UBI_COMPAT_REJECT: reject this UBI image + */ +enum { +	UBI_COMPAT_DELETE   = 1, +	UBI_COMPAT_RO       = 2, +	UBI_COMPAT_PRESERVE = 4, +	UBI_COMPAT_REJECT   = 5 +}; + +/* Sizes of UBI headers */ +#define UBI_EC_HDR_SIZE  sizeof(struct ubi_ec_hdr) +#define UBI_VID_HDR_SIZE sizeof(struct ubi_vid_hdr) + +/* Sizes of UBI headers without the ending CRC */ +#define UBI_EC_HDR_SIZE_CRC  (UBI_EC_HDR_SIZE  - sizeof(__be32)) +#define UBI_VID_HDR_SIZE_CRC (UBI_VID_HDR_SIZE - sizeof(__be32)) + +/** + * struct ubi_ec_hdr - UBI erase counter header. 
+ * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC)
+ * @version: version of UBI implementation which is supposed to accept this
+ * UBI image
+ * @padding1: reserved for future, zeroes
+ * @ec: the erase counter
+ * @vid_hdr_offset: where the VID header starts
+ * @data_offset: where the user data start
+ * @padding2: reserved for future, zeroes
+ * @hdr_crc: erase counter header CRC checksum
+ *
+ * The erase counter header takes 64 bytes and has plenty of unused space for
+ * future usage. The unused fields are zeroed. The @version field is used to
+ * indicate the version of UBI implementation which is supposed to be able to
+ * work with this UBI image. If @version is greater than the current UBI
+ * version, the image is rejected. This may be useful in future if something
+ * is changed radically. This field is duplicated in the volume identifier
+ * header.
+ *
+ * The @vid_hdr_offset and @data_offset fields contain the offset of the
+ * volume identifier header and user data, relative to the beginning of the
+ * physical eraseblock. These values have to be the same for all physical
+ * eraseblocks.
+ */
+struct ubi_ec_hdr {
+	__be32  magic;
+	__u8    version;
+	__u8    padding1[3];
+	__be64  ec; /* Warning: the current limit is 31-bit anyway! */
+	__be32  vid_hdr_offset;
+	__be32  data_offset;
+	__u8    padding2[36];
+	__be32  hdr_crc;
+} __attribute__ ((packed));
+
+/**
+ * struct ubi_vid_hdr - on-flash UBI volume identifier header.
+ * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC)
+ * @version: UBI implementation version which is supposed to accept this UBI
+ * image (%UBI_VERSION)
+ * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC)
+ * @copy_flag: if this logical eraseblock was copied from another physical
+ * eraseblock (for wear-leveling reasons)
+ * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE,
+ * %UBI_COMPAT_RO, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT)
+ * @vol_id: ID of this volume
+ * @lnum: logical eraseblock number
+ * @leb_ver: version of this logical eraseblock (IMPORTANT: obsolete, to be
+ * removed, kept only for not breaking older UBI users)
+ * @data_size: how many bytes of data this logical eraseblock contains
+ * @used_ebs: total number of used logical eraseblocks in this volume
+ * @data_pad: how many bytes at the end of this physical eraseblock are not
+ * used
+ * @data_crc: CRC checksum of the data stored in this logical eraseblock
+ * @padding1: reserved for future, zeroes
+ * @sqnum: sequence number
+ * @padding2: reserved for future, zeroes
+ * @hdr_crc: volume identifier header CRC checksum
+ *
+ * The @sqnum is the value of the global sequence counter at the time when this
+ * VID header was created. The global sequence counter is incremented each time
+ * UBI writes a new VID header to the flash, i.e. when it maps a logical
+ * eraseblock to a new physical eraseblock. The global sequence counter is an
+ * unsigned 64-bit integer and we assume it never overflows. The @sqnum
+ * (sequence number) is used to distinguish between older and newer versions of
+ * logical eraseblocks.
+ *
+ * There are 2 situations when there may be more than one physical eraseblock
+ * corresponding to the same logical eraseblock, i.e., having the same @vol_id
+ * and @lnum values in the volume identifier header. Suppose we have a logical
+ * eraseblock L and it is mapped to the physical eraseblock P.
+ *
+ * 1. Because UBI may erase physical eraseblocks asynchronously, the following
+ * situation is possible: L is asynchronously erased, so P is scheduled for
+ * erasure, then L is written to, i.e. mapped to another physical eraseblock P1,
+ * so P1 is written to, then an unclean reboot happens. Result - there are 2
+ * physical eraseblocks P and P1 corresponding to the same logical eraseblock
+ * L. But P1 has greater sequence number, so UBI picks P1 when it attaches the
+ * flash.
+ *
+ * 2. From time to time UBI moves logical eraseblocks to other physical
+ * eraseblocks for wear-leveling reasons. If, for example, UBI moves L from P
+ * to P1, and an unclean reboot happens before P is physically erased, there
+ * are two physical eraseblocks P and P1 corresponding to L and UBI has to
+ * select one of them when the flash is attached. The @sqnum field says which
+ * PEB is the original (obviously P will have lower @sqnum) and the copy. But
+ * it is not enough to select the physical eraseblock with the higher sequence
+ * number, because the unclean reboot could have happened in the middle of the
+ * copying process, so the data in P1 is corrupted. It is also not enough to
+ * just select the physical eraseblock with lower sequence number, because the
+ * data there may be old (consider a case if more data was added to P1 after
+ * the copying). Moreover, the unclean reboot may happen when the erasure of P
+ * was just started, so it results in unstable P, which is "mostly" OK, but
+ * still has unstable bits.
+ *
+ * UBI uses the @copy_flag field to indicate that this logical eraseblock is a
+ * copy. UBI also calculates data CRC when the data is moved and stores it at
+ * the @data_crc field of the copy (P1). So when UBI needs to pick one physical
+ * eraseblock of two (P or P1), the @copy_flag of the newer one (P1) is
+ * examined. If it is cleared, the situation is simple and the newer one is
+ * picked. If it is set, the data CRC of the copy (P1) is examined. If the CRC
+ * checksum is correct, this physical eraseblock is selected (P1). Otherwise
+ * the older one (P) is selected.
+ *
+ * Note, there is an obsolete @leb_ver field which was used instead of @sqnum
+ * in the past. But it is not used anymore and we keep it in order to be able
+ * to deal with old UBI images. It will be removed at some point.
+ *
+ * There are 2 sorts of volumes in UBI: user volumes and internal volumes.
+ * Internal volumes are not seen from outside and are used for various internal
+ * UBI purposes. In this implementation there is only one internal volume - the
+ * layout volume. Internal volumes are the main mechanism of UBI extensions.
+ * For example, in future one may introduce a journal internal volume. Internal
+ * volumes have their own reserved range of IDs.
+ *
+ * The @compat field is only used for internal volumes and contains the "degree
+ * of their compatibility". It is always zero for user volumes. This field
+ * provides a mechanism to introduce UBI extensions and to be still compatible
+ * with older UBI binaries. For example, if someone introduced a journal in
+ * future, he would probably use %UBI_COMPAT_DELETE compatibility for the
+ * journal volume.  And in this case, older UBI binaries, which know nothing
+ * about the journal volume, would just delete this volume and work perfectly
+ * fine. This is similar to what Ext2fs does when it is fed by an Ext3fs image
+ * - it just ignores the Ext3fs journal.
+ *
+ * The @data_crc field contains the CRC checksum of the contents of the logical
+ * eraseblock if this is a static volume. In case of dynamic volumes, it does
+ * not contain the CRC checksum as a rule. The only exception is when the
+ * data of the physical eraseblock was moved by the wear-leveling unit, then
+ * the wear-leveling unit calculates the data CRC and stores it in the
+ * @data_crc field. And of course, the @copy_flag is %1 in this case.
+ * + * The @data_size field is used only for static volumes because UBI has to know + * how many bytes of data are stored in this eraseblock. For dynamic volumes, + * this field usually contains zero. The only exception is when the data of the + * physical eraseblock was moved to another physical eraseblock for + * wear-leveling reasons. In this case, UBI calculates CRC checksum of the + * contents and uses both @data_crc and @data_size fields. In this case, the + * @data_size field contains data size. + * + * The @used_ebs field is used only for static volumes and indicates how many + * eraseblocks the data of the volume takes. For dynamic volumes this field is + * not used and always contains zero. + * + * The @data_pad is calculated when volumes are created using the alignment + * parameter. So, effectively, the @data_pad field reduces the size of logical + * eraseblocks of this volume. This is very handy when one uses block-oriented + * software (say, cramfs) on top of the UBI volume. + */ +struct ubi_vid_hdr { +	__be32  magic; +	__u8    version; +	__u8    vol_type; +	__u8    copy_flag; +	__u8    compat; +	__be32  vol_id; +	__be32  lnum; +	__be32  leb_ver; /* obsolete, to be removed, don't use */ +	__be32  data_size; +	__be32  used_ebs; +	__be32  data_pad; +	__be32  data_crc; +	__u8    padding1[4]; +	__be64  sqnum; +	__u8    padding2[12]; +	__be32  hdr_crc; +} __attribute__ ((packed)); + +/* Internal UBI volumes count */ +#define UBI_INT_VOL_COUNT 1 + +/* + * Starting ID of internal volumes. There is reserved room for 4096 internal + * volumes. 
+ */ +#define UBI_INTERNAL_VOL_START (0x7FFFFFFF - 4096) + +/* The layout volume contains the volume table */ + +#define UBI_LAYOUT_VOLUME_ID     UBI_INTERNAL_VOL_START +#define UBI_LAYOUT_VOLUME_TYPE   UBI_VID_DYNAMIC +#define UBI_LAYOUT_VOLUME_ALIGN  1 +#define UBI_LAYOUT_VOLUME_EBS    2 +#define UBI_LAYOUT_VOLUME_NAME   "layout volume" +#define UBI_LAYOUT_VOLUME_COMPAT UBI_COMPAT_REJECT + +/* The maximum number of volumes per one UBI device */ +#define UBI_MAX_VOLUMES 128 + +/* The maximum volume name length */ +#define UBI_VOL_NAME_MAX 127 + +/* Size of the volume table record */ +#define UBI_VTBL_RECORD_SIZE sizeof(struct ubi_vtbl_record) + +/* Size of the volume table record without the ending CRC */ +#define UBI_VTBL_RECORD_SIZE_CRC (UBI_VTBL_RECORD_SIZE - sizeof(__be32)) + +/** + * struct ubi_vtbl_record - a record in the volume table. + * @reserved_pebs: how many physical eraseblocks are reserved for this volume + * @alignment: volume alignment + * @data_pad: how many bytes are unused at the end of the each physical + * eraseblock to satisfy the requested alignment + * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) + * @upd_marker: if volume update was started but not finished + * @name_len: volume name length + * @name: the volume name + * @flags: volume flags (%UBI_VTBL_AUTORESIZE_FLG) + * @padding: reserved, zeroes + * @crc: a CRC32 checksum of the record + * + * The volume table records are stored in the volume table, which is stored in + * the layout volume. The layout volume consists of 2 logical eraseblock, each + * of which contains a copy of the volume table (i.e., the volume table is + * duplicated). The volume table is an array of &struct ubi_vtbl_record + * objects indexed by the volume ID. + * + * If the size of the logical eraseblock is large enough to fit + * %UBI_MAX_VOLUMES records, the volume table contains %UBI_MAX_VOLUMES + * records. 
Otherwise, it contains as many records as it can fit (i.e., size of + * logical eraseblock divided by sizeof(struct ubi_vtbl_record)). + * + * The @upd_marker flag is used to implement volume update. It is set to %1 + * before update and set to %0 after the update. So if the update operation was + * interrupted, UBI knows that the volume is corrupted. + * + * The @alignment field is specified when the volume is created and cannot be + * later changed. It may be useful, for example, when a block-oriented file + * system works on top of UBI. The @data_pad field is calculated using the + * logical eraseblock size and @alignment. The alignment must be multiple to the + * minimal flash I/O unit. If @alignment is 1, all the available space of + * the physical eraseblocks is used. + * + * Empty records contain all zeroes and the CRC checksum of those zeroes. + */ +struct ubi_vtbl_record { +	__be32  reserved_pebs; +	__be32  alignment; +	__be32  data_pad; +	__u8    vol_type; +	__u8    upd_marker; +	__be16  name_len; +	__u8    name[UBI_VOL_NAME_MAX+1]; +	__u8    flags; +	__u8    padding[23]; +	__be32  crc; +} __attribute__ ((packed)); + +#endif /* !__UBI_MEDIA_H__ */ diff --git a/roms/u-boot/drivers/mtd/ubi/ubi.h b/roms/u-boot/drivers/mtd/ubi/ubi.h new file mode 100644 index 00000000..f4f71655 --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/ubi.h @@ -0,0 +1,638 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * Copyright (c) Nokia Corporation, 2006, 2007 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +#ifndef __UBI_UBI_H__ +#define __UBI_UBI_H__ + +#ifdef UBI_LINUX +#include <linux/init.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/rbtree.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/mutex.h> +#include <linux/rwsem.h> +#include <linux/spinlock.h> +#include <linux/fs.h> +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/string.h> 
+#include <linux/vmalloc.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/ubi.h> +#endif + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/rbtree.h> +#include <linux/string.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/ubi.h> + +#include "ubi-media.h" +#include "scan.h" +#include "debug.h" + +/* Maximum number of supported UBI devices */ +#define UBI_MAX_DEVICES 32 + +/* UBI name used for character devices, sysfs, etc */ +#define UBI_NAME_STR "ubi" + +/* Normal UBI messages */ +#ifdef CONFIG_UBI_SILENCE_MSG +#define ubi_msg(fmt, ...) +#else +#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__) +#endif +/* UBI warning messages */ +#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \ +				  __func__, ##__VA_ARGS__) +/* UBI error messages */ +#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \ +				 __func__, ##__VA_ARGS__) + +/* Lowest number PEBs reserved for bad PEB handling */ +#define MIN_RESEVED_PEBS 2 + +/* Background thread name pattern */ +#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd" + +/* This marker in the EBA table means that the LEB is um-mapped */ +#define UBI_LEB_UNMAPPED -1 + +/* + * In case of errors, UBI tries to repeat the operation several times before + * returning error. The below constant defines how many times UBI re-tries. + */ +#define UBI_IO_RETRIES 3 + +/* + * Error codes returned by the I/O unit. + * + * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only + * 0xFF bytes + * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. 
it contains only a + * valid erase counter header, and the rest are %0xFF bytes + * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC) + * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or + * CRC) + * UBI_IO_BITFLIPS: bit-flips were detected and corrected + */ +enum { +	UBI_IO_PEB_EMPTY = 1, +	UBI_IO_PEB_FREE, +	UBI_IO_BAD_EC_HDR, +	UBI_IO_BAD_VID_HDR, +	UBI_IO_BITFLIPS +}; + +/** + * struct ubi_wl_entry - wear-leveling entry. + * @rb: link in the corresponding RB-tree + * @ec: erase counter + * @pnum: physical eraseblock number + * + * This data structure is used in the WL unit. Each physical eraseblock has a + * corresponding &struct wl_entry object which may be kept in different + * RB-trees. See WL unit for details. + */ +struct ubi_wl_entry { +	struct rb_node rb; +	int ec; +	int pnum; +}; + +/** + * struct ubi_ltree_entry - an entry in the lock tree. + * @rb: links RB-tree nodes + * @vol_id: volume ID of the locked logical eraseblock + * @lnum: locked logical eraseblock number + * @users: how many tasks are using this logical eraseblock or wait for it + * @mutex: read/write mutex to implement read/write access serialization to + *         the (@vol_id, @lnum) logical eraseblock + * + * This data structure is used in the EBA unit to implement per-LEB locking. + * When a logical eraseblock is being locked - corresponding + * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree). + * See EBA unit for details. + */ +struct ubi_ltree_entry { +	struct rb_node rb; +	int vol_id; +	int lnum; +	int users; +	struct rw_semaphore mutex; +}; + +struct ubi_volume_desc; + +/** + * struct ubi_volume - UBI volume description data structure. 
+ * @dev: device object to make use of the the Linux device model + * @cdev: character device object to create character device + * @ubi: reference to the UBI device description object + * @vol_id: volume ID + * @ref_count: volume reference count + * @readers: number of users holding this volume in read-only mode + * @writers: number of users holding this volume in read-write mode + * @exclusive: whether somebody holds this volume in exclusive mode + * + * @reserved_pebs: how many physical eraseblocks are reserved for this volume + * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) + * @usable_leb_size: logical eraseblock size without padding + * @used_ebs: how many logical eraseblocks in this volume contain data + * @last_eb_bytes: how many bytes are stored in the last logical eraseblock + * @used_bytes: how many bytes of data this volume contains + * @alignment: volume alignment + * @data_pad: how many bytes are not used at the end of physical eraseblocks to + *            satisfy the requested alignment + * @name_len: volume name length + * @name: volume name + * + * @upd_ebs: how many eraseblocks are expected to be updated + * @ch_lnum: LEB number which is being changing by the atomic LEB change + *           operation + * @ch_dtype: data persistency type which is being changing by the atomic LEB + *            change operation + * @upd_bytes: how many bytes are expected to be received for volume update or + *             atomic LEB change + * @upd_received: how many bytes were already received for volume update or + *                atomic LEB change + * @upd_buf: update buffer which is used to collect update data or data for + *           atomic LEB change + * + * @eba_tbl: EBA table of this volume (LEB->PEB mapping) + * @checked: %1 if this static volume was checked + * @corrupted: %1 if the volume is corrupted (static volumes only) + * @upd_marker: %1 if the update marker is set for this volume + * @updating: %1 if the volume is being 
updated + * @changing_leb: %1 if the atomic LEB change ioctl command is in progress + * + * @gluebi_desc: gluebi UBI volume descriptor + * @gluebi_refcount: reference count of the gluebi MTD device + * @gluebi_mtd: MTD device description object of the gluebi MTD device + * + * The @corrupted field indicates that the volume's contents is corrupted. + * Since UBI protects only static volumes, this field is not relevant to + * dynamic volumes - it is user's responsibility to assure their data + * integrity. + * + * The @upd_marker flag indicates that this volume is either being updated at + * the moment or is damaged because of an unclean reboot. + */ +struct ubi_volume { +	struct device dev; +	struct cdev cdev; +	struct ubi_device *ubi; +	int vol_id; +	int ref_count; +	int readers; +	int writers; +	int exclusive; + +	int reserved_pebs; +	int vol_type; +	int usable_leb_size; +	int used_ebs; +	int last_eb_bytes; +	long long used_bytes; +	int alignment; +	int data_pad; +	int name_len; +	char name[UBI_VOL_NAME_MAX+1]; + +	int upd_ebs; +	int ch_lnum; +	int ch_dtype; +	long long upd_bytes; +	long long upd_received; +	void *upd_buf; + +	int *eba_tbl; +	unsigned int checked:1; +	unsigned int corrupted:1; +	unsigned int upd_marker:1; +	unsigned int updating:1; +	unsigned int changing_leb:1; + +#ifdef CONFIG_MTD_UBI_GLUEBI +	/* +	 * Gluebi-related stuff may be compiled out. +	 * TODO: this should not be built into UBI but should be a separate +	 * ubimtd driver which works on top of UBI and emulates MTD devices. +	 */ +	struct ubi_volume_desc *gluebi_desc; +	int gluebi_refcount; +	struct mtd_info gluebi_mtd; +#endif +}; + +/** + * struct ubi_volume_desc - descriptor of the UBI volume returned when it is + * opened. 
+ * @vol: reference to the corresponding volume description object + * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE) + */ +struct ubi_volume_desc { +	struct ubi_volume *vol; +	int mode; +}; + +struct ubi_wl_entry; + +/** + * struct ubi_device - UBI device description structure + * @dev: UBI device object to use the the Linux device model + * @cdev: character device object to create character device + * @ubi_num: UBI device number + * @ubi_name: UBI device name + * @vol_count: number of volumes in this UBI device + * @volumes: volumes of this UBI device + * @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs, + *                @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, + *                @vol->readers, @vol->writers, @vol->exclusive, + *                @vol->ref_count, @vol->mapping and @vol->eba_tbl. + * @ref_count: count of references on the UBI device + * + * @rsvd_pebs: count of reserved physical eraseblocks + * @avail_pebs: count of available physical eraseblocks + * @beb_rsvd_pebs: how many physical eraseblocks are reserved for bad PEB + *                 handling + * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling + * + * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end + *                     of UBI ititializetion + * @vtbl_slots: how many slots are available in the volume table + * @vtbl_size: size of the volume table in bytes + * @vtbl: in-RAM volume table copy + * @volumes_mutex: protects on-flash volume table and serializes volume + *                 changes, like creation, deletion, update, resize + * + * @max_ec: current highest erase counter value + * @mean_ec: current mean erase counter value + * + * @global_sqnum: global sequence number + * @ltree_lock: protects the lock tree and @global_sqnum + * @ltree: the lock tree + * @alc_mutex: serializes "atomic LEB change" operations + * + * @used: RB-tree of used physical eraseblocks + * @free: 
RB-tree of free physical eraseblocks + * @scrub: RB-tree of physical eraseblocks which need scrubbing + * @prot: protection trees + * @prot.pnum: protection tree indexed by physical eraseblock numbers + * @prot.aec: protection tree indexed by absolute erase counter value + * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, + *           @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works + *           fields + * @move_mutex: serializes eraseblock moves + * @wl_scheduled: non-zero if the wear-leveling was scheduled + * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any + *             physical eraseblock + * @abs_ec: absolute erase counter + * @move_from: physical eraseblock from where the data is being moved + * @move_to: physical eraseblock where the data is being moved to + * @move_to_put: if the "to" PEB was put + * @works: list of pending works + * @works_count: count of pending works + * @bgt_thread: background thread description object + * @thread_enabled: if the background thread is enabled + * @bgt_name: background thread name + * + * @flash_size: underlying MTD device size (in bytes) + * @peb_count: count of physical eraseblocks on the MTD device + * @peb_size: physical eraseblock size + * @bad_peb_count: count of bad physical eraseblocks + * @good_peb_count: count of good physical eraseblocks + * @min_io_size: minimal input/output unit size of the underlying MTD device + * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers + * @ro_mode: if the UBI device is in read-only mode + * @leb_size: logical eraseblock size + * @leb_start: starting offset of logical eraseblocks within physical + * eraseblocks + * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size + * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size + * @vid_hdr_offset: starting offset of the volume identifier header (might be + * unaligned) + * @vid_hdr_aloffset: starting offset of 
the VID header aligned to + * @hdrs_min_io_size + * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset + * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or + *               not + * @mtd: MTD device descriptor + * + * @peb_buf1: a buffer of PEB size used for different purposes + * @peb_buf2: another buffer of PEB size used for different purposes + * @buf_mutex: proptects @peb_buf1 and @peb_buf2 + * @dbg_peb_buf: buffer of PEB size used for debugging + * @dbg_buf_mutex: proptects @dbg_peb_buf + */ +struct ubi_device { +	struct cdev cdev; +	struct device dev; +	int ubi_num; +	char ubi_name[sizeof(UBI_NAME_STR)+5]; +	int vol_count; +	struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT]; +	spinlock_t volumes_lock; +	int ref_count; + +	int rsvd_pebs; +	int avail_pebs; +	int beb_rsvd_pebs; +	int beb_rsvd_level; + +	int autoresize_vol_id; +	int vtbl_slots; +	int vtbl_size; +	struct ubi_vtbl_record *vtbl; +	struct mutex volumes_mutex; + +	int max_ec; +	/* TODO: mean_ec is not updated run-time, fix */ +	int mean_ec; + +	/* EBA unit's stuff */ +	unsigned long long global_sqnum; +	spinlock_t ltree_lock; +	struct rb_root ltree; +	struct mutex alc_mutex; + +	/* Wear-leveling unit's stuff */ +	struct rb_root used; +	struct rb_root free; +	struct rb_root scrub; +	struct { +		struct rb_root pnum; +		struct rb_root aec; +	} prot; +	spinlock_t wl_lock; +	struct mutex move_mutex; +	struct rw_semaphore work_sem; +	int wl_scheduled; +	struct ubi_wl_entry **lookuptbl; +	unsigned long long abs_ec; +	struct ubi_wl_entry *move_from; +	struct ubi_wl_entry *move_to; +	int move_to_put; +	struct list_head works; +	int works_count; +	struct task_struct *bgt_thread; +	int thread_enabled; +	char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; + +	/* I/O unit's stuff */ +	long long flash_size; +	int peb_count; +	int peb_size; +	int bad_peb_count; +	int good_peb_count; +	int min_io_size; +	int hdrs_min_io_size; +	int ro_mode; +	int leb_size; +	int leb_start; +	
int ec_hdr_alsize; +	int vid_hdr_alsize; +	int vid_hdr_offset; +	int vid_hdr_aloffset; +	int vid_hdr_shift; +	int bad_allowed; +	struct mtd_info *mtd; + +	void *peb_buf1; +	void *peb_buf2; +	struct mutex buf_mutex; +	struct mutex ckvol_mutex; +#ifdef CONFIG_MTD_UBI_DEBUG +	void *dbg_peb_buf; +	struct mutex dbg_buf_mutex; +#endif +}; + +extern struct kmem_cache *ubi_wl_entry_slab; +extern struct file_operations ubi_ctrl_cdev_operations; +extern struct file_operations ubi_cdev_operations; +extern struct file_operations ubi_vol_cdev_operations; +extern struct class *ubi_class; +extern struct mutex ubi_devices_mutex; + +/* vtbl.c */ +int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, +			   struct ubi_vtbl_record *vtbl_rec); +int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si); + +/* vmt.c */ +int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); +int ubi_remove_volume(struct ubi_volume_desc *desc); +int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); +int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); +void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); + +/* upd.c */ +int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, +		     long long bytes); +int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, +			 const void __user *buf, int count); +int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, +			 const struct ubi_leb_change_req *req); +int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, +			     const void __user *buf, int count); + +/* misc.c */ +int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); +int ubi_check_volume(struct ubi_device *ubi, int vol_id); +void ubi_calculate_reserved(struct ubi_device *ubi); + +/* gluebi.c */ +#ifdef CONFIG_MTD_UBI_GLUEBI +int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol); +int 
ubi_destroy_gluebi(struct ubi_volume *vol); +void ubi_gluebi_updated(struct ubi_volume *vol); +#else +#define ubi_create_gluebi(ubi, vol) 0 + +static inline int ubi_destroy_gluebi(struct ubi_volume *vol) +{ +	return 0; +} + +#define ubi_gluebi_updated(vol) +#endif + +/* eba.c */ +int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, +		      int lnum); +int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, +		     void *buf, int offset, int len, int check); +int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, +		      const void *buf, int offset, int len, int dtype); +int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, +			 int lnum, const void *buf, int len, int dtype, +			 int used_ebs); +int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, +			      int lnum, const void *buf, int len, int dtype); +int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, +		     struct ubi_vid_hdr *vid_hdr); +int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); +void ubi_eba_close(const struct ubi_device *ubi); + +/* wl.c */ +int ubi_wl_get_peb(struct ubi_device *ubi, int dtype); +int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture); +int ubi_wl_flush(struct ubi_device *ubi); +int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum); +int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); +void ubi_wl_close(struct ubi_device *ubi); +int ubi_thread(void *u); + +/* io.c */ +int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, +		int len); +int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, +		 int len); +int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture); +int ubi_io_is_bad(const struct ubi_device *ubi, int pnum); +int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum); +int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, +		       
struct ubi_ec_hdr *ec_hdr, int verbose); +int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum, +			struct ubi_ec_hdr *ec_hdr); +int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, +			struct ubi_vid_hdr *vid_hdr, int verbose); +int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, +			 struct ubi_vid_hdr *vid_hdr); + +/* build.c */ +int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset); +int ubi_detach_mtd_dev(int ubi_num, int anyway); +struct ubi_device *ubi_get_device(int ubi_num); +void ubi_put_device(struct ubi_device *ubi); +struct ubi_device *ubi_get_by_major(int major); +int ubi_major2num(int major); + +/* + * ubi_rb_for_each_entry - walk an RB-tree. + * @rb: a pointer to type 'struct rb_node' to to use as a loop counter + * @pos: a pointer to RB-tree entry type to use as a loop counter + * @root: RB-tree's root + * @member: the name of the 'struct rb_node' within the RB-tree entry + */ +#define ubi_rb_for_each_entry(rb, pos, root, member)                         \ +	for (rb = rb_first(root),                                            \ +	     pos = (rb ? container_of(rb, typeof(*pos), member) : NULL);     \ +	     rb;                                                             \ +	     rb = rb_next(rb), pos = container_of(rb, typeof(*pos), member)) + +/** + * ubi_zalloc_vid_hdr - allocate a volume identifier header object. + * @ubi: UBI device description object + * @gfp_flags: GFP flags to allocate with + * + * This function returns a pointer to the newly allocated and zero-filled + * volume identifier header object in case of success and %NULL in case of + * failure. + */ +static inline struct ubi_vid_hdr * +ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags) +{ +	void *vid_hdr; + +	vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags); +	if (!vid_hdr) +		return NULL; + +	/* +	 * VID headers may be stored at un-aligned flash offsets, so we shift +	 * the pointer. 
+	 */ +	return vid_hdr + ubi->vid_hdr_shift; +} + +/** + * ubi_free_vid_hdr - free a volume identifier header object. + * @ubi: UBI device description object + * @vid_hdr: the object to free + */ +static inline void ubi_free_vid_hdr(const struct ubi_device *ubi, +				    struct ubi_vid_hdr *vid_hdr) +{ +	void *p = vid_hdr; + +	if (!p) +		return; + +	kfree(p - ubi->vid_hdr_shift); +} + +/* + * This function is equivalent to 'ubi_io_read()', but @offset is relative to + * the beginning of the logical eraseblock, not to the beginning of the + * physical eraseblock. + */ +static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf, +				   int pnum, int offset, int len) +{ +	ubi_assert(offset >= 0); +	return ubi_io_read(ubi, buf, pnum, offset + ubi->leb_start, len); +} + +/* + * This function is equivalent to 'ubi_io_write()', but @offset is relative to + * the beginning of the logical eraseblock, not to the beginning of the + * physical eraseblock. + */ +static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf, +				    int pnum, int offset, int len) +{ +	ubi_assert(offset >= 0); +	return ubi_io_write(ubi, buf, pnum, offset + ubi->leb_start, len); +} + +/** + * ubi_ro_mode - switch to read-only mode. + * @ubi: UBI device description object + */ +static inline void ubi_ro_mode(struct ubi_device *ubi) +{ +	if (!ubi->ro_mode) { +		ubi->ro_mode = 1; +		ubi_warn("switch to read-only mode"); +	} +} + +/** + * vol_id2idx - get table index by volume ID. + * @ubi: UBI device description object + * @vol_id: volume ID + */ +static inline int vol_id2idx(const struct ubi_device *ubi, int vol_id) +{ +	if (vol_id >= UBI_INTERNAL_VOL_START) +		return vol_id - UBI_INTERNAL_VOL_START + ubi->vtbl_slots; +	else +		return vol_id; +} + +/** + * idx2vol_id - get volume ID by table index. 
+ * @ubi: UBI device description object + * @idx: table index + */ +static inline int idx2vol_id(const struct ubi_device *ubi, int idx) +{ +	if (idx >= ubi->vtbl_slots) +		return idx - ubi->vtbl_slots + UBI_INTERNAL_VOL_START; +	else +		return idx; +} + +#endif /* !__UBI_UBI_H__ */ diff --git a/roms/u-boot/drivers/mtd/ubi/upd.c b/roms/u-boot/drivers/mtd/ubi/upd.c new file mode 100644 index 00000000..e597f82b --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/upd.c @@ -0,0 +1,429 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * Copyright (c) Nokia Corporation, 2006 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + * + * Jan 2007: Alexander Schmidt, hacked per-volume update. + */ + +/* + * This file contains implementation of the volume update and atomic LEB change + * functionality. + * + * The update operation is based on the per-volume update marker which is + * stored in the volume table. The update marker is set before the update + * starts, and removed after the update has been finished. So if the update was + * interrupted by an unclean re-boot or due to some other reasons, the update + * marker stays on the flash media and UBI finds it when it attaches the MTD + * device next time. If the update marker is set for a volume, the volume is + * treated as damaged and most I/O operations are prohibited. Only a new update + * operation is allowed. + * + * Note, in general it is possible to implement the update operation as a + * transaction with a roll-back capability. + */ + +#ifdef UBI_LINUX +#include <linux/err.h> +#include <asm/uaccess.h> +#include <asm/div64.h> +#endif + +#include <ubi_uboot.h> +#include "ubi.h" + +/** + * set_update_marker - set update marker. + * @ubi: UBI device description object + * @vol: volume description object + * + * This function sets the update marker flag for volume @vol. Returns zero + * in case of success and a negative error code in case of failure. 
 */
static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol)
{
	int err;
	struct ubi_vtbl_record vtbl_rec;

	dbg_msg("set update marker for volume %d", vol->vol_id);

	if (vol->upd_marker) {
		/* Marker already set in a previous (interrupted?) update */
		ubi_assert(ubi->vtbl[vol->vol_id].upd_marker);
		dbg_msg("already set");
		return 0;
	}

	/* Modify a local copy; ubi_change_vtbl_record() commits it to flash */
	memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
	       sizeof(struct ubi_vtbl_record));
	vtbl_rec.upd_marker = 1;

	mutex_lock(&ubi->volumes_mutex);
	err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
	mutex_unlock(&ubi->volumes_mutex);
	/*
	 * NOTE(review): the in-RAM flag is set even when the on-flash update
	 * failed; presumably callers abort the whole update on error anyway -
	 * confirm against the callers of ubi_start_update().
	 */
	vol->upd_marker = 1;
	return err;
}

/**
 * clear_update_marker - clear update marker.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @bytes: new data size in bytes
 *
 * This function clears the update marker for volume @vol, sets new volume
 * data size and clears the "corrupted" flag (static volumes only). Returns
 * zero in case of success and a negative error code in case of failure.
 */
static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol,
			       long long bytes)
{
	int err;
	uint64_t tmp;
	struct ubi_vtbl_record vtbl_rec;

	dbg_msg("clear update marker for volume %d", vol->vol_id);

	memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id],
	       sizeof(struct ubi_vtbl_record));
	ubi_assert(vol->upd_marker && vtbl_rec.upd_marker);
	vtbl_rec.upd_marker = 0;

	if (vol->vol_type == UBI_STATIC_VOLUME) {
		/*
		 * Recompute size accounting for the new contents.  do_div()
		 * divides @tmp in place (quotient stays in @tmp) and returns
		 * the remainder, so @used_ebs becomes the number of full LEBs
		 * and @last_eb_bytes the length of the tail LEB.
		 */
		vol->corrupted = 0;
		vol->used_bytes = tmp = bytes;
		vol->last_eb_bytes = do_div(tmp, vol->usable_leb_size);
		vol->used_ebs = tmp;
		if (vol->last_eb_bytes)
			vol->used_ebs += 1;
		else
			vol->last_eb_bytes = vol->usable_leb_size;
	}

	mutex_lock(&ubi->volumes_mutex);
	err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec);
	mutex_unlock(&ubi->volumes_mutex);
	vol->upd_marker = 0;
	return err;
}

/**
 * ubi_start_update - start volume update.
+ * @ubi: UBI device description object + * @vol: volume description object + * @bytes: update bytes + * + * This function starts volume update operation. If @bytes is zero, the volume + * is just wiped out. Returns zero in case of success and a negative error code + * in case of failure. + */ +int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, +		     long long bytes) +{ +	int i, err; +	uint64_t tmp; + +	dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes); +	ubi_assert(!vol->updating && !vol->changing_leb); +	vol->updating = 1; + +	err = set_update_marker(ubi, vol); +	if (err) +		return err; + +	/* Before updating - wipe out the volume */ +	for (i = 0; i < vol->reserved_pebs; i++) { +		err = ubi_eba_unmap_leb(ubi, vol, i); +		if (err) +			return err; +	} + +	if (bytes == 0) { +		err = clear_update_marker(ubi, vol, 0); +		if (err) +			return err; +		err = ubi_wl_flush(ubi); +		if (!err) +			vol->updating = 0; +	} + +	vol->upd_buf = vmalloc(ubi->leb_size); +	if (!vol->upd_buf) +		return -ENOMEM; + +	tmp = bytes; +	vol->upd_ebs = !!do_div(tmp, vol->usable_leb_size); +	vol->upd_ebs += tmp; +	vol->upd_bytes = bytes; +	vol->upd_received = 0; +	return 0; +} + +/** + * ubi_start_leb_change - start atomic LEB change. + * @ubi: UBI device description object + * @vol: volume description object + * @req: operation request + * + * This function starts atomic LEB change operation. Returns zero in case of + * success and a negative error code in case of failure. 
 */
int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
			 const struct ubi_leb_change_req *req)
{
	ubi_assert(!vol->updating && !vol->changing_leb);

	dbg_msg("start changing LEB %d:%d, %u bytes",
		vol->vol_id, req->lnum, req->bytes);
	if (req->bytes == 0)
		/* Zero-length change: just atomically blank the LEB now */
		return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0,
						 req->dtype);

	/* Record the pending operation; data arrives via later calls */
	vol->upd_bytes = req->bytes;
	vol->upd_received = 0;
	vol->changing_leb = 1;
	vol->ch_lnum = req->lnum;
	vol->ch_dtype = req->dtype;

	/*
	 * NOTE(review): if this allocation fails, @changing_leb stays set
	 * while @upd_buf is NULL - looks like the caller is expected to reset
	 * the volume state on error; confirm against the callers.
	 */
	vol->upd_buf = vmalloc(req->bytes);
	if (!vol->upd_buf)
		return -ENOMEM;

	return 0;
}

/**
 * write_leb - write update data.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: data size
 * @used_ebs: how many logical eraseblocks will this volume contain (static
 * volumes only)
 *
 * This function writes update data to corresponding logical eraseblock. In
 * case of dynamic volume, this function checks if the data contains 0xFF bytes
 * at the end. If yes, the 0xFF bytes are cut and not written. So if the whole
 * buffer contains only 0xFF bytes, the LEB is left unmapped.
 *
 * The reason why we skip the trailing 0xFF bytes in case of dynamic volume is
 * that we want to make sure that more data may be appended to the logical
 * eraseblock in future. Indeed, writing 0xFF bytes may have side effects and
 * this PEB won't be writable anymore. So if one writes the file-system image
 * to the UBI volume where 0xFFs mean free space - UBI makes sure this free
 * space is writable after the update.
 *
 * We do not do this for static volumes because they are read-only. But this
 * also cannot be done because we have to store per-LEB CRC and the correct
 * data length.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
+ */ +static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, +		     void *buf, int len, int used_ebs) +{ +	int err; + +	if (vol->vol_type == UBI_DYNAMIC_VOLUME) { +		int l = ALIGN(len, ubi->min_io_size); + +		memset(buf + len, 0xFF, l - len); +		len = ubi_calc_data_len(ubi, buf, l); +		if (len == 0) { +			dbg_msg("all %d bytes contain 0xFF - skip", len); +			return 0; +		} + +		err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN); +	} else { +		/* +		 * When writing static volume, and this is the last logical +		 * eraseblock, the length (@len) does not have to be aligned to +		 * the minimal flash I/O unit. The 'ubi_eba_write_leb_st()' +		 * function accepts exact (unaligned) length and stores it in +		 * the VID header. And it takes care of proper alignment by +		 * padding the buffer. Here we just make sure the padding will +		 * contain zeros, not random trash. +		 */ +		memset(buf + len, 0, vol->usable_leb_size - len); +		err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len, +					   UBI_UNKNOWN, used_ebs); +	} + +	return err; +} + +/** + * ubi_more_update_data - write more update data. + * @vol: volume description object + * @buf: write data (user-space memory buffer) + * @count: how much bytes to write + * + * This function writes more data to the volume which is being updated. It may + * be called arbitrary number of times until all the update data arriveis. This + * function returns %0 in case of success, number of bytes written during the + * last call if the whole volume update has been successfully finished, and a + * negative error code in case of failure. 
 */
int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol,
			 const void __user *buf, int count)
{
	uint64_t tmp;
	int lnum, offs, err = 0, len, to_write = count;

	dbg_msg("write %d of %lld bytes, %lld already passed",
		count, vol->upd_bytes, vol->upd_received);

	if (ubi->ro_mode)
		return -EROFS;

	/*
	 * Translate the running byte offset into (LEB number, offset within
	 * LEB): do_div() leaves the quotient in @tmp, returns the remainder.
	 */
	tmp = vol->upd_received;
	offs = do_div(tmp, vol->usable_leb_size);
	lnum = tmp;

	/* Never accept more than the announced update size */
	if (vol->upd_received + count > vol->upd_bytes)
		to_write = count = vol->upd_bytes - vol->upd_received;

	/*
	 * When updating volumes, we accumulate whole logical eraseblock of
	 * data and write it at once.
	 */
	if (offs != 0) {
		/*
		 * This is a write to the middle of the logical eraseblock. We
		 * copy the data to our update buffer and wait for more data or
		 * flush it if the whole eraseblock is written or the update
		 * is finished.
		 */

		len = vol->usable_leb_size - offs;
		if (len > count)
			len = count;

		err = copy_from_user(vol->upd_buf + offs, buf, len);
		if (err)
			return -EFAULT;

		if (offs + len == vol->usable_leb_size ||
		    vol->upd_received + len == vol->upd_bytes) {
			int flush_len = offs + len;

			/*
			 * OK, we gathered either the whole eraseblock or this
			 * is the last chunk, it's time to flush the buffer.
			 */
			ubi_assert(flush_len <= vol->usable_leb_size);
			/*
			 * NOTE(review): on a write_leb()/copy_from_user()
			 * failure @upd_buf is not freed here (it is freed only
			 * when the update completes below) - presumably the
			 * caller tears the update down; confirm.
			 */
			err = write_leb(ubi, vol, lnum, vol->upd_buf, flush_len,
					vol->upd_ebs);
			if (err)
				return err;
		}

		vol->upd_received += len;
		count -= len;
		buf += len;
		lnum += 1;
	}

	/*
	 * If we've got more to write, let's continue. At this point we know we
	 * are starting from the beginning of an eraseblock.
	 */
	while (count) {
		if (count > vol->usable_leb_size)
			len = vol->usable_leb_size;
		else
			len = count;

		err = copy_from_user(vol->upd_buf, buf, len);
		if (err)
			return -EFAULT;

		/* Flush on a full LEB or on the final chunk of the update */
		if (len == vol->usable_leb_size ||
		    vol->upd_received + len == vol->upd_bytes) {
			err = write_leb(ubi, vol, lnum, vol->upd_buf,
					len, vol->upd_ebs);
			if (err)
				break;
		}

		vol->upd_received += len;
		count -= len;
		lnum += 1;
		buf += len;
	}

	ubi_assert(vol->upd_received <= vol->upd_bytes);
	if (vol->upd_received == vol->upd_bytes) {
		/* The update is finished, clear the update marker */
		err = clear_update_marker(ubi, vol, vol->upd_bytes);
		if (err)
			return err;
		err = ubi_wl_flush(ubi);
		if (err == 0) {
			/* Success: report bytes consumed in this call */
			vol->updating = 0;
			err = to_write;
			vfree(vol->upd_buf);
		}
	}

	return err;
}

/**
 * ubi_more_leb_change_data - accept more data for atomic LEB change.
 * @vol: volume description object
 * @buf: write data (user-space memory buffer)
 * @count: how much bytes to write
 *
 * This function accepts more data to the volume which is being under the
 * "atomic LEB change" operation. It may be called arbitrary number of times
 * until all data arrives. This function returns %0 in case of success, number
 * of bytes written during the last call if the whole "atomic LEB change"
 * operation has been successfully finished, and a negative error code in case
 * of failure.
+ */ +int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, +			     const void __user *buf, int count) +{ +	int err; + +	dbg_msg("write %d of %lld bytes, %lld already passed", +		count, vol->upd_bytes, vol->upd_received); + +	if (ubi->ro_mode) +		return -EROFS; + +	if (vol->upd_received + count > vol->upd_bytes) +		count = vol->upd_bytes - vol->upd_received; + +	err = copy_from_user(vol->upd_buf + vol->upd_received, buf, count); +	if (err) +		return -EFAULT; + +	vol->upd_received += count; + +	if (vol->upd_received == vol->upd_bytes) { +		int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size); + +		memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes); +		len = ubi_calc_data_len(ubi, vol->upd_buf, len); +		err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum, +						vol->upd_buf, len, UBI_UNKNOWN); +		if (err) +			return err; +	} + +	ubi_assert(vol->upd_received <= vol->upd_bytes); +	if (vol->upd_received == vol->upd_bytes) { +		vol->changing_leb = 0; +		err = count; +		vfree(vol->upd_buf); +	} + +	return err; +} diff --git a/roms/u-boot/drivers/mtd/ubi/vmt.c b/roms/u-boot/drivers/mtd/ubi/vmt.c new file mode 100644 index 00000000..c4e894b4 --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/vmt.c @@ -0,0 +1,848 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * This file contains implementation of volume creation, deletion, updating and + * resizing. 
+ */ + +#ifdef UBI_LINUX +#include <linux/err.h> +#include <asm/div64.h> +#endif + +#include <ubi_uboot.h> +#include "ubi.h" + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +static void paranoid_check_volumes(struct ubi_device *ubi); +#else +#define paranoid_check_volumes(ubi) +#endif + +#ifdef UBI_LINUX +static ssize_t vol_attribute_show(struct device *dev, +				  struct device_attribute *attr, char *buf); + +/* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */ +static struct device_attribute attr_vol_reserved_ebs = +	__ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute attr_vol_type = +	__ATTR(type, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute attr_vol_name = +	__ATTR(name, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute attr_vol_corrupted = +	__ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute attr_vol_alignment = +	__ATTR(alignment, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute attr_vol_usable_eb_size = +	__ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute attr_vol_data_bytes = +	__ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL); +static struct device_attribute attr_vol_upd_marker = +	__ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL); + +/* + * "Show" method for files in '/<sysfs>/class/ubi/ubiX_Y/'. + * + * Consider a situation: + * A. process 1 opens a sysfs file related to volume Y, say + *    /<sysfs>/class/ubi/ubiX_Y/reserved_ebs; + * B. process 2 removes volume Y; + * C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file; + * + * In this situation, this function will return %-ENODEV because it will find + * out that the volume was removed from the @ubi->volumes array. 
+ */ +static ssize_t vol_attribute_show(struct device *dev, +				  struct device_attribute *attr, char *buf) +{ +	int ret; +	struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); +	struct ubi_device *ubi; + +	ubi = ubi_get_device(vol->ubi->ubi_num); +	if (!ubi) +		return -ENODEV; + +	spin_lock(&ubi->volumes_lock); +	if (!ubi->volumes[vol->vol_id]) { +		spin_unlock(&ubi->volumes_lock); +		ubi_put_device(ubi); +		return -ENODEV; +	} +	/* Take a reference to prevent volume removal */ +	vol->ref_count += 1; +	spin_unlock(&ubi->volumes_lock); + +	if (attr == &attr_vol_reserved_ebs) +		ret = sprintf(buf, "%d\n", vol->reserved_pebs); +	else if (attr == &attr_vol_type) { +		const char *tp; + +		if (vol->vol_type == UBI_DYNAMIC_VOLUME) +			tp = "dynamic"; +		else +			tp = "static"; +		ret = sprintf(buf, "%s\n", tp); +	} else if (attr == &attr_vol_name) +		ret = sprintf(buf, "%s\n", vol->name); +	else if (attr == &attr_vol_corrupted) +		ret = sprintf(buf, "%d\n", vol->corrupted); +	else if (attr == &attr_vol_alignment) +		ret = sprintf(buf, "%d\n", vol->alignment); +	else if (attr == &attr_vol_usable_eb_size) +		ret = sprintf(buf, "%d\n", vol->usable_leb_size); +	else if (attr == &attr_vol_data_bytes) +		ret = sprintf(buf, "%lld\n", vol->used_bytes); +	else if (attr == &attr_vol_upd_marker) +		ret = sprintf(buf, "%d\n", vol->upd_marker); +	else +		/* This must be a bug */ +		ret = -EINVAL; + +	/* We've done the operation, drop volume and UBI device references */ +	spin_lock(&ubi->volumes_lock); +	vol->ref_count -= 1; +	ubi_assert(vol->ref_count >= 0); +	spin_unlock(&ubi->volumes_lock); +	ubi_put_device(ubi); +	return ret; +} +#endif + +/* Release method for volume devices */ +static void vol_release(struct device *dev) +{ +	struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); + +	kfree(vol); +} + +#ifdef UBI_LINUX +/** + * volume_sysfs_init - initialize sysfs for new volume. 
+ * @ubi: UBI device description object + * @vol: volume description object + * + * This function returns zero in case of success and a negative error code in + * case of failure. + * + * Note, this function does not free allocated resources in case of failure - + * the caller does it. This is because this would cause release() here and the + * caller would oops. + */ +static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol) +{ +	int err; + +	err = device_create_file(&vol->dev, &attr_vol_reserved_ebs); +	if (err) +		return err; +	err = device_create_file(&vol->dev, &attr_vol_type); +	if (err) +		return err; +	err = device_create_file(&vol->dev, &attr_vol_name); +	if (err) +		return err; +	err = device_create_file(&vol->dev, &attr_vol_corrupted); +	if (err) +		return err; +	err = device_create_file(&vol->dev, &attr_vol_alignment); +	if (err) +		return err; +	err = device_create_file(&vol->dev, &attr_vol_usable_eb_size); +	if (err) +		return err; +	err = device_create_file(&vol->dev, &attr_vol_data_bytes); +	if (err) +		return err; +	err = device_create_file(&vol->dev, &attr_vol_upd_marker); +	return err; +} + +/** + * volume_sysfs_close - close sysfs for a volume. + * @vol: volume description object + */ +static void volume_sysfs_close(struct ubi_volume *vol) +{ +	device_remove_file(&vol->dev, &attr_vol_upd_marker); +	device_remove_file(&vol->dev, &attr_vol_data_bytes); +	device_remove_file(&vol->dev, &attr_vol_usable_eb_size); +	device_remove_file(&vol->dev, &attr_vol_alignment); +	device_remove_file(&vol->dev, &attr_vol_corrupted); +	device_remove_file(&vol->dev, &attr_vol_name); +	device_remove_file(&vol->dev, &attr_vol_type); +	device_remove_file(&vol->dev, &attr_vol_reserved_ebs); +	device_unregister(&vol->dev); +} +#endif + +/** + * ubi_create_volume - create volume. + * @ubi: UBI device description object + * @req: volume creation request + * + * This function creates volume described by @req. 
If @req->vol_id id + * %UBI_VOL_NUM_AUTO, this function automatically assign ID to the new volume + * and saves it in @req->vol_id. Returns zero in case of success and a negative + * error code in case of failure. Note, the caller has to have the + * @ubi->volumes_mutex locked. + */ +int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) +{ +	int i, err, vol_id = req->vol_id, dont_free = 0; +	struct ubi_volume *vol; +	struct ubi_vtbl_record vtbl_rec; +	uint64_t bytes; +	dev_t dev; + +	if (ubi->ro_mode) +		return -EROFS; + +	vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL); +	if (!vol) +		return -ENOMEM; + +	spin_lock(&ubi->volumes_lock); +	if (vol_id == UBI_VOL_NUM_AUTO) { +		/* Find unused volume ID */ +		dbg_msg("search for vacant volume ID"); +		for (i = 0; i < ubi->vtbl_slots; i++) +			if (!ubi->volumes[i]) { +				vol_id = i; +				break; +			} + +		if (vol_id == UBI_VOL_NUM_AUTO) { +			dbg_err("out of volume IDs"); +			err = -ENFILE; +			goto out_unlock; +		} +		req->vol_id = vol_id; +	} + +	dbg_msg("volume ID %d, %llu bytes, type %d, name %s", +		vol_id, (unsigned long long)req->bytes, +		(int)req->vol_type, req->name); + +	/* Ensure that this volume does not exist */ +	err = -EEXIST; +	if (ubi->volumes[vol_id]) { +		dbg_err("volume %d already exists", vol_id); +		goto out_unlock; +	} + +	/* Ensure that the name is unique */ +	for (i = 0; i < ubi->vtbl_slots; i++) +		if (ubi->volumes[i] && +		    ubi->volumes[i]->name_len == req->name_len && +		    !strcmp(ubi->volumes[i]->name, req->name)) { +			dbg_err("volume \"%s\" exists (ID %d)", req->name, i); +			goto out_unlock; +		} + +	/* Calculate how many eraseblocks are requested */ +	vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment; +	bytes = req->bytes; +	if (do_div(bytes, vol->usable_leb_size)) +		vol->reserved_pebs = 1; +	vol->reserved_pebs += bytes; + +	/* Reserve physical eraseblocks */ +	if (vol->reserved_pebs > ubi->avail_pebs) { +		dbg_err("not enough PEBs, only 
%d available", ubi->avail_pebs); +		err = -ENOSPC; +		goto out_unlock; +	} +	ubi->avail_pebs -= vol->reserved_pebs; +	ubi->rsvd_pebs += vol->reserved_pebs; +	spin_unlock(&ubi->volumes_lock); + +	vol->vol_id    = vol_id; +	vol->alignment = req->alignment; +	vol->data_pad  = ubi->leb_size % vol->alignment; +	vol->vol_type  = req->vol_type; +	vol->name_len  = req->name_len; +	memcpy(vol->name, req->name, vol->name_len + 1); +	vol->ubi = ubi; + +	/* +	 * Finish all pending erases because there may be some LEBs belonging +	 * to the same volume ID. +	 */ +	err = ubi_wl_flush(ubi); +	if (err) +		goto out_acc; + +	vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int), GFP_KERNEL); +	if (!vol->eba_tbl) { +		err = -ENOMEM; +		goto out_acc; +	} + +	for (i = 0; i < vol->reserved_pebs; i++) +		vol->eba_tbl[i] = UBI_LEB_UNMAPPED; + +	if (vol->vol_type == UBI_DYNAMIC_VOLUME) { +		vol->used_ebs = vol->reserved_pebs; +		vol->last_eb_bytes = vol->usable_leb_size; +		vol->used_bytes = +			(long long)vol->used_ebs * vol->usable_leb_size; +	} else { +		bytes = vol->used_bytes; +		vol->last_eb_bytes = do_div(bytes, vol->usable_leb_size); +		vol->used_ebs = bytes; +		if (vol->last_eb_bytes) +			vol->used_ebs += 1; +		else +			vol->last_eb_bytes = vol->usable_leb_size; +	} + +	/* Register character device for the volume */ +	cdev_init(&vol->cdev, &ubi_vol_cdev_operations); +	vol->cdev.owner = THIS_MODULE; +	dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1); +	err = cdev_add(&vol->cdev, dev, 1); +	if (err) { +		ubi_err("cannot add character device"); +		goto out_mapping; +	} + +	err = ubi_create_gluebi(ubi, vol); +	if (err) +		goto out_cdev; + +	vol->dev.release = vol_release; +	vol->dev.parent = &ubi->dev; +	vol->dev.devt = dev; +	vol->dev.class = ubi_class; + +	sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); +	err = device_register(&vol->dev); +	if (err) { +		ubi_err("cannot register device"); +		goto out_gluebi; +	} + +	err = volume_sysfs_init(ubi, vol); +	if (err) +		
goto out_sysfs; + +	/* Fill volume table record */ +	memset(&vtbl_rec, 0, sizeof(struct ubi_vtbl_record)); +	vtbl_rec.reserved_pebs = cpu_to_be32(vol->reserved_pebs); +	vtbl_rec.alignment     = cpu_to_be32(vol->alignment); +	vtbl_rec.data_pad      = cpu_to_be32(vol->data_pad); +	vtbl_rec.name_len      = cpu_to_be16(vol->name_len); +	if (vol->vol_type == UBI_DYNAMIC_VOLUME) +		vtbl_rec.vol_type = UBI_VID_DYNAMIC; +	else +		vtbl_rec.vol_type = UBI_VID_STATIC; +	memcpy(vtbl_rec.name, vol->name, vol->name_len + 1); + +	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); +	if (err) +		goto out_sysfs; + +	spin_lock(&ubi->volumes_lock); +	ubi->volumes[vol_id] = vol; +	ubi->vol_count += 1; +	spin_unlock(&ubi->volumes_lock); + +	paranoid_check_volumes(ubi); +	return 0; + +out_sysfs: +	/* +	 * We have registered our device, we should not free the volume* +	 * description object in this function in case of an error - it is +	 * freed by the release function. +	 * +	 * Get device reference to prevent the release function from being +	 * called just after sysfs has been closed. +	 */ +	dont_free = 1; +	get_device(&vol->dev); +	volume_sysfs_close(vol); +out_gluebi: +	if (ubi_destroy_gluebi(vol)) +		dbg_err("cannot destroy gluebi for volume %d:%d", +			ubi->ubi_num, vol_id); +out_cdev: +	cdev_del(&vol->cdev); +out_mapping: +	kfree(vol->eba_tbl); +out_acc: +	spin_lock(&ubi->volumes_lock); +	ubi->rsvd_pebs -= vol->reserved_pebs; +	ubi->avail_pebs += vol->reserved_pebs; +out_unlock: +	spin_unlock(&ubi->volumes_lock); +	if (dont_free) +		put_device(&vol->dev); +	else +		kfree(vol); +	ubi_err("cannot create volume %d, error %d", vol_id, err); +	return err; +} + +/** + * ubi_remove_volume - remove volume. + * @desc: volume descriptor + * + * This function removes volume described by @desc. The volume has to be opened + * in "exclusive" mode. Returns zero in case of success and a negative error + * code in case of failure. The caller has to have the @ubi->volumes_mutex + * locked. 
+ */ +int ubi_remove_volume(struct ubi_volume_desc *desc) +{ +	struct ubi_volume *vol = desc->vol; +	struct ubi_device *ubi = vol->ubi; +	int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; + +	dbg_msg("remove UBI volume %d", vol_id); +	ubi_assert(desc->mode == UBI_EXCLUSIVE); +	ubi_assert(vol == ubi->volumes[vol_id]); + +	if (ubi->ro_mode) +		return -EROFS; + +	spin_lock(&ubi->volumes_lock); +	if (vol->ref_count > 1) { +		/* +		 * The volume is busy, probably someone is reading one of its +		 * sysfs files. +		 */ +		err = -EBUSY; +		goto out_unlock; +	} +	ubi->volumes[vol_id] = NULL; +	spin_unlock(&ubi->volumes_lock); + +	err = ubi_destroy_gluebi(vol); +	if (err) +		goto out_err; + +	err = ubi_change_vtbl_record(ubi, vol_id, NULL); +	if (err) +		goto out_err; + +	for (i = 0; i < vol->reserved_pebs; i++) { +		err = ubi_eba_unmap_leb(ubi, vol, i); +		if (err) +			goto out_err; +	} + +	kfree(vol->eba_tbl); +	vol->eba_tbl = NULL; +	cdev_del(&vol->cdev); +	volume_sysfs_close(vol); + +	spin_lock(&ubi->volumes_lock); +	ubi->rsvd_pebs -= reserved_pebs; +	ubi->avail_pebs += reserved_pebs; +	i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs; +	if (i > 0) { +		i = ubi->avail_pebs >= i ? i : ubi->avail_pebs; +		ubi->avail_pebs -= i; +		ubi->rsvd_pebs += i; +		ubi->beb_rsvd_pebs += i; +		if (i > 0) +			ubi_msg("reserve more %d PEBs", i); +	} +	ubi->vol_count -= 1; +	spin_unlock(&ubi->volumes_lock); + +	paranoid_check_volumes(ubi); +	return 0; + +out_err: +	ubi_err("cannot remove volume %d, error %d", vol_id, err); +	spin_lock(&ubi->volumes_lock); +	ubi->volumes[vol_id] = vol; +out_unlock: +	spin_unlock(&ubi->volumes_lock); +	return err; +} + +/** + * ubi_resize_volume - re-size volume. + * @desc: volume descriptor + * @reserved_pebs: new size in physical eraseblocks + * + * This function re-sizes the volume and returns zero in case of success, and a + * negative error code in case of failure. The caller has to have the + * @ubi->volumes_mutex locked. 
+ */ +int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) +{ +	int i, err, pebs, *new_mapping; +	struct ubi_volume *vol = desc->vol; +	struct ubi_device *ubi = vol->ubi; +	struct ubi_vtbl_record vtbl_rec; +	int vol_id = vol->vol_id; + +	if (ubi->ro_mode) +		return -EROFS; + +	dbg_msg("re-size volume %d to from %d to %d PEBs", +		vol_id, vol->reserved_pebs, reserved_pebs); + +	if (vol->vol_type == UBI_STATIC_VOLUME && +	    reserved_pebs < vol->used_ebs) { +		dbg_err("too small size %d, %d LEBs contain data", +			reserved_pebs, vol->used_ebs); +		return -EINVAL; +	} + +	/* If the size is the same, we have nothing to do */ +	if (reserved_pebs == vol->reserved_pebs) +		return 0; + +	new_mapping = kmalloc(reserved_pebs * sizeof(int), GFP_KERNEL); +	if (!new_mapping) +		return -ENOMEM; + +	for (i = 0; i < reserved_pebs; i++) +		new_mapping[i] = UBI_LEB_UNMAPPED; + +	spin_lock(&ubi->volumes_lock); +	if (vol->ref_count > 1) { +		spin_unlock(&ubi->volumes_lock); +		err = -EBUSY; +		goto out_free; +	} +	spin_unlock(&ubi->volumes_lock); + +	/* Reserve physical eraseblocks */ +	pebs = reserved_pebs - vol->reserved_pebs; +	if (pebs > 0) { +		spin_lock(&ubi->volumes_lock); +		if (pebs > ubi->avail_pebs) { +			dbg_err("not enough PEBs: requested %d, available %d", +				pebs, ubi->avail_pebs); +			spin_unlock(&ubi->volumes_lock); +			err = -ENOSPC; +			goto out_free; +		} +		ubi->avail_pebs -= pebs; +		ubi->rsvd_pebs += pebs; +		for (i = 0; i < vol->reserved_pebs; i++) +			new_mapping[i] = vol->eba_tbl[i]; +		kfree(vol->eba_tbl); +		vol->eba_tbl = new_mapping; +		spin_unlock(&ubi->volumes_lock); +	} + +	/* Change volume table record */ +	memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record)); +	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs); +	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); +	if (err) +		goto out_acc; + +	if (pebs < 0) { +		for (i = 0; i < -pebs; i++) { +			err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i); +			if 
(err) +				goto out_acc; +		} +		spin_lock(&ubi->volumes_lock); +		ubi->rsvd_pebs += pebs; +		ubi->avail_pebs -= pebs; +		pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs; +		if (pebs > 0) { +			pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs; +			ubi->avail_pebs -= pebs; +			ubi->rsvd_pebs += pebs; +			ubi->beb_rsvd_pebs += pebs; +			if (pebs > 0) +				ubi_msg("reserve more %d PEBs", pebs); +		} +		for (i = 0; i < reserved_pebs; i++) +			new_mapping[i] = vol->eba_tbl[i]; +		kfree(vol->eba_tbl); +		vol->eba_tbl = new_mapping; +		spin_unlock(&ubi->volumes_lock); +	} + +	vol->reserved_pebs = reserved_pebs; +	if (vol->vol_type == UBI_DYNAMIC_VOLUME) { +		vol->used_ebs = reserved_pebs; +		vol->last_eb_bytes = vol->usable_leb_size; +		vol->used_bytes = +			(long long)vol->used_ebs * vol->usable_leb_size; +	} + +	paranoid_check_volumes(ubi); +	return 0; + +out_acc: +	if (pebs > 0) { +		spin_lock(&ubi->volumes_lock); +		ubi->rsvd_pebs -= pebs; +		ubi->avail_pebs += pebs; +		spin_unlock(&ubi->volumes_lock); +	} +out_free: +	kfree(new_mapping); +	return err; +} + +/** + * ubi_add_volume - add volume. + * @ubi: UBI device description object + * @vol: volume description object + * + * This function adds an existing volume and initializes all its data + * structures. Returns zero in case of success and a negative error code in + * case of failure. 
+ */ +int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) +{ +	int err, vol_id = vol->vol_id; +	dev_t dev; + +	dbg_msg("add volume %d", vol_id); +	ubi_dbg_dump_vol_info(vol); + +	/* Register character device for the volume */ +	cdev_init(&vol->cdev, &ubi_vol_cdev_operations); +	vol->cdev.owner = THIS_MODULE; +	dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1); +	err = cdev_add(&vol->cdev, dev, 1); +	if (err) { +		ubi_err("cannot add character device for volume %d, error %d", +			vol_id, err); +		return err; +	} + +	err = ubi_create_gluebi(ubi, vol); +	if (err) +		goto out_cdev; + +	vol->dev.release = vol_release; +	vol->dev.parent = &ubi->dev; +	vol->dev.devt = dev; +	vol->dev.class = ubi_class; +	sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); +	err = device_register(&vol->dev); +	if (err) +		goto out_gluebi; + +	err = volume_sysfs_init(ubi, vol); +	if (err) { +		cdev_del(&vol->cdev); +		err = ubi_destroy_gluebi(vol); +		volume_sysfs_close(vol); +		return err; +	} + +	paranoid_check_volumes(ubi); +	return 0; + +out_gluebi: +	err = ubi_destroy_gluebi(vol); +out_cdev: +	cdev_del(&vol->cdev); +	return err; +} + +/** + * ubi_free_volume - free volume. + * @ubi: UBI device description object + * @vol: volume description object + * + * This function frees all resources for volume @vol but does not remove it. + * Used only when the UBI device is detached. + */ +void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) +{ +	dbg_msg("free volume %d", vol->vol_id); + +	ubi->volumes[vol->vol_id] = NULL; +	ubi_destroy_gluebi(vol); +	cdev_del(&vol->cdev); +	volume_sysfs_close(vol); +} + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID + +/** + * paranoid_check_volume - check volume information. 
+ * @ubi: UBI device description object + * @vol_id: volume ID + */ +static void paranoid_check_volume(struct ubi_device *ubi, int vol_id) +{ +	int idx = vol_id2idx(ubi, vol_id); +	int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker; +	const struct ubi_volume *vol; +	long long n; +	const char *name; + +	spin_lock(&ubi->volumes_lock); +	reserved_pebs = be32_to_cpu(ubi->vtbl[vol_id].reserved_pebs); +	vol = ubi->volumes[idx]; + +	if (!vol) { +		if (reserved_pebs) { +			ubi_err("no volume info, but volume exists"); +			goto fail; +		} +		spin_unlock(&ubi->volumes_lock); +		return; +	} + +	if (vol->exclusive) { +		/* +		 * The volume may be being created at the moment, do not check +		 * it (e.g., it may be in the middle of ubi_create_volume(). +		 */ +		spin_unlock(&ubi->volumes_lock); +		return; +	} + +	if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 || +	    vol->name_len < 0) { +		ubi_err("negative values"); +		goto fail; +	} +	if (vol->alignment > ubi->leb_size || vol->alignment == 0) { +		ubi_err("bad alignment"); +		goto fail; +	} + +	n = vol->alignment & (ubi->min_io_size - 1); +	if (vol->alignment != 1 && n) { +		ubi_err("alignment is not multiple of min I/O unit"); +		goto fail; +	} + +	n = ubi->leb_size % vol->alignment; +	if (vol->data_pad != n) { +		ubi_err("bad data_pad, has to be %lld", n); +		goto fail; +	} + +	if (vol->vol_type != UBI_DYNAMIC_VOLUME && +	    vol->vol_type != UBI_STATIC_VOLUME) { +		ubi_err("bad vol_type"); +		goto fail; +	} + +	if (vol->upd_marker && vol->corrupted) { +		dbg_err("update marker and corrupted simultaneously"); +		goto fail; +	} + +	if (vol->reserved_pebs > ubi->good_peb_count) { +		ubi_err("too large reserved_pebs"); +		goto fail; +	} + +	n = ubi->leb_size - vol->data_pad; +	if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) { +		ubi_err("bad usable_leb_size, has to be %lld", n); +		goto fail; +	} + +	if (vol->name_len > UBI_VOL_NAME_MAX) { +		ubi_err("too long volume name, 
max is %d", UBI_VOL_NAME_MAX); +		goto fail; +	} + +	if (!vol->name) { +		ubi_err("NULL volume name"); +		goto fail; +	} + +	n = strnlen(vol->name, vol->name_len + 1); +	if (n != vol->name_len) { +		ubi_err("bad name_len %lld", n); +		goto fail; +	} + +	n = (long long)vol->used_ebs * vol->usable_leb_size; +	if (vol->vol_type == UBI_DYNAMIC_VOLUME) { +		if (vol->corrupted) { +			ubi_err("corrupted dynamic volume"); +			goto fail; +		} +		if (vol->used_ebs != vol->reserved_pebs) { +			ubi_err("bad used_ebs"); +			goto fail; +		} +		if (vol->last_eb_bytes != vol->usable_leb_size) { +			ubi_err("bad last_eb_bytes"); +			goto fail; +		} +		if (vol->used_bytes != n) { +			ubi_err("bad used_bytes"); +			goto fail; +		} +	} else { +		if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) { +			ubi_err("bad used_ebs"); +			goto fail; +		} +		if (vol->last_eb_bytes < 0 || +		    vol->last_eb_bytes > vol->usable_leb_size) { +			ubi_err("bad last_eb_bytes"); +			goto fail; +		} +		if (vol->used_bytes < 0 || vol->used_bytes > n || +		    vol->used_bytes < n - vol->usable_leb_size) { +			ubi_err("bad used_bytes"); +			goto fail; +		} +	} + +	alignment  = be32_to_cpu(ubi->vtbl[vol_id].alignment); +	data_pad   = be32_to_cpu(ubi->vtbl[vol_id].data_pad); +	name_len   = be16_to_cpu(ubi->vtbl[vol_id].name_len); +	upd_marker = ubi->vtbl[vol_id].upd_marker; +	name       = &ubi->vtbl[vol_id].name[0]; +	if (ubi->vtbl[vol_id].vol_type == UBI_VID_DYNAMIC) +		vol_type = UBI_DYNAMIC_VOLUME; +	else +		vol_type = UBI_STATIC_VOLUME; + +	if (alignment != vol->alignment || data_pad != vol->data_pad || +	    upd_marker != vol->upd_marker || vol_type != vol->vol_type || +	    name_len!= vol->name_len || strncmp(name, vol->name, name_len)) { +		ubi_err("volume info is different"); +		goto fail; +	} + +	spin_unlock(&ubi->volumes_lock); +	return; + +fail: +	ubi_err("paranoid check failed for volume %d", vol_id); +	ubi_dbg_dump_vol_info(vol); +	ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); 
+	spin_unlock(&ubi->volumes_lock); +	BUG(); +} + +/** + * paranoid_check_volumes - check information about all volumes. + * @ubi: UBI device description object + */ +static void paranoid_check_volumes(struct ubi_device *ubi) +{ +	int i; + +	for (i = 0; i < ubi->vtbl_slots; i++) +		paranoid_check_volume(ubi, i); +} +#endif diff --git a/roms/u-boot/drivers/mtd/ubi/vtbl.c b/roms/u-boot/drivers/mtd/ubi/vtbl.c new file mode 100644 index 00000000..3fbb4a0a --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/vtbl.c @@ -0,0 +1,826 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * Copyright (c) Nokia Corporation, 2006, 2007 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +/* + * This file includes volume table manipulation code. The volume table is an + * on-flash table containing volume meta-data like name, number of reserved + * physical eraseblocks, type, etc. The volume table is stored in the so-called + * "layout volume". + * + * The layout volume is an internal volume which is organized as follows. It + * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical + * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each + * other. This redundancy guarantees robustness to unclean reboots. The volume + * table is basically an array of volume table records. Each record contains + * full information about the volume and protected by a CRC checksum. + * + * The volume table is changed, it is first changed in RAM. Then LEB 0 is + * erased, and the updated volume table is written back to LEB 0. Then same for + * LEB 1. This scheme guarantees recoverability from unclean reboots. + * + * In this UBI implementation the on-flash volume table does not contain any + * information about how many data static volumes contain. This information may + * be found from the scanning data. + * + * But it would still be beneficial to store this information in the volume + * table. 
For example, suppose we have a static volume X, and all its physical + * eraseblocks became bad for some reasons. Suppose we are attaching the + * corresponding MTD device, the scanning has found no logical eraseblocks + * corresponding to the volume X. According to the volume table volume X does + * exist. So we don't know whether it is just empty or all its physical + * eraseblocks went bad. So we cannot alarm the user about this corruption. + * + * The volume table also stores so-called "update marker", which is used for + * volume updates. Before updating the volume, the update marker is set, and + * after the update operation is finished, the update marker is cleared. So if + * the update operation was interrupted (e.g. by an unclean reboot) - the + * update marker is still there and we know that the volume's contents is + * damaged. + */ + +#ifdef UBI_LINUX +#include <linux/crc32.h> +#include <linux/err.h> +#include <asm/div64.h> +#endif + +#include <ubi_uboot.h> +#include "ubi.h" + +#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +static void paranoid_vtbl_check(const struct ubi_device *ubi); +#else +#define paranoid_vtbl_check(ubi) +#endif + +/* Empty volume table record */ +static struct ubi_vtbl_record empty_vtbl_record; + +/** + * ubi_change_vtbl_record - change volume table record. + * @ubi: UBI device description object + * @idx: table index to change + * @vtbl_rec: new volume table record + * + * This function changes volume table record @idx. If @vtbl_rec is %NULL, empty + * volume table record is written. The caller does not have to calculate CRC of + * the record as it is done by this function. Returns zero in case of success + * and a negative error code in case of failure. 
+ */ +int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, +			   struct ubi_vtbl_record *vtbl_rec) +{ +	int i, err; +	uint32_t crc; +	struct ubi_volume *layout_vol; + +	ubi_assert(idx >= 0 && idx < ubi->vtbl_slots); +	layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; + +	if (!vtbl_rec) +		vtbl_rec = &empty_vtbl_record; +	else { +		crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC); +		vtbl_rec->crc = cpu_to_be32(crc); +	} + +	memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); +	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { +		err = ubi_eba_unmap_leb(ubi, layout_vol, i); +		if (err) +			return err; + +		err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, +					ubi->vtbl_size, UBI_LONGTERM); +		if (err) +			return err; +	} + +	paranoid_vtbl_check(ubi); +	return 0; +} + +/** + * vtbl_check - check if volume table is not corrupted and contains sensible + *              data. + * @ubi: UBI device description object + * @vtbl: volume table + * + * This function returns zero if @vtbl is all right, %1 if CRC is incorrect, + * and %-EINVAL if it contains inconsistent data. 
+ */ +static int vtbl_check(const struct ubi_device *ubi, +		      const struct ubi_vtbl_record *vtbl) +{ +	int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; +	int upd_marker, err; +	uint32_t crc; +	const char *name; + +	for (i = 0; i < ubi->vtbl_slots; i++) { +		cond_resched(); + +		reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs); +		alignment = be32_to_cpu(vtbl[i].alignment); +		data_pad = be32_to_cpu(vtbl[i].data_pad); +		upd_marker = vtbl[i].upd_marker; +		vol_type = vtbl[i].vol_type; +		name_len = be16_to_cpu(vtbl[i].name_len); +		name = (const char *) &vtbl[i].name[0]; + +		crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC); +		if (be32_to_cpu(vtbl[i].crc) != crc) { +			ubi_err("bad CRC at record %u: %#08x, not %#08x", +				 i, crc, be32_to_cpu(vtbl[i].crc)); +			ubi_dbg_dump_vtbl_record(&vtbl[i], i); +			return 1; +		} + +		if (reserved_pebs == 0) { +			if (memcmp(&vtbl[i], &empty_vtbl_record, +						UBI_VTBL_RECORD_SIZE)) { +				err = 2; +				goto bad; +			} +			continue; +		} + +		if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 || +		    name_len < 0) { +			err = 3; +			goto bad; +		} + +		if (alignment > ubi->leb_size || alignment == 0) { +			err = 4; +			goto bad; +		} + +		n = alignment & (ubi->min_io_size - 1); +		if (alignment != 1 && n) { +			err = 5; +			goto bad; +		} + +		n = ubi->leb_size % alignment; +		if (data_pad != n) { +			dbg_err("bad data_pad, has to be %d", n); +			err = 6; +			goto bad; +		} + +		if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { +			err = 7; +			goto bad; +		} + +		if (upd_marker != 0 && upd_marker != 1) { +			err = 8; +			goto bad; +		} + +		if (reserved_pebs > ubi->good_peb_count) { +			dbg_err("too large reserved_pebs, good PEBs %d", +				ubi->good_peb_count); +			err = 9; +			goto bad; +		} + +		if (name_len > UBI_VOL_NAME_MAX) { +			err = 10; +			goto bad; +		} + +		if (name[0] == '\0') { +			err = 11; +			goto bad; +		} + +		if (name_len != strnlen(name, 
name_len + 1)) { +			err = 12; +			goto bad; +		} +	} + +	/* Checks that all names are unique */ +	for (i = 0; i < ubi->vtbl_slots - 1; i++) { +		for (n = i + 1; n < ubi->vtbl_slots; n++) { +			int len1 = be16_to_cpu(vtbl[i].name_len); +			int len2 = be16_to_cpu(vtbl[n].name_len); + +			if (len1 > 0 && len1 == len2 && +			    !strncmp((char *)vtbl[i].name, (char *)vtbl[n].name, len1)) { +				ubi_err("volumes %d and %d have the same name" +					" \"%s\"", i, n, vtbl[i].name); +				ubi_dbg_dump_vtbl_record(&vtbl[i], i); +				ubi_dbg_dump_vtbl_record(&vtbl[n], n); +				return -EINVAL; +			} +		} +	} + +	return 0; + +bad: +	ubi_err("volume table check failed: record %d, error %d", i, err); +	ubi_dbg_dump_vtbl_record(&vtbl[i], i); +	return -EINVAL; +} + +/** + * create_vtbl - create a copy of volume table. + * @ubi: UBI device description object + * @si: scanning information + * @copy: number of the volume table copy + * @vtbl: contents of the volume table + * + * This function returns zero in case of success and a negative error code in + * case of failure. + */ +static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si, +		       int copy, void *vtbl) +{ +	int err, tries = 0; +	static struct ubi_vid_hdr *vid_hdr; +	struct ubi_scan_volume *sv; +	struct ubi_scan_leb *new_seb, *old_seb = NULL; + +	ubi_msg("create volume table (copy #%d)", copy + 1); + +	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); +	if (!vid_hdr) +		return -ENOMEM; + +	/* +	 * Check if there is a logical eraseblock which would have to contain +	 * this volume table copy was found during scanning. It has to be wiped +	 * out. 
+	 */ +	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID); +	if (sv) +		old_seb = ubi_scan_find_seb(sv, copy); + +retry: +	new_seb = ubi_scan_get_free_peb(ubi, si); +	if (IS_ERR(new_seb)) { +		err = PTR_ERR(new_seb); +		goto out_free; +	} + +	vid_hdr->vol_type = UBI_VID_DYNAMIC; +	vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID); +	vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT; +	vid_hdr->data_size = vid_hdr->used_ebs = +			     vid_hdr->data_pad = cpu_to_be32(0); +	vid_hdr->lnum = cpu_to_be32(copy); +	vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum); +	vid_hdr->leb_ver = cpu_to_be32(old_seb ? old_seb->leb_ver + 1: 0); + +	/* The EC header is already there, write the VID header */ +	err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr); +	if (err) +		goto write_error; + +	/* Write the layout volume contents */ +	err = ubi_io_write_data(ubi, vtbl, new_seb->pnum, 0, ubi->vtbl_size); +	if (err) +		goto write_error; + +	/* +	 * And add it to the scanning information. Don't delete the old +	 * @old_seb as it will be deleted and freed in 'ubi_scan_add_used()'. +	 */ +	err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec, +				vid_hdr, 0); +	kfree(new_seb); +	ubi_free_vid_hdr(ubi, vid_hdr); +	return err; + +write_error: +	if (err == -EIO && ++tries <= 5) { +		/* +		 * Probably this physical eraseblock went bad, try to pick +		 * another one. +		 */ +		list_add_tail(&new_seb->u.list, &si->corr); +		goto retry; +	} +	kfree(new_seb); +out_free: +	ubi_free_vid_hdr(ubi, vid_hdr); +	return err; + +} + +/** + * process_lvol - process the layout volume. + * @ubi: UBI device description object + * @si: scanning information + * @sv: layout volume scanning information + * + * This function is responsible for reading the layout volume, ensuring it is + * not corrupted, and recovering from corruptions if needed. Returns volume + * table in case of success and a negative error code in case of failure. 
 */
static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
					    struct ubi_scan_info *si,
					    struct ubi_scan_volume *sv)
{
	int err;
	struct rb_node *rb;
	struct ubi_scan_leb *seb;
	struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
	/* 1 == "corrupted or absent" until proven otherwise */
	int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};

	/*
	 * UBI goes through the following steps when it changes the layout
	 * volume:
	 * a. erase LEB 0;
	 * b. write new data to LEB 0;
	 * c. erase LEB 1;
	 * d. write new data to LEB 1.
	 *
	 * Before the change, both LEBs contain the same data.
	 *
	 * Due to unclean reboots, the contents of LEB 0 may be lost, but LEB 1
	 * should still be there. So it is OK if LEB 0 is corrupted while LEB 1
	 * is not. Similarly, LEB 1 may be lost, but there should be LEB 0. And
	 * finally, unclean reboots may result in a situation when neither LEB
	 * 0 nor LEB 1 are corrupted, but they are different. In this case, LEB
	 * 0 contains the more recent information.
	 *
	 * So the plan is to first check LEB 0. Then
	 * a. if LEB 0 is OK, it must contain the most recent data; then
	 *    we compare it with LEB 1, and if they are different, we copy LEB
	 *    0 to LEB 1;
	 * b. if LEB 0 is corrupted, then LEB 1 has to be OK, and we copy LEB 1
	 *    to LEB 0.
	 */

	dbg_msg("check layout volume");

	/* Read both LEB 0 and LEB 1 into memory */
	ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
		leb[seb->lnum] = vmalloc(ubi->vtbl_size);
		if (!leb[seb->lnum]) {
			err = -ENOMEM;
			goto out_free;
		}
		memset(leb[seb->lnum], 0, ubi->vtbl_size);

		err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
				       ubi->vtbl_size);
		if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
			/*
			 * Scrub the PEB later. Note, -EBADMSG indicates an
			 * uncorrectable ECC error, but we have our own CRC and
			 * the data will be checked later. If the data is OK,
			 * the PEB will be scrubbed (because we set
			 * seb->scrub). If the data is not OK, the contents of
			 * the PEB will be recovered from the second copy, and
			 * seb->scrub will be cleared in
			 * 'ubi_scan_add_used()'.
			 */
			seb->scrub = 1;
		else if (err)
			goto out_free;
	}

	err = -EINVAL;
	if (leb[0]) {
		leb_corrupted[0] = vtbl_check(ubi, leb[0]);
		if (leb_corrupted[0] < 0)
			goto out_free;
	}

	if (!leb_corrupted[0]) {
		/* LEB 0 is OK */
		if (leb[1])
			leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size);
		if (leb_corrupted[1]) {
			ubi_warn("volume table copy #2 is corrupted");
			err = create_vtbl(ubi, si, 1, leb[0]);
			if (err)
				goto out_free;
			ubi_msg("volume table was restored");
		}

		/* Both LEB 0 and LEB 1 are OK and consistent */
		vfree(leb[1]);
		return leb[0];
	} else {
		/* LEB 0 is corrupted or does not exist */
		if (leb[1]) {
			leb_corrupted[1] = vtbl_check(ubi, leb[1]);
			if (leb_corrupted[1] < 0)
				goto out_free;
		}
		if (leb_corrupted[1]) {
			/* Both LEB 0 and LEB 1 are corrupted */
			ubi_err("both volume tables are corrupted");
			goto out_free;
		}

		ubi_warn("volume table copy #1 is corrupted");
		err = create_vtbl(ubi, si, 0, leb[1]);
		if (err)
			goto out_free;
		ubi_msg("volume table was restored");

		vfree(leb[0]);
		return leb[1];
	}

out_free:
	vfree(leb[0]);
	vfree(leb[1]);
	return ERR_PTR(err);
}

/**
 * create_empty_lvol - create empty layout volume.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns volume table contents in case of success and a
 * negative error code in case of failure.
 */
static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
						 struct ubi_scan_info *si)
{
	int i;
	struct ubi_vtbl_record *vtbl;

	vtbl = vmalloc(ubi->vtbl_size);
	if (!vtbl)
		return ERR_PTR(-ENOMEM);
	memset(vtbl, 0, ubi->vtbl_size);

	/* Fill every slot with the pre-computed empty record */
	for (i = 0; i < ubi->vtbl_slots; i++)
		memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);

	/* Write both on-flash copies of the layout volume */
	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
		int err;

		err = create_vtbl(ubi, si, i, vtbl);
		if (err) {
			vfree(vtbl);
			return ERR_PTR(err);
		}
	}

	return vtbl;
}

/**
 * init_volumes - initialize volume information for existing volumes.
 * @ubi: UBI device description object
 * @si: scanning information
 * @vtbl: volume table
 *
 * This function allocates volume description objects for existing volumes.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
			const struct ubi_vtbl_record *vtbl)
{
	int i, reserved_pebs = 0;
	struct ubi_scan_volume *sv;
	struct ubi_volume *vol;

	for (i = 0; i < ubi->vtbl_slots; i++) {
		cond_resched();

		if (be32_to_cpu(vtbl[i].reserved_pebs) == 0)
			continue; /* Empty record */

		vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
		if (!vol)
			return -ENOMEM;

		vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
		vol->alignment = be32_to_cpu(vtbl[i].alignment);
		vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
		vol->upd_marker = vtbl[i].upd_marker;
		vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
					UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
		vol->name_len = be16_to_cpu(vtbl[i].name_len);
		vol->usable_leb_size = ubi->leb_size - vol->data_pad;
		memcpy(vol->name, vtbl[i].name, vol->name_len);
		vol->name[vol->name_len] = '\0';
		vol->vol_id = i;

		if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
			/* Auto re-size flag may be set only for one volume */
			if (ubi->autoresize_vol_id != -1) {
				ubi_err("more then one auto-resize volume (%d "
					"and %d)", ubi->autoresize_vol_id, i);
				kfree(vol);
				return -EINVAL;
			}

			ubi->autoresize_vol_id = i;
		}

		ubi_assert(!ubi->volumes[i]);
		ubi->volumes[i] = vol;
		ubi->vol_count += 1;
		vol->ubi = ubi;
		reserved_pebs += vol->reserved_pebs;

		/*
		 * In case of a dynamic volume UBI knows nothing about how much
		 * data is stored there. So assume the whole volume is used.
		 */
		if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
			vol->used_ebs = vol->reserved_pebs;
			vol->last_eb_bytes = vol->usable_leb_size;
			vol->used_bytes =
				(long long)vol->used_ebs * vol->usable_leb_size;
			continue;
		}

		/* Static volumes only */
		sv = ubi_scan_find_sv(si, i);
		if (!sv) {
			/*
			 * No eraseblocks belonging to this volume found. We
			 * don't actually know whether this static volume is
			 * completely corrupted or just contains no data. And
			 * we cannot know this as long as data size is not
			 * stored on flash. So we just assume the volume is
			 * empty. FIXME: this should be handled.
			 */
			continue;
		}

		if (sv->leb_count != sv->used_ebs) {
			/*
			 * We found a static volume which misses several
			 * eraseblocks. Treat it as corrupted.
			 */
			ubi_warn("static volume %d misses %d LEBs - corrupted",
				 sv->vol_id, sv->used_ebs - sv->leb_count);
			vol->corrupted = 1;
			continue;
		}

		vol->used_ebs = sv->used_ebs;
		vol->used_bytes =
			(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
		vol->used_bytes += sv->last_data_size;
		vol->last_eb_bytes = sv->last_data_size;
	}

	/* And add the layout volume */
	vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
	if (!vol)
		return -ENOMEM;

	vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
	vol->alignment = 1;
	vol->vol_type = UBI_DYNAMIC_VOLUME;
	vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
	/* name_len + 1 copies the terminating NUL of the string literal too */
	memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
	vol->usable_leb_size = ubi->leb_size;
	vol->used_ebs = vol->reserved_pebs;
	/*
	 * NOTE(review): assigning the PEB count to @last_eb_bytes looks
	 * suspicious (a byte count is expected elsewhere) - confirm against
	 * upstream before changing.
	 */
	vol->last_eb_bytes = vol->reserved_pebs;
	vol->used_bytes =
		(long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
	vol->vol_id = UBI_LAYOUT_VOLUME_ID;
	vol->ref_count = 1;

	/* Here i == ubi->vtbl_slots, i.e. the first internal-volume slot */
	ubi_assert(!ubi->volumes[i]);
	ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
	reserved_pebs += vol->reserved_pebs;
	ubi->vol_count += 1;
	vol->ubi = ubi;

	if (reserved_pebs > ubi->avail_pebs)
		ubi_err("not enough PEBs, required %d, available %d",
			reserved_pebs, ubi->avail_pebs);
	ubi->rsvd_pebs += reserved_pebs;
	ubi->avail_pebs -= reserved_pebs;

	return 0;
}

/**
 * check_sv - check volume scanning information.
 * @vol: UBI volume description object
 * @sv: volume scanning information
 *
 * This function returns zero if the volume scanning information is consistent
 * with the data read from the volume table, and %-EINVAL if not.
+ */ +static int check_sv(const struct ubi_volume *vol, +		    const struct ubi_scan_volume *sv) +{ +	int err; + +	if (sv->highest_lnum >= vol->reserved_pebs) { +		err = 1; +		goto bad; +	} +	if (sv->leb_count > vol->reserved_pebs) { +		err = 2; +		goto bad; +	} +	if (sv->vol_type != vol->vol_type) { +		err = 3; +		goto bad; +	} +	if (sv->used_ebs > vol->reserved_pebs) { +		err = 4; +		goto bad; +	} +	if (sv->data_pad != vol->data_pad) { +		err = 5; +		goto bad; +	} +	return 0; + +bad: +	ubi_err("bad scanning information, error %d", err); +	ubi_dbg_dump_sv(sv); +	ubi_dbg_dump_vol_info(vol); +	return -EINVAL; +} + +/** + * check_scanning_info - check that scanning information. + * @ubi: UBI device description object + * @si: scanning information + * + * Even though we protect on-flash data by CRC checksums, we still don't trust + * the media. This function ensures that scanning information is consistent to + * the information read from the volume table. Returns zero if the scanning + * information is OK and %-EINVAL if it is not. 
 */
static int check_scanning_info(const struct ubi_device *ubi,
			       struct ubi_scan_info *si)
{
	int err, i;
	struct ubi_scan_volume *sv;
	struct ubi_volume *vol;

	if (si->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
		ubi_err("scanning found %d volumes, maximum is %d + %d",
			si->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
		return -EINVAL;
	}

	/* Volume IDs above the user range must fall in the internal range */
	if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
	    si->highest_vol_id < UBI_INTERNAL_VOL_START) {
		ubi_err("too large volume ID %d found by scanning",
			si->highest_vol_id);
		return -EINVAL;
	}

	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		cond_resched();

		sv = ubi_scan_find_sv(si, i);
		vol = ubi->volumes[i];
		if (!vol) {
			/* Drop scanning data for volumes the table lacks */
			if (sv)
				ubi_scan_rm_volume(si, sv);
			continue;
		}

		if (vol->reserved_pebs == 0) {
			ubi_assert(i < ubi->vtbl_slots);

			if (!sv)
				continue;

			/*
			 * During scanning we found a volume which does not
			 * exist according to the information in the volume
			 * table. This must have happened due to an unclean
			 * reboot while the volume was being removed. Discard
			 * these eraseblocks.
			 */
			ubi_msg("finish volume %d removal", sv->vol_id);
			ubi_scan_rm_volume(si, sv);
		} else if (sv) {
			err = check_sv(vol, sv);
			if (err)
				return err;
		}
	}

	return 0;
}

/**
 * ubi_read_volume_table - read the volume table.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function reads the volume table, checks it, recovers from errors if
 * needed, or creates it if needed. Returns zero in case of success and a
 * negative error code in case of failure.
 */
int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int i, err;
	struct ubi_scan_volume *sv;

	/* Pre-computed CRC of the empty volume table record */
	empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);

	/*
	 * The number of supported volumes is limited by the eraseblock size
	 * and by the UBI_MAX_VOLUMES constant.
	 */
	ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
	if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
		ubi->vtbl_slots = UBI_MAX_VOLUMES;

	ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
	ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);

	sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
	if (!sv) {
		/*
		 * No logical eraseblocks belonging to the layout volume were
		 * found. This could mean that the flash is just empty. In
		 * this case we create empty layout volume.
		 *
		 * But if flash is not empty this must be a corruption or the
		 * MTD device just contains garbage.
		 */
		if (si->is_empty) {
			ubi->vtbl = create_empty_lvol(ubi, si);
			if (IS_ERR(ubi->vtbl))
				return PTR_ERR(ubi->vtbl);
		} else {
			ubi_err("the layout volume was not found");
			return -EINVAL;
		}
	} else {
		if (sv->leb_count > UBI_LAYOUT_VOLUME_EBS) {
			/* This must not happen with proper UBI images */
			dbg_err("too many LEBs (%d) in layout volume",
				sv->leb_count);
			return -EINVAL;
		}

		ubi->vtbl = process_lvol(ubi, si, sv);
		if (IS_ERR(ubi->vtbl))
			return PTR_ERR(ubi->vtbl);
	}

	ubi->avail_pebs = ubi->good_peb_count;

	/*
	 * The layout volume is OK, initialize the corresponding in-RAM data
	 * structures.
	 */
	err = init_volumes(ubi, si, ubi->vtbl);
	if (err)
		goto out_free;

	/*
	 * Make sure that the scanning information is consistent with the
	 * information stored in the volume table.
	 */
	err = check_scanning_info(ubi, si);
	if (err)
		goto out_free;

	return 0;

out_free:
	vfree(ubi->vtbl);
	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++)
		if (ubi->volumes[i]) {
			kfree(ubi->volumes[i]);
			ubi->volumes[i] = NULL;
		}
	return err;
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_vtbl_check - check volume table.
 * @ubi: UBI device description object
 */
static void paranoid_vtbl_check(const struct ubi_device *ubi)
{
	if (vtbl_check(ubi, ubi->vtbl)) {
		ubi_err("paranoid check failed");
		BUG();
	}
}

#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/roms/u-boot/drivers/mtd/ubi/wl.c b/roms/u-boot/drivers/mtd/ubi/wl.c new file mode 100644 index 00000000..1eaa88b3 --- /dev/null +++ b/roms/u-boot/drivers/mtd/ubi/wl.c @@ -0,0 +1,1664 @@ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * SPDX-License-Identifier:	GPL-2.0+ + * + * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner + */ + +/* + * UBI wear-leveling unit. + * + * This unit is responsible for wear-leveling. It works in terms of physical + * eraseblocks and erase counters and knows nothing about logical eraseblocks, + * volumes, etc. From this unit's perspective all physical eraseblocks are of + * two types - used and free. Used physical eraseblocks are those that were + * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are + * those that were put by the 'ubi_wl_put_peb()' function. + * + * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter + * header. The rest of the physical eraseblock contains only 0xFF bytes. + * + * When physical eraseblocks are returned to the WL unit by means of the + * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is + * done asynchronously in context of the per-UBI device background thread, + * which is also managed by the WL unit.
 + * + * The wear-leveling is ensured by means of moving the contents of used + * physical eraseblocks with low erase counter to free physical eraseblocks + * with high erase counter. + * + * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick + * an "optimal" physical eraseblock. For example, when it is known that the + * physical eraseblock will be "put" soon because it contains short-term data, + * the WL unit may pick a free physical eraseblock with low erase counter, and + * so forth. + * + * If the WL unit fails to erase a physical eraseblock, it marks it as bad. + * + * This unit is also responsible for scrubbing. If a bit-flip is detected in a + * physical eraseblock, it has to be moved. Technically this is the same as + * moving it for wear-leveling reasons. + * + * As it was said, for the UBI unit all physical eraseblocks are either "free" + * or "used". Free eraseblocks are kept in the @wl->free RB-tree, while used + * eraseblocks are kept in a set of different RB-trees: @wl->used, + * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. + * + * Note, in this implementation, we keep a small in-RAM object for each physical + * eraseblock. This is surely not a scalable solution. But it appears to be good + * enough for moderately large flashes and it is simple. In future, one may + * re-work this unit and make it more scalable. + * + * At the moment this unit does not utilize the sequence number, which was + * introduced relatively recently. But it would be wise to do this because the + * sequence number of a logical eraseblock characterizes how old it is. For + * example, when we move a PEB with low erase counter, and we need to pick the + * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we + * pick a target PEB with an average EC if our PEB is not very "old". This + * leaves room for future re-work of the WL unit. + * + * FIXME: looks too complex, should be simplified (later).
 */

#ifdef UBI_LINUX
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#endif

#include <ubi_uboot.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * For how many erase cycles short term, unknown, and long term physical
 * eraseblocks are protected.
 */
#define ST_PROTECTION 16
#define U_PROTECTION  10
#define LT_PROTECTION 4

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL unit starts moving data from used physical eraseblocks with
 * low erase counter to free physical eraseblocks with high erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL unit has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL unit does
 * not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

/**
 * struct ubi_wl_prot_entry - PEB protection entry.
 * @rb_pnum: link in the @wl->prot.pnum RB-tree
 * @rb_aec: link in the @wl->prot.aec RB-tree
 * @abs_ec: the absolute erase counter value when the protection ends
 * @e: the wear-leveling entry of the physical eraseblock under protection
 *
 * When the WL unit returns a physical eraseblock, the physical eraseblock is
 * protected from being moved for some "time". For this reason, the physical
 * eraseblock is not directly moved from the @wl->free tree to the @wl->used
 * tree. There is one more tree in between where this physical eraseblock is
 * temporarily stored (@wl->prot).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait; this is
 *    especially important in case of "short term" physical eraseblocks.
 *
 * Physical eraseblocks stay protected only for limited time. But the "time" is
 * measured in erase cycles in this case. This is implemented with help of the
 * absolute erase counter (@wl->abs_ec). When it reaches certain value, the
 * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
 * the @wl->used tree.
 *
 * Protected physical eraseblocks are searched by physical eraseblock number
 * (when they are put) and by the absolute erase counter (to check if it is
 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
 * storing the protected physical eraseblocks: @wl->prot.pnum and
 * @wl->prot.aec. They are referred to as the "protection" trees. The
 * first one is indexed by the physical eraseblock number. The second one is
 * indexed by the absolute erase counter. Both trees store
 * &struct ubi_wl_prot_entry objects.
 *
 * Each physical eraseblock has 2 main states: free and used. The former state
 * corresponds to the @wl->free tree. The latter state is split up on several
 * sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
 * @wl->prot.aec trees);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those trees.
 */
struct ubi_wl_prot_entry {
	struct rb_node rb_pnum;		/* keyed by PEB number */
	struct rb_node rb_aec;		/* keyed by absolute erase counter */
	unsigned long long abs_ec;
	struct ubi_wl_entry *e;
};

/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 *
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
#else
/* Paranoid checks compile away to no-ops when the option is off */
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, rb);

		/* Primary key: erase counter */
		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			/* Tie-break on PEB number; duplicates are a bug */
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, root);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the mutex in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	/* Keep executing pending works until a free PEB shows up */
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		/* Same (EC, pnum) ordering as used by wl_tree_add() */
		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_tree_add - add physical eraseblock to protection trees.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 * @pe: protection entry object to use
 * @abs_ec: absolute erase counter value when this physical eraseblock has
 * to be removed from the protection trees.
 *
 * @wl->lock has to be locked.
 */
static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  struct ubi_wl_prot_entry *pe, int abs_ec)
{
	struct rb_node **p, *parent = NULL;
	struct ubi_wl_prot_entry *pe1;

	pe->e = e;
	/* Protection expires @abs_ec erase cycles from now */
	pe->abs_ec = ubi->abs_ec + abs_ec;

	/* Insert into the by-PEB-number protection tree */
	p = &ubi->prot.pnum.rb_node;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);

		if (e->pnum < pe1->e->pnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_pnum, parent, p);
	rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);

	/* Insert into the by-absolute-erase-counter protection tree */
	p = &ubi->prot.aec.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec < pe1->abs_ec)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_aec, parent, p);
	rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @root: the RB-tree where to look for
 * @max: highest possible erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * @max and less than @max.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	/* @max is relative to the lowest EC in the tree */
	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
	max += e->ec;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
	int err, protect, medium_ec;
	struct ubi_wl_entry *e, *first, *last;
	struct ubi_wl_prot_entry *pe;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

	/* Allocate the protection entry up front, outside the spinlock */
	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
	if (!pe)
		return -ENOMEM;

retry:
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			kfree(pe);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0) {
			kfree(pe);
			return err;
		}
		goto retry;
	}

	switch (dtype) {
		case UBI_LONGTERM:
			/*
			 * For long term data we pick a physical eraseblock
			 * with high erase counter. But the highest erase
			 * counter we can pick is bounded by the lowest erase
			 * counter plus %WL_FREE_MAX_DIFF.
			 */
			e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
			protect = LT_PROTECTION;
			break;
		case UBI_UNKNOWN:
			/*
			 * For unknown data we pick a physical eraseblock with
			 * medium erase counter. But we by no means can pick a
			 * physical eraseblock with erase counter greater than
			 * or equal to the lowest erase counter plus
			 * %WL_FREE_MAX_DIFF.
			 */
			first = rb_entry(rb_first(&ubi->free),
					 struct ubi_wl_entry, rb);
			last = rb_entry(rb_last(&ubi->free),
					struct ubi_wl_entry, rb);

			if (last->ec - first->ec < WL_FREE_MAX_DIFF)
				e = rb_entry(ubi->free.rb_node,
						struct ubi_wl_entry, rb);
			else {
				medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
				e = find_wl_entry(&ubi->free, medium_ec);
			}
			protect = U_PROTECTION;
			break;
		case UBI_SHORTTERM:
			/*
			 * For short term data we pick a physical eraseblock
			 * with the lowest erase counter as we expect it will
			 * be erased soon.
			 */
			e = rb_entry(rb_first(&ubi->free),
				     struct ubi_wl_entry, rb);
			protect = ST_PROTECTION;
			break;
		default:
			protect = 0;
			e = NULL;
			BUG();
	}

	/*
	 * Move the physical eraseblock to the protection trees where it will
	 * be protected from being moved for some time.
	 */
	paranoid_check_in_wl_tree(e, &ubi->free);
	rb_erase(&e->rb, &ubi->free);
	prot_tree_add(ubi, e, pe, protect);

	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
	spin_unlock(&ubi->wl_lock);

	return e->pnum;
}

/**
 * prot_tree_del - remove a physical eraseblock from the protection trees
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function removes PEB @pnum from the protection trees and returns zero
 * in case of success and %-ENODEV if the PEB was not found in the protection
 * trees.
 */
static int prot_tree_del(struct ubi_device *ubi, int pnum)
{
	struct rb_node *p;
	struct ubi_wl_prot_entry *pe = NULL;

	p = ubi->prot.pnum.rb_node;
	while (p) {

		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);

		if (pnum == pe->e->pnum)
			goto found;

		if (pnum < pe->e->pnum)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	return -ENODEV;

found:
	ubi_assert(pe->e->pnum == pnum);
	/* The entry lives in both protection trees - remove it from both */
	rb_erase(&pe->rb_aec, &ubi->prot.aec);
	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
	kfree(pe);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
+ */ +static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture) +{ +	int err; +	struct ubi_ec_hdr *ec_hdr; +	unsigned long long ec = e->ec; + +	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec); + +	err = paranoid_check_ec(ubi, e->pnum, e->ec); +	if (err > 0) +		return -EINVAL; + +	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); +	if (!ec_hdr) +		return -ENOMEM; + +	err = ubi_io_sync_erase(ubi, e->pnum, torture); +	if (err < 0) +		goto out_free; + +	ec += err; +	if (ec > UBI_MAX_ERASECOUNTER) { +		/* +		 * Erase counter overflow. Upgrade UBI and use 64-bit +		 * erase counters internally. +		 */ +		ubi_err("erase counter overflow at PEB %d, EC %llu", +			e->pnum, ec); +		err = -EINVAL; +		goto out_free; +	} + +	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec); + +	ec_hdr->ec = cpu_to_be64(ec); + +	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); +	if (err) +		goto out_free; + +	e->ec = ec; +	spin_lock(&ubi->wl_lock); +	if (e->ec > ubi->max_ec) +		ubi->max_ec = e->ec; +	spin_unlock(&ubi->wl_lock); + +out_free: +	kfree(ec_hdr); +	return err; +} + +/** + * check_protection_over - check if it is time to stop protecting some + * physical eraseblocks. + * @ubi: UBI device description object + * + * This function is called after each erase operation, when the absolute erase + * counter is incremented, to check if some physical eraseblock  have not to be + * protected any longer. These physical eraseblocks are moved from the + * protection trees to the used tree. + */ +static void check_protection_over(struct ubi_device *ubi) +{ +	struct ubi_wl_prot_entry *pe; + +	/* +	 * There may be several protected physical eraseblock to remove, +	 * process them all. 
	 */
	while (1) {
		spin_lock(&ubi->wl_lock);
		/* Empty protection tree - nothing left to expire */
		if (!ubi->prot.aec.rb_node) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		/* Leftmost node has the smallest absolute erase counter */
		pe = rb_entry(rb_first(&ubi->prot.aec),
			      struct ubi_wl_prot_entry, rb_aec);

		/*
		 * Entries are visited in @abs_ec order, so stop at the first
		 * one whose expiry point is still in the future.
		 */
		if (pe->abs_ec > ubi->abs_ec) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
		/* Remove from both protection trees, make the PEB movable again */
		rb_erase(&pe->rb_aec, &ubi->prot.aec);
		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
		wl_tree_add(pe->e, &ubi->used);
		spin_unlock(&ubi->wl_lock);

		kfree(pe);
		cond_resched();
	}
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function enqueues a work defined by @wrk to the tail of the pending
 * works list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;

	/*
	 * U-Boot special: We have no bgt_thread in U-Boot!
	 * So just call do_work() here directly.
	 *
	 * NOTE(review): do_work() is invoked while @ubi->wl_lock is held;
	 * this presumably relies on U-Boot spinlocks being no-ops — verify
	 * before reusing this code in a preemptive environment.
	 */
	do_work(ubi);

	spin_unlock(&ubi->wl_lock);
}

/* Forward declaration: schedule_erase() below queues this worker */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	/* The erase worker takes ownership of both @wl_wrk and @e */
	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, put = 0, scrubbing = 0, protect = 0;
	struct ubi_wl_prot_entry *uninitialized_var(pe);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	/* The work object belongs to this worker - free it right away */
	kfree(wrk);

	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			goto out_cancel;
		}
		paranoid_check_in_wl_tree(e1, &ubi->used);
		rb_erase(&e1->rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(e1, &ubi->scrub);
		rb_erase(&e1->rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	paranoid_check_in_wl_tree(e2, &ubi->free);
	rb_erase(&e2->rb, &ubi->free);
	/* Publish the move so ubi_wl_put_peb()/scrub_peb() can see it */
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * We are trying to move PEB without a VID header. UBI
			 * always write VID headers shortly after the PEB was
			 * given, so we have a situation when it did not have
			 * chance to write it down because it was preempted.
			 * Just re-schedule the work, so that next time it will
			 * likely have the VID header in place.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		if (err > 0)
			err = -EIO;
		goto out_error;
	}

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {

		if (err < 0)
			goto out_error;
		if (err == 1)
			goto out_not_moved;

		/*
		 * For some reason the LEB was not moved - it might be because
		 * the volume is being deleted. We should prevent this PEB from
		 * being selected for wear-levelling movement for some "time",
		 * so put it to the protection tree.
		 */

		dbg_wl("cancelled moving PEB %d", e1->pnum);
		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
		if (!pe) {
			err = -ENOMEM;
			goto out_error;
		}

		protect = 1;
	}

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_tree_add(ubi, e1, pe, protect);
	if (!ubi->move_to_put)
		wl_tree_add(e2, &ubi->used);
	else
		put = 1;
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	if (put) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
		err = schedule_erase(ubi, e2, 0);
		if (err)
			goto out_error;
	}

	if (!protect) {
		/* Source PEB's contents were copied out - recycle it */
		err = schedule_erase(ubi, e1, 0);
		if (err)
			goto out_error;
	}


	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reasons the LEB was not moved, might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * be changed, schedule it for erasure.
	 */
out_not_moved:
	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e2, 0);
	if (err)
		goto out_error;

	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	ubi_err("error %d while moving PEB %d to PEB %d",
		err, e1->pnum, e2->pnum);

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	/* Unrecoverable: drop both entries and force read-only mode */
	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);
	ubi_ro_mode(ubi);

	mutex_unlock(&ubi->move_mutex);
	return err;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	/* Set the flag before dropping the lock so no second WL work is queued */
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and perform torture testing if
 * needed. It also takes care about marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
+ */ +static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, +			int cancel) +{ +	struct ubi_wl_entry *e = wl_wrk->e; +	int pnum = e->pnum, err, need; + +	if (cancel) { +		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); +		kfree(wl_wrk); +		kmem_cache_free(ubi_wl_entry_slab, e); +		return 0; +	} + +	dbg_wl("erase PEB %d EC %d", pnum, e->ec); + +	err = sync_erase(ubi, e, wl_wrk->torture); +	if (!err) { +		/* Fine, we've erased it successfully */ +		kfree(wl_wrk); + +		spin_lock(&ubi->wl_lock); +		ubi->abs_ec += 1; +		wl_tree_add(e, &ubi->free); +		spin_unlock(&ubi->wl_lock); + +		/* +		 * One more erase operation has happened, take care about protected +		 * physical eraseblocks. +		 */ +		check_protection_over(ubi); + +		/* And take care about wear-leveling */ +		err = ensure_wear_leveling(ubi); +		return err; +	} + +	ubi_err("failed to erase PEB %d, error %d", pnum, err); +	kfree(wl_wrk); +	kmem_cache_free(ubi_wl_entry_slab, e); + +	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || +	    err == -EBUSY) { +		int err1; + +		/* Re-schedule the LEB for erasure */ +		err1 = schedule_erase(ubi, e, 0); +		if (err1) { +			err = err1; +			goto out_ro; +		} +		return err; +	} else if (err != -EIO) { +		/* +		 * If this is not %-EIO, we have no idea what to do. Scheduling +		 * this physical eraseblock for erasure again would cause +		 * errors again and again. Well, lets switch to RO mode. +		 */ +		goto out_ro; +	} + +	/* It is %-EIO, the PEB went bad */ + +	if (!ubi->bad_allowed) { +		ubi_err("bad physical eraseblock %d detected", pnum); +		goto out_ro; +	} + +	spin_lock(&ubi->volumes_lock); +	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1; +	if (need > 0) { +		need = ubi->avail_pebs >= need ? 
need : ubi->avail_pebs; +		ubi->avail_pebs -= need; +		ubi->rsvd_pebs += need; +		ubi->beb_rsvd_pebs += need; +		if (need > 0) +			ubi_msg("reserve more %d PEBs", need); +	} + +	if (ubi->beb_rsvd_pebs == 0) { +		spin_unlock(&ubi->volumes_lock); +		ubi_err("no reserved physical eraseblocks"); +		goto out_ro; +	} + +	spin_unlock(&ubi->volumes_lock); +	ubi_msg("mark PEB %d as bad", pnum); + +	err = ubi_io_mark_bad(ubi, pnum); +	if (err) +		goto out_ro; + +	spin_lock(&ubi->volumes_lock); +	ubi->beb_rsvd_pebs -= 1; +	ubi->bad_peb_count += 1; +	ubi->good_peb_count -= 1; +	ubi_calculate_reserved(ubi); +	if (ubi->beb_rsvd_pebs == 0) +		ubi_warn("last PEB from the reserved pool was used"); +	spin_unlock(&ubi->volumes_lock); + +	return err; + +out_ro: +	ubi_ro_mode(ubi); +	return err; +} + +/** + * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit. + * @ubi: UBI device description object + * @pnum: physical eraseblock to return + * @torture: if this physical eraseblock has to be tortured + * + * This function is called to return physical eraseblock @pnum to the pool of + * free physical eraseblocks. The @torture flag has to be set if an I/O error + * occurred to this @pnum and it has to be tested. This function returns zero + * in case of success, and a negative error code in case of failure. + */ +int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture) +{ +	int err; +	struct ubi_wl_entry *e; + +	dbg_wl("PEB %d", pnum); +	ubi_assert(pnum >= 0); +	ubi_assert(pnum < ubi->peb_count); + +retry: +	spin_lock(&ubi->wl_lock); +	e = ubi->lookuptbl[pnum]; +	if (e == ubi->move_from) { +		/* +		 * User is putting the physical eraseblock which was selected to +		 * be moved. It will be scheduled for erasure in the +		 * wear-leveling worker. 
+		 */ +		dbg_wl("PEB %d is being moved, wait", pnum); +		spin_unlock(&ubi->wl_lock); + +		/* Wait for the WL worker by taking the @ubi->move_mutex */ +		mutex_lock(&ubi->move_mutex); +		mutex_unlock(&ubi->move_mutex); +		goto retry; +	} else if (e == ubi->move_to) { +		/* +		 * User is putting the physical eraseblock which was selected +		 * as the target the data is moved to. It may happen if the EBA +		 * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but +		 * the WL unit has not put the PEB to the "used" tree yet, but +		 * it is about to do this. So we just set a flag which will +		 * tell the WL worker that the PEB is not needed anymore and +		 * should be scheduled for erasure. +		 */ +		dbg_wl("PEB %d is the target of data moving", pnum); +		ubi_assert(!ubi->move_to_put); +		ubi->move_to_put = 1; +		spin_unlock(&ubi->wl_lock); +		return 0; +	} else { +		if (in_wl_tree(e, &ubi->used)) { +			paranoid_check_in_wl_tree(e, &ubi->used); +			rb_erase(&e->rb, &ubi->used); +		} else if (in_wl_tree(e, &ubi->scrub)) { +			paranoid_check_in_wl_tree(e, &ubi->scrub); +			rb_erase(&e->rb, &ubi->scrub); +		} else { +			err = prot_tree_del(ubi, e->pnum); +			if (err) { +				ubi_err("PEB %d not found", pnum); +				ubi_ro_mode(ubi); +				spin_unlock(&ubi->wl_lock); +				return err; +			} +		} +	} +	spin_unlock(&ubi->wl_lock); + +	err = schedule_erase(ubi, e, torture); +	if (err) { +		spin_lock(&ubi->wl_lock); +		wl_tree_add(e, &ubi->used); +		spin_unlock(&ubi->wl_lock); +	} + +	return err; +} + +/** + * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing. + * @ubi: UBI device description object + * @pnum: the physical eraseblock to schedule + * + * If a bit-flip in a physical eraseblock is detected, this physical eraseblock + * needs scrubbing. This function schedules a physical eraseblock for + * scrubbing which is done in background. This function returns zero in case of + * success and a negative error code in case of failure. 
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	ubi_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	/* Already being moved or already queued for scrubbing - nothing to do */
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(e, &ubi->used);
		rb_erase(&e->rb, &ubi->used);
	} else {
		/* Not in used tree, so it must be in a protection tree */
		int err;

		err = prot_tree_del(ubi, e->pnum);
		if (err) {
			ubi_err("PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
{
	int err;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush (%d pending works)", ubi->works_count);
	while (ubi->works_count) {
		err = do_work(ubi);
		if (err)
			return err;
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	/*
	 * And in case last was the WL worker and it cancelled the LEB
	 * movement, flush again.
	 */
	while (ubi->works_count) {
		dbg_wl("flush more (%d pending works)", ubi->works_count);
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	/* Iterative post-order teardown: free leaves, walking back up */
	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, rb);

			rb = rb_parent(rb);
			if (rb) {
				/* Detach the freed child from its parent */
				if (rb->rb_left == &e->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 *
 * NOTE(review): in this U-Boot port works are executed synchronously from
 * schedule_ubi_work(); presumably this thread is never started here — verify.
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		/* Sleep while there is nothing to do (or nothing allowed) */
		if (list_empty(&ubi->works) || ubi->ro_mode ||
			       !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				break;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		/* Workers free their own resources when called with cancel=1 */
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init_scan - initialize the wear-leveling unit using scanning
 * information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;


	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	/* PEBs the scanner marked for erasure go straight to the erase queue */
	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	/* Free PEBs populate the free tree directly */
	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

	/* Corrupted PEBs are scheduled for erasure, like the erase list */
	list_for_each_entry(seb, &si->corr, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	/* In-use PEBs of all volumes go to the used or scrub tree */
	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("no enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

/**
 * protection_trees_destroy - destroy the protection RB-trees.
 * @ubi: UBI device description object
 */
static void protection_trees_destroy(struct ubi_device *ubi)
{
	struct rb_node *rb;
	struct ubi_wl_prot_entry *pe;

	/*
	 * Iterative post-order teardown over the @aec tree only; both trees
	 * index the same entries, so freeing via one tree is enough.
	 */
	rb = ubi->prot.aec.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);

			rb = rb_parent(rb);
			if (rb) {
				/* Detach the freed child from its parent */
				if (rb->rb_left == &pe->rb_aec)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			/* Free both the WL entry and its protection wrapper */
			kmem_cache_free(ubi_wl_entry_slab, pe->e);
			kfree(pe);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling unit.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the UBI wear-leveling unit");

	cancel_pending(ubi);
	protection_trees_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure that the erase counter of a physical eraseblock
 * is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock @pnum
 * is equivalent to @ec, %1 if not, and a negative error code if an error
 * occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	/* Compare the on-flash counter with the in-memory one */
	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present
 * in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %1 if it
 * is not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
{
	if (in_wl_tree(e, root))
		return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();
	return 1;
}

#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
