Commit 16271224 authored by Richard Weinberger

Merge tag 'nand/for-4.15' of git://git.infradead.org/l2-mtd

From Boris:
"
Core changes:
* Add a flag to mark NANDs that require 3 address cycles to encode a
  page address
* Set a default ECC/free layout when NAND_ECC_NONE is requested
* Fix a bug in panic_nand_write()

Driver changes:
* Another batch of cleanups for the denali driver
* Fix PM support in the atmel driver
* Remove support for platform data in the omap driver
* Fix subpage write in the omap driver
* Fix irq handling in the mtk driver
* Change link order of mtk_ecc and mtk_nand drivers to speed up boot
  time
* Change log level of ECC error messages in the mxc driver
* Patch the pxa3xx driver to support Armada 8k platforms
* Add BAM DMA support to the qcom driver
* Convert gpio-nand to the GPIO desc API
* Fix ECC handling in the mt29f driver
"
parents 20b2fc79 30863e38
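For context on the first core change above: before this series, drivers decided whether a third row-address cycle was needed by comparing chip->chipsize against 32 MiB or 128 MiB (depending on page size), as the au1550, atmel, doc2001plus and fsl hunks below still show on their left-hand side. The core now sets a NAND_ROW_ADDR_3 flag in chip->options, so the address phase reduces to a flag test. A minimal sketch of the converted pattern (the write_byte callback is only a stand-in for whatever bus primitive a real controller driver uses, not an API added by this series):

#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>

/*
 * Illustration only: emit the row-address cycles for @page the way the
 * converted drivers in this merge do it.
 */
static void example_write_row_addr(struct nand_chip *chip, int page,
				   void (*write_byte)(struct nand_chip *chip, u8 byte))
{
	write_byte(chip, page & 0xff);
	write_byte(chip, (page >> 8) & 0xff);

	/*
	 * Previously each driver open-coded a chipsize check such as
	 * "if (chip->chipsize > (32 << 20))"; now the core marks chips
	 * that need a third row-address cycle.
	 */
	if (chip->options & NAND_ROW_ADDR_3)
		write_byte(chip, (page >> 16) & 0xff);
}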
...@@ -29,7 +29,7 @@ nand: nand@ff900000 {
#address-cells = <1>; #address-cells = <1>;
#size-cells = <1>; #size-cells = <1>;
compatible = "altr,socfpga-denali-nand"; compatible = "altr,socfpga-denali-nand";
reg = <0xff900000 0x100000>, <0xffb80000 0x10000>; reg = <0xff900000 0x20>, <0xffb80000 0x1000>;
reg-names = "nand_data", "denali_reg"; reg-names = "nand_data", "denali_reg";
interrupts = <0 144 4>; interrupts = <0 144 4>;
}; };
...@@ -5,9 +5,13 @@ Required properties:
- compatible: Should be set to one of the following: - compatible: Should be set to one of the following:
marvell,pxa3xx-nand marvell,pxa3xx-nand
marvell,armada370-nand marvell,armada370-nand
marvell,armada-8k-nand
- reg: The register base for the controller - reg: The register base for the controller
- interrupts: The interrupt to map - interrupts: The interrupt to map
- #address-cells: Set to <1> if the node includes partitions - #address-cells: Set to <1> if the node includes partitions
- marvell,system-controller: Set to retrieve the syscon node that handles
NAND controller related registers (only required
with marvell,armada-8k-nand compatible).
Optional properties: Optional properties:
...
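The marvell,system-controller property added above points at a syscon node; the driver side is expected to fetch that regmap with the syscon helpers and configure the NAND-related bits it exposes. A rough sketch of the lookup, with a placeholder register offset and bit rather than the real Armada 8K layout:

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* Placeholder register/bit, for illustration only. */
#define EXAMPLE_GENCONF_MISC	0x0
#define EXAMPLE_GENCONF_NFC_EN	BIT(0)

static int example_enable_armada8k_nfc(struct device_node *np)
{
	struct regmap *sysctrl;

	/* Only the marvell,armada-8k-nand compatible carries this phandle. */
	sysctrl = syscon_regmap_lookup_by_phandle(np, "marvell,system-controller");
	if (IS_ERR(sysctrl))
		return PTR_ERR(sysctrl);

	return regmap_update_bits(sysctrl, EXAMPLE_GENCONF_MISC,
				  EXAMPLE_GENCONF_NFC_EN, EXAMPLE_GENCONF_NFC_EN);
}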
...@@ -14,7 +14,7 @@
#include <linux/mtd/partitions.h> #include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h> #include <linux/mtd/physmap.h>
#include <linux/mtd/nand-gpio.h> #include <linux/mtd/nand-gpio.h>
#include <linux/gpio/machine.h>
#include <linux/spi/spi.h> #include <linux/spi/spi.h>
#include <linux/spi/pxa2xx_spi.h> #include <linux/spi/pxa2xx_spi.h>
...@@ -176,6 +176,17 @@ static inline void cmx255_init_nor(void) {}
#endif #endif
#if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE) #if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE)
static struct gpiod_lookup_table cmx255_nand_gpiod_table = {
.dev_id = "gpio-nand",
.table = {
GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CS, "nce", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CLE, "cle", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-pxa", GPIO_NAND_ALE, "ale", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("gpio-pxa", GPIO_NAND_RB, "rdy", GPIO_ACTIVE_HIGH),
},
};
static struct resource cmx255_nand_resource[] = { static struct resource cmx255_nand_resource[] = {
[0] = { [0] = {
.start = PXA_CS1_PHYS, .start = PXA_CS1_PHYS,
...@@ -198,11 +209,6 @@ static struct mtd_partition cmx255_nand_parts[] = {
}; };
static struct gpio_nand_platdata cmx255_nand_platdata = { static struct gpio_nand_platdata cmx255_nand_platdata = {
.gpio_nce = GPIO_NAND_CS,
.gpio_cle = GPIO_NAND_CLE,
.gpio_ale = GPIO_NAND_ALE,
.gpio_rdy = GPIO_NAND_RB,
.gpio_nwp = -1,
.parts = cmx255_nand_parts, .parts = cmx255_nand_parts,
.num_parts = ARRAY_SIZE(cmx255_nand_parts), .num_parts = ARRAY_SIZE(cmx255_nand_parts),
.chip_delay = 25, .chip_delay = 25,
...@@ -220,6 +226,7 @@ static struct platform_device cmx255_nand = {
static void __init cmx255_init_nand(void) static void __init cmx255_init_nand(void)
{ {
gpiod_add_lookup_table(&cmx255_nand_gpiod_table);
platform_device_register(&cmx255_nand); platform_device_register(&cmx255_nand);
} }
#else #else
...
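The cm-x255 change above is the board-file half of the gpio-nand GPIO descriptor conversion: instead of passing GPIO numbers through gpio_nand_platdata, the board registers a gpiod_lookup_table keyed by dev_id "gpio-nand" and the con_ids "nce", "cle", "ale" and "rdy". The driver half (the drivers/mtd/nand/gpio.c hunks further down) resolves those names with devm_gpiod_get()/devm_gpiod_get_optional(). A stripped-down sketch of the consumer side of that contract:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

/* Sketch only: fetch two of the lines registered by the lookup table above. */
static int example_get_nand_gpios(struct device *dev)
{
	struct gpio_desc *nce, *cle;

	/* "nce" is optional for boards that hard-wire chip enable. */
	nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
	if (IS_ERR(nce))
		return PTR_ERR(nce);

	/* "cle" is mandatory, so the non-optional getter is used. */
	cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
	if (IS_ERR(cle))
		return PTR_ERR(cle);

	return 0;
}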
...@@ -317,8 +317,11 @@ config MTD_NAND_PXA3xx
tristate "NAND support on PXA3xx and Armada 370/XP" tristate "NAND support on PXA3xx and Armada 370/XP"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU
help help
This enables the driver for the NAND flash device found on This enables the driver for the NAND flash device found on
PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2). PXA3xx processors (NFCv1) and also on 32-bit Armada
platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada
platforms (7K, 8K) (NFCv2).
config MTD_NAND_SLC_LPC32XX config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC Controller" tristate "NXP LPC32xx SLC Controller"
...
...@@ -58,7 +58,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o
obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o
obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/
obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o
obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o
nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o
nand-objs += nand_amd.o nand-objs += nand_amd.o
...
...@@ -718,8 +718,7 @@ static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
nc->op.addrs[nc->op.naddrs++] = page; nc->op.addrs[nc->op.naddrs++] = page;
nc->op.addrs[nc->op.naddrs++] = page >> 8; nc->op.addrs[nc->op.naddrs++] = page >> 8;
if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) || if (chip->options & NAND_ROW_ADDR_3)
(mtd->writesize <= 512 && chip->chipsize > SZ_32M))
nc->op.addrs[nc->op.naddrs++] = page >> 16; nc->op.addrs[nc->op.naddrs++] = page >> 16;
} }
} }
...@@ -2530,6 +2529,9 @@ static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
struct atmel_nand_controller *nc = dev_get_drvdata(dev); struct atmel_nand_controller *nc = dev_get_drvdata(dev);
struct atmel_nand *nand; struct atmel_nand *nand;
if (nc->pmecc)
atmel_pmecc_reset(nc->pmecc);
list_for_each_entry(nand, &nc->chips, node) { list_for_each_entry(nand, &nc->chips, node) {
int i; int i;
...@@ -2547,6 +2549,7 @@ static struct platform_driver atmel_nand_controller_driver = {
.driver = { .driver = {
.name = "atmel-nand-controller", .name = "atmel-nand-controller",
.of_match_table = of_match_ptr(atmel_nand_controller_of_ids), .of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
.pm = &atmel_nand_controller_pm_ops,
}, },
.probe = atmel_nand_controller_probe, .probe = atmel_nand_controller_probe,
.remove = atmel_nand_controller_remove, .remove = atmel_nand_controller_remove,
...
...@@ -765,6 +765,13 @@ void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user,
} }
EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes); EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes);
void atmel_pmecc_reset(struct atmel_pmecc *pmecc)
{
writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
}
EXPORT_SYMBOL_GPL(atmel_pmecc_reset);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op) int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op)
{ {
struct atmel_pmecc *pmecc = user->pmecc; struct atmel_pmecc *pmecc = user->pmecc;
...@@ -797,10 +804,7 @@ EXPORT_SYMBOL_GPL(atmel_pmecc_enable);
void atmel_pmecc_disable(struct atmel_pmecc_user *user) void atmel_pmecc_disable(struct atmel_pmecc_user *user)
{ {
struct atmel_pmecc *pmecc = user->pmecc; atmel_pmecc_reset(user->pmecc);
writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
mutex_unlock(&user->pmecc->lock); mutex_unlock(&user->pmecc->lock);
} }
EXPORT_SYMBOL_GPL(atmel_pmecc_disable); EXPORT_SYMBOL_GPL(atmel_pmecc_disable);
...@@ -855,10 +859,7 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev,
/* Disable all interrupts before registering the PMECC handler. */ /* Disable all interrupts before registering the PMECC handler. */
writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR); writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR);
atmel_pmecc_reset(pmecc);
/* Reset the ECC engine */
writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL);
writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL);
return pmecc; return pmecc;
} }
...
...@@ -61,6 +61,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
struct atmel_pmecc_user_req *req); struct atmel_pmecc_user_req *req);
void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user); void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user);
void atmel_pmecc_reset(struct atmel_pmecc *pmecc);
int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op); int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op);
void atmel_pmecc_disable(struct atmel_pmecc_user *user); void atmel_pmecc_disable(struct atmel_pmecc_user *user);
int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user); int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user);
...
...@@ -331,8 +331,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
ctx->write_byte(mtd, (u8)(page_addr >> 8)); ctx->write_byte(mtd, (u8)(page_addr >> 8));
/* One more address cycle for devices > 32MiB */ if (this->options & NAND_ROW_ADDR_3)
if (this->chipsize > (32 << 20))
ctx->write_byte(mtd, ctx->write_byte(mtd,
((page_addr >> 16) & 0x0f)); ((page_addr >> 16) & 0x0f));
} }
...
...@@ -10,20 +10,18 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details. * more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/ */
#include <linux/interrupt.h>
#include <linux/delay.h> #include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/wait.h> #include <linux/interrupt.h>
#include <linux/mutex.h> #include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/spinlock.h>
#include "denali.h" #include "denali.h"
...@@ -31,9 +29,9 @@ MODULE_LICENSE("GPL");
#define DENALI_NAND_NAME "denali-nand" #define DENALI_NAND_NAME "denali-nand"
/* Host Data/Command Interface */ /* for Indexed Addressing */
#define DENALI_HOST_ADDR 0x00 #define DENALI_INDEXED_CTRL 0x00
#define DENALI_HOST_DATA 0x10 #define DENALI_INDEXED_DATA 0x10
#define DENALI_MAP00 (0 << 26) /* direct access to buffer */ #define DENALI_MAP00 (0 << 26) /* direct access to buffer */
#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */
...@@ -61,31 +59,55 @@ MODULE_LICENSE("GPL");
*/ */
#define DENALI_CLK_X_MULT 6 #define DENALI_CLK_X_MULT 6
/*
* this macro allows us to convert from an MTD structure to our own
* device context (denali) structure.
*/
static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{ {
return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
} }
static void denali_host_write(struct denali_nand_info *denali, /*
uint32_t addr, uint32_t data) * Direct Addressing - the slave address forms the control information (command
* type, bank, block, and page address). The slave data is the actual data to
* be transferred. This mode requires 28 bits of address region allocated.
*/
static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr)
{
return ioread32(denali->host + addr);
}
static void denali_direct_write(struct denali_nand_info *denali, u32 addr,
u32 data)
{ {
iowrite32(addr, denali->host + DENALI_HOST_ADDR); iowrite32(data, denali->host + addr);
iowrite32(data, denali->host + DENALI_HOST_DATA); }
/*
* Indexed Addressing - address translation module intervenes in passing the
* control information. This mode reduces the required address range. The
* control information and transferred data are latched by the registers in
* the translation module.
*/
static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr)
{
iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
return ioread32(denali->host + DENALI_INDEXED_DATA);
}
static void denali_indexed_write(struct denali_nand_info *denali, u32 addr,
u32 data)
{
iowrite32(addr, denali->host + DENALI_INDEXED_CTRL);
iowrite32(data, denali->host + DENALI_INDEXED_DATA);
} }
/* /*
* Use the configuration feature register to determine the maximum number of * Use the configuration feature register to determine the maximum number of
* banks that the hardware supports. * banks that the hardware supports.
*/ */
static void detect_max_banks(struct denali_nand_info *denali) static void denali_detect_max_banks(struct denali_nand_info *denali)
{ {
uint32_t features = ioread32(denali->reg + FEATURES); uint32_t features = ioread32(denali->reg + FEATURES);
denali->max_banks = 1 << (features & FEATURES__N_BANKS); denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features);
/* the encoding changed from rev 5.0 to 5.1 */ /* the encoding changed from rev 5.0 to 5.1 */
if (denali->revision < 0x0501) if (denali->revision < 0x0501)
...@@ -189,7 +211,7 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
msecs_to_jiffies(1000)); msecs_to_jiffies(1000));
if (!time_left) { if (!time_left) {
dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", dev_err(denali->dev, "timeout while waiting for irq 0x%x\n",
denali->irq_mask); irq_mask);
return 0; return 0;
} }
...@@ -208,73 +230,47 @@ static uint32_t denali_check_irq(struct denali_nand_info *denali)
return irq_status; return irq_status;
} }
/*
* This helper function setups the registers for ECC and whether or not
* the spare area will be transferred.
*/
static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
bool transfer_spare)
{
int ecc_en_flag, transfer_spare_flag;
/* set ECC, transfer spare bits if needed */
ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
/* Enable spare area/ECC per user's request. */
iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE);
iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG);
}
static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{ {
struct denali_nand_info *denali = mtd_to_denali(mtd); struct denali_nand_info *denali = mtd_to_denali(mtd);
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i; int i;
iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
denali->host + DENALI_HOST_ADDR);
for (i = 0; i < len; i++) for (i = 0; i < len; i++)
buf[i] = ioread32(denali->host + DENALI_HOST_DATA); buf[i] = denali->host_read(denali, addr);
} }
static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{ {
struct denali_nand_info *denali = mtd_to_denali(mtd); struct denali_nand_info *denali = mtd_to_denali(mtd);
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
int i; int i;
iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
denali->host + DENALI_HOST_ADDR);
for (i = 0; i < len; i++) for (i = 0; i < len; i++)
iowrite32(buf[i], denali->host + DENALI_HOST_DATA); denali->host_write(denali, addr, buf[i]);
} }
static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{ {
struct denali_nand_info *denali = mtd_to_denali(mtd); struct denali_nand_info *denali = mtd_to_denali(mtd);
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
uint16_t *buf16 = (uint16_t *)buf; uint16_t *buf16 = (uint16_t *)buf;
int i; int i;
iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
denali->host + DENALI_HOST_ADDR);
for (i = 0; i < len / 2; i++) for (i = 0; i < len / 2; i++)
buf16[i] = ioread32(denali->host + DENALI_HOST_DATA); buf16[i] = denali->host_read(denali, addr);
} }
static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf,
int len) int len)
{ {
struct denali_nand_info *denali = mtd_to_denali(mtd); struct denali_nand_info *denali = mtd_to_denali(mtd);
u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali);
const uint16_t *buf16 = (const uint16_t *)buf; const uint16_t *buf16 = (const uint16_t *)buf;
int i; int i;
iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali),
denali->host + DENALI_HOST_ADDR);
for (i = 0; i < len / 2; i++) for (i = 0; i < len / 2; i++)
iowrite32(buf16[i], denali->host + DENALI_HOST_DATA); denali->host_write(denali, addr, buf16[i]);
} }
static uint8_t denali_read_byte(struct mtd_info *mtd) static uint8_t denali_read_byte(struct mtd_info *mtd)
...@@ -319,7 +315,7 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
if (ctrl & NAND_CTRL_CHANGE) if (ctrl & NAND_CTRL_CHANGE)
denali_reset_irq(denali); denali_reset_irq(denali);
denali_host_write(denali, DENALI_BANK(denali) | type, dat); denali->host_write(denali, DENALI_BANK(denali) | type, dat);
} }
static int denali_dev_ready(struct mtd_info *mtd) static int denali_dev_ready(struct mtd_info *mtd)
...@@ -389,7 +385,7 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return 0; return 0;
} }
max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS; max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor);
/* /*
* The register holds the maximum of per-sector corrected bitflips. * The register holds the maximum of per-sector corrected bitflips.
...@@ -402,13 +398,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips; return max_bitflips;
} }
#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE)
#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
static int denali_sw_ecc_fixup(struct mtd_info *mtd, static int denali_sw_ecc_fixup(struct mtd_info *mtd,
struct denali_nand_info *denali, struct denali_nand_info *denali,
unsigned long *uncor_ecc_flags, uint8_t *buf) unsigned long *uncor_ecc_flags, uint8_t *buf)
...@@ -426,18 +415,20 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
do { do {
err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS);
err_sector = ECC_SECTOR(err_addr); err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr);
err_byte = ECC_BYTE(err_addr); err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr);
err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO);
err_cor_value = ECC_CORRECTION_VALUE(err_cor_info); err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE,
err_device = ECC_ERR_DEVICE(err_cor_info); err_cor_info);
err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE,
err_cor_info);
/* reset the bitflip counter when crossing ECC sector */ /* reset the bitflip counter when crossing ECC sector */
if (err_sector != prev_sector) if (err_sector != prev_sector)
bitflips = 0; bitflips = 0;
if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) { if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) {
/* /*
* Check later if this is a real ECC error, or * Check later if this is a real ECC error, or
* an erased sector. * an erased sector.
...@@ -467,12 +458,11 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
} }
prev_sector = err_sector; prev_sector = err_sector;
} while (!ECC_LAST_ERR(err_cor_info)); } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR));
/* /*
* Once handle all ecc errors, controller will trigger a * Once handle all ECC errors, controller will trigger an
* ECC_TRANSACTION_DONE interrupt, so here just wait for * ECC_TRANSACTION_DONE interrupt.
* a while for this interrupt
*/ */
irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE);
if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) if (!(irq_status & INTR__ECC_TRANSACTION_DONE))
...@@ -481,13 +471,6 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd,
return max_bitflips; return max_bitflips;
} }
/* programs the controller to either enable/disable DMA transfers */
static void denali_enable_dma(struct denali_nand_info *denali, bool en)
{
iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE);
ioread32(denali->reg + DMA_ENABLE);
}
static void denali_setup_dma64(struct denali_nand_info *denali, static void denali_setup_dma64(struct denali_nand_info *denali,
dma_addr_t dma_addr, int page, int write) dma_addr_t dma_addr, int page, int write)
{ {
...@@ -502,14 +485,14 @@ static void denali_setup_dma64(struct denali_nand_info *denali,
* 1. setup transfer type, interrupt when complete, * 1. setup transfer type, interrupt when complete,
* burst len = 64 bytes, the number of pages * burst len = 64 bytes, the number of pages
*/ */
denali_host_write(denali, mode, denali->host_write(denali, mode,
0x01002000 | (64 << 16) | (write << 8) | page_count); 0x01002000 | (64 << 16) | (write << 8) | page_count);
/* 2. set memory low address */ /* 2. set memory low address */
denali_host_write(denali, mode, dma_addr); denali->host_write(denali, mode, lower_32_bits(dma_addr));
/* 3. set memory high address */ /* 3. set memory high address */
denali_host_write(denali, mode, (uint64_t)dma_addr >> 32); denali->host_write(denali, mode, upper_32_bits(dma_addr));
} }
static void denali_setup_dma32(struct denali_nand_info *denali, static void denali_setup_dma32(struct denali_nand_info *denali,
...@@ -523,32 +506,23 @@ static void denali_setup_dma32(struct denali_nand_info *denali,
/* DMA is a four step process */ /* DMA is a four step process */
/* 1. setup transfer type and # of pages */ /* 1. setup transfer type and # of pages */
denali_host_write(denali, mode | page, denali->host_write(denali, mode | page,
0x2000 | (write << 8) | page_count); 0x2000 | (write << 8) | page_count);
/* 2. set memory high address bits 23:8 */ /* 2. set memory high address bits 23:8 */
denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200);
/* 3. set memory low address bits 23:8 */ /* 3. set memory low address bits 23:8 */
denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300);
/* 4. interrupt when complete, burst len = 64 bytes */ /* 4. interrupt when complete, burst len = 64 bytes */
denali_host_write(denali, mode | 0x14000, 0x2400); denali->host_write(denali, mode | 0x14000, 0x2400);
}
static void denali_setup_dma(struct denali_nand_info *denali,
dma_addr_t dma_addr, int page, int write)
{
if (denali->caps & DENALI_CAP_DMA_64BIT)
denali_setup_dma64(denali, dma_addr, page, write);
else
denali_setup_dma32(denali, dma_addr, page, write);
} }
static int denali_pio_read(struct denali_nand_info *denali, void *buf, static int denali_pio_read(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw) size_t size, int page, int raw)
{ {
uint32_t addr = DENALI_BANK(denali) | page; u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
uint32_t *buf32 = (uint32_t *)buf; uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status, ecc_err_mask; uint32_t irq_status, ecc_err_mask;
int i; int i;
...@@ -560,9 +534,8 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
denali_reset_irq(denali); denali_reset_irq(denali);
iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++) for (i = 0; i < size / 4; i++)
*buf32++ = ioread32(denali->host + DENALI_HOST_DATA); *buf32++ = denali->host_read(denali, addr);
irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC);
if (!(irq_status & INTR__PAGE_XFER_INC)) if (!(irq_status & INTR__PAGE_XFER_INC))
...@@ -577,16 +550,15 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf,
static int denali_pio_write(struct denali_nand_info *denali, static int denali_pio_write(struct denali_nand_info *denali,
const void *buf, size_t size, int page, int raw) const void *buf, size_t size, int page, int raw)
{ {
uint32_t addr = DENALI_BANK(denali) | page; u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page;
const uint32_t *buf32 = (uint32_t *)buf; const uint32_t *buf32 = (uint32_t *)buf;
uint32_t irq_status; uint32_t irq_status;
int i; int i;
denali_reset_irq(denali); denali_reset_irq(denali);
iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR);
for (i = 0; i < size / 4; i++) for (i = 0; i < size / 4; i++)
iowrite32(*buf32++, denali->host + DENALI_HOST_DATA); denali->host_write(denali, addr, *buf32++);
irq_status = denali_wait_for_irq(denali, irq_status = denali_wait_for_irq(denali,
INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL);
...@@ -635,19 +607,19 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
ecc_err_mask = INTR__ECC_ERR; ecc_err_mask = INTR__ECC_ERR;
} }
denali_enable_dma(denali, true); iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
denali_reset_irq(denali); denali_reset_irq(denali);
denali_setup_dma(denali, dma_addr, page, write); denali->setup_dma(denali, dma_addr, page, write);
/* wait for operation to complete */
irq_status = denali_wait_for_irq(denali, irq_mask); irq_status = denali_wait_for_irq(denali, irq_mask);
if (!(irq_status & INTR__DMA_CMD_COMP)) if (!(irq_status & INTR__DMA_CMD_COMP))
ret = -EIO; ret = -EIO;
else if (irq_status & ecc_err_mask) else if (irq_status & ecc_err_mask)
ret = -EBADMSG; ret = -EBADMSG;
denali_enable_dma(denali, false); iowrite32(0, denali->reg + DMA_ENABLE);
dma_unmap_single(denali->dev, dma_addr, size, dir); dma_unmap_single(denali->dev, dma_addr, size, dir);
if (irq_status & INTR__ERASED_PAGE) if (irq_status & INTR__ERASED_PAGE)
...@@ -659,7 +631,9 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
static int denali_data_xfer(struct denali_nand_info *denali, void *buf, static int denali_data_xfer(struct denali_nand_info *denali, void *buf,
size_t size, int page, int raw, int write) size_t size, int page, int raw, int write)
{ {
setup_ecc_for_xfer(denali, !raw, raw); iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE);
iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0,
denali->reg + TRANSFER_SPARE_REG);
if (denali->dma_avail) if (denali->dma_avail)
return denali_dma_xfer(denali, buf, size, page, raw, write); return denali_dma_xfer(denali, buf, size, page, raw, write);
...@@ -970,8 +944,8 @@ static int denali_erase(struct mtd_info *mtd, int page)
denali_reset_irq(denali); denali_reset_irq(denali);
denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page,
DENALI_ERASE); DENALI_ERASE);
/* wait for erase to complete or failure to occur */ /* wait for erase to complete or failure to occur */
irq_status = denali_wait_for_irq(denali, irq_status = denali_wait_for_irq(denali,
...@@ -1009,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + ACC_CLKS); tmp = ioread32(denali->reg + ACC_CLKS);
tmp &= ~ACC_CLKS__VALUE; tmp &= ~ACC_CLKS__VALUE;
tmp |= acc_clks; tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks);
iowrite32(tmp, denali->reg + ACC_CLKS); iowrite32(tmp, denali->reg + ACC_CLKS);
/* tRWH -> RE_2_WE */ /* tRWH -> RE_2_WE */
...@@ -1018,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_WE); tmp = ioread32(denali->reg + RE_2_WE);
tmp &= ~RE_2_WE__VALUE; tmp &= ~RE_2_WE__VALUE;
tmp |= re_2_we; tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we);
iowrite32(tmp, denali->reg + RE_2_WE); iowrite32(tmp, denali->reg + RE_2_WE);
/* tRHZ -> RE_2_RE */ /* tRHZ -> RE_2_RE */
...@@ -1027,16 +1001,22 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RE_2_RE); tmp = ioread32(denali->reg + RE_2_RE);
tmp &= ~RE_2_RE__VALUE; tmp &= ~RE_2_RE__VALUE;
tmp |= re_2_re; tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re);
iowrite32(tmp, denali->reg + RE_2_RE); iowrite32(tmp, denali->reg + RE_2_RE);
/* tWHR -> WE_2_RE */ /*
we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk); * tCCS, tWHR -> WE_2_RE
*
* With WE_2_RE properly set, the Denali controller automatically takes
* care of the delay; the driver need not set NAND_WAIT_TCCS.
*/
we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min),
t_clk);
we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE);
tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE);
tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE;
tmp |= we_2_re; tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re);
iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE);
/* tADL -> ADDR_2_DATA */ /* tADL -> ADDR_2_DATA */
...@@ -1050,8 +1030,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); addr_2_data = min_t(int, addr_2_data, addr_2_data_mask);
tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA);
tmp &= ~addr_2_data_mask; tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA;
tmp |= addr_2_data; tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data);
iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA);
/* tREH, tWH -> RDWR_EN_HI_CNT */ /* tREH, tWH -> RDWR_EN_HI_CNT */
...@@ -1061,7 +1041,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); tmp = ioread32(denali->reg + RDWR_EN_HI_CNT);
tmp &= ~RDWR_EN_HI_CNT__VALUE; tmp &= ~RDWR_EN_HI_CNT__VALUE;
tmp |= rdwr_en_hi; tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi);
iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT);
/* tRP, tWP -> RDWR_EN_LO_CNT */ /* tRP, tWP -> RDWR_EN_LO_CNT */
...@@ -1075,7 +1055,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); tmp = ioread32(denali->reg + RDWR_EN_LO_CNT);
tmp &= ~RDWR_EN_LO_CNT__VALUE; tmp &= ~RDWR_EN_LO_CNT__VALUE;
tmp |= rdwr_en_lo; tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo);
iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT);
/* tCS, tCEA -> CS_SETUP_CNT */ /* tCS, tCEA -> CS_SETUP_CNT */
...@@ -1086,7 +1066,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr,
tmp = ioread32(denali->reg + CS_SETUP_CNT); tmp = ioread32(denali->reg + CS_SETUP_CNT);
tmp &= ~CS_SETUP_CNT__VALUE; tmp &= ~CS_SETUP_CNT__VALUE;
tmp |= cs_setup; tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup);
iowrite32(tmp, denali->reg + CS_SETUP_CNT); iowrite32(tmp, denali->reg + CS_SETUP_CNT);
return 0; return 0;
...@@ -1131,15 +1111,11 @@ static void denali_hw_init(struct denali_nand_info *denali)
* if this value is 0, just let it be. * if this value is 0, just let it be.
*/ */
denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES);
detect_max_banks(denali); denali_detect_max_banks(denali);
iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); iowrite32(0x0F, denali->reg + RB_PIN_ENABLED);
iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE);
iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER);
/* Should set value for these registers when init */
iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES);
iowrite32(1, denali->reg + ECC_ENABLE);
} }
int denali_calc_ecc_bytes(int step_size, int strength) int denali_calc_ecc_bytes(int step_size, int strength)
...@@ -1211,22 +1187,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
.free = denali_ooblayout_free, .free = denali_ooblayout_free,
}; };
/* initialize driver data structures */
static void denali_drv_init(struct denali_nand_info *denali)
{
/*
* the completion object will be used to notify
* the callee that the interrupt is done
*/
init_completion(&denali->complete);
/*
* the spinlock will be used to synchronize the ISR with any
* element that might be access shared data (interrupt status)
*/
spin_lock_init(&denali->irq_lock);
}
static int denali_multidev_fixup(struct denali_nand_info *denali) static int denali_multidev_fixup(struct denali_nand_info *denali)
{ {
struct nand_chip *chip = &denali->nand; struct nand_chip *chip = &denali->nand;
...@@ -1282,15 +1242,17 @@ int denali_init(struct denali_nand_info *denali)
{ {
struct nand_chip *chip = &denali->nand; struct nand_chip *chip = &denali->nand;
struct mtd_info *mtd = nand_to_mtd(chip); struct mtd_info *mtd = nand_to_mtd(chip);
u32 features = ioread32(denali->reg + FEATURES);
int ret; int ret;
mtd->dev.parent = denali->dev; mtd->dev.parent = denali->dev;
denali_hw_init(denali); denali_hw_init(denali);
denali_drv_init(denali);
init_completion(&denali->complete);
spin_lock_init(&denali->irq_lock);
denali_clear_irq_all(denali); denali_clear_irq_all(denali);
/* Request IRQ after all the hardware initialization is finished */
ret = devm_request_irq(denali->dev, denali->irq, denali_isr, ret = devm_request_irq(denali->dev, denali->irq, denali_isr,
IRQF_SHARED, DENALI_NAND_NAME, denali); IRQF_SHARED, DENALI_NAND_NAME, denali);
if (ret) { if (ret) {
...@@ -1308,7 +1270,6 @@ int denali_init(struct denali_nand_info *denali)
if (!mtd->name) if (!mtd->name)
mtd->name = "denali-nand"; mtd->name = "denali-nand";
/* register the driver with the NAND core subsystem */
chip->select_chip = denali_select_chip; chip->select_chip = denali_select_chip;
chip->read_byte = denali_read_byte; chip->read_byte = denali_read_byte;
chip->write_byte = denali_write_byte; chip->write_byte = denali_write_byte;
...@@ -1317,15 +1278,18 @@ int denali_init(struct denali_nand_info *denali)
chip->dev_ready = denali_dev_ready; chip->dev_ready = denali_dev_ready;
chip->waitfunc = denali_waitfunc; chip->waitfunc = denali_waitfunc;
if (features & FEATURES__INDEX_ADDR) {
denali->host_read = denali_indexed_read;
denali->host_write = denali_indexed_write;
} else {
denali->host_read = denali_direct_read;
denali->host_write = denali_direct_write;
}
/* clk rate info is needed for setup_data_interface */ /* clk rate info is needed for setup_data_interface */
if (denali->clk_x_rate) if (denali->clk_x_rate)
chip->setup_data_interface = denali_setup_data_interface; chip->setup_data_interface = denali_setup_data_interface;
/*
* scan for NAND devices attached to the controller
* this is the first stage in a two step process to register
* with the nand subsystem
*/
ret = nand_scan_ident(mtd, denali->max_banks, NULL); ret = nand_scan_ident(mtd, denali->max_banks, NULL);
if (ret) if (ret)
goto disable_irq; goto disable_irq;
...@@ -1347,20 +1311,15 @@ int denali_init(struct denali_nand_info *denali)
if (denali->dma_avail) { if (denali->dma_avail) {
chip->options |= NAND_USE_BOUNCE_BUFFER; chip->options |= NAND_USE_BOUNCE_BUFFER;
chip->buf_align = 16; chip->buf_align = 16;
if (denali->caps & DENALI_CAP_DMA_64BIT)
denali->setup_dma = denali_setup_dma64;
else
denali->setup_dma = denali_setup_dma32;
} }
/*
* second stage of the NAND scan
* this stage requires information regarding ECC and
* bad block management.
*/
chip->bbt_options |= NAND_BBT_USE_FLASH; chip->bbt_options |= NAND_BBT_USE_FLASH;
chip->bbt_options |= NAND_BBT_NO_OOB; chip->bbt_options |= NAND_BBT_NO_OOB;
chip->ecc.mode = NAND_ECC_HW_SYNDROME; chip->ecc.mode = NAND_ECC_HW_SYNDROME;
/* no subpage writes on denali */
chip->options |= NAND_NO_SUBPAGE_WRITE; chip->options |= NAND_NO_SUBPAGE_WRITE;
ret = denali_ecc_setup(mtd, chip, denali); ret = denali_ecc_setup(mtd, chip, denali);
...@@ -1373,12 +1332,15 @@ int denali_init(struct denali_nand_info *denali)
"chosen ECC settings: step=%d, strength=%d, bytes=%d\n", "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1), iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) |
FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength),
denali->reg + ECC_CORRECTION); denali->reg + ECC_CORRECTION);
iowrite32(mtd->erasesize / mtd->writesize, iowrite32(mtd->erasesize / mtd->writesize,
denali->reg + PAGES_PER_BLOCK); denali->reg + PAGES_PER_BLOCK);
iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0,
denali->reg + DEVICE_WIDTH); denali->reg + DEVICE_WIDTH);
iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG,
denali->reg + TWO_ROW_ADDR_CYCLES);
iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE);
iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE);
...@@ -1441,7 +1403,6 @@ int denali_init(struct denali_nand_info *denali)
} }
EXPORT_SYMBOL(denali_init); EXPORT_SYMBOL(denali_init);
/* driver exit point */
void denali_remove(struct denali_nand_info *denali) void denali_remove(struct denali_nand_info *denali)
{ {
struct mtd_info *mtd = nand_to_mtd(&denali->nand); struct mtd_info *mtd = nand_to_mtd(&denali->nand);
...
...@@ -10,18 +10,16 @@
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details. * more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/ */
#ifndef __DENALI_H__ #ifndef __DENALI_H__
#define __DENALI_H__ #define __DENALI_H__
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/mtd/rawnand.h> #include <linux/mtd/rawnand.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#define DEVICE_RESET 0x0 #define DEVICE_RESET 0x0
#define DEVICE_RESET__BANK(bank) BIT(bank) #define DEVICE_RESET__BANK(bank) BIT(bank)
...@@ -111,9 +109,6 @@
#define ECC_CORRECTION 0x1b0 #define ECC_CORRECTION 0x1b0
#define ECC_CORRECTION__VALUE GENMASK(4, 0) #define ECC_CORRECTION__VALUE GENMASK(4, 0)
#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16) #define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16)
#define MAKE_ECC_CORRECTION(val, thresh) \
(((val) & (ECC_CORRECTION__VALUE)) | \
(((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD)))
#define READ_MODE 0x1c0 #define READ_MODE 0x1c0
#define READ_MODE__VALUE GENMASK(3, 0) #define READ_MODE__VALUE GENMASK(3, 0)
...@@ -255,13 +250,13 @@
#define ECC_ERROR_ADDRESS 0x630 #define ECC_ERROR_ADDRESS 0x630
#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0) #define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0)
#define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12) #define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12)
#define ERR_CORRECTION_INFO 0x640 #define ERR_CORRECTION_INFO 0x640
#define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0) #define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0)
#define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8) #define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8)
#define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14) #define ERR_CORRECTION_INFO__UNCOR BIT(14)
#define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15) #define ERR_CORRECTION_INFO__LAST_ERR BIT(15)
#define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10) #define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10)
#define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8) #define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8)
...@@ -310,23 +305,24 @@ struct denali_nand_info {
struct device *dev; struct device *dev;
void __iomem *reg; /* Register Interface */ void __iomem *reg; /* Register Interface */
void __iomem *host; /* Host Data/Command Interface */ void __iomem *host; /* Host Data/Command Interface */
/* elements used by ISR */
struct completion complete; struct completion complete;
spinlock_t irq_lock; spinlock_t irq_lock; /* protect irq_mask and irq_status */
uint32_t irq_mask; u32 irq_mask; /* interrupts we are waiting for */
uint32_t irq_status; u32 irq_status; /* interrupts that have happened */
int irq; int irq;
void *buf; /* for syndrome layout conversion */
void *buf;
dma_addr_t dma_addr; dma_addr_t dma_addr;
int dma_avail; int dma_avail; /* can support DMA? */
int devs_per_cs; /* devices connected in parallel */ int devs_per_cs; /* devices connected in parallel */
int oob_skip_bytes; int oob_skip_bytes; /* number of bytes reserved for BBM */
int max_banks; int max_banks;
unsigned int revision; unsigned int revision; /* IP revision */
unsigned int caps; unsigned int caps; /* IP capability (or quirk) */
const struct nand_ecc_caps *ecc_caps; const struct nand_ecc_caps *ecc_caps;
u32 (*host_read)(struct denali_nand_info *denali, u32 addr);
void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data);
void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr,
int page, int write);
}; };
#define DENALI_CAP_HW_ECC_FIXUP BIT(0) #define DENALI_CAP_HW_ECC_FIXUP BIT(0)
...
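A large share of the denali.c and denali.h churn above replaces hand-rolled shift/mask macros (MAKE_ECC_CORRECTION, ECC_SECTOR, ...) with GENMASK() field definitions plus the FIELD_GET()/FIELD_PREP() helpers from <linux/bitfield.h>. A small, self-contained illustration of that pattern, using a made-up register layout rather than any real Denali register:

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/types.h>

/* Made-up 32-bit register layout, only to show the helpers. */
#define EXAMPLE_ECC__STRENGTH		GENMASK(4, 0)
#define EXAMPLE_ECC__ERASE_THRESHOLD	GENMASK(31, 16)

static u32 example_pack(unsigned int strength, unsigned int threshold)
{
	/* FIELD_PREP() shifts each value into its mask's position. */
	return FIELD_PREP(EXAMPLE_ECC__STRENGTH, strength) |
	       FIELD_PREP(EXAMPLE_ECC__ERASE_THRESHOLD, threshold);
}

static unsigned int example_unpack_strength(u32 reg)
{
	/* FIELD_GET() masks the field and shifts it back down. */
	return FIELD_GET(EXAMPLE_ECC__STRENGTH, reg);
}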
...@@ -12,15 +12,16 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details. * more details.
*/ */
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/platform_device.h>
#include "denali.h" #include "denali.h"
...@@ -155,7 +156,6 @@ static struct platform_driver denali_dt_driver = {
.of_match_table = denali_nand_dt_ids, .of_match_table = denali_nand_dt_ids,
}, },
}; };
module_platform_driver(denali_dt_driver); module_platform_driver(denali_dt_driver);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...
...@@ -11,6 +11,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details. * more details.
*/ */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/pci.h> #include <linux/pci.h>
...@@ -106,7 +109,6 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return ret; return ret;
} }
/* driver exit point */
static void denali_pci_remove(struct pci_dev *dev) static void denali_pci_remove(struct pci_dev *dev)
{ {
struct denali_nand_info *denali = pci_get_drvdata(dev); struct denali_nand_info *denali = pci_get_drvdata(dev);
...@@ -122,5 +124,4 @@ static struct pci_driver denali_pci_driver = {
.probe = denali_pci_probe, .probe = denali_pci_probe,
.remove = denali_pci_remove, .remove = denali_pci_remove,
}; };
module_pci_driver(denali_pci_driver); module_pci_driver(denali_pci_driver);
...@@ -705,8 +705,7 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu
if (page_addr != -1) { if (page_addr != -1) {
WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress); WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress); WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
/* One more address cycle for higher density devices */ if (this->options & NAND_ROW_ADDR_3) {
if (this->chipsize & 0x0c000000) {
WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress); WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
printk("high density\n"); printk("high density\n");
} }
...
...@@ -23,7 +23,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/gpio.h> #include <linux/gpio/consumer.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/mtd/mtd.h> #include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h> #include <linux/mtd/rawnand.h>
...@@ -31,12 +31,16 @@
#include <linux/mtd/nand-gpio.h> #include <linux/mtd/nand-gpio.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/of_gpio.h>
struct gpiomtd { struct gpiomtd {
void __iomem *io_sync; void __iomem *io_sync;
struct nand_chip nand_chip; struct nand_chip nand_chip;
struct gpio_nand_platdata plat; struct gpio_nand_platdata plat;
struct gpio_desc *nce; /* Optional chip enable */
struct gpio_desc *cle;
struct gpio_desc *ale;
struct gpio_desc *rdy;
struct gpio_desc *nwp; /* Optional write protection */
}; };
static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd) static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
...@@ -78,11 +82,10 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
gpio_nand_dosync(gpiomtd); gpio_nand_dosync(gpiomtd);
if (ctrl & NAND_CTRL_CHANGE) { if (ctrl & NAND_CTRL_CHANGE) {
if (gpio_is_valid(gpiomtd->plat.gpio_nce)) if (gpiomtd->nce)
gpio_set_value(gpiomtd->plat.gpio_nce, gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE));
!(ctrl & NAND_NCE)); gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE));
gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE)); gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE));
gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
gpio_nand_dosync(gpiomtd); gpio_nand_dosync(gpiomtd);
} }
if (cmd == NAND_CMD_NONE) if (cmd == NAND_CMD_NONE)
...@@ -96,7 +99,7 @@ static int gpio_nand_devready(struct mtd_info *mtd)
{ {
struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd); struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
return gpio_get_value(gpiomtd->plat.gpio_rdy); return gpiod_get_value(gpiomtd->rdy);
} }
#ifdef CONFIG_OF #ifdef CONFIG_OF
...@@ -123,12 +126,6 @@ static int gpio_nand_get_config_of(const struct device *dev,
} }
} }
plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
plat->gpio_nce = of_get_gpio(dev->of_node, 1);
plat->gpio_ale = of_get_gpio(dev->of_node, 2);
plat->gpio_cle = of_get_gpio(dev->of_node, 3);
plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
if (!of_property_read_u32(dev->of_node, "chip-delay", &val)) if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
plat->chip_delay = val; plat->chip_delay = val;
...@@ -201,10 +198,11 @@ static int gpio_nand_remove(struct platform_device *pdev)
nand_release(nand_to_mtd(&gpiomtd->nand_chip)); nand_release(nand_to_mtd(&gpiomtd->nand_chip));
if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) /* Enable write protection and disable the chip */
gpio_set_value(gpiomtd->plat.gpio_nwp, 0); if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
if (gpio_is_valid(gpiomtd->plat.gpio_nce)) gpiod_set_value(gpiomtd->nwp, 0);
gpio_set_value(gpiomtd->plat.gpio_nce, 1); if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
gpiod_set_value(gpiomtd->nce, 0);
return 0; return 0;
} }
...@@ -215,66 +213,66 @@ static int gpio_nand_probe(struct platform_device *pdev)
struct nand_chip *chip; struct nand_chip *chip;
struct mtd_info *mtd; struct mtd_info *mtd;
struct resource *res; struct resource *res;
struct device *dev = &pdev->dev;
int ret = 0; int ret = 0;
if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev)) if (!dev->of_node && !dev_get_platdata(dev))
return -EINVAL; return -EINVAL;
gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL); gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
if (!gpiomtd) if (!gpiomtd)
return -ENOMEM; return -ENOMEM;
chip = &gpiomtd->nand_chip; chip = &gpiomtd->nand_chip;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res); chip->IO_ADDR_R = devm_ioremap_resource(dev, res);
if (IS_ERR(chip->IO_ADDR_R)) if (IS_ERR(chip->IO_ADDR_R))
return PTR_ERR(chip->IO_ADDR_R); return PTR_ERR(chip->IO_ADDR_R);
res = gpio_nand_get_io_sync(pdev); res = gpio_nand_get_io_sync(pdev);
if (res) { if (res) {
gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res); gpiomtd->io_sync = devm_ioremap_resource(dev, res);
if (IS_ERR(gpiomtd->io_sync)) if (IS_ERR(gpiomtd->io_sync))
return PTR_ERR(gpiomtd->io_sync); return PTR_ERR(gpiomtd->io_sync);
} }
ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat); ret = gpio_nand_get_config(dev, &gpiomtd->plat);
if (ret) if (ret)
return ret; return ret;
if (gpio_is_valid(gpiomtd->plat.gpio_nce)) { /* Just enable the chip */
ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
"NAND NCE"); if (IS_ERR(gpiomtd->nce))
if (ret) return PTR_ERR(gpiomtd->nce);
return ret;
gpio_direction_output(gpiomtd->plat.gpio_nce, 1); /* We disable write protection once we know probe() will succeed */
gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
if (IS_ERR(gpiomtd->nwp)) {
ret = PTR_ERR(gpiomtd->nwp);
goto out_ce;
} }
if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) { gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp, if (IS_ERR(gpiomtd->nwp)) {
"NAND NWP"); ret = PTR_ERR(gpiomtd->nwp);
if (ret) goto out_ce;
return ret;
} }
ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE"); gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
if (ret) if (IS_ERR(gpiomtd->cle)) {
return ret; ret = PTR_ERR(gpiomtd->cle);
gpio_direction_output(gpiomtd->plat.gpio_ale, 0); goto out_ce;
}
ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE"); gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
if (ret) if (IS_ERR(gpiomtd->rdy)) {
return ret; ret = PTR_ERR(gpiomtd->rdy);
gpio_direction_output(gpiomtd->plat.gpio_cle, 0); goto out_ce;
if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
"NAND RDY");
if (ret)
return ret;
gpio_direction_input(gpiomtd->plat.gpio_rdy);
chip->dev_ready = gpio_nand_devready;
} }
/* Using RDY pin */
if (gpiomtd->rdy)
chip->dev_ready = gpio_nand_devready;
nand_set_flash_node(chip, pdev->dev.of_node); nand_set_flash_node(chip, pdev->dev.of_node);
chip->IO_ADDR_W = chip->IO_ADDR_R; chip->IO_ADDR_W = chip->IO_ADDR_R;
...@@ -285,12 +283,13 @@ static int gpio_nand_probe(struct platform_device *pdev) ...@@ -285,12 +283,13 @@ static int gpio_nand_probe(struct platform_device *pdev)
chip->cmd_ctrl = gpio_nand_cmd_ctrl; chip->cmd_ctrl = gpio_nand_cmd_ctrl;
mtd = nand_to_mtd(chip); mtd = nand_to_mtd(chip);
mtd->dev.parent = &pdev->dev; mtd->dev.parent = dev;
platform_set_drvdata(pdev, gpiomtd); platform_set_drvdata(pdev, gpiomtd);
if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) /* Disable write protection, if wired up */
gpio_direction_output(gpiomtd->plat.gpio_nwp, 1); if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
gpiod_direction_output(gpiomtd->nwp, 1);
ret = nand_scan(mtd, 1); ret = nand_scan(mtd, 1);
if (ret) if (ret)
...@@ -305,8 +304,11 @@ static int gpio_nand_probe(struct platform_device *pdev) ...@@ -305,8 +304,11 @@ static int gpio_nand_probe(struct platform_device *pdev)
return 0; return 0;
err_wp: err_wp:
if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0); gpiod_set_value(gpiomtd->nwp, 0);
out_ce:
if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
gpiod_set_value(gpiomtd->nce, 0);
return ret; return ret;
} }
......
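For orientation, the calls used in the hunk above follow the standard gpiod consumer pattern; below is a minimal sketch with a hypothetical probe function (not the driver's actual code), showing how an optional line such as "nwp" is requested and driven:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: request an optional "nwp" line and deassert write
 * protection. devm_gpiod_get_optional() returns NULL (not an error) when the
 * line is not wired up; polarity comes from DT/board tables, not the driver. */
static int example_probe(struct platform_device *pdev)
{
        struct gpio_desc *nwp;

        nwp = devm_gpiod_get_optional(&pdev->dev, "nwp", GPIOD_OUT_LOW);
        if (IS_ERR(nwp))
                return PTR_ERR(nwp);

        if (nwp)
                gpiod_set_value(nwp, 1);        /* logical 1: write protection off */

        return 0;
}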
...@@ -432,8 +432,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr) ...@@ -432,8 +432,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr)
host->addr_value[0] |= (page_addr & 0xffff) host->addr_value[0] |= (page_addr & 0xffff)
<< (host->addr_cycle * 8); << (host->addr_cycle * 8);
host->addr_cycle += 2; host->addr_cycle += 2;
/* One more address cycle for devices > 128MiB */ if (chip->options & NAND_ROW_ADDR_3) {
if (chip->chipsize > (128 << 20)) {
host->addr_cycle += 1; host->addr_cycle += 1;
if (host->command == NAND_CMD_ERASE1) if (host->command == NAND_CMD_ERASE1)
host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16; host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
......
...@@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) ...@@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
op = ECC_DECODE; op = ECC_DECODE;
dec = readw(ecc->regs + ECC_DECDONE); dec = readw(ecc->regs + ECC_DECDONE);
if (dec & ecc->sectors) { if (dec & ecc->sectors) {
/*
* Clear decode IRQ status once again to ensure that
* there will be no extra IRQ.
*/
readw(ecc->regs + ECC_DECIRQ_STA);
ecc->sectors = 0; ecc->sectors = 0;
complete(&ecc->done); complete(&ecc->done);
} else { } else {
...@@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) ...@@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
} }
} }
writel(0, ecc->regs + ECC_IRQ_REG(op));
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc) ...@@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
/* disable it */ /* disable it */
mtk_ecc_wait_idle(ecc, op); mtk_ecc_wait_idle(ecc, op);
if (op == ECC_DECODE)
/*
* Clear the decode IRQ status in case the wait for the
* decode IRQ timed out.
*/
readw(ecc->regs + ECC_DECIRQ_STA);
writew(0, ecc->regs + ECC_IRQ_REG(op)); writew(0, ecc->regs + ECC_IRQ_REG(op));
writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
......
...@@ -415,7 +415,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq) ...@@ -415,7 +415,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
* waits for completion. */ * waits for completion. */
static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
{ {
pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq); dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
writew(cmd, NFC_V1_V2_FLASH_CMD); writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2); writew(NFC_CMD, NFC_V1_V2_CONFIG2);
...@@ -431,7 +431,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) ...@@ -431,7 +431,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
udelay(1); udelay(1);
} }
if (max_retries < 0) if (max_retries < 0)
pr_debug("%s: RESET failed\n", __func__); dev_dbg(host->dev, "%s: RESET failed\n", __func__);
} else { } else {
/* Wait for operation to complete */ /* Wait for operation to complete */
wait_op_done(host, useirq); wait_op_done(host, useirq);
...@@ -454,7 +454,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast) ...@@ -454,7 +454,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
* a NAND command. */ * a NAND command. */
static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
{ {
pr_debug("send_addr(host, 0x%x %d)\n", addr, islast); dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast);
writew(addr, NFC_V1_V2_FLASH_ADDR); writew(addr, NFC_V1_V2_FLASH_ADDR);
writew(NFC_ADDR, NFC_V1_V2_CONFIG2); writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
...@@ -607,7 +607,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat, ...@@ -607,7 +607,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
uint16_t ecc_status = get_ecc_status_v1(host); uint16_t ecc_status = get_ecc_status_v1(host);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n");
return -EBADMSG; return -EBADMSG;
} }
...@@ -634,7 +634,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat, ...@@ -634,7 +634,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
do { do {
err = ecc_stat & ecc_bit_mask; err = ecc_stat & ecc_bit_mask;
if (err > err_limit) { if (err > err_limit) {
printk(KERN_WARNING "UnCorrectable RS-ECC Error\n"); dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n");
return -EBADMSG; return -EBADMSG;
} else { } else {
ret += err; ret += err;
...@@ -642,7 +642,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat, ...@@ -642,7 +642,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4; ecc_stat >>= 4;
} while (--no_subpages); } while (--no_subpages);
pr_debug("%d Symbol Correctable RS-ECC Error\n", ret); dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret);
return ret; return ret;
} }
...@@ -673,7 +673,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd) ...@@ -673,7 +673,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
host->buf_start++; host->buf_start++;
} }
pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start); dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
return ret; return ret;
} }
...@@ -859,8 +859,7 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr) ...@@ -859,8 +859,7 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
host->devtype_data->send_addr(host, host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff, true); (page_addr >> 8) & 0xff, true);
} else { } else {
/* One more address cycle for higher density devices */ if (nand_chip->options & NAND_ROW_ADDR_3) {
if (mtd->size >= 0x4000000) {
/* paddr_8 - paddr_15 */ /* paddr_8 - paddr_15 */
host->devtype_data->send_addr(host, host->devtype_data->send_addr(host,
(page_addr >> 8) & 0xff, (page_addr >> 8) & 0xff,
...@@ -1212,7 +1211,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, ...@@ -1212,7 +1211,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
struct nand_chip *nand_chip = mtd_to_nand(mtd); struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct mxc_nand_host *host = nand_get_controller_data(nand_chip); struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
command, column, page_addr); command, column, page_addr);
/* Reset command state information */ /* Reset command state information */
......
...@@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section, ...@@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
struct nand_chip *chip = mtd_to_nand(mtd); struct nand_chip *chip = mtd_to_nand(mtd);
struct nand_ecc_ctrl *ecc = &chip->ecc; struct nand_ecc_ctrl *ecc = &chip->ecc;
if (section) if (section || !ecc->total)
return -ERANGE; return -ERANGE;
oobregion->length = ecc->total; oobregion->length = ecc->total;
...@@ -727,8 +727,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, ...@@ -727,8 +727,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl); chip->cmd_ctrl(mtd, page_addr, ctrl);
ctrl &= ~NAND_CTRL_CHANGE; ctrl &= ~NAND_CTRL_CHANGE;
chip->cmd_ctrl(mtd, page_addr >> 8, ctrl); chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
/* One more address cycle for devices > 32MiB */ if (chip->options & NAND_ROW_ADDR_3)
if (chip->chipsize > (32 << 20))
chip->cmd_ctrl(mtd, page_addr >> 16, ctrl); chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
} }
chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
...@@ -854,8 +853,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, ...@@ -854,8 +853,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
chip->cmd_ctrl(mtd, page_addr, ctrl); chip->cmd_ctrl(mtd, page_addr, ctrl);
chip->cmd_ctrl(mtd, page_addr >> 8, chip->cmd_ctrl(mtd, page_addr >> 8,
NAND_NCE | NAND_ALE); NAND_NCE | NAND_ALE);
/* One more address cycle for devices > 128MiB */ if (chip->options & NAND_ROW_ADDR_3)
if (chip->chipsize > (128 << 20))
chip->cmd_ctrl(mtd, page_addr >> 16, chip->cmd_ctrl(mtd, page_addr >> 16,
NAND_NCE | NAND_ALE); NAND_NCE | NAND_ALE);
} }
...@@ -1246,6 +1244,7 @@ int nand_reset(struct nand_chip *chip, int chipnr) ...@@ -1246,6 +1244,7 @@ int nand_reset(struct nand_chip *chip, int chipnr)
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(nand_reset);
/** /**
* nand_check_erased_buf - check if a buffer contains (almost) only 0xff data * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
...@@ -2799,15 +2798,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, ...@@ -2799,15 +2798,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const uint8_t *buf) size_t *retlen, const uint8_t *buf)
{ {
struct nand_chip *chip = mtd_to_nand(mtd); struct nand_chip *chip = mtd_to_nand(mtd);
int chipnr = (int)(to >> chip->chip_shift);
struct mtd_oob_ops ops; struct mtd_oob_ops ops;
int ret; int ret;
/* Wait for the device to get ready */
panic_nand_wait(mtd, chip, 400);
/* Grab the device */ /* Grab the device */
panic_nand_get_device(chip, mtd, FL_WRITING); panic_nand_get_device(chip, mtd, FL_WRITING);
chip->select_chip(mtd, chipnr);
/* Wait for the device to get ready */
panic_nand_wait(mtd, chip, 400);
memset(&ops, 0, sizeof(ops)); memset(&ops, 0, sizeof(ops));
ops.len = len; ops.len = len;
ops.datbuf = (uint8_t *)buf; ops.datbuf = (uint8_t *)buf;
...@@ -3999,6 +4001,9 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) ...@@ -3999,6 +4001,9 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
chip->chip_shift += 32 - 1; chip->chip_shift += 32 - 1;
} }
if (chip->chip_shift - chip->page_shift > 16)
chip->options |= NAND_ROW_ADDR_3;
chip->badblockbits = 8; chip->badblockbits = 8;
chip->erase = single_erase; chip->erase = single_erase;
...@@ -4700,6 +4705,19 @@ int nand_scan_tail(struct mtd_info *mtd) ...@@ -4700,6 +4705,19 @@ int nand_scan_tail(struct mtd_info *mtd)
mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops); mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
break; break;
default: default:
/*
* Expose the whole OOB area to users if ECC_NONE
* is passed. We could do that for any kind of
* ->oobsize, but we must keep the old large/small
* page with-ECC layouts when ->oobsize <= 128 for
* compatibility reasons.
*/
if (ecc->mode == NAND_ECC_NONE) {
mtd_set_ooblayout(mtd,
&nand_ooblayout_lp_ops);
break;
}
WARN(1, "No oob scheme defined for oobsize %d\n", WARN(1, "No oob scheme defined for oobsize %d\n",
mtd->oobsize); mtd->oobsize);
ret = -EINVAL; ret = -EINVAL;
......
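As a sanity check on the NAND_ROW_ADDR_3 heuristic added to nand_detect() above: chip_shift - page_shift is the number of row-address bits, and more than 16 of them cannot be sent in the usual two 8-bit row-address cycles, which lines up with the old per-driver size checks being removed (e.g. ">32MiB" for small-page and ">128MiB" for large-page devices). A standalone sketch with hypothetical geometries:

#include <stdio.h>

/* Row-address bits = log2(chip size) - log2(page size); more than 16 bits
 * means a third row-address cycle is needed (NAND_ROW_ADDR_3). */
int main(void)
{
        const struct { const char *name; int chip_shift, page_shift; } chips[] = {
                { "32MiB, 512B pages",  25,  9 },       /* 16 bits -> 2 cycles */
                { "64MiB, 512B pages",  26,  9 },       /* 17 bits -> 3 cycles */
                { "128MiB, 2KiB pages", 27, 11 },       /* 16 bits -> 2 cycles */
                { "256MiB, 2KiB pages", 28, 11 },       /* 17 bits -> 3 cycles */
        };

        for (unsigned int i = 0; i < sizeof(chips) / sizeof(chips[0]); i++) {
                int row_bits = chips[i].chip_shift - chips[i].page_shift;

                printf("%s: %d row bits -> %s\n", chips[i].name, row_bits,
                       row_bits > 16 ? "NAND_ROW_ADDR_3" : "two row cycles");
        }

        return 0;
}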
...@@ -154,7 +154,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command, ...@@ -154,7 +154,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
if (page_addr != -1) { if (page_addr != -1) {
write_addr_reg(nand, page_addr); write_addr_reg(nand, page_addr);
if (chip->chipsize > (128 << 20)) { if (chip->options & NAND_ROW_ADDR_3) {
write_addr_reg(nand, page_addr >> 8); write_addr_reg(nand, page_addr >> 8);
write_addr_reg(nand, page_addr >> 16 | ENDADDR); write_addr_reg(nand, page_addr >> 16 | ENDADDR);
} else { } else {
......
...@@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2, ...@@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
0x97, 0x79, 0xe5, 0x24, 0xb5}; 0x97, 0x79, 0xe5, 0x24, 0xb5};
/** /**
* omap_calculate_ecc_bch - Generate bytes of ECC bytes * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
* @mtd: MTD device structure * @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed * @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer * @ecc_code: The ecc_code buffer
* @i: The sector number (for a multi sector page)
* *
* Support calculating of BCH4/8 ecc vectors for the page * Support calculating of BCH4/8/16 ECC vectors for one sector
* within a page. Sector number is in @i.
*/ */
static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_calc) const u_char *dat, u_char *ecc_calc, int i)
{ {
struct omap_nand_info *info = mtd_to_omap(mtd); struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes; int eccbytes = info->nand.ecc.bytes;
struct gpmc_nand_regs *gpmc_regs = &info->reg; struct gpmc_nand_regs *gpmc_regs = &info->reg;
u8 *ecc_code; u8 *ecc_code;
unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
u32 val; u32 val;
int i, j; int j;
ecc_code = ecc_calc;
switch (info->ecc_opt) {
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
case OMAP_ECC_BCH8_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
*ecc_code++ = (bch_val4 & 0xFF);
*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
*ecc_code++ = (bch_val3 & 0xFF);
*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
*ecc_code++ = (bch_val2 & 0xFF);
*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
*ecc_code++ = (bch_val1 & 0xFF);
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
case OMAP_ECC_BCH4_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
*ecc_code++ = ((bch_val2 & 0xF) << 4) |
((bch_val1 >> 28) & 0xF);
*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
*ecc_code++ = ((bch_val1 & 0xF) << 4);
break;
case OMAP_ECC_BCH16_CODE_HW:
val = readl(gpmc_regs->gpmc_bch_result6[i]);
ecc_code[0] = ((val >> 8) & 0xFF);
ecc_code[1] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result5[i]);
ecc_code[2] = ((val >> 24) & 0xFF);
ecc_code[3] = ((val >> 16) & 0xFF);
ecc_code[4] = ((val >> 8) & 0xFF);
ecc_code[5] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result4[i]);
ecc_code[6] = ((val >> 24) & 0xFF);
ecc_code[7] = ((val >> 16) & 0xFF);
ecc_code[8] = ((val >> 8) & 0xFF);
ecc_code[9] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result3[i]);
ecc_code[10] = ((val >> 24) & 0xFF);
ecc_code[11] = ((val >> 16) & 0xFF);
ecc_code[12] = ((val >> 8) & 0xFF);
ecc_code[13] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result2[i]);
ecc_code[14] = ((val >> 24) & 0xFF);
ecc_code[15] = ((val >> 16) & 0xFF);
ecc_code[16] = ((val >> 8) & 0xFF);
ecc_code[17] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result1[i]);
ecc_code[18] = ((val >> 24) & 0xFF);
ecc_code[19] = ((val >> 16) & 0xFF);
ecc_code[20] = ((val >> 8) & 0xFF);
ecc_code[21] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result0[i]);
ecc_code[22] = ((val >> 24) & 0xFF);
ecc_code[23] = ((val >> 16) & 0xFF);
ecc_code[24] = ((val >> 8) & 0xFF);
ecc_code[25] = ((val >> 0) & 0xFF);
break;
default:
return -EINVAL;
}
/* ECC scheme specific syndrome customizations */
switch (info->ecc_opt) {
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back
*/
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch4_polynomial[j];
break;
case OMAP_ECC_BCH4_CODE_HW:
/* Set 8th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back
*/
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch8_polynomial[j];
break;
case OMAP_ECC_BCH8_CODE_HW:
/* Set 14th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH16_CODE_HW:
break;
default:
return -EINVAL;
}
return 0;
}
/**
* omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
*
* Supports calculating BCH4/8/16 ECC vectors for one sector. This is used
* when SW-based correction is required, as ECC is then needed for one
* sector at a time.
*/
static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_calc)
{
return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
}
/**
* omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
* @mtd: MTD device structure
* @dat: The pointer to data on which ecc is computed
* @ecc_code: The ecc_code buffer
*
* Supports calculating BCH4/8/16 ECC vectors for the entire page in one go.
*/
static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
const u_char *dat, u_char *ecc_calc)
{
struct omap_nand_info *info = mtd_to_omap(mtd);
int eccbytes = info->nand.ecc.bytes;
unsigned long nsectors;
int i, ret;
nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
for (i = 0; i < nsectors; i++) { for (i = 0; i < nsectors; i++) {
ecc_code = ecc_calc; ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
switch (info->ecc_opt) { if (ret)
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: return ret;
case OMAP_ECC_BCH8_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
*ecc_code++ = (bch_val4 & 0xFF);
*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
*ecc_code++ = (bch_val3 & 0xFF);
*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
*ecc_code++ = (bch_val2 & 0xFF);
*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
*ecc_code++ = (bch_val1 & 0xFF);
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
case OMAP_ECC_BCH4_CODE_HW:
bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
*ecc_code++ = ((bch_val2 & 0xF) << 4) |
((bch_val1 >> 28) & 0xF);
*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
*ecc_code++ = ((bch_val1 & 0xF) << 4);
break;
case OMAP_ECC_BCH16_CODE_HW:
val = readl(gpmc_regs->gpmc_bch_result6[i]);
ecc_code[0] = ((val >> 8) & 0xFF);
ecc_code[1] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result5[i]);
ecc_code[2] = ((val >> 24) & 0xFF);
ecc_code[3] = ((val >> 16) & 0xFF);
ecc_code[4] = ((val >> 8) & 0xFF);
ecc_code[5] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result4[i]);
ecc_code[6] = ((val >> 24) & 0xFF);
ecc_code[7] = ((val >> 16) & 0xFF);
ecc_code[8] = ((val >> 8) & 0xFF);
ecc_code[9] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result3[i]);
ecc_code[10] = ((val >> 24) & 0xFF);
ecc_code[11] = ((val >> 16) & 0xFF);
ecc_code[12] = ((val >> 8) & 0xFF);
ecc_code[13] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result2[i]);
ecc_code[14] = ((val >> 24) & 0xFF);
ecc_code[15] = ((val >> 16) & 0xFF);
ecc_code[16] = ((val >> 8) & 0xFF);
ecc_code[17] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result1[i]);
ecc_code[18] = ((val >> 24) & 0xFF);
ecc_code[19] = ((val >> 16) & 0xFF);
ecc_code[20] = ((val >> 8) & 0xFF);
ecc_code[21] = ((val >> 0) & 0xFF);
val = readl(gpmc_regs->gpmc_bch_result0[i]);
ecc_code[22] = ((val >> 24) & 0xFF);
ecc_code[23] = ((val >> 16) & 0xFF);
ecc_code[24] = ((val >> 8) & 0xFF);
ecc_code[25] = ((val >> 0) & 0xFF);
break;
default:
return -EINVAL;
}
/* ECC scheme specific syndrome customizations */
switch (info->ecc_opt) {
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back */
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch4_polynomial[j];
break;
case OMAP_ECC_BCH4_CODE_HW:
/* Set 8th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
/* Add constant polynomial to remainder, so that
* ECC of blank pages results in 0x0 on reading back */
for (j = 0; j < eccbytes; j++)
ecc_calc[j] ^= bch8_polynomial[j];
break;
case OMAP_ECC_BCH8_CODE_HW:
/* Set 14th ECC byte as 0x0 for ROM compatibility */
ecc_calc[eccbytes - 1] = 0x0;
break;
case OMAP_ECC_BCH16_CODE_HW:
break;
default:
return -EINVAL;
}
ecc_calc += eccbytes; ecc_calc += eccbytes;
} }
return 0; return 0;
...@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, ...@@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->write_buf(mtd, buf, mtd->writesize); chip->write_buf(mtd, buf, mtd->writesize);
/* Update ecc vector from GPMC result registers */ /* Update ecc vector from GPMC result registers */
chip->ecc.calculate(mtd, buf, &ecc_calc[0]); omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total); chip->ecc.total);
...@@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, ...@@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
return 0; return 0;
} }
/**
* omap_write_subpage_bch - BCH hardware ECC based subpage write
* @mtd: mtd info structure
* @chip: nand chip info structure
* @offset: column address of subpage within the page
* @data_len: data length
* @buf: data buffer
* @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
*
* OMAP optimized subpage write method.
*/
static int omap_write_subpage_bch(struct mtd_info *mtd,
struct nand_chip *chip, u32 offset,
u32 data_len, const u8 *buf,
int oob_required, int page)
{
u8 *ecc_calc = chip->buffers->ecccalc;
int ecc_size = chip->ecc.size;
int ecc_bytes = chip->ecc.bytes;
int ecc_steps = chip->ecc.steps;
u32 start_step = offset / ecc_size;
u32 end_step = (offset + data_len - 1) / ecc_size;
int step, ret = 0;
/*
* Write the entire page in one go, as that is optimal since
* ECC is calculated by hardware.
* ECC is computed for all subpages, but only the bytes for the
* subpages actually being written are kept.
*/
/* Enable GPMC ECC engine */
chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
/* Write data */
chip->write_buf(mtd, buf, mtd->writesize);
for (step = 0; step < ecc_steps; step++) {
/* mask ECC of un-touched subpages by padding 0xFF */
if (step < start_step || step > end_step)
memset(ecc_calc, 0xff, ecc_bytes);
else
ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
if (ret)
return ret;
buf += ecc_size;
ecc_calc += ecc_bytes;
}
/* copy calculated ECC for whole page to chip->buffer->oob */
/* this includes the masked value (0xFF) for unwritten subpages */
ecc_calc = chip->buffers->ecccalc;
ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
chip->ecc.total);
if (ret)
return ret;
/* write OOB buffer to NAND device */
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
}
/** /**
* omap_read_page_bch - BCH ecc based page read function for entire page * omap_read_page_bch - BCH ecc based page read function for entire page
* @mtd: mtd info structure * @mtd: mtd info structure
...@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, ...@@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.total); chip->ecc.total);
/* Calculate ecc bytes */ /* Calculate ecc bytes */
chip->ecc.calculate(mtd, buf, ecc_calc); omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
chip->ecc.total); chip->ecc.total);
...@@ -1588,8 +1697,7 @@ static bool is_elm_present(struct omap_nand_info *info, ...@@ -1588,8 +1697,7 @@ static bool is_elm_present(struct omap_nand_info *info,
return true; return true;
} }
static bool omap2_nand_ecc_check(struct omap_nand_info *info, static bool omap2_nand_ecc_check(struct omap_nand_info *info)
struct omap_nand_platform_data *pdata)
{ {
bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm; bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
...@@ -1804,7 +1912,6 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = { ...@@ -1804,7 +1912,6 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
static int omap_nand_probe(struct platform_device *pdev) static int omap_nand_probe(struct platform_device *pdev)
{ {
struct omap_nand_info *info; struct omap_nand_info *info;
struct omap_nand_platform_data *pdata = NULL;
struct mtd_info *mtd; struct mtd_info *mtd;
struct nand_chip *nand_chip; struct nand_chip *nand_chip;
int err; int err;
...@@ -1821,29 +1928,10 @@ static int omap_nand_probe(struct platform_device *pdev) ...@@ -1821,29 +1928,10 @@ static int omap_nand_probe(struct platform_device *pdev)
info->pdev = pdev; info->pdev = pdev;
if (dev->of_node) { err = omap_get_dt_info(dev, info);
if (omap_get_dt_info(dev, info)) if (err)
return -EINVAL; return err;
} else {
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "platform data missing\n");
return -EINVAL;
}
info->gpmc_cs = pdata->cs;
info->reg = pdata->reg;
info->ecc_opt = pdata->ecc_opt;
if (pdata->dev_ready)
dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
info->xfer_type = pdata->xfer_type;
info->devsize = pdata->devsize;
info->elm_of_node = pdata->elm_of_node;
info->flash_bbt = pdata->flash_bbt;
}
platform_set_drvdata(pdev, info);
info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs); info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
if (!info->ops) { if (!info->ops) {
dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n"); dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
...@@ -2002,7 +2090,7 @@ static int omap_nand_probe(struct platform_device *pdev) ...@@ -2002,7 +2090,7 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error; goto return_error;
} }
if (!omap2_nand_ecc_check(info, pdata)) { if (!omap2_nand_ecc_check(info)) {
err = -EINVAL; err = -EINVAL;
goto return_error; goto return_error;
} }
...@@ -2044,7 +2132,7 @@ static int omap_nand_probe(struct platform_device *pdev) ...@@ -2044,7 +2132,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4; nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data; nand_chip->ecc.correct = nand_bch_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */ /* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1; oobbytes_per_step = nand_chip->ecc.bytes + 1;
...@@ -2066,9 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev) ...@@ -2066,9 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 4; nand_chip->ecc.strength = 4;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data; nand_chip->ecc.correct = omap_elm_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch; nand_chip->ecc.write_page = omap_write_page_bch;
nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops); mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes; oobbytes_per_step = nand_chip->ecc.bytes;
...@@ -2087,7 +2175,7 @@ static int omap_nand_probe(struct platform_device *pdev) ...@@ -2087,7 +2175,7 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8; nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = nand_bch_correct_data; nand_chip->ecc.correct = nand_bch_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
/* Reserve one byte for the OMAP marker */ /* Reserve one byte for the OMAP marker */
oobbytes_per_step = nand_chip->ecc.bytes + 1; oobbytes_per_step = nand_chip->ecc.bytes + 1;
...@@ -2109,9 +2197,9 @@ static int omap_nand_probe(struct platform_device *pdev) ...@@ -2109,9 +2197,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 8; nand_chip->ecc.strength = 8;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data; nand_chip->ecc.correct = omap_elm_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch; nand_chip->ecc.write_page = omap_write_page_bch;
nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops); mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes; oobbytes_per_step = nand_chip->ecc.bytes;
...@@ -2131,9 +2219,9 @@ static int omap_nand_probe(struct platform_device *pdev) ...@@ -2131,9 +2219,9 @@ static int omap_nand_probe(struct platform_device *pdev)
nand_chip->ecc.strength = 16; nand_chip->ecc.strength = 16;
nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
nand_chip->ecc.correct = omap_elm_correct_data; nand_chip->ecc.correct = omap_elm_correct_data;
nand_chip->ecc.calculate = omap_calculate_ecc_bch;
nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.read_page = omap_read_page_bch;
nand_chip->ecc.write_page = omap_write_page_bch; nand_chip->ecc.write_page = omap_write_page_bch;
nand_chip->ecc.write_subpage = omap_write_subpage_bch;
mtd_set_ooblayout(mtd, &omap_ooblayout_ops); mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
oobbytes_per_step = nand_chip->ecc.bytes; oobbytes_per_step = nand_chip->ecc.bytes;
...@@ -2167,10 +2255,9 @@ static int omap_nand_probe(struct platform_device *pdev) ...@@ -2167,10 +2255,9 @@ static int omap_nand_probe(struct platform_device *pdev)
if (err) if (err)
goto return_error; goto return_error;
if (dev->of_node) err = mtd_device_register(mtd, NULL, 0);
mtd_device_register(mtd, NULL, 0); if (err)
else goto return_error;
mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
platform_set_drvdata(pdev, mtd); platform_set_drvdata(pdev, mtd);
......
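To make the subpage bounds in omap_write_subpage_bch() above concrete with hypothetical numbers: for ecc.size = 512, a write of data_len = 1024 bytes at offset = 1024 into a 2KiB page gives start_step = 1024 / 512 = 2 and end_step = (1024 + 1024 - 1) / 512 = 3, so only ECC steps 2 and 3 get real ECC bytes while steps 0 and 1 are padded with 0xff; since NAND programming can only clear bits, the 0xff padding leaves the OOB ECC bytes of the untouched subpages unprogrammed.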
...@@ -30,6 +30,8 @@ ...@@ -30,6 +30,8 @@
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h> #include <linux/platform_data/mtd-nand-pxa3xx.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200) #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
#define NAND_STOP_DELAY msecs_to_jiffies(40) #define NAND_STOP_DELAY msecs_to_jiffies(40)
...@@ -45,6 +47,10 @@ ...@@ -45,6 +47,10 @@
*/ */
#define INIT_BUFFER_SIZE 2048 #define INIT_BUFFER_SIZE 2048
/* System control register and bit to enable NAND on some SoCs */
#define GENCONF_SOC_DEVICE_MUX 0x208
#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
/* registers and bit definitions */ /* registers and bit definitions */
#define NDCR (0x00) /* Control register */ #define NDCR (0x00) /* Control register */
#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */ #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
...@@ -174,6 +180,7 @@ enum { ...@@ -174,6 +180,7 @@ enum {
enum pxa3xx_nand_variant { enum pxa3xx_nand_variant {
PXA3XX_NAND_VARIANT_PXA, PXA3XX_NAND_VARIANT_PXA,
PXA3XX_NAND_VARIANT_ARMADA370, PXA3XX_NAND_VARIANT_ARMADA370,
PXA3XX_NAND_VARIANT_ARMADA_8K,
}; };
struct pxa3xx_nand_host { struct pxa3xx_nand_host {
...@@ -425,6 +432,10 @@ static const struct of_device_id pxa3xx_nand_dt_ids[] = { ...@@ -425,6 +432,10 @@ static const struct of_device_id pxa3xx_nand_dt_ids[] = {
.compatible = "marvell,armada370-nand", .compatible = "marvell,armada370-nand",
.data = (void *)PXA3XX_NAND_VARIANT_ARMADA370, .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
}, },
{
.compatible = "marvell,armada-8k-nand",
.data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
},
{} {}
}; };
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
...@@ -825,7 +836,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) ...@@ -825,7 +836,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
info->retcode = ERR_UNCORERR; info->retcode = ERR_UNCORERR;
if (status & NDSR_CORERR) { if (status & NDSR_CORERR) {
info->retcode = ERR_CORERR; info->retcode = ERR_CORERR;
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 && if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
info->ecc_bch) info->ecc_bch)
info->ecc_err_cnt = NDSR_ERR_CNT(status); info->ecc_err_cnt = NDSR_ERR_CNT(status);
else else
...@@ -888,7 +900,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) ...@@ -888,7 +900,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
nand_writel(info, NDCB0, info->ndcb2); nand_writel(info, NDCB0, info->ndcb2);
/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */ /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDCB0, info->ndcb3); nand_writel(info, NDCB0, info->ndcb3);
} }
...@@ -1671,7 +1684,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) ...@@ -1671,7 +1684,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
chip->options |= NAND_BUSWIDTH_16; chip->options |= NAND_BUSWIDTH_16;
/* Device detection must be done with ECC disabled */ /* Device detection must be done with ECC disabled */
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
nand_writel(info, NDECCCTRL, 0x0); nand_writel(info, NDECCCTRL, 0x0);
if (pdata->flash_bbt) if (pdata->flash_bbt)
...@@ -1709,7 +1723,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) ...@@ -1709,7 +1723,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
* (aka splitted) command handling, * (aka splitted) command handling,
*/ */
if (mtd->writesize > PAGE_CHUNK_SIZE) { if (mtd->writesize > PAGE_CHUNK_SIZE) {
if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) { if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
chip->cmdfunc = nand_cmdfunc_extended; chip->cmdfunc = nand_cmdfunc_extended;
} else { } else {
dev_err(&info->pdev->dev, dev_err(&info->pdev->dev,
...@@ -1928,6 +1943,24 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev) ...@@ -1928,6 +1943,24 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
if (!of_id) if (!of_id)
return 0; return 0;
/*
* Some SoCs, such as A7k/A8k, need the NAND controller to be enabled
* manually here so that the driver does not depend on the bootloader
* having done it. This is done through a single bit in the System
* Functions registers.
*/
if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
pdev->dev.of_node, "marvell,system-controller");
u32 reg;
if (IS_ERR(sysctrl_base))
return PTR_ERR(sysctrl_base);
regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
}
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata) if (!pdata)
return -ENOMEM; return -ENOMEM;
......
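As a side note, the read-modify-write of GENCONF_SOC_DEVICE_MUX above could equally be expressed with regmap_update_bits(); a minimal sketch under the same assumptions about the register layout (example_enable_nfc() is a made-up helper):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* Set the NFC enable bit in the system-controller mux register in one call;
 * functionally equivalent to the regmap_read()/regmap_write() pair above. */
static int example_enable_nfc(struct device_node *np)
{
        struct regmap *sysctrl_base;

        sysctrl_base = syscon_regmap_lookup_by_phandle(np,
                                                "marvell,system-controller");
        if (IS_ERR(sysctrl_base))
                return PTR_ERR(sysctrl_base);

        return regmap_update_bits(sysctrl_base, GENCONF_SOC_DEVICE_MUX,
                                  GENCONF_SOC_DEVICE_MUX_NFC_EN,
                                  GENCONF_SOC_DEVICE_MUX_NFC_EN);
}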
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/dma/qcom_bam_dma.h>
/* NANDc reg offsets */ /* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00 #define NAND_FLASH_CMD 0x00
...@@ -199,6 +200,15 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \ ...@@ -199,6 +200,15 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
*/ */
#define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg)) #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg))
/* Returns the NAND register physical address */
#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset))
/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
((chip)->reg_read_dma + \
((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32 #define QPIC_PER_CW_CMD_SGL 32
#define QPIC_PER_CW_DATA_SGL 8 #define QPIC_PER_CW_DATA_SGL 8
...@@ -221,8 +231,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \ ...@@ -221,8 +231,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
/* /*
* This data type corresponds to the BAM transaction which will be used for all * This data type corresponds to the BAM transaction which will be used for all
* NAND transfers. * NAND transfers.
* @bam_ce - the array of BAM command elements
* @cmd_sgl - sgl for NAND BAM command pipe * @cmd_sgl - sgl for NAND BAM command pipe
* @data_sgl - sgl for NAND BAM consumer/producer pipe * @data_sgl - sgl for NAND BAM consumer/producer pipe
* @bam_ce_pos - the index in bam_ce which is available for the next sgl
* @bam_ce_start - the index in bam_ce which marks the first command element
* of the current sgl. It is used for the size calculation
* of the current sgl
* @cmd_sgl_pos - current index in command sgl. * @cmd_sgl_pos - current index in command sgl.
* @cmd_sgl_start - start index in command sgl. * @cmd_sgl_start - start index in command sgl.
* @tx_sgl_pos - current index in data sgl for tx. * @tx_sgl_pos - current index in data sgl for tx.
...@@ -231,8 +246,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \ ...@@ -231,8 +246,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \
* @rx_sgl_start - start index in data sgl for rx. * @rx_sgl_start - start index in data sgl for rx.
*/ */
struct bam_transaction { struct bam_transaction {
struct bam_cmd_element *bam_ce;
struct scatterlist *cmd_sgl; struct scatterlist *cmd_sgl;
struct scatterlist *data_sgl; struct scatterlist *data_sgl;
u32 bam_ce_pos;
u32 bam_ce_start;
u32 cmd_sgl_pos; u32 cmd_sgl_pos;
u32 cmd_sgl_start; u32 cmd_sgl_start;
u32 tx_sgl_pos; u32 tx_sgl_pos;
...@@ -307,7 +325,8 @@ struct nandc_regs { ...@@ -307,7 +325,8 @@ struct nandc_regs {
* controller * controller
* @dev: parent device * @dev: parent device
* @base: MMIO base * @base: MMIO base
* @base_dma: physical base address of controller registers * @base_phys: physical base address of controller registers
* @base_dma: dma base address of controller registers
* @core_clk: controller clock * @core_clk: controller clock
* @aon_clk: another controller clock * @aon_clk: another controller clock
* *
...@@ -340,6 +359,7 @@ struct qcom_nand_controller { ...@@ -340,6 +359,7 @@ struct qcom_nand_controller {
struct device *dev; struct device *dev;
void __iomem *base; void __iomem *base;
phys_addr_t base_phys;
dma_addr_t base_dma; dma_addr_t base_dma;
struct clk *core_clk; struct clk *core_clk;
...@@ -462,7 +482,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc) ...@@ -462,7 +482,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn_size = bam_txn_size =
sizeof(*bam_txn) + num_cw * sizeof(*bam_txn) + num_cw *
((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) + ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
(sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
(sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL)); (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));
bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL); bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL);
...@@ -472,6 +493,10 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc) ...@@ -472,6 +493,10 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc)
bam_txn = bam_txn_buf; bam_txn = bam_txn_buf;
bam_txn_buf += sizeof(*bam_txn); bam_txn_buf += sizeof(*bam_txn);
bam_txn->bam_ce = bam_txn_buf;
bam_txn_buf +=
sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw;
bam_txn->cmd_sgl = bam_txn_buf; bam_txn->cmd_sgl = bam_txn_buf;
bam_txn_buf += bam_txn_buf +=
sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw; sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw;
...@@ -489,6 +514,8 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc) ...@@ -489,6 +514,8 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc)
if (!nandc->props->is_bam) if (!nandc->props->is_bam)
return; return;
bam_txn->bam_ce_pos = 0;
bam_txn->bam_ce_start = 0;
bam_txn->cmd_sgl_pos = 0; bam_txn->cmd_sgl_pos = 0;
bam_txn->cmd_sgl_start = 0; bam_txn->cmd_sgl_start = 0;
bam_txn->tx_sgl_pos = 0; bam_txn->tx_sgl_pos = 0;
...@@ -733,6 +760,66 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc, ...@@ -733,6 +760,66 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
return 0; return 0;
} }
/*
* Prepares the command descriptor for BAM DMA which will be used for NAND
* register reads and writes. The command descriptor must be built out of
* command elements, so this function takes command elements from the bam
* transaction ce array and fills them with the required data. A single SGL
* can contain multiple command elements, so NAND_BAM_NEXT_SGL is used to
* start a separate SGL after the current command element.
*/
static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
int reg_off, const void *vaddr,
int size, unsigned int flags)
{
int bam_ce_size;
int i, ret;
struct bam_cmd_element *bam_ce_buffer;
struct bam_transaction *bam_txn = nandc->bam_txn;
bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
/* fill the command desc */
for (i = 0; i < size; i++) {
if (read)
bam_prep_ce(&bam_ce_buffer[i],
nandc_reg_phys(nandc, reg_off + 4 * i),
BAM_READ_COMMAND,
reg_buf_dma_addr(nandc,
(__le32 *)vaddr + i));
else
bam_prep_ce_le32(&bam_ce_buffer[i],
nandc_reg_phys(nandc, reg_off + 4 * i),
BAM_WRITE_COMMAND,
*((__le32 *)vaddr + i));
}
bam_txn->bam_ce_pos += size;
/* use the separate sgl after this command */
if (flags & NAND_BAM_NEXT_SGL) {
bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
bam_ce_size = (bam_txn->bam_ce_pos -
bam_txn->bam_ce_start) *
sizeof(struct bam_cmd_element);
sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
bam_ce_buffer, bam_ce_size);
bam_txn->cmd_sgl_pos++;
bam_txn->bam_ce_start = bam_txn->bam_ce_pos;
if (flags & NAND_BAM_NWD) {
ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
DMA_PREP_FENCE |
DMA_PREP_CMD);
if (ret)
return ret;
}
}
return 0;
}
/* /*
* Prepares the data descriptor for BAM DMA which will be used for NAND * Prepares the data descriptor for BAM DMA which will be used for NAND
* data reads and writes. * data reads and writes.
...@@ -851,19 +938,22 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first, ...@@ -851,19 +938,22 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
{ {
bool flow_control = false; bool flow_control = false;
void *vaddr; void *vaddr;
int size;
if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
flow_control = true; nandc->reg_read_pos += num_regs;
if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1) if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
first = dev_cmd_reg_addr(nandc, first); first = dev_cmd_reg_addr(nandc, first);
size = num_regs * sizeof(u32); if (nandc->props->is_bam)
vaddr = nandc->reg_read_buf + nandc->reg_read_pos; return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
nandc->reg_read_pos += num_regs; num_regs, flags);
if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
flow_control = true;
return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control); return prep_adm_dma_desc(nandc, true, first, vaddr,
num_regs * sizeof(u32), flow_control);
} }
/* /*
...@@ -880,13 +970,9 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first, ...@@ -880,13 +970,9 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
bool flow_control = false; bool flow_control = false;
struct nandc_regs *regs = nandc->regs; struct nandc_regs *regs = nandc->regs;
void *vaddr; void *vaddr;
int size;
vaddr = offset_to_nandc_reg(regs, first); vaddr = offset_to_nandc_reg(regs, first);
if (first == NAND_FLASH_CMD)
flow_control = true;
if (first == NAND_ERASED_CW_DETECT_CFG) { if (first == NAND_ERASED_CW_DETECT_CFG) {
if (flags & NAND_ERASED_CW_SET) if (flags & NAND_ERASED_CW_SET)
vaddr = &regs->erased_cw_detect_cfg_set; vaddr = &regs->erased_cw_detect_cfg_set;
...@@ -903,10 +989,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first, ...@@ -903,10 +989,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD) if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD); first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);
size = num_regs * sizeof(u32); if (nandc->props->is_bam)
return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
num_regs, flags);
if (first == NAND_FLASH_CMD)
flow_control = true;
return prep_adm_dma_desc(nandc, false, first, vaddr, size, return prep_adm_dma_desc(nandc, false, first, vaddr,
flow_control); num_regs * sizeof(u32), flow_control);
} }
/* /*
...@@ -1170,7 +1261,8 @@ static int submit_descs(struct qcom_nand_controller *nandc) ...@@ -1170,7 +1261,8 @@ static int submit_descs(struct qcom_nand_controller *nandc)
} }
if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) { if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0); r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
DMA_PREP_CMD);
if (r) if (r)
return r; return r;
} }
...@@ -2705,6 +2797,7 @@ static int qcom_nandc_probe(struct platform_device *pdev) ...@@ -2705,6 +2797,7 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (IS_ERR(nandc->base)) if (IS_ERR(nandc->base))
return PTR_ERR(nandc->base); return PTR_ERR(nandc->base);
nandc->base_phys = res->start;
nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start); nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
nandc->core_clk = devm_clk_get(dev, "core"); nandc->core_clk = devm_clk_get(dev, "core");
......
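For clarity on the reg_buf_dma_addr() macro introduced above: it translates a position inside reg_read_buf into the matching bus address by plain byte-offset arithmetic. With made-up addresses, if reg_read_buf sits at kernel address 0xffff0000 and reg_read_dma is 0x80000000, a read posted at reg_read_buf + 5 (the sixth __le32 slot, byte offset 20) ends up in the BAM command element with DMA address 0x80000014.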
...@@ -1094,14 +1094,11 @@ MODULE_DEVICE_TABLE(of, of_flctl_match); ...@@ -1094,14 +1094,11 @@ MODULE_DEVICE_TABLE(of, of_flctl_match);
static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev) static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
{ {
const struct of_device_id *match; const struct flctl_soc_config *config;
struct flctl_soc_config *config;
struct sh_flctl_platform_data *pdata; struct sh_flctl_platform_data *pdata;
match = of_match_device(of_flctl_match, dev); config = of_device_get_match_data(dev);
if (match) if (!config) {
config = (struct flctl_soc_config *)match->data;
else {
dev_err(dev, "%s: no OF configuration attached\n", __func__); dev_err(dev, "%s: no OF configuration attached\n", __func__);
return NULL; return NULL;
} }
......
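The sh_flctl change above is the generic of_device_get_match_data() pattern; here is a hedged sketch with made-up names showing how per-SoC data rides on the OF match table and is retrieved in one call instead of of_match_device() plus a cast:

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Hypothetical per-SoC configuration attached to the match table */
struct example_soc_config {
        unsigned int has_hwecc;
};

static const struct example_soc_config example_cfg = { .has_hwecc = 1 };

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-flctl", .data = &example_cfg },
        { /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
        const struct example_soc_config *cfg;

        /* Replaces of_match_device() + cast; NULL means no OF data attached */
        cfg = of_device_get_match_data(&pdev->dev);
        if (!cfg)
                return -ENODEV;

        return 0;
}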
...@@ -4,11 +4,6 @@ ...@@ -4,11 +4,6 @@
#include <linux/mtd/rawnand.h> #include <linux/mtd/rawnand.h>
struct gpio_nand_platdata { struct gpio_nand_platdata {
int gpio_nce;
int gpio_nwp;
int gpio_cle;
int gpio_ale;
int gpio_rdy;
void (*adjust_parts)(struct gpio_nand_platdata *, size_t); void (*adjust_parts)(struct gpio_nand_platdata *, size_t);
struct mtd_partition *parts; struct mtd_partition *parts;
unsigned int num_parts; unsigned int num_parts;
......
...@@ -177,6 +177,9 @@ enum nand_ecc_algo { ...@@ -177,6 +177,9 @@ enum nand_ecc_algo {
*/ */
#define NAND_NEED_SCRAMBLING 0x00002000 #define NAND_NEED_SCRAMBLING 0x00002000
/* Device needs 3rd row address cycle */
#define NAND_ROW_ADDR_3 0x00004000
/* Options valid for Samsung large page devices */ /* Options valid for Samsung large page devices */
#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
......
...@@ -66,21 +66,4 @@ struct gpmc_nand_regs { ...@@ -66,21 +66,4 @@ struct gpmc_nand_regs {
/* Deprecated. Do not use */ /* Deprecated. Do not use */
void __iomem *gpmc_status; void __iomem *gpmc_status;
}; };
struct omap_nand_platform_data {
int cs;
struct mtd_partition *parts;
int nr_parts;
bool flash_bbt;
enum nand_io xfer_type;
int devsize;
enum omap_ecc ecc_opt;
struct device_node *elm_of_node;
/* deprecated */
struct gpmc_nand_regs reg;
struct device_node *of_node;
bool dev_ready;
};
#endif #endif