Commit 5ec8d8c9 authored by Olof Johansson

Merge tag 'omap-devel-gpmc-fixed-for-v3.7' of...

Merge tag 'omap-devel-gpmc-fixed-for-v3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into next/cleanup

From Tony Lindgren:

Changes for GPMC (General Purpose Memory Controller) that take it
closer to being just a regular device driver.

* tag 'omap-devel-gpmc-fixed-for-v3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap:
  mtd: nand: omap2: use gpmc provided irqs
  ARM: OMAP2+: gpmc-nand: Modify Interrupt handling
  ARM: OMAP2+: gpmc: Modify interrupt handling
  mtd: onenand: omap2: obtain memory from resource
  mtd: nand: omap2: obtain memory from resource
  ARM: OMAP2+: gpmc-onenand: provide memory as resource
  ARM: OMAP2+: gpmc-nand: update resource with memory
  mtd: nand: omap2: handle nand on gpmc
  ARM: OMAP2+: gpmc-nand: update gpmc-nand regs
  ARM: OMAP2+: gpmc: update nand register helper
parents afdeeccb 5c468455
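
Before the combined diff, a brief illustrative sketch of the consumer-side pattern this series moves toward: gpmc_nand_init() now requests the chip-select region itself and hands the MTD driver a memory window plus the two GPMC interrupts (FIFO event and terminal count) as ordinary platform resources, so the driver fetches them with platform_get_resource()/platform_get_irq() instead of calling back into the GPMC code. The sketch is not taken from the patches; names such as example_probe() and example_fifo_irq() are hypothetical.

/*
 * Illustrative only -- not part of this series. Shows how a client driver
 * picks up the resources that gpmc_nand_init()/gpmc_setup_irq() now export;
 * the reworked omap_nand_probe() in the diff below does the same, with a
 * shared handler for both interrupts.
 */
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>

static irqreturn_t example_fifo_irq(int irq, void *dev_id)
{
    /* FIFO-threshold event: drain or refill the prefetch FIFO here */
    return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
    struct resource *res;
    void __iomem *base;
    int irq_fifo, irq_count, err;

    /* memory window set up by the GPMC layer via gpmc_cs_request() */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
        return -EINVAL;

    if (!request_mem_region(res->start, resource_size(res), pdev->name))
        return -EBUSY;

    base = ioremap(res->start, resource_size(res));
    if (!base) {
        err = -ENOMEM;
        goto release;
    }

    /* the two GPMC interrupts exported as resources 0 and 1 */
    irq_fifo = platform_get_irq(pdev, 0);   /* GPMC_IRQ_FIFOEVENTENABLE */
    irq_count = platform_get_irq(pdev, 1);  /* GPMC_IRQ_COUNT_EVENT */
    if (irq_fifo <= 0 || irq_count <= 0) {
        err = -ENODEV;
        goto unmap;
    }

    err = request_irq(irq_fifo, example_fifo_irq, IRQF_SHARED,
              "example-fifo", pdev);
    if (err)
        goto unmap;

    return 0;

unmap:
    iounmap(base);
release:
    release_mem_region(res->start, resource_size(res));
    return err;
}

The combined diff of the merged branch follows.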
@@ -21,15 +21,23 @@
 #include <plat/board.h>
 #include <plat/gpmc.h>
 
-static struct resource gpmc_nand_resource = {
-    .flags = IORESOURCE_MEM,
+static struct resource gpmc_nand_resource[] = {
+    {
+        .flags = IORESOURCE_MEM,
+    },
+    {
+        .flags = IORESOURCE_IRQ,
+    },
+    {
+        .flags = IORESOURCE_IRQ,
+    },
 };
 
 static struct platform_device gpmc_nand_device = {
     .name = "omap2-nand",
     .id = 0,
-    .num_resources = 1,
-    .resource = &gpmc_nand_resource,
+    .num_resources = ARRAY_SIZE(gpmc_nand_resource),
+    .resource = gpmc_nand_resource,
 };
 
 static int omap2_nand_gpmc_retime(struct omap_nand_platform_data *gpmc_nand_data)
@@ -75,6 +83,7 @@ static int omap2_nand_gpmc_retime(struct omap_nand_platform_data *gpmc_nand_data
     gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_DEV_SIZE, 0);
     gpmc_cs_configure(gpmc_nand_data->cs,
             GPMC_CONFIG_DEV_TYPE, GPMC_DEVICETYPE_NAND);
+    gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_WP, 0);
     err = gpmc_cs_set_timings(gpmc_nand_data->cs, &t);
     if (err)
         return err;
@@ -90,12 +99,19 @@ int __init gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data)
     gpmc_nand_device.dev.platform_data = gpmc_nand_data;
 
     err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
-            &gpmc_nand_data->phys_base);
+            (unsigned long *)&gpmc_nand_resource[0].start);
     if (err < 0) {
         dev_err(dev, "Cannot request GPMC CS\n");
         return err;
     }
 
+    gpmc_nand_resource[0].end = gpmc_nand_resource[0].start +
+                    NAND_IO_SIZE - 1;
+    gpmc_nand_resource[1].start =
+                gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
+    gpmc_nand_resource[2].start =
+                gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
+
     /* Set timings in GPMC */
     err = omap2_nand_gpmc_retime(gpmc_nand_data);
     if (err < 0) {
@@ -108,6 +124,8 @@ int __init gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data)
         gpmc_cs_configure(gpmc_nand_data->cs, GPMC_CONFIG_RDY_BSY, 1);
     }
 
+    gpmc_update_nand_reg(&gpmc_nand_data->reg, gpmc_nand_data->cs);
+
     err = platform_device_register(&gpmc_nand_device);
     if (err < 0) {
         dev_err(dev, "Unable to register NAND device\n");
......
@@ -23,11 +23,19 @@
 #include <plat/board.h>
 #include <plat/gpmc.h>
 
+#define ONENAND_IO_SIZE SZ_128K
+
 static struct omap_onenand_platform_data *gpmc_onenand_data;
 
+static struct resource gpmc_onenand_resource = {
+    .flags = IORESOURCE_MEM,
+};
+
 static struct platform_device gpmc_onenand_device = {
     .name = "omap2-onenand",
     .id = -1,
+    .num_resources = 1,
+    .resource = &gpmc_onenand_resource,
 };
 
 static int omap2_onenand_set_async_mode(int cs, void __iomem *onenand_base)
@@ -390,6 +398,8 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
 void __init gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 {
+    int err;
+
     gpmc_onenand_data = _onenand_data;
     gpmc_onenand_data->onenand_setup = gpmc_onenand_setup;
     gpmc_onenand_device.dev.platform_data = gpmc_onenand_data;
@@ -401,8 +411,19 @@ void __init gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
         gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
     }
 
+    err = gpmc_cs_request(gpmc_onenand_data->cs, ONENAND_IO_SIZE,
+                (unsigned long *)&gpmc_onenand_resource.start);
+    if (err < 0) {
+        pr_err("%s: Cannot request GPMC CS\n", __func__);
+        return;
+    }
+
+    gpmc_onenand_resource.end = gpmc_onenand_resource.start +
+                    ONENAND_IO_SIZE - 1;
+
     if (platform_device_register(&gpmc_onenand_device) < 0) {
-        printk(KERN_ERR "Unable to register OneNAND device\n");
+        pr_err("%s: Unable to register OneNAND device\n", __func__);
+        gpmc_cs_free(gpmc_onenand_data->cs);
         return;
     }
 }
@@ -78,6 +78,15 @@
 #define ENABLE_PREFETCH (0x1 << 7)
 #define DMA_MPU_MODE 2
 
+/* XXX: Only NAND irq has been considered,currently these are the only ones used
+ */
+#define GPMC_NR_IRQ 2
+
+struct gpmc_client_irq {
+    unsigned irq;
+    u32 bitmask;
+};
+
 /* Structure to save gpmc cs context */
 struct gpmc_cs_config {
     u32 config1;
@@ -105,6 +114,10 @@ struct omap3_gpmc_regs {
     struct gpmc_cs_config cs_context[GPMC_CS_NUM];
 };
 
+static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
+static struct irq_chip gpmc_irq_chip;
+static unsigned gpmc_irq_start;
+
 static struct resource gpmc_mem_root;
 static struct resource gpmc_cs_mem[GPMC_CS_NUM];
 static DEFINE_SPINLOCK(gpmc_mem_lock);
@@ -682,6 +695,117 @@ int gpmc_prefetch_reset(int cs)
 }
 EXPORT_SYMBOL(gpmc_prefetch_reset);
 
+void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
+{
+    reg->gpmc_status = gpmc_base + GPMC_STATUS;
+    reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
+                GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
+    reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
+                GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs;
+    reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET +
+                GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs;
+    reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1;
+    reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2;
+    reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL;
+    reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS;
+    reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG;
+    reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL;
+    reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG;
+    reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT;
+    reg->gpmc_bch_result0 = gpmc_base + GPMC_ECC_BCH_RESULT_0;
+}
+
+int gpmc_get_client_irq(unsigned irq_config)
+{
+    int i;
+
+    if (hweight32(irq_config) > 1)
+        return 0;
+
+    for (i = 0; i < GPMC_NR_IRQ; i++)
+        if (gpmc_client_irq[i].bitmask & irq_config)
+            return gpmc_client_irq[i].irq;
+
+    return 0;
+}
+
+static int gpmc_irq_endis(unsigned irq, bool endis)
+{
+    int i;
+    u32 regval;
+
+    for (i = 0; i < GPMC_NR_IRQ; i++)
+        if (irq == gpmc_client_irq[i].irq) {
+            regval = gpmc_read_reg(GPMC_IRQENABLE);
+            if (endis)
+                regval |= gpmc_client_irq[i].bitmask;
+            else
+                regval &= ~gpmc_client_irq[i].bitmask;
+            gpmc_write_reg(GPMC_IRQENABLE, regval);
+            break;
+        }
+
+    return 0;
+}
+
+static void gpmc_irq_disable(struct irq_data *p)
+{
+    gpmc_irq_endis(p->irq, false);
+}
+
+static void gpmc_irq_enable(struct irq_data *p)
+{
+    gpmc_irq_endis(p->irq, true);
+}
+
+static void gpmc_irq_noop(struct irq_data *data) { }
+
+static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
+
+static int gpmc_setup_irq(int gpmc_irq)
+{
+    int i;
+    u32 regval;
+
+    if (!gpmc_irq)
+        return -EINVAL;
+
+    gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
+    if (IS_ERR_VALUE(gpmc_irq_start)) {
+        pr_err("irq_alloc_descs failed\n");
+        return gpmc_irq_start;
+    }
+
+    gpmc_irq_chip.name = "gpmc";
+    gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
+    gpmc_irq_chip.irq_enable = gpmc_irq_enable;
+    gpmc_irq_chip.irq_disable = gpmc_irq_disable;
+    gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
+    gpmc_irq_chip.irq_ack = gpmc_irq_noop;
+    gpmc_irq_chip.irq_mask = gpmc_irq_noop;
+    gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
+
+    gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
+    gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
+
+    for (i = 0; i < GPMC_NR_IRQ; i++) {
+        gpmc_client_irq[i].irq = gpmc_irq_start + i;
+        irq_set_chip_and_handler(gpmc_client_irq[i].irq,
+                    &gpmc_irq_chip, handle_simple_irq);
+        set_irq_flags(gpmc_client_irq[i].irq,
+                    IRQF_VALID | IRQF_NOAUTOEN);
+    }
+
+    /* Disable interrupts */
+    gpmc_write_reg(GPMC_IRQENABLE, 0);
+
+    /* clear interrupts */
+    regval = gpmc_read_reg(GPMC_IRQSTATUS);
+    gpmc_write_reg(GPMC_IRQSTATUS, regval);
+
+    return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL);
+}
+
 static void __init gpmc_mem_init(void)
 {
     int cs;
@@ -711,8 +835,8 @@ static void __init gpmc_mem_init(void)
 static int __init gpmc_init(void)
 {
-    u32 l, irq;
-    int cs, ret = -EINVAL;
+    u32 l;
+    int ret = -EINVAL;
     int gpmc_irq;
     char *ck = NULL;
@@ -761,16 +885,7 @@ static int __init gpmc_init(void)
     gpmc_write_reg(GPMC_SYSCONFIG, l);
     gpmc_mem_init();
 
-    /* initalize the irq_chained */
-    irq = OMAP_GPMC_IRQ_BASE;
-    for (cs = 0; cs < GPMC_CS_NUM; cs++) {
-        irq_set_chip_and_handler(irq, &dummy_irq_chip,
-                        handle_simple_irq);
-        set_irq_flags(irq, IRQF_VALID);
-        irq++;
-    }
-
-    ret = request_irq(gpmc_irq, gpmc_handle_irq, IRQF_SHARED, "gpmc", NULL);
+    ret = gpmc_setup_irq(gpmc_irq);
     if (ret)
         pr_err("gpmc: irq-%d could not claim: err %d\n",
                 gpmc_irq, ret);
@@ -780,12 +895,19 @@ postcore_initcall(gpmc_init);
 
 static irqreturn_t gpmc_handle_irq(int irq, void *dev)
 {
-    u8 cs;
+    int i;
+    u32 regval;
+
+    regval = gpmc_read_reg(GPMC_IRQSTATUS);
+
+    if (!regval)
+        return IRQ_NONE;
+
+    for (i = 0; i < GPMC_NR_IRQ; i++)
+        if (regval & gpmc_client_irq[i].bitmask)
+            generic_handle_irq(gpmc_client_irq[i].irq);
 
-    /* check cs to invoke the irq */
-    cs = ((gpmc_read_reg(GPMC_PREFETCH_CONFIG1)) >> CS_NUM_SHIFT) & 0x7;
-    if (OMAP_GPMC_IRQ_BASE+cs <= OMAP_GPMC_IRQ_END)
-        generic_handle_irq(OMAP_GPMC_IRQ_BASE+cs);
+    gpmc_write_reg(GPMC_IRQSTATUS, regval);
 
     return IRQ_HANDLED;
 }
......
@@ -133,6 +133,25 @@ struct gpmc_timings {
     u16 wr_data_mux_bus; /* WRDATAONADMUXBUS */
 };
 
+struct gpmc_nand_regs {
+    void __iomem *gpmc_status;
+    void __iomem *gpmc_nand_command;
+    void __iomem *gpmc_nand_address;
+    void __iomem *gpmc_nand_data;
+    void __iomem *gpmc_prefetch_config1;
+    void __iomem *gpmc_prefetch_config2;
+    void __iomem *gpmc_prefetch_control;
+    void __iomem *gpmc_prefetch_status;
+    void __iomem *gpmc_ecc_config;
+    void __iomem *gpmc_ecc_control;
+    void __iomem *gpmc_ecc_size_config;
+    void __iomem *gpmc_ecc1_result;
+    void __iomem *gpmc_bch_result0;
+};
+
+extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
+extern int gpmc_get_client_irq(unsigned irq_config);
+
 extern unsigned int gpmc_ns_to_ticks(unsigned int time_ns);
 extern unsigned int gpmc_ps_to_ticks(unsigned int time_ps);
 extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
......
@@ -26,9 +26,9 @@ struct omap_nand_platform_data {
     bool dev_ready;
     int gpmc_irq;
     enum nand_io xfer_type;
-    unsigned long phys_base;
     int devsize;
     enum omap_ecc ecc_opt;
+    struct gpmc_nand_regs reg;
 };
 
 /* minimum size for IO mapping */
......
@@ -101,6 +101,16 @@
 #define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
 #define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
 
+#define PREFETCH_CONFIG1_CS_SHIFT 24
+#define ECC_CONFIG_CS_SHIFT 1
+#define CS_MASK 0x7
+#define ENABLE_PREFETCH (0x1 << 7)
+#define DMA_MPU_MODE_SHIFT 2
+#define ECCSIZE1_SHIFT 22
+#define ECC1RESULTSIZE 0x1
+#define ECCCLEAR 0x100
+#define ECC1 0x1
+
 /* oob info generated runtime depending on ecc algorithm and layout selected */
 static struct nand_ecclayout omap_oobinfo;
 /* Define some generic bad / good block scan pattern which are used
@@ -124,15 +134,18 @@ struct omap_nand_info {
     int gpmc_cs;
     unsigned long phys_base;
+    unsigned long mem_size;
     struct completion comp;
     struct dma_chan *dma;
-    int gpmc_irq;
+    int gpmc_irq_fifo;
+    int gpmc_irq_count;
     enum {
         OMAP_NAND_IO_READ = 0, /* read */
         OMAP_NAND_IO_WRITE, /* write */
     } iomode;
     u_char *buf;
     int buf_len;
+    struct gpmc_nand_regs reg;
 
 #ifdef CONFIG_MTD_NAND_OMAP_BCH
     struct bch_control *bch;
@@ -140,6 +153,63 @@ struct omap_nand_info {
 #endif
 };
 
+/**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read(0) or write post(1) mode
+ */
+static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
+    unsigned int u32_count, int is_write, struct omap_nand_info *info)
+{
+    u32 val;
+
+    if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+        return -1;
+
+    if (readl(info->reg.gpmc_prefetch_control))
+        return -EBUSY;
+
+    /* Set the amount of bytes to be prefetched */
+    writel(u32_count, info->reg.gpmc_prefetch_config2);
+
+    /* Set dma/mpu mode, the prefetch read / post write and
+     * enable the engine. Set which cs is has requested for.
+     */
+    val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
+        PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
+        (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
+    writel(val, info->reg.gpmc_prefetch_config1);
+
+    /* Start the prefetch engine */
+    writel(0x1, info->reg.gpmc_prefetch_control);
+
+    return 0;
+}
+
+/**
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ */
+static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
+{
+    u32 config1;
+
+    /* check if the same module/cs is trying to reset */
+    config1 = readl(info->reg.gpmc_prefetch_config1);
+    if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
+        return -EINVAL;
+
+    /* Stop the PFPW engine */
+    writel(0x0, info->reg.gpmc_prefetch_control);
+
+    /* Reset/disable the PFPW engine */
+    writel(0x0, info->reg.gpmc_prefetch_config1);
+
+    return 0;
+}
+
 /**
  * omap_hwcontrol - hardware specific access to control-lines
  * @mtd: MTD device structure
@@ -158,13 +228,13 @@ static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
     if (cmd != NAND_CMD_NONE) {
         if (ctrl & NAND_CLE)
-            gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd);
+            writeb(cmd, info->reg.gpmc_nand_command);
 
         else if (ctrl & NAND_ALE)
-            gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd);
+            writeb(cmd, info->reg.gpmc_nand_address);
 
         else /* NAND_NCE */
-            gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd);
+            writeb(cmd, info->reg.gpmc_nand_data);
     }
 }
@@ -198,7 +268,8 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
         iowrite8(*p++, info->nand.IO_ADDR_W);
         /* wait until buffer is available for write */
         do {
-            status = gpmc_read_status(GPMC_STATUS_BUFFER);
+            status = readl(info->reg.gpmc_status) &
+                    GPMC_STATUS_BUFF_EMPTY;
         } while (!status);
     }
 }
@@ -235,7 +306,8 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
         iowrite16(*p++, info->nand.IO_ADDR_W);
         /* wait until buffer is available for write */
         do {
-            status = gpmc_read_status(GPMC_STATUS_BUFFER);
+            status = readl(info->reg.gpmc_status) &
+                    GPMC_STATUS_BUFF_EMPTY;
         } while (!status);
     }
 }
@@ -265,8 +337,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
     }
 
     /* configure and start prefetch transfer */
-    ret = gpmc_prefetch_enable(info->gpmc_cs,
-            PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0);
+    ret = omap_prefetch_enable(info->gpmc_cs,
+            PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
     if (ret) {
         /* PFPW engine is busy, use cpu copy method */
         if (info->nand.options & NAND_BUSWIDTH_16)
@@ -275,14 +347,15 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
             omap_read_buf8(mtd, (u_char *)p, len);
     } else {
         do {
-            r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+            r_count = readl(info->reg.gpmc_prefetch_status);
+            r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count);
             r_count = r_count >> 2;
             ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
             p += r_count;
             len -= r_count << 2;
         } while (len);
 
         /* disable and stop the PFPW engine */
-        gpmc_prefetch_reset(info->gpmc_cs);
+        omap_prefetch_reset(info->gpmc_cs, info);
     }
 }
@@ -301,6 +374,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
     int i = 0, ret = 0;
     u16 *p = (u16 *)buf;
     unsigned long tim, limit;
+    u32 val;
 
     /* take care of subpage writes */
     if (len % 2 != 0) {
@@ -310,8 +384,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
     }
 
     /* configure and start prefetch transfer */
-    ret = gpmc_prefetch_enable(info->gpmc_cs,
-            PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1);
+    ret = omap_prefetch_enable(info->gpmc_cs,
+            PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
     if (ret) {
         /* PFPW engine is busy, use cpu copy method */
         if (info->nand.options & NAND_BUSWIDTH_16)
@@ -320,7 +394,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
             omap_write_buf8(mtd, (u_char *)p, len);
     } else {
         while (len) {
-            w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+            w_count = readl(info->reg.gpmc_prefetch_status);
+            w_count = GPMC_PREFETCH_STATUS_FIFO_CNT(w_count);
             w_count = w_count >> 1;
             for (i = 0; (i < w_count) && len; i++, len -= 2)
                 iowrite16(*p++, info->nand.IO_ADDR_W);
@@ -329,11 +404,14 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
         tim = 0;
         limit = (loops_per_jiffy *
                     msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-        while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+        do {
             cpu_relax();
+            val = readl(info->reg.gpmc_prefetch_status);
+            val = GPMC_PREFETCH_STATUS_COUNT(val);
+        } while (val && (tim++ < limit));
 
         /* disable and stop the PFPW engine */
-        gpmc_prefetch_reset(info->gpmc_cs);
+        omap_prefetch_reset(info->gpmc_cs, info);
     }
 }
@@ -365,6 +443,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
     unsigned long tim, limit;
     unsigned n;
     int ret;
+    u32 val;
 
     if (addr >= high_memory) {
         struct page *p1;
@@ -396,9 +475,9 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
     tx->callback_param = &info->comp;
     dmaengine_submit(tx);
 
     /* configure and start prefetch transfer */
-    ret = gpmc_prefetch_enable(info->gpmc_cs,
-            PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+    ret = omap_prefetch_enable(info->gpmc_cs,
+            PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
     if (ret)
         /* PFPW engine is busy, use cpu copy method */
         goto out_copy_unmap;
@@ -410,11 +489,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
     wait_for_completion(&info->comp);
     tim = 0;
     limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-    while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+
+    do {
         cpu_relax();
+        val = readl(info->reg.gpmc_prefetch_status);
+        val = GPMC_PREFETCH_STATUS_COUNT(val);
+    } while (val && (tim++ < limit));
 
     /* disable and stop the PFPW engine */
-    gpmc_prefetch_reset(info->gpmc_cs);
+    omap_prefetch_reset(info->gpmc_cs, info);
 
     dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
     return 0;
@@ -471,13 +554,12 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
 {
     struct omap_nand_info *info = (struct omap_nand_info *) dev;
     u32 bytes;
-    u32 irq_stat;
 
-    irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS);
-    bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT);
+    bytes = readl(info->reg.gpmc_prefetch_status);
+    bytes = GPMC_PREFETCH_STATUS_FIFO_CNT(bytes);
     bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
     if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
-        if (irq_stat & 0x2)
+        if (this_irq == info->gpmc_irq_count)
             goto done;
 
         if (info->buf_len && (info->buf_len < bytes))
@@ -494,20 +576,17 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
                 (u32 *)info->buf, bytes >> 2);
         info->buf = info->buf + bytes;
 
-        if (irq_stat & 0x2)
+        if (this_irq == info->gpmc_irq_count)
             goto done;
     }
-    gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
 
     return IRQ_HANDLED;
 
 done:
     complete(&info->comp);
-    /* disable irq */
-    gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
 
-    /* clear status */
-    gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
+    disable_irq_nosync(info->gpmc_irq_fifo);
+    disable_irq_nosync(info->gpmc_irq_count);
 
     return IRQ_HANDLED;
 }
@@ -534,22 +613,22 @@ static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
     init_completion(&info->comp);
 
     /* configure and start prefetch transfer */
-    ret = gpmc_prefetch_enable(info->gpmc_cs,
-            PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0);
+    ret = omap_prefetch_enable(info->gpmc_cs,
+            PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
     if (ret)
         /* PFPW engine is busy, use cpu copy method */
         goto out_copy;
 
     info->buf_len = len;
-    /* enable irq */
-    gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
-        (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+    enable_irq(info->gpmc_irq_count);
+    enable_irq(info->gpmc_irq_fifo);
 
     /* waiting for read to complete */
     wait_for_completion(&info->comp);
 
     /* disable and stop the PFPW engine */
-    gpmc_prefetch_reset(info->gpmc_cs);
+    omap_prefetch_reset(info->gpmc_cs, info);
     return;
 
 out_copy:
@@ -572,6 +651,7 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
                     struct omap_nand_info, mtd);
     int ret = 0;
     unsigned long tim, limit;
+    u32 val;
 
     if (len <= mtd->oobsize) {
         omap_write_buf_pref(mtd, buf, len);
@@ -583,27 +663,31 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
     init_completion(&info->comp);
 
     /* configure and start prefetch transfer : size=24 */
-    ret = gpmc_prefetch_enable(info->gpmc_cs,
-        (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1);
+    ret = omap_prefetch_enable(info->gpmc_cs,
+        (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
     if (ret)
         /* PFPW engine is busy, use cpu copy method */
         goto out_copy;
 
     info->buf_len = len;
-    /* enable irq */
-    gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
-        (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT));
+
+    enable_irq(info->gpmc_irq_count);
+    enable_irq(info->gpmc_irq_fifo);
 
     /* waiting for write to complete */
     wait_for_completion(&info->comp);
 
     /* wait for data to flushed-out before reset the prefetch */
     tim = 0;
     limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-    while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+    do {
+        val = readl(info->reg.gpmc_prefetch_status);
+        val = GPMC_PREFETCH_STATUS_COUNT(val);
         cpu_relax();
+    } while (val && (tim++ < limit));
 
     /* disable and stop the PFPW engine */
-    gpmc_prefetch_reset(info->gpmc_cs);
+    omap_prefetch_reset(info->gpmc_cs, info);
     return;
 
 out_copy:
@@ -843,7 +927,20 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
 {
     struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
                             mtd);
-    return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code);
+    u32 val;
+
+    val = readl(info->reg.gpmc_ecc_config);
+    if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
+        return -EINVAL;
+
+    /* read ecc result */
+    val = readl(info->reg.gpmc_ecc1_result);
+    *ecc_code++ = val;          /* P128e, ..., P1e */
+    *ecc_code++ = val >> 16;    /* P128o, ..., P1o */
+    /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+    *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+
+    return 0;
 }
 
 /**
@@ -857,8 +954,34 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
                             mtd);
     struct nand_chip *chip = mtd->priv;
     unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+    u32 val;
+
+    /* clear ecc and enable bits */
+    val = ECCCLEAR | ECC1;
+    writel(val, info->reg.gpmc_ecc_control);
+
+    /* program ecc and result sizes */
+    val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
+            ECC1RESULTSIZE);
+    writel(val, info->reg.gpmc_ecc_size_config);
 
-    gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size);
+    switch (mode) {
+    case NAND_ECC_READ:
+    case NAND_ECC_WRITE:
+        writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+        break;
+    case NAND_ECC_READSYN:
+        writel(ECCCLEAR, info->reg.gpmc_ecc_control);
+        break;
+    default:
+        dev_info(&info->pdev->dev,
+            "error: unrecognized Mode[%d]!\n", mode);
+        break;
+    }
+
+    /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
+    val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+    writel(val, info->reg.gpmc_ecc_config);
 }
 
 /**
@@ -886,10 +1009,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
     else
         timeo += (HZ * 20) / 1000;
 
-    gpmc_nand_write(info->gpmc_cs,
-            GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
+    writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
     while (time_before(jiffies, timeo)) {
-        status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
+        status = readb(info->reg.gpmc_nand_data);
         if (status & NAND_STATUS_READY)
             break;
         cond_resched();
@@ -909,22 +1031,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
     struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
                             mtd);
 
-    val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
+    val = readl(info->reg.gpmc_status);
+
     if ((val & 0x100) == 0x100) {
-        /* Clear IRQ Interrupt */
-        val |= 0x100;
-        val &= ~(0x0);
-        gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
+        return 1;
     } else {
-        unsigned int cnt = 0;
-        while (cnt++ < 0x1FF) {
-            if ((val & 0x100) == 0x100)
-                return 0;
-            val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
-        }
+        return 0;
     }
-
-    return 1;
 }
 
 #ifdef CONFIG_MTD_NAND_OMAP_BCH
@@ -1155,6 +1268,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
     int i, offset;
     dma_cap_mask_t mask;
     unsigned sig;
+    struct resource *res;
 
     pdata = pdev->dev.platform_data;
     if (pdata == NULL) {
@@ -1174,7 +1288,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
     info->pdev = pdev;
 
     info->gpmc_cs = pdata->cs;
-    info->phys_base = pdata->phys_base;
+    info->reg = pdata->reg;
 
     info->mtd.priv = &info->nand;
     info->mtd.name = dev_name(&pdev->dev);
@@ -1183,16 +1297,23 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
     info->nand.options = pdata->devsize;
     info->nand.options |= NAND_SKIP_BBTSCAN;
 
-    /* NAND write protect off */
-    gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0);
+    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+    if (res == NULL) {
+        err = -EINVAL;
+        dev_err(&pdev->dev, "error getting memory resource\n");
+        goto out_free_info;
+    }
+
+    info->phys_base = res->start;
+    info->mem_size = resource_size(res);
 
-    if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
+    if (!request_mem_region(info->phys_base, info->mem_size,
                 pdev->dev.driver->name)) {
         err = -EBUSY;
         goto out_free_info;
     }
 
-    info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
+    info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
     if (!info->nand.IO_ADDR_R) {
         err = -ENOMEM;
         goto out_release_mem_region;
@@ -1265,17 +1386,39 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
         break;
 
     case NAND_OMAP_PREFETCH_IRQ:
-        err = request_irq(pdata->gpmc_irq,
-                omap_nand_irq, IRQF_SHARED, "gpmc-nand", info);
+        info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
+        if (info->gpmc_irq_fifo <= 0) {
+            dev_err(&pdev->dev, "error getting fifo irq\n");
+            err = -ENODEV;
+            goto out_release_mem_region;
+        }
+        err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
+                    IRQF_SHARED, "gpmc-nand-fifo", info);
         if (err) {
             dev_err(&pdev->dev, "requesting irq(%d) error:%d",
-                            pdata->gpmc_irq, err);
+                        info->gpmc_irq_fifo, err);
+            info->gpmc_irq_fifo = 0;
+            goto out_release_mem_region;
+        }
+
+        info->gpmc_irq_count = platform_get_irq(pdev, 1);
+        if (info->gpmc_irq_count <= 0) {
+            dev_err(&pdev->dev, "error getting count irq\n");
+            err = -ENODEV;
+            goto out_release_mem_region;
+        }
+        err = request_irq(info->gpmc_irq_count, omap_nand_irq,
+                    IRQF_SHARED, "gpmc-nand-count", info);
+        if (err) {
+            dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+                        info->gpmc_irq_count, err);
+            info->gpmc_irq_count = 0;
             goto out_release_mem_region;
-        } else {
-            info->gpmc_irq = pdata->gpmc_irq;
-            info->nand.read_buf = omap_read_buf_irq_pref;
-            info->nand.write_buf = omap_write_buf_irq_pref;
         }
+
+        info->nand.read_buf = omap_read_buf_irq_pref;
+        info->nand.write_buf = omap_write_buf_irq_pref;
+
         break;
 
     default:
@@ -1363,7 +1506,11 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
 out_release_mem_region:
     if (info->dma)
         dma_release_channel(info->dma);
-    release_mem_region(info->phys_base, NAND_IO_SIZE);
+    if (info->gpmc_irq_count > 0)
+        free_irq(info->gpmc_irq_count, info);
+    if (info->gpmc_irq_fifo > 0)
+        free_irq(info->gpmc_irq_fifo, info);
+    release_mem_region(info->phys_base, info->mem_size);
 out_free_info:
     kfree(info);
@@ -1381,8 +1528,10 @@ static int omap_nand_remove(struct platform_device *pdev)
     if (info->dma)
         dma_release_channel(info->dma);
 
-    if (info->gpmc_irq)
-        free_irq(info->gpmc_irq, info);
+    if (info->gpmc_irq_count > 0)
+        free_irq(info->gpmc_irq_count, info);
+    if (info->gpmc_irq_fifo > 0)
+        free_irq(info->gpmc_irq_fifo, info);
 
     /* Release NAND device, its internal structures and partitions */
     nand_release(&info->mtd);
......
@@ -48,13 +48,13 @@
 #define DRIVER_NAME "omap2-onenand"
 
-#define ONENAND_IO_SIZE SZ_128K
 #define ONENAND_BUFRAM_SIZE (1024 * 5)
 
 struct omap2_onenand {
     struct platform_device *pdev;
     int gpmc_cs;
     unsigned long phys_base;
+    unsigned int mem_size;
     int gpio_irq;
     struct mtd_info mtd;
     struct onenand_chip onenand;
@@ -626,6 +626,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
     struct omap2_onenand *c;
     struct onenand_chip *this;
     int r;
+    struct resource *res;
 
     pdata = pdev->dev.platform_data;
     if (pdata == NULL) {
@@ -647,20 +648,24 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
         c->gpio_irq = 0;
     }
 
-    r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base);
-    if (r < 0) {
-        dev_err(&pdev->dev, "Cannot request GPMC CS\n");
+    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+    if (res == NULL) {
+        r = -EINVAL;
+        dev_err(&pdev->dev, "error getting memory resource\n");
         goto err_kfree;
     }
 
-    if (request_mem_region(c->phys_base, ONENAND_IO_SIZE,
+    c->phys_base = res->start;
+    c->mem_size = resource_size(res);
+
+    if (request_mem_region(c->phys_base, c->mem_size,
                pdev->dev.driver->name) == NULL) {
-        dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, "
-            "size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE);
+        dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
+                        c->phys_base, c->mem_size);
         r = -EBUSY;
-        goto err_free_cs;
+        goto err_kfree;
     }
 
-    c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE);
+    c->onenand.base = ioremap(c->phys_base, c->mem_size);
     if (c->onenand.base == NULL) {
         r = -ENOMEM;
         goto err_release_mem_region;
@@ -776,9 +781,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 err_iounmap:
     iounmap(c->onenand.base);
 err_release_mem_region:
-    release_mem_region(c->phys_base, ONENAND_IO_SIZE);
-err_free_cs:
-    gpmc_cs_free(c->gpmc_cs);
+    release_mem_region(c->phys_base, c->mem_size);
 err_kfree:
     kfree(c);
@@ -800,7 +803,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
         gpio_free(c->gpio_irq);
     }
     iounmap(c->onenand.base);
-    release_mem_region(c->phys_base, ONENAND_IO_SIZE);
+    release_mem_region(c->phys_base, c->mem_size);
     gpmc_cs_free(c->gpmc_cs);
     kfree(c);
......