Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
linux
Commits
178f16db
Commit
178f16db
authored
Jun 18, 2010
by
Greg Kroah-Hartman
Browse files
Options
Browse Files
Download
Plain Diff
Merge
git://git.infradead.org/users/dwmw2/spectra-2.6
into work
parents
013a468c
bf46b9a9
Changes
22
Hide whitespace changes
Inline
Side-by-side
Showing
22 changed files
with
12466 additions
and
0 deletions
+12466
-0
drivers/staging/Kconfig
drivers/staging/Kconfig
+2
-0
drivers/staging/Makefile
drivers/staging/Makefile
+1
-0
drivers/staging/spectra/Kconfig
drivers/staging/spectra/Kconfig
+40
-0
drivers/staging/spectra/Makefile
drivers/staging/spectra/Makefile
+11
-0
drivers/staging/spectra/README
drivers/staging/spectra/README
+29
-0
drivers/staging/spectra/ffsdefs.h
drivers/staging/spectra/ffsdefs.h
+58
-0
drivers/staging/spectra/ffsport.c
drivers/staging/spectra/ffsport.c
+827
-0
drivers/staging/spectra/ffsport.h
drivers/staging/spectra/ffsport.h
+84
-0
drivers/staging/spectra/flash.c
drivers/staging/spectra/flash.c
+4731
-0
drivers/staging/spectra/flash.h
drivers/staging/spectra/flash.h
+198
-0
drivers/staging/spectra/lld.c
drivers/staging/spectra/lld.c
+339
-0
drivers/staging/spectra/lld.h
drivers/staging/spectra/lld.h
+111
-0
drivers/staging/spectra/lld_cdma.c
drivers/staging/spectra/lld_cdma.c
+910
-0
drivers/staging/spectra/lld_cdma.h
drivers/staging/spectra/lld_cdma.h
+123
-0
drivers/staging/spectra/lld_emu.c
drivers/staging/spectra/lld_emu.c
+780
-0
drivers/staging/spectra/lld_emu.h
drivers/staging/spectra/lld_emu.h
+51
-0
drivers/staging/spectra/lld_mtd.c
drivers/staging/spectra/lld_mtd.c
+687
-0
drivers/staging/spectra/lld_mtd.h
drivers/staging/spectra/lld_mtd.h
+51
-0
drivers/staging/spectra/lld_nand.c
drivers/staging/spectra/lld_nand.c
+2601
-0
drivers/staging/spectra/lld_nand.h
drivers/staging/spectra/lld_nand.h
+131
-0
drivers/staging/spectra/nand_regs.h
drivers/staging/spectra/nand_regs.h
+619
-0
drivers/staging/spectra/spectraswconfig.h
drivers/staging/spectra/spectraswconfig.h
+82
-0
No files found.
drivers/staging/Kconfig
View file @
178f16db
...
...
@@ -97,6 +97,8 @@ source "drivers/staging/octeon/Kconfig"
source "drivers/staging/serqt_usb2/Kconfig"
source "drivers/staging/spectra/Kconfig"
source "drivers/staging/quatech_usb2/Kconfig"
source "drivers/staging/vt6655/Kconfig"
...
...
drivers/staging/Makefile
View file @
178f16db
...
...
@@ -22,6 +22,7 @@ obj-$(CONFIG_R8187SE) += rtl8187se/
obj-$(CONFIG_RTL8192SU)
+=
rtl8192su/
obj-$(CONFIG_RTL8192U)
+=
rtl8192u/
obj-$(CONFIG_RTL8192E)
+=
rtl8192e/
obj-$(CONFIG_SPECTRA)
+=
spectra/
obj-$(CONFIG_TRANZPORT)
+=
frontier/
obj-$(CONFIG_DREAM)
+=
dream/
obj-$(CONFIG_POHMELFS)
+=
pohmelfs/
...
...
drivers/staging/spectra/Kconfig
0 → 100644
View file @
178f16db
menuconfig SPECTRA
	tristate "Denali Spectra Flash Translation Layer"
	depends on BLOCK
	default n
	---help---
	  Enable the FTL pseudo-filesystem used with the NAND Flash
	  controller on Intel Moorestown Platform to pretend to be a disk

choice
	prompt "Compile for"
	depends on SPECTRA
	default SPECTRA_MRST_HW

config SPECTRA_MRST_HW
	bool "Moorestown hardware mode"
	help
	  Driver communicates with the Moorestown hardware's register interface.
	  in DMA mode.

config SPECTRA_MTD
	bool "Linux MTD mode"
	depends on MTD
	help
	  Driver communicates with the kernel MTD subsystem instead of its own
	  built-in hardware driver.

config SPECTRA_EMU
	bool "RAM emulator testing"
	help
	  Driver emulates Flash on a RAM buffer and / or disk file. Useful to test the behavior of FTL layer.

endchoice

config SPECTRA_MRST_HW_DMA
	bool
	default n
	depends on SPECTRA_MRST_HW
	help
	  Use DMA for native hardware interface.
drivers/staging/spectra/Makefile
0 → 100644
View file @
178f16db
#
# Makefile of Intel Moorestown NAND controller driver
#

obj-$(CONFIG_SPECTRA)			+= spectra.o

# Core objects, plus one low-level backend selected by Kconfig.
spectra-y				:= ffsport.o flash.o lld.o
spectra-$(CONFIG_SPECTRA_MRST_HW)	+= lld_nand.o
spectra-$(CONFIG_SPECTRA_MRST_HW_DMA)	+= lld_cdma.o
spectra-$(CONFIG_SPECTRA_EMU)		+= lld_emu.o
spectra-$(CONFIG_SPECTRA_MTD)		+= lld_mtd.o
drivers/staging/spectra/README
0 → 100644
View file @
178f16db
This is a driver for NAND controller of Intel Moorestown platform.
This driver is a standalone linux block device driver, it acts as if it's a normal hard disk.
It includes three layers:
block layer interface - file ffsport.c
Flash Translation Layer (FTL) - file flash.c (implements the NAND flash Translation Layer, including address mapping, garbage collection, wear-leveling and so on)
Low level layer - file lld_nand.c/lld_cdma.c/lld_emu.c (which implements actual controller hardware registers access)
This driver can be built as a module or built into the kernel.
Dependency:
This driver has a dependency on the IA Firmware of the Intel Moorestown platform.
It needs the IA Firmware to create the block table the first time.
And to validate this driver code without IA Firmware, you can change the
macro AUTO_FORMAT_FLASH from 0 to 1 in file spectraswconfig.h. Thus the
driver will erase the whole nand flash and create a new block table.
TODO:
- Enable Command DMA feature support
- lower the memory footprint
- Remove most of the unnecessary global variables
- Change all the upcase variable / functions name to lowercase
- Some other misc bugs
Please send patches to:
Greg Kroah-Hartman <gregkh@suse.de>
And Cc to: Gao Yunpeng <yunpeng.gao@intel.com>
drivers/staging/spectra/ffsdefs.h
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef _FFSDEFS_
#define _FFSDEFS_

#define CLEAR 0                 /* use this to clear a field instead of "fail" */
#define SET   1                 /* use this to set a field instead of "pass" */
#define FAIL 1                  /* failed flag */
#define PASS 0                  /* success flag */
#define ERR -1                  /* error flag */

/* Low-level command opcodes. */
#define ERASE_CMD               10
#define WRITE_MAIN_CMD          11
#define READ_MAIN_CMD           12
#define WRITE_SPARE_CMD         13
#define READ_SPARE_CMD          14
#define WRITE_MAIN_SPARE_CMD    15
#define READ_MAIN_SPARE_CMD     16
#define MEMCOPY_CMD             17
#define DUMMY_CMD               99

/* Completion/interrupt event codes. */
#define EVENT_PASS                              0x00
#define EVENT_CORRECTABLE_DATA_ERROR_FIXED      0x01
#define EVENT_UNCORRECTABLE_DATA_ERROR          0x02
#define EVENT_TIME_OUT                          0x03
#define EVENT_PROGRAM_FAILURE                   0x04
#define EVENT_ERASE_FAILURE                     0x05
#define EVENT_MEMCOPY_FAILURE                   0x06
#define EVENT_FAIL                              0x07

#define EVENT_NONE                              0x22
#define EVENT_DMA_CMD_COMP                      0x77
#define EVENT_ECC_TRANSACTION_DONE              0x88
#define EVENT_DMA_CMD_FAIL                      0x99

/* Per-command completion status. */
#define CMD_PASS        0
#define CMD_FAIL        1
#define CMD_ABORT       2
#define CMD_NOT_DONE    3

#endif /* _FFSDEFS_ */
drivers/staging/spectra/ffsport.c
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include "ffsport.h"
#include "flash.h"
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <linux/log2.h>
#include <linux/init.h>
/**** Helper functions used for Div, Remainder operation on u64 ****/
/*
 * GLOB_Calc_Used_Bits - number of bits used by a power-of-2 number
 * @n: 32-bit value (intended to be a power of 2)
 *
 * Returns the zero-based index of the highest set bit of @n
 * (i.e. log2(n) when n is a power of 2), or 0 when @n is 0.
 * Implemented as a binary search over halving shift widths,
 * avoiding any division.
 */
int GLOB_Calc_Used_Bits(u32 n)
{
	int total = 0;
	int step;

	/* Probe shift widths 16, 8, 4, 2, 1; accumulate the bit index. */
	for (step = 16; step >= 1; step >>= 1) {
		if (n >= (u32)1 << step) {
			/* Final 1-bit probe only counts, it does not shift. */
			if (step > 1)
				n >>= step;
			total += step;
		}
	}

	return (n == 0) ? 0 : total;
}
/*
 * GLOB_u64_Div - divide a u64 address by a power-of-2 divisor
 * @addr:    64-bit dividend
 * @divisor: power-of-2 divisor
 *
 * Returns the quotient, computed with a right shift instead of "/".
 * Relies on GLOB_Calc_Used_Bits() to turn the divisor into a shift count.
 */
u64 GLOB_u64_Div(u64 addr, u32 divisor)
{
	return addr >> GLOB_Calc_Used_Bits(divisor);
}
/*
 * GLOB_u64_Remainder - remainder of a u64 address by a device unit size
 * @addr:         64-bit address
 * @divisor_type: 1 = remainder modulo page data size,
 *                2 = remainder modulo block data size
 *
 * Uses shift-and-multiply (no "/") against the global DeviceInfo
 * geometry: rounds @addr down to a unit boundary and subtracts.
 * Any other @divisor_type yields @addr unchanged.
 */
u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
{
	u64 rounded = 0;

	if (divisor_type == 1) {
		/* Remainder -- Page */
		rounded = addr >> DeviceInfo.nBitsInPageDataSize;
		rounded *= DeviceInfo.wPageDataSize;
	} else if (divisor_type == 2) {
		/* Remainder -- Block */
		rounded = addr >> DeviceInfo.nBitsInBlockDataSize;
		rounded *= DeviceInfo.wBlockDataSize;
	}

	return addr - rounded;
}
#define NUM_DEVICES             1
#define PARTITIONS              8

#define GLOB_SBD_NAME          "nd"
#define GLOB_SBD_IRQ_NUM       (29)
#define GLOB_VERSION           "driver version 20091110"

#define GLOB_SBD_IOCTL_GC                        (0x7701)
#define GLOB_SBD_IOCTL_WL                        (0x7702)
#define GLOB_SBD_IOCTL_FORMAT                    (0x7703)
#define GLOB_SBD_IOCTL_ERASE_FLASH               (0x7704)
#define GLOB_SBD_IOCTL_FLUSH_CACHE               (0x7705)
#define GLOB_SBD_IOCTL_COPY_BLK_TABLE            (0x7706)
#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE  (0x7707)
#define GLOB_SBD_IOCTL_GET_NAND_INFO             (0x7708)
#define GLOB_SBD_IOCTL_WRITE_DATA                (0x7709)
#define GLOB_SBD_IOCTL_READ_DATA                 (0x770A)

/* Size (MiB) to keep clear at the start of the device for the OS image. */
static int reserved_mb = 0;
module_param(reserved_mb, int, 0);
MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 25 MiB)");

int nand_debug_level;
module_param(nand_debug_level, int, 0644);
MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");

MODULE_LICENSE("GPL");

/* Per-device state for one Spectra block device. */
struct spectra_nand_dev {
	struct pci_dev *dev;
	u64 size;
	u16 users;
	spinlock_t qlock;
	void __iomem *ioaddr;	/* Mapped address */
	struct request_queue *queue;
	struct task_struct *thread;
	struct gendisk *gd;
	u8 *tmp_buf;		/* one-NAND-page scratch for partial sectors */
};

static int GLOB_SBD_majornum;

static char *GLOB_version = GLOB_VERSION;

static struct spectra_nand_dev nand_device[NUM_DEVICES];

/* Serializes all FTL access (transfers, ioctls, flushes). */
static struct mutex spectra_lock;

/* Blocks reserved for the OS image; recomputed in SBD_setup_device(). */
static int res_blks_os = 1;

struct spectra_indentfy_dev_tag IdentifyDeviceData;
static
int
force_flush_cache
(
void
)
{
nand_dbg_print
(
NAND_DBG_DEBUG
,
"%s, Line %d, Function: %s
\n
"
,
__FILE__
,
__LINE__
,
__func__
);
if
(
ERR
==
GLOB_FTL_Flush_Cache
())
{
printk
(
KERN_ERR
"Fail to Flush FTL Cache!
\n
"
);
return
-
EFAULT
;
}
#if CMD_DMA
if
(
glob_ftl_execute_cmds
())
return
-
EIO
;
else
return
0
;
#endif
return
0
;
}
/* Argument block for the page read/write ioctls, copied from user space. */
struct ioctl_rw_page_info {
	u8 *data;		/* user-space buffer, one NAND page long */
	unsigned int page;	/* logical page number to read/write */
};
static
int
ioctl_read_page_data
(
unsigned
long
arg
)
{
u8
*
buf
;
struct
ioctl_rw_page_info
info
;
int
result
=
PASS
;
if
(
copy_from_user
(
&
info
,
(
void
__user
*
)
arg
,
sizeof
(
info
)))
return
-
EFAULT
;
buf
=
kmalloc
(
IdentifyDeviceData
.
PageDataSize
,
GFP_ATOMIC
);
if
(
!
buf
)
{
printk
(
KERN_ERR
"ioctl_read_page_data: "
"failed to allocate memory
\n
"
);
return
-
ENOMEM
;
}
mutex_lock
(
&
spectra_lock
);
result
=
GLOB_FTL_Page_Read
(
buf
,
(
u64
)
info
.
page
*
IdentifyDeviceData
.
PageDataSize
);
mutex_unlock
(
&
spectra_lock
);
if
(
copy_to_user
((
void
__user
*
)
info
.
data
,
buf
,
IdentifyDeviceData
.
PageDataSize
))
{
printk
(
KERN_ERR
"ioctl_read_page_data: "
"failed to copy user data
\n
"
);
kfree
(
buf
);
return
-
EFAULT
;
}
kfree
(
buf
);
return
result
;
}
static
int
ioctl_write_page_data
(
unsigned
long
arg
)
{
u8
*
buf
;
struct
ioctl_rw_page_info
info
;
int
result
=
PASS
;
if
(
copy_from_user
(
&
info
,
(
void
__user
*
)
arg
,
sizeof
(
info
)))
return
-
EFAULT
;
buf
=
kmalloc
(
IdentifyDeviceData
.
PageDataSize
,
GFP_ATOMIC
);
if
(
!
buf
)
{
printk
(
KERN_ERR
"ioctl_write_page_data: "
"failed to allocate memory
\n
"
);
return
-
ENOMEM
;
}
if
(
copy_from_user
(
buf
,
(
void
__user
*
)
info
.
data
,
IdentifyDeviceData
.
PageDataSize
))
{
printk
(
KERN_ERR
"ioctl_write_page_data: "
"failed to copy user data
\n
"
);
kfree
(
buf
);
return
-
EFAULT
;
}
mutex_lock
(
&
spectra_lock
);
result
=
GLOB_FTL_Page_Write
(
buf
,
(
u64
)
info
.
page
*
IdentifyDeviceData
.
PageDataSize
);
mutex_unlock
(
&
spectra_lock
);
kfree
(
buf
);
return
result
;
}
/* Return how many blocks should be reserved for bad block replacement:
 * a flat 10% of the data blocks. */
static int get_res_blk_num_bad_blk(void)
{
	return IdentifyDeviceData.wDataBlockNum / 10;
}
/* Return how many blocks should be reserved for the OS image, based on
 * the reserved_mb module parameter; clamped to at least 1 block and to
 * less than the whole data area. */
static int get_res_blk_num_os(void)
{
	u32 blocks, bytes_per_blk;

	bytes_per_blk = IdentifyDeviceData.PageDataSize *
			IdentifyDeviceData.PagesPerBlock;

	blocks = (reserved_mb * 1024 * 1024) / bytes_per_blk;

	if ((blocks < 1) || (blocks >= IdentifyDeviceData.wDataBlockNum))
		blocks = 1; /* Reserved 1 block for block table */

	return blocks;
}
/* Turn an ordered-queue barrier request into our internal flush command. */
static void SBD_prepare_flush(struct request_queue *q, struct request *rq)
{
	rq->cmd_type = REQ_TYPE_LINUX_BLOCK;
	/* rq->timeout = 5 * HZ; */
	rq->cmd[0] = REQ_LB_OP_FLUSH;
}
/* Transfer a full request. */
static
int
do_transfer
(
struct
spectra_nand_dev
*
tr
,
struct
request
*
req
)
{
u64
start_addr
,
addr
;
u32
logical_start_sect
,
hd_start_sect
;
u32
nsect
,
hd_sects
;
u32
rsect
,
tsect
=
0
;
char
*
buf
;
u32
ratio
=
IdentifyDeviceData
.
PageDataSize
>>
9
;
start_addr
=
(
u64
)(
blk_rq_pos
(
req
))
<<
9
;
/* Add a big enough offset to prevent the OS Image from
* being accessed or damaged by file system */
start_addr
+=
IdentifyDeviceData
.
PageDataSize
*
IdentifyDeviceData
.
PagesPerBlock
*
res_blks_os
;
if
(
req
->
cmd_type
==
REQ_TYPE_LINUX_BLOCK
&&
req
->
cmd
[
0
]
==
REQ_LB_OP_FLUSH
)
{
if
(
force_flush_cache
())
/* Fail to flush cache */
return
-
EIO
;
else
return
0
;
}
if
(
!
blk_fs_request
(
req
))
return
-
EIO
;
if
(
blk_rq_pos
(
req
)
+
blk_rq_cur_sectors
(
req
)
>
get_capacity
(
tr
->
gd
))
{
printk
(
KERN_ERR
"Spectra error: request over the NAND "
"capacity!sector %d, current_nr_sectors %d, "
"while capacity is %d
\n
"
,
(
int
)
blk_rq_pos
(
req
),
blk_rq_cur_sectors
(
req
),
(
int
)
get_capacity
(
tr
->
gd
));
return
-
EIO
;
}
logical_start_sect
=
start_addr
>>
9
;
hd_start_sect
=
logical_start_sect
/
ratio
;
rsect
=
logical_start_sect
-
hd_start_sect
*
ratio
;
addr
=
(
u64
)
hd_start_sect
*
ratio
*
512
;
buf
=
req
->
buffer
;
nsect
=
blk_rq_cur_sectors
(
req
);
if
(
rsect
)
tsect
=
(
ratio
-
rsect
)
<
nsect
?
(
ratio
-
rsect
)
:
nsect
;
switch
(
rq_data_dir
(
req
))
{
case
READ
:
/* Read the first NAND page */
if
(
rsect
)
{
if
(
GLOB_FTL_Page_Read
(
tr
->
tmp_buf
,
addr
))
{
printk
(
KERN_ERR
"Error in %s, Line %d
\n
"
,
__FILE__
,
__LINE__
);
return
-
EIO
;
}
memcpy
(
buf
,
tr
->
tmp_buf
+
(
rsect
<<
9
),
tsect
<<
9
);
addr
+=
IdentifyDeviceData
.
PageDataSize
;
buf
+=
tsect
<<
9
;
nsect
-=
tsect
;
}
/* Read the other NAND pages */
for
(
hd_sects
=
nsect
/
ratio
;
hd_sects
>
0
;
hd_sects
--
)
{
if
(
GLOB_FTL_Page_Read
(
buf
,
addr
))
{
printk
(
KERN_ERR
"Error in %s, Line %d
\n
"
,
__FILE__
,
__LINE__
);
return
-
EIO
;
}
addr
+=
IdentifyDeviceData
.
PageDataSize
;
buf
+=
IdentifyDeviceData
.
PageDataSize
;
}
/* Read the last NAND pages */
if
(
nsect
%
ratio
)
{
if
(
GLOB_FTL_Page_Read
(
tr
->
tmp_buf
,
addr
))
{
printk
(
KERN_ERR
"Error in %s, Line %d
\n
"
,
__FILE__
,
__LINE__
);
return
-
EIO
;
}
memcpy
(
buf
,
tr
->
tmp_buf
,
(
nsect
%
ratio
)
<<
9
);
}
#if CMD_DMA
if
(
glob_ftl_execute_cmds
())
return
-
EIO
;
else
return
0
;
#endif
return
0
;
case
WRITE
:
/* Write the first NAND page */
if
(
rsect
)
{
if
(
GLOB_FTL_Page_Read
(
tr
->
tmp_buf
,
addr
))
{
printk
(
KERN_ERR
"Error in %s, Line %d
\n
"
,
__FILE__
,
__LINE__
);
return
-
EIO
;
}
memcpy
(
tr
->
tmp_buf
+
(
rsect
<<
9
),
buf
,
tsect
<<
9
);
if
(
GLOB_FTL_Page_Write
(
tr
->
tmp_buf
,
addr
))
{
printk
(
KERN_ERR
"Error in %s, Line %d
\n
"
,
__FILE__
,
__LINE__
);
return
-
EIO
;
}
addr
+=
IdentifyDeviceData
.
PageDataSize
;
buf
+=
tsect
<<
9
;
nsect
-=
tsect
;
}
/* Write the other NAND pages */
for
(
hd_sects
=
nsect
/
ratio
;
hd_sects
>
0
;
hd_sects
--
)
{
if
(
GLOB_FTL_Page_Write
(
buf
,
addr
))
{
printk
(
KERN_ERR
"Error in %s, Line %d
\n
"
,
__FILE__
,
__LINE__
);
return
-
EIO
;
}
addr
+=
IdentifyDeviceData
.
PageDataSize
;
buf
+=
IdentifyDeviceData
.
PageDataSize
;
}
/* Write the last NAND pages */
if
(
nsect
%
ratio
)
{
if
(
GLOB_FTL_Page_Read
(
tr
->
tmp_buf
,
addr
))
{
printk
(
KERN_ERR
"Error in %s, Line %d
\n
"
,
__FILE__
,
__LINE__
);
return
-
EIO
;
}
memcpy
(
tr
->
tmp_buf
,
buf
,
(
nsect
%
ratio
)
<<
9
);
if
(
GLOB_FTL_Page_Write
(
tr
->
tmp_buf
,
addr
))
{
printk
(
KERN_ERR
"Error in %s, Line %d
\n
"
,
__FILE__
,
__LINE__
);
return
-
EIO
;
}
}
#if CMD_DMA
if
(
glob_ftl_execute_cmds
())
return
-
EIO
;
else
return
0
;
#endif
return
0
;
default:
printk
(
KERN_NOTICE
"Unknown request %u
\n
"
,
rq_data_dir
(
req
));
return
-
EIO
;
}
}
/* This function is copied from drivers/mtd/mtd_blkdevs.c.
 * Kernel thread that pulls requests off the queue and runs them
 * through do_transfer() under spectra_lock.  Sleeps when the queue
 * is empty; GLOB_SBD_request() wakes it. */
static int spectra_trans_thread(void *arg)
{
	struct spectra_nand_dev *tr = arg;
	struct request_queue *rq = tr->queue;
	struct request *req = NULL;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		int res;

		if (!req) {
			req = blk_fetch_request(rq);
			if (!req) {
				/* Queue empty: sleep until woken. */
				set_current_state(TASK_INTERRUPTIBLE);
				spin_unlock_irq(rq->queue_lock);
				schedule();
				spin_lock_irq(rq->queue_lock);
				continue;
			}
		}

		/* Drop the queue lock around the (sleeping) transfer. */
		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&spectra_lock);
		res = do_transfer(tr, req);
		mutex_unlock(&spectra_lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}
/* Request function that "handles clustering": just wake the worker
 * thread, which drains the queue itself. */
static void GLOB_SBD_request(struct request_queue *rq)
{
	struct spectra_nand_dev *pdev = rq->queuedata;

	wake_up_process(pdev->thread);
}
/* block_device_operations.open: nothing to do beyond trace logging. */
static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
{
	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);
	return 0;
}
/* block_device_operations.release: flush the FTL cache so data hits
 * flash when the device is closed.  Always returns 0 (the block layer
 * ignores release failures), but a failed flush is now reported
 * instead of being silently discarded. */
static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
{
	int ret;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	mutex_lock(&spectra_lock);
	ret = force_flush_cache();
	mutex_unlock(&spectra_lock);

	/* Fix: 'ret' was computed and then ignored; at least log it. */
	if (ret)
		printk(KERN_ERR "Spectra: cache flush failed on release\n");

	return 0;
}
/* Report a fake CHS geometry (4 heads x 16 sectors) derived from the
 * disk capacity, for tools that still ask for one. */
static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->heads = 4;
	geo->sectors = 16;
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);

	nand_dbg_print(NAND_DBG_DEBUG,
		       "heads: %d, sectors: %d, cylinders: %d\n",
		       geo->heads, geo->sectors, geo->cylinders);

	return 0;
}
/*
 * ioctl dispatcher for the Spectra block device.  Maintenance commands
 * (GC, wear leveling, format, flush) call straight into the FTL; table
 * and info queries copy driver state out to user space; page read/write
 * delegate to the ioctl_*_page_data() helpers.  Returns 0 or a negative
 * errno; -ENOTTY for unknown commands.
 */
int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
		   unsigned int cmd, unsigned long arg)
{
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	switch (cmd) {
	case GLOB_SBD_IOCTL_GC:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Garbage Collection "
			       "being performed\n");
		if (PASS != GLOB_FTL_Garbage_Collection())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WL:
		nand_dbg_print(NAND_DBG_DEBUG,
			       "Spectra IOCTL: Static Wear Leveling "
			       "being performed\n");
		if (PASS != GLOB_FTL_Wear_Leveling())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FORMAT:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
			       "being performed\n");
		if (PASS != GLOB_FTL_Flash_Format())
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_FLUSH_CACHE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
			       "being performed\n");
		mutex_lock(&spectra_lock);
		ret = force_flush_cache();
		mutex_unlock(&spectra_lock);
		return ret;

	case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy block table\n");
		if (copy_to_user((void __user *)arg,
				 get_blk_table_start_addr(),
				 get_blk_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Copy wear leveling table\n");
		if (copy_to_user((void __user *)arg,
				 get_wear_leveling_table_start_addr(),
				 get_wear_leveling_table_len()))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_GET_NAND_INFO:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Get NAND info\n");
		if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
				 sizeof(IdentifyDeviceData)))
			return -EFAULT;
		return 0;

	case GLOB_SBD_IOCTL_WRITE_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Write one page data\n");
		return ioctl_write_page_data(arg);

	case GLOB_SBD_IOCTL_READ_DATA:
		nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
			       "Read one page data\n");
		return ioctl_read_page_data(arg);
	}

	return -ENOTTY;
}
static struct block_device_operations GLOB_SBD_ops = {
	.owner		= THIS_MODULE,
	.open		= GLOB_SBD_open,
	.release	= GLOB_SBD_release,
	.locked_ioctl	= GLOB_SBD_ioctl,
	.getgeo		= GLOB_SBD_getgeo,
};
static
int
SBD_setup_device
(
struct
spectra_nand_dev
*
dev
,
int
which
)
{
int
res_blks
;
u32
sects
;
nand_dbg_print
(
NAND_DBG_TRACE
,
"%s, Line %d, Function: %s
\n
"
,
__FILE__
,
__LINE__
,
__func__
);
memset
(
dev
,
0
,
sizeof
(
struct
spectra_nand_dev
));
nand_dbg_print
(
NAND_DBG_WARN
,
"Reserved %d blocks "
"for OS image, %d blocks for bad block replacement.
\n
"
,
get_res_blk_num_os
(),
get_res_blk_num_bad_blk
());
res_blks
=
get_res_blk_num_bad_blk
()
+
get_res_blk_num_os
();
dev
->
size
=
(
u64
)
IdentifyDeviceData
.
PageDataSize
*
IdentifyDeviceData
.
PagesPerBlock
*
(
IdentifyDeviceData
.
wDataBlockNum
-
res_blks
);
res_blks_os
=
get_res_blk_num_os
();
spin_lock_init
(
&
dev
->
qlock
);
dev
->
tmp_buf
=
kmalloc
(
IdentifyDeviceData
.
PageDataSize
,
GFP_ATOMIC
);
if
(
!
dev
->
tmp_buf
)
{
printk
(
KERN_ERR
"Failed to kmalloc memory in %s Line %d, exit.
\n
"
,
__FILE__
,
__LINE__
);
goto
out_vfree
;
}
dev
->
queue
=
blk_init_queue
(
GLOB_SBD_request
,
&
dev
->
qlock
);
if
(
dev
->
queue
==
NULL
)
{
printk
(
KERN_ERR
"Spectra: Request queue could not be initialized."
" Aborting
\n
"
);
goto
out_vfree
;
}
dev
->
queue
->
queuedata
=
dev
;
/* As Linux block layer doens't support >4KB hardware sector, */
/* Here we force report 512 byte hardware sector size to Kernel */
blk_queue_logical_block_size
(
dev
->
queue
,
512
);
blk_queue_ordered
(
dev
->
queue
,
QUEUE_ORDERED_DRAIN_FLUSH
,
SBD_prepare_flush
);
dev
->
thread
=
kthread_run
(
spectra_trans_thread
,
dev
,
"nand_thd"
);
if
(
IS_ERR
(
dev
->
thread
))
{
blk_cleanup_queue
(
dev
->
queue
);
unregister_blkdev
(
GLOB_SBD_majornum
,
GLOB_SBD_NAME
);
return
PTR_ERR
(
dev
->
thread
);
}
dev
->
gd
=
alloc_disk
(
PARTITIONS
);
if
(
!
dev
->
gd
)
{
printk
(
KERN_ERR
"Spectra: Could not allocate disk. Aborting
\n
"
);
goto
out_vfree
;
}
dev
->
gd
->
major
=
GLOB_SBD_majornum
;
dev
->
gd
->
first_minor
=
which
*
PARTITIONS
;
dev
->
gd
->
fops
=
&
GLOB_SBD_ops
;
dev
->
gd
->
queue
=
dev
->
queue
;
dev
->
gd
->
private_data
=
dev
;
snprintf
(
dev
->
gd
->
disk_name
,
32
,
"%s%c"
,
GLOB_SBD_NAME
,
which
+
'a'
);
sects
=
dev
->
size
>>
9
;
nand_dbg_print
(
NAND_DBG_WARN
,
"Capacity sects: %d
\n
"
,
sects
);
set_capacity
(
dev
->
gd
,
sects
);
add_disk
(
dev
->
gd
);
return
0
;
out_vfree:
return
-
ENOMEM
;
}
/*
static ssize_t show_nand_block_num(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
(int)IdentifyDeviceData.wDataBlockNum);
}
static ssize_t show_nand_pages_per_block(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
(int)IdentifyDeviceData.PagesPerBlock);
}
static ssize_t show_nand_page_size(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
(int)IdentifyDeviceData.PageDataSize);
}
static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
static void create_sysfs_entry(struct device *dev)
{
if (device_create_file(dev, &dev_attr_nand_block_num))
printk(KERN_ERR "Spectra: "
"failed to create sysfs entry nand_block_num.\n");
if (device_create_file(dev, &dev_attr_nand_pages_per_block))
printk(KERN_ERR "Spectra: "
"failed to create sysfs entry nand_pages_per_block.\n");
if (device_create_file(dev, &dev_attr_nand_page_size))
printk(KERN_ERR "Spectra: "
"failed to create sysfs entry nand_page_size.\n");
}
*/
static
int
GLOB_SBD_init
(
void
)
{
int
i
;
/* Set debug output level (0~3) here. 3 is most verbose */
printk
(
KERN_ALERT
"Spectra: %s
\n
"
,
GLOB_version
);
mutex_init
(
&
spectra_lock
);
GLOB_SBD_majornum
=
register_blkdev
(
0
,
GLOB_SBD_NAME
);
if
(
GLOB_SBD_majornum
<=
0
)
{
printk
(
KERN_ERR
"Unable to get the major %d for Spectra"
,
GLOB_SBD_majornum
);
return
-
EBUSY
;
}
if
(
PASS
!=
GLOB_FTL_Flash_Init
())
{
printk
(
KERN_ERR
"Spectra: Unable to Initialize Flash Device. "
"Aborting
\n
"
);
goto
out_flash_register
;
}
/* create_sysfs_entry(&dev->dev); */
if
(
PASS
!=
GLOB_FTL_IdentifyDevice
(
&
IdentifyDeviceData
))
{
printk
(
KERN_ERR
"Spectra: Unable to Read Flash Device. "
"Aborting
\n
"
);
goto
out_flash_register
;
}
else
{
nand_dbg_print
(
NAND_DBG_WARN
,
"In GLOB_SBD_init: "
"Num blocks=%d, pagesperblock=%d, "
"pagedatasize=%d, ECCBytesPerSector=%d
\n
"
,
(
int
)
IdentifyDeviceData
.
NumBlocks
,
(
int
)
IdentifyDeviceData
.
PagesPerBlock
,
(
int
)
IdentifyDeviceData
.
PageDataSize
,
(
int
)
IdentifyDeviceData
.
wECCBytesPerSector
);
}
printk
(
KERN_ALERT
"Spectra: searching block table, please wait ...
\n
"
);
if
(
GLOB_FTL_Init
()
!=
PASS
)
{
printk
(
KERN_ERR
"Spectra: Unable to Initialize FTL Layer. "
"Aborting
\n
"
);
goto
out_ftl_flash_register
;
}
printk
(
KERN_ALERT
"Spectra: block table has been found.
\n
"
);
for
(
i
=
0
;
i
<
NUM_DEVICES
;
i
++
)
if
(
SBD_setup_device
(
&
nand_device
[
i
],
i
)
==
-
ENOMEM
)
goto
out_ftl_flash_register
;
nand_dbg_print
(
NAND_DBG_DEBUG
,
"Spectra: module loaded with major number %d
\n
"
,
GLOB_SBD_majornum
);
return
0
;
out_ftl_flash_register:
GLOB_FTL_Cache_Release
();
out_flash_register:
GLOB_FTL_Flash_Release
();
unregister_blkdev
(
GLOB_SBD_majornum
,
GLOB_SBD_NAME
);
printk
(
KERN_ERR
"Spectra: Module load failed.
\n
"
);
return
-
ENOMEM
;
}
/* Module exit: tear down each gendisk/queue/buffer, unregister the
 * major, flush the FTL cache one last time, and release the FTL and
 * flash layers. */
static void __exit GLOB_SBD_exit(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (i = 0; i < NUM_DEVICES; i++) {
		struct spectra_nand_dev *dev = &nand_device[i];

		if (dev->gd) {
			del_gendisk(dev->gd);
			put_disk(dev->gd);
		}
		if (dev->queue)
			blk_cleanup_queue(dev->queue);
		kfree(dev->tmp_buf);
	}

	unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);

	mutex_lock(&spectra_lock);
	force_flush_cache();
	mutex_unlock(&spectra_lock);

	GLOB_FTL_Cache_Release();

	GLOB_FTL_Flash_Release();

	nand_dbg_print(NAND_DBG_DEBUG,
		       "Spectra FTL module (major number %d) unloaded.\n",
		       GLOB_SBD_majornum);
}
module_init(GLOB_SBD_init);
module_exit(GLOB_SBD_exit);
drivers/staging/spectra/ffsport.h
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef _FFSPORT_
#define _FFSPORT_

#include "ffsdefs.h"

#if defined __GNUC__
#define PACKED
#define PACKED_GNU __attribute__ ((packed))
#define UNALIGNED
#endif

#include <linux/semaphore.h>
#include <linux/string.h>	/* for strcpy(), stricmp(), etc */
#include <linux/mm.h>		/* for kmalloc(), kfree() */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>

#include <linux/kernel.h>	/* printk() */
#include <linux/fs.h>		/* everything... */
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/pci.h>
#include "flash.h"

#define VERBOSE    1

/* Debug verbosity thresholds for nand_dbg_print(). */
#define NAND_DBG_WARN  1
#define NAND_DBG_DEBUG 2
#define NAND_DBG_TRACE 3

extern int nand_debug_level;

#ifdef VERBOSE
#define nand_dbg_print(level, args...)			\
	do {						\
		if (level <= nand_debug_level)		\
			printk(KERN_ALERT args);	\
	} while (0)
#else
#define nand_dbg_print(level, args...)
#endif

#ifdef SUPPORT_BIG_ENDIAN
#define INVERTUINT16(w)   ((u16)(((u16)(w)) << 8) | \
			   (u16)((u16)(w) >> 8))

#define INVERTUINT32(dw)  (((u32)(dw) << 24) | \
			   (((u32)(dw) << 8) & 0x00ff0000) | \
			   (((u32)(dw) >> 8) & 0x0000ff00) | \
			   ((u32)(dw) >> 24))
#else
#define INVERTUINT16(w)   w
#define INVERTUINT32(dw)  dw
#endif

extern int GLOB_Calc_Used_Bits(u32 n);
extern u64 GLOB_u64_Div(u64 addr, u32 divisor);
extern u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type);

#endif /* _FFSPORT_ */
drivers/staging/spectra/flash.c
0 → 100644
View file @
178f16db
This source diff could not be displayed because it is too large. You can
view the blob
instead.
drivers/staging/spectra/flash.h
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef _FLASH_INTERFACE_
#define _FLASH_INTERFACE_

#include "ffsport.h"
#include "spectraswconfig.h"

#define MAX_BYTE_VALUE        0xFF
#define MAX_WORD_VALUE        0xFFFF
#define MAX_U32_VALUE         0xFFFFFFFF

/* Block-table node values; the upper bits encode block state flags */
#define MAX_BLOCKNODE_VALUE     0xFFFFFF
#define DISCARD_BLOCK           0x800000
#define SPARE_BLOCK             0x400000
#define BAD_BLOCK               0xC00000

#define UNHIT_CACHE_ITEM        0xFFFF

#define NAND_CACHE_INIT_ADDR    0xffffffffffffffffULL

#define IN_PROGRESS_BLOCK_TABLE   0x00
#define CURRENT_BLOCK_TABLE       0x01

/* Block-table signature layout */
#define BTSIG_OFFSET   (0)
#define BTSIG_BYTES    (5)
#define BTSIG_DELTA    (3)

#define MAX_READ_COUNTER  0x2710

#define FIRST_BT_ID    (1)
#define LAST_BT_ID     (254)
#define BTBLOCK_INVAL  (u32)(0xFFFFFFFF)
/* Geometry/feature description of the attached NAND device(s),
 * filled in at probe time (single global instance: DeviceInfo). */
struct device_info_tag {
	u16 wDeviceMaker;
	u16 wDeviceID;
	u32 wDeviceType;
	u32 wSpectraStartBlock;
	u32 wSpectraEndBlock;
	u32 wTotalBlocks;
	u16 wPagesPerBlock;
	u16 wPageSize;
	u16 wPageDataSize;
	u16 wPageSpareSize;
	u16 wNumPageSpareFlag;
	u16 wECCBytesPerSector;
	u32 wBlockSize;
	u32 wBlockDataSize;
	u32 wDataBlockNum;
	u8 bPlaneNum;
	u16 wDeviceMainAreaSize;
	u16 wDeviceSpareAreaSize;
	u16 wDevicesConnected;
	u16 wDeviceWidth;
	u16 wHWRevision;
	u16 wHWFeatures;
	u16 wONFIDevFeatures;
	u16 wONFIOptCommands;
	u16 wONFITimingMode;
	u16 wONFIPgmCacheTimingMode;
	u16 MLCDevice;
	u16 wSpareSkipBytes;
	u8 nBitsInPageNumber;
	u8 nBitsInPageDataSize;
	u8 nBitsInBlockDataSize;
};

extern struct device_info_tag DeviceInfo;
/* Cache item format */
struct flash_cache_item_tag {
	u64 address;
	u16 use_cnt;
	u16 changed;
	u8 *buf;
};

struct flash_cache_tag {
	u32 cache_item_size;	/* Size in bytes of each cache item */
	u16 pages_per_item;	/* How many NAND pages in each cache item */
	u16 LRU;		/* No. of the least recently used cache item */
	struct flash_cache_item_tag array[CACHE_ITEM_NUM];
};
/*
 * Data structure for each list node of the management table
 * used for the Level 2 Cache. Each node maps one logical NAND block.
 */
struct spectra_l2_cache_list {
	struct list_head list;
	u32 logical_blk_num;	/* Logical block number */
	u32 pages_array[];	/* Page map array of this logical block.
				 * Array index is the logical block number,
				 * and for every item of this array:
				 * high 16 bit is index of the L2 cache block num,
				 * low 16 bit is the phy page num
				 * of the above L2 cache block.
				 * This array will be kmalloc during run time.
				 */
};

struct spectra_l2_cache_info {
	u32 blk_array[BLK_NUM_FOR_L2_CACHE];
	u16 cur_blk_idx;	/* idx to the phy block number of current using */
	u16 cur_page_num;	/* pages number of current using */
	struct spectra_l2_cache_list table;	/* First node of the table */
};
#define RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE	1

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
/* Record of a modified cache item, used to roll the cache state
 * back/forward when a CDMA command chain fails partway through. */
struct flash_cache_mod_item_tag {
	u64 address;
	u8 changed;
};

struct flash_cache_delta_list_tag {
	u8 item;	/* used cache item */
	struct flash_cache_mod_item_tag cache;
};
#endif
extern struct flash_cache_tag Cache;

/* Globally shared scratch buffers, allocated elsewhere in the driver */
extern u8 *buf_read_page_main_spare;
extern u8 *buf_write_page_main_spare;
extern u8 *buf_read_page_spare;
extern u8 *buf_get_bad_block;
extern u8 *cdma_desc_buf;
extern u8 *memcp_desc_buf;
/* Structure used for the IdentifyDevice function.
 * NOTE: the misspelled tag name ("indentfy") is part of the public
 * interface and must not be changed here. */
struct spectra_indentfy_dev_tag {
	u32 NumBlocks;
	u16 PagesPerBlock;
	u16 PageDataSize;
	u16 wECCBytesPerSector;
	u32 wDataBlockNum;
};
/* FTL lifecycle */
int GLOB_FTL_Flash_Init(void);
int GLOB_FTL_Flash_Release(void);
/*void GLOB_FTL_Erase_Flash(void);*/
int GLOB_FTL_Block_Erase(u64 block_addr);
int GLOB_FTL_Is_BadBlock(u32 block_num);
int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data);
int GLOB_FTL_Event_Status(int *);
u16 glob_ftl_execute_cmds(void);
/*int FTL_Read_Disturbance(ADDRESSTYPE dwBlockAddr);*/
int FTL_Read_Disturbance(u32 dwBlockAddr);

/* Flash r/w based on cache */
int GLOB_FTL_Page_Read(u8 *read_data, u64 page_addr);
int GLOB_FTL_Page_Write(u8 *write_data, u64 page_addr);
int GLOB_FTL_Wear_Leveling(void);
int GLOB_FTL_Flash_Format(void);
int GLOB_FTL_Init(void);
int GLOB_FTL_Flush_Cache(void);
int GLOB_FTL_Garbage_Collection(void);
int GLOB_FTL_BT_Garbage_Collection(void);
void GLOB_FTL_Cache_Release(void);

/* Block-table / wear-leveling table accessors */
u8 *get_blk_table_start_addr(void);
u8 *get_wear_leveling_table_start_addr(void);
unsigned long get_blk_table_len(void);
unsigned long get_wear_leveling_table_len(void);

#if DEBUG_BNDRY
void debug_boundary_lineno_error(int chnl, int limit, int no,
				int lineno, char *filename);
#define debug_boundary_error(chnl, limit, no) debug_boundary_lineno_error(chnl,\
				limit, no, __LINE__, __FILE__)
#else
#define debug_boundary_error(chnl, limit, no) ;
#endif

#endif /*_FLASH_INTERFACE_*/
drivers/staging/spectra/lld.c
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include "spectraswconfig.h"
#include "ffsport.h"
#include "ffsdefs.h"
#include "lld.h"
#include "lld_nand.h"
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
#if FLASH_EMU	/* vector all the LLD calls to the LLD_EMU code */
#include "lld_emu.h"
#include "lld_cdma.h"

/* Thin pass-through wrappers mapping the generic GLOB_LLD_* API
 * onto the flash emulator (emu_*) implementation. */

/* common functions: */
u16 GLOB_LLD_Flash_Reset(void)
{
	return emu_Flash_Reset();
}

u16 GLOB_LLD_Read_Device_ID(void)
{
	return emu_Read_Device_ID();
}

int GLOB_LLD_Flash_Release(void)
{
	return emu_Flash_Release();
}

u16 GLOB_LLD_Flash_Init(void)
{
	return emu_Flash_Init();
}

u16 GLOB_LLD_Erase_Block(u32 block_add)
{
	return emu_Erase_Block(block_add);
}

u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block,
				u16 Page, u16 PageCount)
{
	return emu_Write_Page_Main(write_data, block, Page, PageCount);
}

u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block,
				u16 Page, u16 PageCount)
{
	return emu_Read_Page_Main(read_data, block, Page, PageCount);
}

/* The emulator has no separate polling path; reuse the normal read. */
u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data, u32 block,
				u16 page, u16 page_count)
{
	return emu_Read_Page_Main(read_data, block, page, page_count);
}

u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
				u16 Page, u16 PageCount)
{
	return emu_Write_Page_Main_Spare(write_data, block, Page, PageCount);
}

u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
				u16 Page, u16 PageCount)
{
	return emu_Read_Page_Main_Spare(read_data, block, Page, PageCount);
}

u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block,
				u16 Page, u16 PageCount)
{
	return emu_Write_Page_Spare(write_data, block, Page, PageCount);
}

u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block,
				u16 Page, u16 PageCount)
{
	return emu_Read_Page_Spare(read_data, block, Page, PageCount);
}

u16 GLOB_LLD_Get_Bad_Block(u32 block)
{
	return emu_Get_Bad_Block(block);
}

#endif /* FLASH_EMU */
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
#if FLASH_MTD	/* vector all the LLD calls to the LLD_MTD code */
#include "lld_mtd.h"
#include "lld_cdma.h"

/* Thin pass-through wrappers mapping the generic GLOB_LLD_* API
 * onto the MTD-backed (mtd_*) implementation. */

/* common functions: */
u16 GLOB_LLD_Flash_Reset(void)
{
	return mtd_Flash_Reset();
}

u16 GLOB_LLD_Read_Device_ID(void)
{
	return mtd_Read_Device_ID();
}

int GLOB_LLD_Flash_Release(void)
{
	return mtd_Flash_Release();
}

u16 GLOB_LLD_Flash_Init(void)
{
	return mtd_Flash_Init();
}

u16 GLOB_LLD_Erase_Block(u32 block_add)
{
	return mtd_Erase_Block(block_add);
}

u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block,
				u16 Page, u16 PageCount)
{
	return mtd_Write_Page_Main(write_data, block, Page, PageCount);
}

u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block,
				u16 Page, u16 PageCount)
{
	return mtd_Read_Page_Main(read_data, block, Page, PageCount);
}

/* MTD backend has no separate polling path; reuse the normal read. */
u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data, u32 block,
				u16 page, u16 page_count)
{
	return mtd_Read_Page_Main(read_data, block, page, page_count);
}

u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
				u16 Page, u16 PageCount)
{
	return mtd_Write_Page_Main_Spare(write_data, block, Page, PageCount);
}

u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
				u16 Page, u16 PageCount)
{
	return mtd_Read_Page_Main_Spare(read_data, block, Page, PageCount);
}

u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block,
				u16 Page, u16 PageCount)
{
	return mtd_Write_Page_Spare(write_data, block, Page, PageCount);
}

u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block,
				u16 Page, u16 PageCount)
{
	return mtd_Read_Page_Spare(read_data, block, Page, PageCount);
}

u16 GLOB_LLD_Get_Bad_Block(u32 block)
{
	return mtd_Get_Bad_Block(block);
}

#endif /* FLASH_MTD */
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
#if FLASH_NAND	/* vector all the LLD calls to the NAND controller code */
#include "lld_nand.h"
#include "lld_cdma.h"
#include "flash.h"

/* Thin pass-through wrappers mapping the generic GLOB_LLD_* API
 * onto the real NAND controller (NAND_*) implementation. */

/* common functions for LLD_NAND */
void GLOB_LLD_ECC_Control(int enable)
{
	NAND_ECC_Ctrl(enable);
}

u16 GLOB_LLD_Flash_Reset(void)
{
	return NAND_Flash_Reset();
}

u16 GLOB_LLD_Read_Device_ID(void)
{
	return NAND_Read_Device_ID();
}

u16 GLOB_LLD_UnlockArrayAll(void)
{
	return NAND_UnlockArrayAll();
}

u16 GLOB_LLD_Flash_Init(void)
{
	return NAND_Flash_Init();
}

int GLOB_LLD_Flash_Release(void)
{
	return nand_release_spectra();
}

u16 GLOB_LLD_Erase_Block(u32 block_add)
{
	return NAND_Erase_Block(block_add);
}

u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block,
				u16 Page, u16 PageCount)
{
	return NAND_Write_Page_Main(write_data, block, Page, PageCount);
}

u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block,
				u16 page, u16 page_count)
{
	if (page_count == 1) /* Using polling to improve read speed */
		return NAND_Read_Page_Main_Polling(read_data, block, page, 1);
	else
		return NAND_Read_Page_Main(read_data, block, page, page_count);
}

u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data, u32 block,
				u16 page, u16 page_count)
{
	return NAND_Read_Page_Main_Polling(read_data, block,
						page, page_count);
}

u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
				u16 Page, u16 PageCount)
{
	return NAND_Write_Page_Main_Spare(write_data, block,
						Page, PageCount);
}

u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block,
				u16 Page, u16 PageCount)
{
	return NAND_Write_Page_Spare(write_data, block, Page, PageCount);
}

u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
				u16 page, u16 page_count)
{
	return NAND_Read_Page_Main_Spare(read_data, block,
						page, page_count);
}

u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block,
				u16 Page, u16 PageCount)
{
	return NAND_Read_Page_Spare(read_data, block, Page, PageCount);
}

u16 GLOB_LLD_Get_Bad_Block(u32 block)
{
	return NAND_Get_Bad_Block(block);
}

#if CMD_DMA
u16 GLOB_LLD_Event_Status(void)
{
	return CDMA_Event_Status();
}

u16 glob_lld_execute_cmds(void)
{
	return CDMA_Execute_CMDs();
}

u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src, u32 ByteCount, u16 flag)
{
	/* Replace the hardware memcopy with software memcpy function:
	 * flush the pending command chain first, then copy directly. */
	if (CDMA_Execute_CMDs())
		return FAIL;
	memcpy(dest, src, ByteCount);
	return PASS;

	/* return CDMA_MemCopy_CMD(dest, src, ByteCount, flag); */
}

u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags)
{
	return CDMA_Data_CMD(ERASE_CMD, 0, block, 0, 0, flags);
}

u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data, u32 block, u16 page, u16 count)
{
	return CDMA_Data_CMD(WRITE_MAIN_CMD, data, block, page, count, 0);
}

u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data, u32 block, u16 page,
				u16 count, u16 flags)
{
	return CDMA_Data_CMD(READ_MAIN_CMD, data, block, page, count, flags);
}

u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data, u32 block, u16 page,
				u16 count, u16 flags)
{
	return CDMA_Data_CMD(WRITE_MAIN_SPARE_CMD,
				data, block, page, count, flags);
}

u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data, u32 block,
				u16 page, u16 count)
{
	return CDMA_Data_CMD(READ_MAIN_SPARE_CMD, data, block, page, count,
				LLD_CMD_FLAG_MODE_CDMA);
}
#endif /* CMD_DMA */
#endif /* FLASH_NAND */

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/

/* end of LLD.c */
drivers/staging/spectra/lld.h
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef _LLD_
#define _LLD_

#include "ffsport.h"
#include "spectraswconfig.h"
#include "flash.h"

/* Return codes of GLOB_LLD_Get_Bad_Block */
#define GOOD_BLOCK 0
#define DEFECTIVE_BLOCK 1
#define READ_ERROR 2

#define CLK_X  5
#define CLK_MULTI 4

/* Typedefs */

/* prototypes: API for LLD */
/* Currently, Write_Page_Main
 * MemCopy
 * Read_Page_Main_Spare
 * do not have flag because they were not implemented prior to this
 * They are not being added to keep changes to a minimum for now.
 * Currently, they are not required (only reqd for Wr_P_M_S.)
 * Later on, these NEED to be changed.
 */

extern void GLOB_LLD_ECC_Control(int enable);

extern u16 GLOB_LLD_Flash_Reset(void);

extern u16 GLOB_LLD_Read_Device_ID(void);

extern u16 GLOB_LLD_UnlockArrayAll(void);

extern u16 GLOB_LLD_Flash_Init(void);

extern int GLOB_LLD_Flash_Release(void);

extern u16 GLOB_LLD_Erase_Block(u32 block_add);

extern u16 GLOB_LLD_Write_Page_Main(u8 *write_data,
	u32 block, u16 Page, u16 PageCount);

extern u16 GLOB_LLD_Read_Page_Main(u8 *read_data,
	u32 block, u16 page, u16 page_count);

extern u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
	u32 block, u16 page, u16 page_count);

extern u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data,
	u32 block, u16 Page, u16 PageCount);

extern u16 GLOB_LLD_Write_Page_Spare(u8 *write_data,
	u32 block, u16 Page, u16 PageCount);

extern u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data,
	u32 block, u16 page, u16 page_count);

extern u16 GLOB_LLD_Read_Page_Spare(u8 *read_data,
	u32 block, u16 Page, u16 PageCount);

extern u16 GLOB_LLD_Get_Bad_Block(u32 block);

/* CDMA (command-DMA) variants */
extern u16 GLOB_LLD_Event_Status(void);

extern u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src,
	u32 ByteCount, u16 flag);

extern u16 glob_lld_execute_cmds(void);

extern u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags);

extern u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data,
	u32 block, u16 page, u16 count);

extern u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data,
	u32 block, u16 page, u16 count, u16 flags);

extern u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data,
	u32 block, u16 page, u16 count, u16 flags);

extern u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
	u32 block, u16 page, u16 count);

#define LLD_CMD_FLAG_ORDER_BEFORE_REST	(0x1)
#define LLD_CMD_FLAG_MODE_CDMA		(0x8)

#endif /*_LLD_ */
drivers/staging/spectra/lld_cdma.c
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include "spectraswconfig.h"
#include "lld.h"
#include "lld_nand.h"
#include "lld_cdma.h"
#include "lld_emu.h"
#include "flash.h"
#include "nand_regs.h"
#define MAX_PENDING_CMDS    4
#define MODE_02             (0x2 << 26)

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     CDMA_Data_Cmd
* Inputs:       cmd code (aligned for hw)
*               data: pointer to source or destination
*               block: block address
*               page: page address
*               num: num pages to transfer
* Outputs:      PASS
* Description:  This function takes the parameters and puts them
*                   into the "pending commands" array.
*               It does not parse or validate the parameters.
*               The array index is same as the tag.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags)
{
	u8 bank;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (0 == cmd)
		nand_dbg_print(NAND_DBG_DEBUG,
			"%s, Line %d, Illegal cmd (0)\n", __FILE__, __LINE__);

	/* If a command of another bank comes, then first execute */
	/* pending commands of the current bank, then set the new */
	/* bank as current bank */
	bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
	if (bank != info.flash_bank) {
		nand_dbg_print(NAND_DBG_WARN,
			"Will access new bank. old bank: %d, new bank: %d\n",
			info.flash_bank, bank);
		if (CDMA_Execute_CMDs()) {
			printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
			return FAIL;
		}
		info.flash_bank = bank;
	}

	/* Queue the request; 0xB0B marks "status not yet written by hw" */
	info.pcmds[info.pcmds_num].CMD = cmd;
	info.pcmds[info.pcmds_num].DataAddr = data;
	info.pcmds[info.pcmds_num].Block = block;
	info.pcmds[info.pcmds_num].Page = page;
	info.pcmds[info.pcmds_num].PageCount = num;
	info.pcmds[info.pcmds_num].DataDestAddr = 0;
	info.pcmds[info.pcmds_num].DataSrcAddr = 0;
	info.pcmds[info.pcmds_num].MemCopyByteCnt = 0;
	info.pcmds[info.pcmds_num].Flags = flags;
	info.pcmds[info.pcmds_num].Status = 0xB0B;

	switch (cmd) {
	case WRITE_MAIN_SPARE_CMD:
		Conv_Main_Spare_Data_Log2Phy_Format(data, num);
		break;
	case WRITE_SPARE_CMD:
		Conv_Spare_Data_Log2Phy_Format(data);
		break;
	default:
		break;
	}

	info.pcmds_num++;

	/* Flush the chain once the pending-command array is full */
	if (info.pcmds_num >= MAX_PENDING_CMDS) {
		if (CDMA_Execute_CMDs()) {
			printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
			return FAIL;
		}
	}

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     CDMA_MemCopy_CMD
* Inputs:       dest: pointer to destination
*               src:  pointer to source
*               count: num bytes to transfer
* Outputs:      PASS
* Description:  This function takes the parameters and puts them
*                   into the "pending commands" array.
*               It does not parse or validate the parameters.
*               The array index is same as the tag.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags)
{
	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Queue the memcopy request; flash-addressing fields are unused */
	info.pcmds[info.pcmds_num].CMD = MEMCOPY_CMD;
	info.pcmds[info.pcmds_num].DataAddr = 0;
	info.pcmds[info.pcmds_num].Block = 0;
	info.pcmds[info.pcmds_num].Page = 0;
	info.pcmds[info.pcmds_num].PageCount = 0;
	info.pcmds[info.pcmds_num].DataDestAddr = dest;
	info.pcmds[info.pcmds_num].DataSrcAddr = src;
	info.pcmds[info.pcmds_num].MemCopyByteCnt = byte_cnt;
	info.pcmds[info.pcmds_num].Flags = flags;
	info.pcmds[info.pcmds_num].Status = 0xB0B;

	info.pcmds_num++;

	/* Flush the chain once the pending-command array is full */
	if (info.pcmds_num >= MAX_PENDING_CMDS) {
		if (CDMA_Execute_CMDs()) {
			printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
			return FAIL;
		}
	}

	return PASS;
}
#if 0
/* Debug-only dump helpers. Compiled out via "#if 0"; kept for bring-up. */

/* Prints the PendingCMDs array */
void print_pending_cmds(void)
{
	u16 i;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	for (i = 0; i < info.pcmds_num; i++) {
		nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
		switch (info.pcmds[i].CMD) {
		case ERASE_CMD:
			nand_dbg_print(NAND_DBG_DEBUG,
				"Erase Command (0x%x)\n",
				info.pcmds[i].CMD);
			break;
		case WRITE_MAIN_CMD:
			nand_dbg_print(NAND_DBG_DEBUG,
				"Write Main Command (0x%x)\n",
				info.pcmds[i].CMD);
			break;
		case WRITE_MAIN_SPARE_CMD:
			nand_dbg_print(NAND_DBG_DEBUG,
				"Write Main Spare Command (0x%x)\n",
				info.pcmds[i].CMD);
			break;
		case READ_MAIN_SPARE_CMD:
			nand_dbg_print(NAND_DBG_DEBUG,
				"Read Main Spare Command (0x%x)\n",
				info.pcmds[i].CMD);
			break;
		case READ_MAIN_CMD:
			nand_dbg_print(NAND_DBG_DEBUG,
				"Read Main Command (0x%x)\n",
				info.pcmds[i].CMD);
			break;
		case MEMCOPY_CMD:
			nand_dbg_print(NAND_DBG_DEBUG,
				"Memcopy Command (0x%x)\n",
				info.pcmds[i].CMD);
			break;
		case DUMMY_CMD:
			nand_dbg_print(NAND_DBG_DEBUG,
				"Dummy Command (0x%x)\n",
				info.pcmds[i].CMD);
			break;
		default:
			nand_dbg_print(NAND_DBG_DEBUG,
				"Illegal Command (0x%x)\n",
				info.pcmds[i].CMD);
			break;
		}

		nand_dbg_print(NAND_DBG_DEBUG, "DataAddr: 0x%x\n",
			(u32)info.pcmds[i].DataAddr);
		nand_dbg_print(NAND_DBG_DEBUG, "Block: %d\n",
			info.pcmds[i].Block);
		nand_dbg_print(NAND_DBG_DEBUG, "Page: %d\n",
			info.pcmds[i].Page);
		nand_dbg_print(NAND_DBG_DEBUG, "PageCount: %d\n",
			info.pcmds[i].PageCount);
		nand_dbg_print(NAND_DBG_DEBUG, "DataDestAddr: 0x%x\n",
			(u32)info.pcmds[i].DataDestAddr);
		nand_dbg_print(NAND_DBG_DEBUG, "DataSrcAddr: 0x%x\n",
			(u32)info.pcmds[i].DataSrcAddr);
		nand_dbg_print(NAND_DBG_DEBUG, "MemCopyByteCnt: %d\n",
			info.pcmds[i].MemCopyByteCnt);
		nand_dbg_print(NAND_DBG_DEBUG, "Flags: 0x%x\n",
			info.pcmds[i].Flags);
		nand_dbg_print(NAND_DBG_DEBUG, "Status: 0x%x\n",
			info.pcmds[i].Status);
	}
}

/* Print the CDMA descriptors */
void print_cdma_descriptors(void)
{
	struct cdma_descriptor *pc;
	int i;

	pc = (struct cdma_descriptor *)info.cdma_desc_buf;

	nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump cdma descriptors:\n");

	for (i = 0; i < info.cdma_num; i++) {
		nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
		nand_dbg_print(NAND_DBG_DEBUG,
			"NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
			pc[i].NxtPointerHi, pc[i].NxtPointerLo);
		nand_dbg_print(NAND_DBG_DEBUG,
			"FlashPointerHi: 0x%x, FlashPointerLo: 0x%x\n",
			pc[i].FlashPointerHi, pc[i].FlashPointerLo);
		nand_dbg_print(NAND_DBG_DEBUG, "CommandType: 0x%x\n",
			pc[i].CommandType);
		nand_dbg_print(NAND_DBG_DEBUG,
			"MemAddrHi: 0x%x, MemAddrLo: 0x%x\n",
			pc[i].MemAddrHi, pc[i].MemAddrLo);
		nand_dbg_print(NAND_DBG_DEBUG, "CommandFlags: 0x%x\n",
			pc[i].CommandFlags);
		nand_dbg_print(NAND_DBG_DEBUG, "Channel: %d, Status: 0x%x\n",
			pc[i].Channel, pc[i].Status);
		nand_dbg_print(NAND_DBG_DEBUG,
			"MemCopyPointerHi: 0x%x, MemCopyPointerLo: 0x%x\n",
			pc[i].MemCopyPointerHi, pc[i].MemCopyPointerLo);
		nand_dbg_print(NAND_DBG_DEBUG,
			"Reserved12: 0x%x, Reserved13: 0x%x, "
			"Reserved14: 0x%x, pcmd: %d\n",
			pc[i].Reserved12, pc[i].Reserved13,
			pc[i].Reserved14, pc[i].pcmd);
	}
}

/* Print the Memory copy descriptors */
static void print_memcp_descriptors(void)
{
	struct memcpy_descriptor *pm;
	int i;

	pm = (struct memcpy_descriptor *)info.memcp_desc_buf;

	nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump mem_cpy descriptors:\n");

	for (i = 0; i < info.cdma_num; i++) {
		nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
		nand_dbg_print(NAND_DBG_DEBUG,
			"NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
			pm[i].NxtPointerHi, pm[i].NxtPointerLo);
		nand_dbg_print(NAND_DBG_DEBUG,
			"SrcAddrHi: 0x%x, SrcAddrLo: 0x%x\n",
			pm[i].SrcAddrHi, pm[i].SrcAddrLo);
		nand_dbg_print(NAND_DBG_DEBUG,
			"DestAddrHi: 0x%x, DestAddrLo: 0x%x\n",
			pm[i].DestAddrHi, pm[i].DestAddrLo);
		nand_dbg_print(NAND_DBG_DEBUG, "XferSize: %d\n",
			pm[i].XferSize);
		nand_dbg_print(NAND_DBG_DEBUG, "MemCopyFlags: 0x%x\n",
			pm[i].MemCopyFlags);
		nand_dbg_print(NAND_DBG_DEBUG, "MemCopyStatus: %d\n",
			pm[i].MemCopyStatus);
		nand_dbg_print(NAND_DBG_DEBUG, "reserved9: 0x%x\n",
			pm[i].reserved9);
		nand_dbg_print(NAND_DBG_DEBUG, "reserved10: 0x%x\n",
			pm[i].reserved10);
		nand_dbg_print(NAND_DBG_DEBUG, "reserved11: 0x%x\n",
			pm[i].reserved11);
		nand_dbg_print(NAND_DBG_DEBUG, "reserved12: 0x%x\n",
			pm[i].reserved12);
		nand_dbg_print(NAND_DBG_DEBUG, "reserved13: 0x%x\n",
			pm[i].reserved13);
		nand_dbg_print(NAND_DBG_DEBUG, "reserved14: 0x%x\n",
			pm[i].reserved14);
		nand_dbg_print(NAND_DBG_DEBUG, "reserved15: 0x%x\n",
			pm[i].reserved15);
	}
}
#endif
/* Reset cdma_descriptor chain to 0 */
static void reset_cdma_desc(int i)
{
	struct cdma_descriptor *ptr;

	BUG_ON(i >= MAX_DESCS);

	ptr = (struct cdma_descriptor *)info.cdma_desc_buf;

	/* Clear every hardware-visible field of descriptor i.
	 * Note: the pcmd field is deliberately left untouched here;
	 * it is (re)assigned when the descriptor is filled in. */
	ptr[i].NxtPointerHi = 0;
	ptr[i].NxtPointerLo = 0;
	ptr[i].FlashPointerHi = 0;
	ptr[i].FlashPointerLo = 0;
	ptr[i].CommandType = 0;
	ptr[i].MemAddrHi = 0;
	ptr[i].MemAddrLo = 0;
	ptr[i].CommandFlags = 0;
	ptr[i].Channel = 0;
	ptr[i].Status = 0;
	ptr[i].MemCopyPointerHi = 0;
	ptr[i].MemCopyPointerLo = 0;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function: CDMA_UpdateEventStatus
* Inputs: none
* Outputs: none
* Description: This function update the event status of all the channels
* when an error condition is reported.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
void
CDMA_UpdateEventStatus
(
void
)
{
int
i
,
j
,
active_chan
;
struct
cdma_descriptor
*
ptr
;
nand_dbg_print
(
NAND_DBG_DEBUG
,
"%s, Line %d, Function: %s
\n
"
,
__FILE__
,
__LINE__
,
__func__
);
ptr
=
(
struct
cdma_descriptor
*
)
info
.
cdma_desc_buf
;
for
(
j
=
0
;
j
<
info
.
cdma_num
;
j
++
)
{
/* Check for the descriptor with failure */
if
((
ptr
[
j
].
Status
&
CMD_DMA_DESC_FAIL
))
break
;
}
/* All the previous cmd's status for this channel must be good */
for
(
i
=
0
;
i
<
j
;
i
++
)
{
if
(
ptr
[
i
].
pcmd
!=
0xff
)
info
.
pcmds
[
ptr
[
i
].
pcmd
].
Status
=
CMD_PASS
;
}
/* Abort the channel with type 0 reset command. It resets the */
/* selected channel after the descriptor completes the flash */
/* operation and status has been updated for the descriptor. */
/* Memory Copy and Sync associated with this descriptor will */
/* not be executed */
active_chan
=
ioread32
(
FlashReg
+
CHNL_ACTIVE
);
if
((
active_chan
&
(
1
<<
info
.
flash_bank
))
==
(
1
<<
info
.
flash_bank
))
{
iowrite32
(
MODE_02
|
(
0
<<
4
),
FlashMem
);
/* Type 0 reset */
iowrite32
((
0xF
<<
4
)
|
info
.
flash_bank
,
FlashMem
+
0x10
);
}
else
{
/* Should not reached here */
printk
(
KERN_ERR
"Error! Used bank is not set in"
" reg CHNL_ACTIVE
\n
"
);
}
}
/* Program the CDMA engine with the bus address of the descriptor chain
 * for the given channel and kick off the transfer, via a sequence of
 * indirect MODE_10 register writes (address split into hi/lo 16 bits). */
static void cdma_trans(u16 chan)
{
	u32 addr;

	addr = info.cdma_desc;

	iowrite32(MODE_10 | (chan << 24), FlashMem);
	iowrite32((1 << 7) | chan, FlashMem + 0x10);

	iowrite32(MODE_10 | (chan << 24) |
		((0x0FFFF & (addr >> 16)) << 8), FlashMem);
	iowrite32((1 << 7) | (1 << 4) | 0, FlashMem + 0x10);

	iowrite32(MODE_10 | (chan << 24) |
		((0x0FFFF & addr) << 8), FlashMem);
	iowrite32((1 << 7) | (1 << 5) | 0, FlashMem + 0x10);

	iowrite32(MODE_10 | (chan << 24), FlashMem);
	iowrite32((1 << 7) | (1 << 5) | (1 << 4) | 0, FlashMem + 0x10);
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function: CDMA_Execute_CMDs (for use with CMD_DMA)
* Inputs: tag_count: the number of pending cmds to do
* Outputs: PASS/FAIL
* Description: Build the SDMA chain(s) by making one CMD-DMA descriptor
* for each pending command, start the CDMA engine, and return.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16
CDMA_Execute_CMDs
(
void
)
{
int
i
,
ret
;
u64
flash_add
;
u32
ptr
;
dma_addr_t
map_addr
,
next_ptr
;
u16
status
=
PASS
;
u16
tmp_c
;
struct
cdma_descriptor
*
pc
;
struct
memcpy_descriptor
*
pm
;
nand_dbg_print
(
NAND_DBG_TRACE
,
"%s, Line %d, Function: %s
\n
"
,
__FILE__
,
__LINE__
,
__func__
);
/* No pending cmds to execute, just exit */
if
(
0
==
info
.
pcmds_num
)
{
nand_dbg_print
(
NAND_DBG_TRACE
,
"No pending cmds to execute. Just exit.
\n
"
);
return
PASS
;
}
for
(
i
=
0
;
i
<
MAX_DESCS
;
i
++
)
reset_cdma_desc
(
i
);
pc
=
(
struct
cdma_descriptor
*
)
info
.
cdma_desc_buf
;
pm
=
(
struct
memcpy_descriptor
*
)
info
.
memcp_desc_buf
;
info
.
cdma_desc
=
virt_to_bus
(
info
.
cdma_desc_buf
);
info
.
memcp_desc
=
virt_to_bus
(
info
.
memcp_desc_buf
);
next_ptr
=
info
.
cdma_desc
;
info
.
cdma_num
=
0
;
for
(
i
=
0
;
i
<
info
.
pcmds_num
;
i
++
)
{
if
(
info
.
pcmds
[
i
].
Block
>=
DeviceInfo
.
wTotalBlocks
)
{
info
.
pcmds
[
i
].
Status
=
CMD_NOT_DONE
;
continue
;
}
next_ptr
+=
sizeof
(
struct
cdma_descriptor
);
pc
[
info
.
cdma_num
].
NxtPointerHi
=
next_ptr
>>
16
;
pc
[
info
.
cdma_num
].
NxtPointerLo
=
next_ptr
&
0xffff
;
/* Use the Block offset within a bank */
tmp_c
=
info
.
pcmds
[
i
].
Block
/
(
DeviceInfo
.
wTotalBlocks
/
totalUsedBanks
);
flash_add
=
(
u64
)(
info
.
pcmds
[
i
].
Block
-
tmp_c
*
(
DeviceInfo
.
wTotalBlocks
/
totalUsedBanks
))
*
DeviceInfo
.
wBlockDataSize
+
(
u64
)(
info
.
pcmds
[
i
].
Page
)
*
DeviceInfo
.
wPageDataSize
;
ptr
=
MODE_10
|
(
info
.
flash_bank
<<
24
)
|
(
u32
)
GLOB_u64_Div
(
flash_add
,
DeviceInfo
.
wPageDataSize
);
pc
[
info
.
cdma_num
].
FlashPointerHi
=
ptr
>>
16
;
pc
[
info
.
cdma_num
].
FlashPointerLo
=
ptr
&
0xffff
;
if
((
info
.
pcmds
[
i
].
CMD
==
WRITE_MAIN_SPARE_CMD
)
||
(
info
.
pcmds
[
i
].
CMD
==
READ_MAIN_SPARE_CMD
))
{
/* Descriptor to set Main+Spare Access Mode */
pc
[
info
.
cdma_num
].
CommandType
=
0x43
;
pc
[
info
.
cdma_num
].
CommandFlags
=
(
0
<<
10
)
|
(
1
<<
9
)
|
(
0
<<
8
)
|
0x40
;
pc
[
info
.
cdma_num
].
MemAddrHi
=
0
;
pc
[
info
.
cdma_num
].
MemAddrLo
=
0
;
pc
[
info
.
cdma_num
].
Channel
=
0
;
pc
[
info
.
cdma_num
].
Status
=
0
;
pc
[
info
.
cdma_num
].
pcmd
=
i
;
info
.
cdma_num
++
;
BUG_ON
(
info
.
cdma_num
>=
MAX_DESCS
);
reset_cdma_desc
(
info
.
cdma_num
);
next_ptr
+=
sizeof
(
struct
cdma_descriptor
);
pc
[
info
.
cdma_num
].
NxtPointerHi
=
next_ptr
>>
16
;
pc
[
info
.
cdma_num
].
NxtPointerLo
=
next_ptr
&
0xffff
;
pc
[
info
.
cdma_num
].
FlashPointerHi
=
ptr
>>
16
;
pc
[
info
.
cdma_num
].
FlashPointerLo
=
ptr
&
0xffff
;
}
switch
(
info
.
pcmds
[
i
].
CMD
)
{
case
ERASE_CMD
:
pc
[
info
.
cdma_num
].
CommandType
=
1
;
pc
[
info
.
cdma_num
].
CommandFlags
=
(
0
<<
10
)
|
(
1
<<
9
)
|
(
0
<<
8
)
|
0x40
;
pc
[
info
.
cdma_num
].
MemAddrHi
=
0
;
pc
[
info
.
cdma_num
].
MemAddrLo
=
0
;
break
;
case
WRITE_MAIN_CMD
:
pc
[
info
.
cdma_num
].
CommandType
=
0x2100
|
info
.
pcmds
[
i
].
PageCount
;
pc
[
info
.
cdma_num
].
CommandFlags
=
(
0
<<
10
)
|
(
1
<<
9
)
|
(
0
<<
8
)
|
0x40
;
map_addr
=
virt_to_bus
(
info
.
pcmds
[
i
].
DataAddr
);
pc
[
info
.
cdma_num
].
MemAddrHi
=
map_addr
>>
16
;
pc
[
info
.
cdma_num
].
MemAddrLo
=
map_addr
&
0xffff
;
break
;
case
READ_MAIN_CMD
:
pc
[
info
.
cdma_num
].
CommandType
=
0x2000
|
info
.
pcmds
[
i
].
PageCount
;
pc
[
info
.
cdma_num
].
CommandFlags
=
(
0
<<
10
)
|
(
1
<<
9
)
|
(
0
<<
8
)
|
0x40
;
map_addr
=
virt_to_bus
(
info
.
pcmds
[
i
].
DataAddr
);
pc
[
info
.
cdma_num
].
MemAddrHi
=
map_addr
>>
16
;
pc
[
info
.
cdma_num
].
MemAddrLo
=
map_addr
&
0xffff
;
break
;
case
WRITE_MAIN_SPARE_CMD
:
pc
[
info
.
cdma_num
].
CommandType
=
0x2100
|
info
.
pcmds
[
i
].
PageCount
;
pc
[
info
.
cdma_num
].
CommandFlags
=
(
0
<<
10
)
|
(
1
<<
9
)
|
(
0
<<
8
)
|
0x40
;
map_addr
=
virt_to_bus
(
info
.
pcmds
[
i
].
DataAddr
);
pc
[
info
.
cdma_num
].
MemAddrHi
=
map_addr
>>
16
;
pc
[
info
.
cdma_num
].
MemAddrLo
=
map_addr
&
0xffff
;
break
;
case
READ_MAIN_SPARE_CMD
:
pc
[
info
.
cdma_num
].
CommandType
=
0x2000
|
info
.
pcmds
[
i
].
PageCount
;
pc
[
info
.
cdma_num
].
CommandFlags
=
(
0
<<
10
)
|
(
1
<<
9
)
|
(
0
<<
8
)
|
0x40
;
map_addr
=
virt_to_bus
(
info
.
pcmds
[
i
].
DataAddr
);
pc
[
info
.
cdma_num
].
MemAddrHi
=
map_addr
>>
16
;
pc
[
info
.
cdma_num
].
MemAddrLo
=
map_addr
&
0xffff
;
break
;
case
MEMCOPY_CMD
:
pc
[
info
.
cdma_num
].
CommandType
=
0xFFFF
;
/* NOP cmd */
/* Set bit 11 to let the CDMA engine continue to */
/* execute only after it has finished processing */
/* the memcopy descriptor. */
/* Also set bit 10 and bit 9 to 1 */
pc
[
info
.
cdma_num
].
CommandFlags
=
0x0E40
;
map_addr
=
info
.
memcp_desc
+
info
.
cdma_num
*
sizeof
(
struct
memcpy_descriptor
);
pc
[
info
.
cdma_num
].
MemCopyPointerHi
=
map_addr
>>
16
;
pc
[
info
.
cdma_num
].
MemCopyPointerLo
=
map_addr
&
0xffff
;
pm
[
info
.
cdma_num
].
NxtPointerHi
=
0
;
pm
[
info
.
cdma_num
].
NxtPointerLo
=
0
;
map_addr
=
virt_to_bus
(
info
.
pcmds
[
i
].
DataSrcAddr
);
pm
[
info
.
cdma_num
].
SrcAddrHi
=
map_addr
>>
16
;
pm
[
info
.
cdma_num
].
SrcAddrLo
=
map_addr
&
0xffff
;
map_addr
=
virt_to_bus
(
info
.
pcmds
[
i
].
DataDestAddr
);
pm
[
info
.
cdma_num
].
DestAddrHi
=
map_addr
>>
16
;
pm
[
info
.
cdma_num
].
DestAddrLo
=
map_addr
&
0xffff
;
pm
[
info
.
cdma_num
].
XferSize
=
info
.
pcmds
[
i
].
MemCopyByteCnt
;
pm
[
info
.
cdma_num
].
MemCopyFlags
=
(
0
<<
15
|
0
<<
14
|
27
<<
8
|
0x40
);
pm
[
info
.
cdma_num
].
MemCopyStatus
=
0
;
break
;
case
DUMMY_CMD
:
default:
pc
[
info
.
cdma_num
].
CommandType
=
0XFFFF
;
pc
[
info
.
cdma_num
].
CommandFlags
=
(
0
<<
10
)
|
(
1
<<
9
)
|
(
0
<<
8
)
|
0x40
;
pc
[
info
.
cdma_num
].
MemAddrHi
=
0
;
pc
[
info
.
cdma_num
].
MemAddrLo
=
0
;
break
;
}
pc
[
info
.
cdma_num
].
Channel
=
0
;
pc
[
info
.
cdma_num
].
Status
=
0
;
pc
[
info
.
cdma_num
].
pcmd
=
i
;
info
.
cdma_num
++
;
BUG_ON
(
info
.
cdma_num
>=
MAX_DESCS
);
if
((
info
.
pcmds
[
i
].
CMD
==
WRITE_MAIN_SPARE_CMD
)
||
(
info
.
pcmds
[
i
].
CMD
==
READ_MAIN_SPARE_CMD
))
{
/* Descriptor to set back Main Area Access Mode */
reset_cdma_desc
(
info
.
cdma_num
);
next_ptr
+=
sizeof
(
struct
cdma_descriptor
);
pc
[
info
.
cdma_num
].
NxtPointerHi
=
next_ptr
>>
16
;
pc
[
info
.
cdma_num
].
NxtPointerLo
=
next_ptr
&
0xffff
;
pc
[
info
.
cdma_num
].
FlashPointerHi
=
ptr
>>
16
;
pc
[
info
.
cdma_num
].
FlashPointerLo
=
ptr
&
0xffff
;
pc
[
info
.
cdma_num
].
CommandType
=
0x42
;
pc
[
info
.
cdma_num
].
CommandFlags
=
(
0
<<
10
)
|
(
1
<<
9
)
|
(
0
<<
8
)
|
0x40
;
pc
[
info
.
cdma_num
].
MemAddrHi
=
0
;
pc
[
info
.
cdma_num
].
MemAddrLo
=
0
;
pc
[
info
.
cdma_num
].
Channel
=
0
;
pc
[
info
.
cdma_num
].
Status
=
0
;
pc
[
info
.
cdma_num
].
pcmd
=
i
;
info
.
cdma_num
++
;
BUG_ON
(
info
.
cdma_num
>=
MAX_DESCS
);
}
}
/* Add a dummy descriptor at end of the CDMA chain */
reset_cdma_desc
(
info
.
cdma_num
);
ptr
=
MODE_10
|
(
info
.
flash_bank
<<
24
);
pc
[
info
.
cdma_num
].
FlashPointerHi
=
ptr
>>
16
;
pc
[
info
.
cdma_num
].
FlashPointerLo
=
ptr
&
0xffff
;
pc
[
info
.
cdma_num
].
CommandType
=
0xFFFF
;
/* NOP command */
/* Set Command Flags for the last CDMA descriptor: */
/* set Continue bit (bit 9) to 0 and Interrupt bit (bit 8) to 1 */
pc
[
info
.
cdma_num
].
CommandFlags
=
(
0
<<
10
)
|
(
0
<<
9
)
|
(
1
<<
8
)
|
0x40
;
pc
[
info
.
cdma_num
].
pcmd
=
0xff
;
/* Set it to an illegal value */
info
.
cdma_num
++
;
BUG_ON
(
info
.
cdma_num
>=
MAX_DESCS
);
iowrite32
(
1
,
FlashReg
+
GLOBAL_INT_ENABLE
);
/* Enable Interrupt */
iowrite32
(
1
,
FlashReg
+
DMA_ENABLE
);
/* Wait for DMA to be enabled before issuing the next command */
while
(
!
(
ioread32
(
FlashReg
+
DMA_ENABLE
)
&
DMA_ENABLE__FLAG
))
;
cdma_trans
(
info
.
flash_bank
);
ret
=
wait_for_completion_timeout
(
&
info
.
complete
,
50
*
HZ
);
if
(
!
ret
)
printk
(
KERN_ERR
"Wait for completion timeout "
"in %s, Line %d
\n
"
,
__FILE__
,
__LINE__
);
status
=
info
.
ret
;
info
.
pcmds_num
=
0
;
/* Clear the pending cmds number to 0 */
return
status
;
}
/*
 * Decide whether a pending interrupt belongs to this NAND controller.
 *
 * Reads the per-bank interrupt status registers and the CDMA interrupt
 * register, masks each down to the interrupt sources this driver actually
 * handles, and returns 1 if any of them is asserted, 0 otherwise.
 *
 * Returns: 1 if at least one supported NAND/CDMA interrupt bit is set,
 *          0 if this is not a NAND controller interrupt.
 */
int is_cdma_interrupt(void)
{
	u32 ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma;
	u32 int_en_mask;
	u32 cdma_int_en_mask;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Set the global Enable masks for only those interrupts
	 * that are supported */
	cdma_int_en_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
		DMA_INTR__DESC_COMP_CHANNEL1 |
		DMA_INTR__DESC_COMP_CHANNEL2 |
		DMA_INTR__DESC_COMP_CHANNEL3 |
		DMA_INTR__MEMCOPY_DESC_COMP);

	/* Per-bank sources we care about: ECC, program fail, erase fail */
	int_en_mask = (INTR_STATUS0__ECC_ERR |
		INTR_STATUS0__PROGRAM_FAIL |
		INTR_STATUS0__ERASE_FAIL);

	ints_b0 = ioread32(FlashReg + INTR_STATUS0) & int_en_mask;
	ints_b1 = ioread32(FlashReg + INTR_STATUS1) & int_en_mask;
	ints_b2 = ioread32(FlashReg + INTR_STATUS2) & int_en_mask;
	ints_b3 = ioread32(FlashReg + INTR_STATUS3) & int_en_mask;
	ints_cdma = ioread32(FlashReg + DMA_INTR) & cdma_int_en_mask;

	nand_dbg_print(NAND_DBG_WARN, "ints_bank0 to ints_bank3: "
		"0x%x, 0x%x, 0x%x, 0x%x, ints_cdma: 0x%x\n",
		ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma);

	if (ints_b0 || ints_b1 || ints_b2 || ints_b3 || ints_cdma) {
		return 1;
	} else {
		/* NOTE(review): in this branch all four masked values are
		 * necessarily zero, so these writes store 0 to the status
		 * registers.  If the registers are write-1-to-clear this is
		 * a no-op; the writes look like dead code carried over from
		 * an earlier version — confirm against the controller
		 * datasheet before removing. */
		iowrite32(ints_b0, FlashReg + INTR_STATUS0);
		iowrite32(ints_b1, FlashReg + INTR_STATUS1);
		iowrite32(ints_b2, FlashReg + INTR_STATUS2);
		iowrite32(ints_b3, FlashReg + INTR_STATUS3);
		nand_dbg_print(NAND_DBG_DEBUG,
			"Not a NAND controller interrupt! Ignore it.\n");
		return 0;
	}
}
/*
 * Walk the completed CDMA descriptor chain and mark every pending command
 * it refers to as passed, then post-process the data of read commands
 * (physical->logical spare-area layout conversion).
 *
 * Fix vs. original: descriptors whose pcmd is the 0xff sentinel (e.g. the
 * terminating dummy descriptor, whose pcmd is explicitly set to 0xff when
 * the chain is built) were only guarded on the Status assignment; the
 * switch below still evaluated info.pcmds[0xff].CMD, reading past the end
 * of the pending-command array.  Skip sentinel descriptors entirely.
 */
static void update_event_status(void)
{
	int i;
	struct cdma_descriptor *ptr;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	ptr = (struct cdma_descriptor *)info.cdma_desc_buf;

	for (i = 0; i < info.cdma_num; i++) {
		/* 0xff means "no pending command attached" (dummy/NOP) */
		if (ptr[i].pcmd == 0xff)
			continue;

		info.pcmds[ptr[i].pcmd].Status = CMD_PASS;

		/* 0x41/0x42/0x43 are mode-switch descriptors (main vs.
		 * main+spare access); they carry no data to convert. */
		if ((ptr[i].CommandType == 0x41) ||
			(ptr[i].CommandType == 0x42) ||
			(ptr[i].CommandType == 0x43))
			continue;

		switch (info.pcmds[ptr[i].pcmd].CMD) {
		case READ_MAIN_SPARE_CMD:
			Conv_Main_Spare_Data_Phy2Log_Format(
				info.pcmds[ptr[i].pcmd].DataAddr,
				info.pcmds[ptr[i].pcmd].PageCount);
			break;
		case READ_SPARE_CMD:
			Conv_Spare_Data_Phy2Log_Format(
				info.pcmds[ptr[i].pcmd].DataAddr);
			break;
		}
	}
}
/*
 * Drain the controller's ECC error report for one channel and apply the
 * corrections to the data buffer of the descriptor being processed.
 *
 * @ch:   flash bank / channel number (0..3) whose error registers to read
 * @buf:  data buffer of the command the errors belong to
 * @page: first page number of that command (used to offset into @buf)
 *
 * The controller exposes errors one at a time through ECC_ERROR_ADDRESS /
 * ERR_CORRECTION_INFO; reading ERR_CORRECTION_INFO advances to the next
 * record, and the LAST_ERR_INFO bit marks the end of the list, so the
 * register reads below must stay in this exact order.
 *
 * Returns EVENT_CORRECTABLE_DATA_ERROR_FIXED if every reported error was
 * correctable (each is fixed in place by XOR-ing the byte mask), or
 * EVENT_UNCORRECTABLE_DATA_ERROR if any record is uncorrectable.
 */
static u16 do_ecc_for_desc(u32 ch, u8 *buf, u16 page)
{
	u16 event = EVENT_NONE;
	u16 err_byte;
	u16 err_page = 0;
	u8 err_sector;
	u8 err_device;
	u16 ecc_correction_info;
	u16 err_address;
	u32 eccSectorSize;
	u8 *err_pos;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* An ECC sector spans one sector on each connected device */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	do {
		/* Each channel has its own error-page register */
		if (0 == ch)
			err_page = ioread32(FlashReg + ERR_PAGE_ADDR0);
		else if (1 == ch)
			err_page = ioread32(FlashReg + ERR_PAGE_ADDR1);
		else if (2 == ch)
			err_page = ioread32(FlashReg + ERR_PAGE_ADDR2);
		else if (3 == ch)
			err_page = ioread32(FlashReg + ERR_PAGE_ADDR3);

		err_address = ioread32(FlashReg + ECC_ERROR_ADDRESS);
		err_byte = err_address & ECC_ERROR_ADDRESS__OFFSET;
		err_sector = ((err_address &
			ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);

		ecc_correction_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
		err_device = ((ecc_correction_info &
			ERR_CORRECTION_INFO__DEVICE_NR) >> 8);

		if (ecc_correction_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
			/* Uncorrectable: keep draining records but the
			 * overall result is now a hard error */
			event = EVENT_UNCORRECTABLE_DATA_ERROR;
		} else {
			event = EVENT_CORRECTABLE_DATA_ERROR_FIXED;
			if (err_byte < ECC_SECTOR_SIZE) {
				/* Locate the bad byte: page offset within
				 * the command, then sector, then byte
				 * interleaved across connected devices */
				err_pos = buf +
					(err_page - page) *
					DeviceInfo.wPageDataSize +
					err_sector * eccSectorSize +
					err_byte *
					DeviceInfo.wDevicesConnected +
					err_device;
				/* Flip the corrupted bits in place */
				*err_pos ^= ecc_correction_info &
					ERR_CORRECTION_INFO__BYTEMASK;
			}
		}
	} while (!(ecc_correction_info &
		ERR_CORRECTION_INFO__LAST_ERR_INFO));

	return event;
}
/*
 * Handle an ECC interrupt for channel @c.
 *
 * Finds the first CDMA descriptor that has not completed (that is the one
 * the error occurred on), reports its index through @p_desc_num, and runs
 * ECC correction on the associated command's data buffer.
 *
 * @c:          channel the interrupt fired on; expected to equal
 *              info.flash_bank (mismatch is logged but processing continues)
 * @p_desc_num: out-parameter receiving the failing descriptor's index
 *
 * Returns the event code from do_ecc_for_desc(), or
 * EVENT_UNCORRECTABLE_DATA_ERROR if no in-flight descriptor can be found.
 * On an uncorrectable error the pending command is marked CMD_FAIL and
 * the upper layer is notified via CDMA_UpdateEventStatus().
 */
static u16 process_ecc_int(u32 c, u16 *p_desc_num)
{
	struct cdma_descriptor *ptr;
	u16 j;
	int event = EVENT_PASS;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (c != info.flash_bank)
		printk(KERN_ERR "Error!info.flash_bank is %d, while c is %d\n",
			info.flash_bank, c);

	ptr = (struct cdma_descriptor *)info.cdma_desc_buf;

	/* The first descriptor without the COMPLETE status bit is the
	 * one currently executing, i.e. the one that hit the ECC error */
	for (j = 0; j < info.cdma_num; j++)
		if ((ptr[j].Status & CMD_DMA_DESC_COMP) != CMD_DMA_DESC_COMP)
			break;

	*p_desc_num = j; /* Pass the descripter number found here */

	if (j >= info.cdma_num) {
		printk(KERN_ERR "Can not find the correct descriptor number "
			"when ecc interrupt triggered!"
			"info.cdma_num: %d, j: %d\n", info.cdma_num, j);
		return EVENT_UNCORRECTABLE_DATA_ERROR;
	}

	event = do_ecc_for_desc(c, info.pcmds[ptr[j].pcmd].DataAddr,
		info.pcmds[ptr[j].pcmd].Page);

	if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
		printk(KERN_ERR "Uncorrectable ECC error!"
			"info.cdma_num: %d, j: %d, "
			"pending cmd CMD: 0x%x, "
			"Block: 0x%x, Page: 0x%x, PageCount: 0x%x\n",
			info.cdma_num, j,
			info.pcmds[ptr[j].pcmd].CMD,
			info.pcmds[ptr[j].pcmd].Block,
			info.pcmds[ptr[j].pcmd].Page,
			info.pcmds[ptr[j].pcmd].PageCount);

		/* 0xff is the "no pending command" sentinel */
		if (ptr[j].pcmd != 0xff)
			info.pcmds[ptr[j].pcmd].Status = CMD_FAIL;
		CDMA_UpdateEventStatus();
	}

	return event;
}
/*
 * Mark the pending command behind the failing descriptor as CMD_FAIL and
 * push the updated status to the upper layer.
 *
 * @desc_num: index of the CDMA descriptor that reported the program or
 *            erase failure.
 */
static void process_prog_erase_fail_int(u16 desc_num)
{
	struct cdma_descriptor *desc;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	desc = (struct cdma_descriptor *)info.cdma_desc_buf;

	/* pcmd == 0xFF marks a descriptor with no pending command attached */
	if (desc[desc_num].pcmd != 0xFF)
		info.pcmds[desc[desc_num].pcmd].Status = CMD_FAIL;

	CDMA_UpdateEventStatus();
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     CDMA_Event_Status (for use with CMD_DMA)
* Inputs:       none
* Outputs:      Event_Status code
* Description:  This function is called after an interrupt has happened
*               It reads the HW status register and ...tbd
*               It returns the appropriate event status
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 CDMA_Event_Status(void)
{
	/* Per-bank interrupt status register offsets, indexed by bank */
	u32 ints_addr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};
	/* Per-bank descriptor-complete bits in the DMA interrupt register */
	u32 dma_intr_bit[4] = {DMA_INTR__DESC_COMP_CHANNEL0,
		DMA_INTR__DESC_COMP_CHANNEL1,
		DMA_INTR__DESC_COMP_CHANNEL2,
		DMA_INTR__DESC_COMP_CHANNEL3};
	u32 cdma_int_status, int_status;
	u32 ecc_enable = 0;
	u16 event = EVENT_PASS;
	u16 cur_desc = 0;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	ecc_enable = ioread32(FlashReg + ECC_ENABLE);

	/* Poll until a terminal condition is seen: uncorrectable ECC,
	 * program/erase failure, or descriptor-chain completion.
	 * Correctable ECC errors are fixed and polling continues. */
	while (1) {
		int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);

		if (ecc_enable && (int_status & INTR_STATUS0__ECC_ERR)) {
			/* Correct the data, then acknowledge the interrupt */
			event = process_ecc_int(info.flash_bank, &cur_desc);
			iowrite32(INTR_STATUS0__ECC_ERR,
				FlashReg + ints_addr[info.flash_bank]);
			if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
				nand_dbg_print(NAND_DBG_WARN,
					"ints_bank0 to ints_bank3: "
					"0x%x, 0x%x, 0x%x, 0x%x, "
					"ints_cdma: 0x%x\n",
					ioread32(FlashReg + INTR_STATUS0),
					ioread32(FlashReg + INTR_STATUS1),
					ioread32(FlashReg + INTR_STATUS2),
					ioread32(FlashReg + INTR_STATUS3),
					ioread32(FlashReg + DMA_INTR));
				break;
			}
		} else if (int_status & INTR_STATUS0__PROGRAM_FAIL) {
			printk(KERN_ERR "NAND program fail interrupt!\n");
			/* cur_desc was set by a preceding ECC pass, else 0 */
			process_prog_erase_fail_int(cur_desc);
			event = EVENT_PROGRAM_FAILURE;
			break;
		} else if (int_status & INTR_STATUS0__ERASE_FAIL) {
			printk(KERN_ERR "NAND erase fail interrupt!\n");
			process_prog_erase_fail_int(cur_desc);
			event = EVENT_ERASE_FAILURE;
			break;
		} else {
			cdma_int_status = ioread32(FlashReg + DMA_INTR);
			if (cdma_int_status & dma_intr_bit[info.flash_bank]) {
				/* Whole chain done: ack, propagate status */
				iowrite32(dma_intr_bit[info.flash_bank],
					FlashReg + DMA_INTR);
				update_event_status();
				event = EVENT_PASS;
				break;
			}
		}
	}

	/* Clear any remaining bank and CDMA interrupt status bits */
	int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
	iowrite32(int_status, FlashReg + ints_addr[info.flash_bank]);
	cdma_int_status = ioread32(FlashReg + DMA_INTR);
	iowrite32(cdma_int_status, FlashReg + DMA_INTR);

	/* Disable DMA and busy-wait until the controller confirms */
	iowrite32(0, FlashReg + DMA_ENABLE);
	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return event;
}
drivers/staging/spectra/lld_cdma.h
0 → 100644
View file @
178f16db
/*
 * NAND Flash Controller Device Driver
 * Copyright (c) 2009, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

/* header for LLD_CDMA.c module */

#ifndef _LLD_CDMA_
#define _LLD_CDMA_

#include "flash.h"

#define  DEBUG_SYNC    1

/*///////////   CDMA specific MACRO definition */
#define MAX_DESCS         (255)
#define MAX_CHANS  (4)
#define MAX_SYNC_POINTS         (16)
#define MAX_DESC_PER_CHAN     (MAX_DESCS * 3 + MAX_SYNC_POINTS + 2)

/* Bit fields of the 16-bit per-channel flags word */
#define CHANNEL_SYNC_MASK       (0x000F)
#define CHANNEL_DMA_MASK        (0x00F0)
#define CHANNEL_ID_MASK         (0x0300)
#define CHANNEL_CONT_MASK       (0x4000)
#define CHANNEL_INTR_MASK       (0x8000)

#define CHANNEL_SYNC_OFFSET     (0)
#define CHANNEL_DMA_OFFSET      (4)
#define CHANNEL_ID_OFFSET       (8)
#define CHANNEL_CONT_OFFSET     (14)
#define CHANNEL_INTR_OFFSET     (15)

u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags);
u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags);
u16 CDMA_Execute_CMDs(void);
void print_pending_cmds(void);
void print_cdma_descriptors(void);

extern u8 g_SBDCmdIndex;
extern struct mrst_nand_info info;


/*///////////   prototypes: APIs for LLD_CDMA */
int is_cdma_interrupt(void);
u16 CDMA_Event_Status(void);

/* CMD-DMA Descriptor Struct.  These are defined by the CMD_DMA HW */
/* (field order and widths match the hardware layout; do not reorder) */
struct cdma_descriptor {
	u32 NxtPointerHi;	/* bus address of next descriptor, hi 16 */
	u32 NxtPointerLo;	/* bus address of next descriptor, lo 16 */
	u32 FlashPointerHi;	/* flash target address, hi 16 */
	u32 FlashPointerLo;	/* flash target address, lo 16 */
	u32 CommandType;
	u32 MemAddrHi;		/* data buffer bus address, hi 16 */
	u32 MemAddrLo;		/* data buffer bus address, lo 16 */
	u32 CommandFlags;
	u32 Channel;
	u32 Status;		/* completion/failure bits written by HW */
	u32 MemCopyPointerHi;	/* memcpy descriptor address, hi 16 */
	u32 MemCopyPointerLo;	/* memcpy descriptor address, lo 16 */
	u32 Reserved12;
	u32 Reserved13;
	u32 Reserved14;
	u32 pcmd;		/* pending cmd num related to this descriptor */
};

/* This struct holds one MemCopy descriptor as defined by the HW */
struct memcpy_descriptor {
	u32 NxtPointerHi;
	u32 NxtPointerLo;
	u32 SrcAddrHi;
	u32 SrcAddrLo;
	u32 DestAddrHi;
	u32 DestAddrLo;
	u32 XferSize;		/* bytes to copy */
	u32 MemCopyFlags;
	u32 MemCopyStatus;
	u32 reserved9;
	u32 reserved10;
	u32 reserved11;
	u32 reserved12;
	u32 reserved13;
	u32 reserved14;
	u32 reserved15;
};

/* Pending CMD table entries (includes MemCopy parameters */
struct pending_cmd {
	u8 CMD;			/* command opcode (ERASE_CMD, ...) */
	u8 *DataAddr;
	u32 Block;
	u16 Page;
	u16 PageCount;
	u8 *DataDestAddr;	/* memcpy destination */
	u8 *DataSrcAddr;	/* memcpy source */
	u32 MemCopyByteCnt;
	u16 Flags;
	u16 Status;		/* CMD_PASS / CMD_FAIL after completion */
};

#if DEBUG_SYNC
extern u32 debug_sync_cnt;
#endif

/* Definitions for CMD DMA descriptor chain fields */
#define     CMD_DMA_DESC_COMP   0x8000
#define     CMD_DMA_DESC_FAIL   0x4000

#endif /*_LLD_CDMA_*/
drivers/staging/spectra/lld_emu.c
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include "flash.h"
#include "ffsdefs.h"
#include "lld_emu.h"
#include "lld.h"
#if CMD_DMA
#include "lld_cdma.h"
#endif
/* Geometry of the emulated NAND device: 2048 blocks of 64 pages,
 * each page 512 data bytes + 16 spare bytes */
#define GLOB_LLD_PAGES           64
#define GLOB_LLD_PAGE_SIZE       (512+16)
#define GLOB_LLD_PAGE_DATA_SIZE  512
#define GLOB_LLD_BLOCKS          2048

#if (CMD_DMA && FLASH_EMU)
#include "lld_cdma.h"
u32 totalUsedBanks;
u32 valid_banks[MAX_CHANS];
#endif

#if FLASH_EMU			/* This is for entire module */

/* One pointer per emulated page; all point into a single vmalloc'ed
 * region set up by emu_Flash_Init() */
static u8 *flash_memory[GLOB_LLD_BLOCKS * GLOB_LLD_PAGES];
/* Read nand emu file and then fill it's content to flash_memory */
int
emu_load_file_to_mem
(
void
)
{
mm_segment_t
fs
;
struct
file
*
nef_filp
=
NULL
;
struct
inode
*
inode
=
NULL
;
loff_t
nef_size
=
0
;
loff_t
tmp_file_offset
,
file_offset
;
ssize_t
nread
;
int
i
,
rc
=
-
EINVAL
;
nand_dbg_print
(
NAND_DBG_TRACE
,
"%s, Line %d, Function: %s
\n
"
,
__FILE__
,
__LINE__
,
__func__
);
fs
=
get_fs
();
set_fs
(
get_ds
());
nef_filp
=
filp_open
(
"/root/nand_emu_file"
,
O_RDWR
|
O_LARGEFILE
,
0
);
if
(
IS_ERR
(
nef_filp
))
{
printk
(
KERN_ERR
"filp_open error: "
"Unable to open nand emu file!
\n
"
);
return
PTR_ERR
(
nef_filp
);
}
if
(
nef_filp
->
f_path
.
dentry
)
{
inode
=
nef_filp
->
f_path
.
dentry
->
d_inode
;
}
else
{
printk
(
KERN_ERR
"Can not get valid inode!
\n
"
);
goto
out
;
}
nef_size
=
i_size_read
(
inode
->
i_mapping
->
host
);
if
(
nef_size
<=
0
)
{
printk
(
KERN_ERR
"Invalid nand emu file size: "
"0x%llx
\n
"
,
nef_size
);
goto
out
;
}
else
{
nand_dbg_print
(
NAND_DBG_DEBUG
,
"nand emu file size: %lld
\n
"
,
nef_size
);
}
file_offset
=
0
;
for
(
i
=
0
;
i
<
GLOB_LLD_BLOCKS
*
GLOB_LLD_PAGES
;
i
++
)
{
tmp_file_offset
=
file_offset
;
nread
=
vfs_read
(
nef_filp
,
(
char
__user
*
)
flash_memory
[
i
],
GLOB_LLD_PAGE_SIZE
,
&
tmp_file_offset
);
if
(
nread
<
GLOB_LLD_PAGE_SIZE
)
{
printk
(
KERN_ERR
"%s, Line %d - "
"nand emu file partial read: "
"%d bytes
\n
"
,
__FILE__
,
__LINE__
,
(
int
)
nread
);
goto
out
;
}
file_offset
+=
GLOB_LLD_PAGE_SIZE
;
}
rc
=
0
;
out:
filp_close
(
nef_filp
,
current
->
files
);
set_fs
(
fs
);
return
rc
;
}
/* Write contents of flash_memory to nand emu file */
int
emu_write_mem_to_file
(
void
)
{
mm_segment_t
fs
;
struct
file
*
nef_filp
=
NULL
;
struct
inode
*
inode
=
NULL
;
loff_t
nef_size
=
0
;
loff_t
tmp_file_offset
,
file_offset
;
ssize_t
nwritten
;
int
i
,
rc
=
-
EINVAL
;
nand_dbg_print
(
NAND_DBG_TRACE
,
"%s, Line %d, Function: %s
\n
"
,
__FILE__
,
__LINE__
,
__func__
);
fs
=
get_fs
();
set_fs
(
get_ds
());
nef_filp
=
filp_open
(
"/root/nand_emu_file"
,
O_RDWR
|
O_LARGEFILE
,
0
);
if
(
IS_ERR
(
nef_filp
))
{
printk
(
KERN_ERR
"filp_open error: "
"Unable to open nand emu file!
\n
"
);
return
PTR_ERR
(
nef_filp
);
}
if
(
nef_filp
->
f_path
.
dentry
)
{
inode
=
nef_filp
->
f_path
.
dentry
->
d_inode
;
}
else
{
printk
(
KERN_ERR
"Invalid "
"nef_filp->f_path.dentry value!
\n
"
);
goto
out
;
}
nef_size
=
i_size_read
(
inode
->
i_mapping
->
host
);
if
(
nef_size
<=
0
)
{
printk
(
KERN_ERR
"Invalid "
"nand emu file size: 0x%llx
\n
"
,
nef_size
);
goto
out
;
}
else
{
nand_dbg_print
(
NAND_DBG_DEBUG
,
"nand emu file size: "
"%lld
\n
"
,
nef_size
);
}
file_offset
=
0
;
for
(
i
=
0
;
i
<
GLOB_LLD_BLOCKS
*
GLOB_LLD_PAGES
;
i
++
)
{
tmp_file_offset
=
file_offset
;
nwritten
=
vfs_write
(
nef_filp
,
(
char
__user
*
)
flash_memory
[
i
],
GLOB_LLD_PAGE_SIZE
,
&
tmp_file_offset
);
if
(
nwritten
<
GLOB_LLD_PAGE_SIZE
)
{
printk
(
KERN_ERR
"%s, Line %d - "
"nand emu file partial write: "
"%d bytes
\n
"
,
__FILE__
,
__LINE__
,
(
int
)
nwritten
);
goto
out
;
}
file_offset
+=
GLOB_LLD_PAGE_SIZE
;
}
rc
=
0
;
out:
filp_close
(
nef_filp
,
current
->
files
);
set_fs
(
fs
);
return
rc
;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Flash_Init
* Inputs:       none
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Creates & initializes the flash RAM array.
*
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 emu_Flash_Init(void)
{
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* One contiguous allocation backs the whole emulated device;
	 * flash_memory[0] owns it, the other entries point into it */
	flash_memory[0] = (u8 *)vmalloc(GLOB_LLD_PAGE_SIZE *
					GLOB_LLD_BLOCKS *
					GLOB_LLD_PAGES *
					sizeof(u8));
	if (!flash_memory[0]) {
		printk(KERN_ERR "Fail to allocate memory "
		       "for nand emulator!\n");
		return ERR;
	}

	/* 0xFF == erased state of NAND flash */
	memset((char *)(flash_memory[0]), 0xFF,
	       GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS * GLOB_LLD_PAGES *
	       sizeof(u8));

	/* Point each per-page entry at its slice of the big buffer */
	for (i = 1; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++)
		flash_memory[i] = flash_memory[i - 1] + GLOB_LLD_PAGE_SIZE;

	/* Load nand emu file to mem.  NOTE(review): the return value is
	 * ignored — presumably a missing file on first run is expected
	 * and the freshly-erased buffer is used instead; confirm. */
	emu_load_file_to_mem();

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Flash_Release
* Inputs:       none
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Releases the flash
*
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int emu_Flash_Release(void)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Persist the emulated flash contents, then free the backing
	 * buffer (flash_memory[0] owns the whole allocation). */
	emu_write_mem_to_file();
	vfree(flash_memory[0]);

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Read_Device_ID
* Inputs:       none
* Outputs:      PASS=1 FAIL=0
* Description:  Reads the info from the controller registers.
*               Sets up DeviceInfo structure with device parameters
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 emu_Read_Device_ID(void)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Fixed parameters of the emulated device — no real registers
	 * are read; everything derives from the GLOB_LLD_* geometry */
	DeviceInfo.wDeviceMaker = 0;
	DeviceInfo.wDeviceType = 8;
	DeviceInfo.wSpectraStartBlock = 36;	/* blocks 0..35 reserved */
	DeviceInfo.wSpectraEndBlock = GLOB_LLD_BLOCKS - 1;
	DeviceInfo.wTotalBlocks = GLOB_LLD_BLOCKS;
	DeviceInfo.wPagesPerBlock = GLOB_LLD_PAGES;
	DeviceInfo.wPageSize = GLOB_LLD_PAGE_SIZE;
	DeviceInfo.wPageDataSize = GLOB_LLD_PAGE_DATA_SIZE;
	DeviceInfo.wPageSpareSize = GLOB_LLD_PAGE_SIZE -
	    GLOB_LLD_PAGE_DATA_SIZE;
	/* Derived sizes — depend on the page fields assigned above */
	DeviceInfo.wBlockSize = DeviceInfo.wPageSize * GLOB_LLD_PAGES;
	DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * GLOB_LLD_PAGES;
	DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
						DeviceInfo.wSpectraStartBlock
						+ 1);
	DeviceInfo.MLCDevice = 1; /* Emulate MLC device */
	DeviceInfo.nBitsInPageNumber =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
	DeviceInfo.nBitsInPageDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
	DeviceInfo.nBitsInBlockDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);

#if CMD_DMA
	/* Pretend all four CDMA channels are populated */
	totalUsedBanks = 4;
	valid_banks[0] = 1;
	valid_banks[1] = 1;
	valid_banks[2] = 1;
	valid_banks[3] = 1;
#endif

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Flash_Reset
* Inputs:       none
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Reset the flash
*
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 emu_Flash_Reset(void)
{
	/* Nothing to reset in the emulator — trace and report success. */
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);
	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Erase_Block
* Inputs:       Address
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Erase a block
*
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 emu_Erase_Block(u32 block_add)
{
	int page_idx;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (block_add >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "emu_Erase_Block error! "
		       "Too big block address: %d\n", block_add);
		return FAIL;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
		(int)block_add);

	/* Fill every page of the block with 0xFF, the erased state */
	for (page_idx = block_add * GLOB_LLD_PAGES;
	     page_idx < ((block_add + 1) * GLOB_LLD_PAGES);
	     page_idx++) {
		u8 *page = flash_memory[page_idx];

		if (page)
			memset(page, 0xFF,
			       DeviceInfo.wPageSize * sizeof(u8));
	}

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Write_Page_Main
* Inputs:       Write buffer address pointer
*               Block number
*               Page  number
*               Number of pages to process
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Write the data in the buffer to main area of flash
*
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
			   u16 Page, u16 PageCount)
{
	u32 idx;
	int n;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (Block >= DeviceInfo.wTotalBlocks)
		return FAIL;

	if (Page + PageCount > DeviceInfo.wPagesPerBlock)
		return FAIL;

	nand_dbg_print(NAND_DBG_DEBUG, "emu_Write_Page_Main: "
		       "lba %u Page %u PageCount %u\n",
		       (unsigned int)Block,
		       (unsigned int)Page, (unsigned int)PageCount);

	/* Copy the data area of each page; advance through consecutive
	 * pages of the block via a flat page index */
	idx = Block * GLOB_LLD_PAGES + Page;
	for (n = 0; n < PageCount; n++, idx++) {
		if (NULL == flash_memory[idx]) {
			printk(KERN_ERR "Run out of memory\n");
			return FAIL;
		}
		memcpy((u8 *)(flash_memory[idx]), write_data,
		       DeviceInfo.wPageDataSize);
		write_data += DeviceInfo.wPageDataSize;
	}

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Read_Page_Main
* Inputs:       Read buffer address pointer
*               Block number
*               Page  number
*               Number of pages to process
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Read the data from the flash main area to the buffer
*
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 emu_Read_Page_Main(u8 *read_data, u32 Block,
			  u16 Page, u16 PageCount)
{
	u32 idx;
	int n;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (Block >= DeviceInfo.wTotalBlocks)
		return FAIL;

	if (Page + PageCount > DeviceInfo.wPagesPerBlock)
		return FAIL;

	nand_dbg_print(NAND_DBG_DEBUG, "emu_Read_Page_Main: "
		       "lba %u Page %u PageCount %u\n",
		       (unsigned int)Block,
		       (unsigned int)Page, (unsigned int)PageCount);

	/* An unbacked page reads as erased (all 0xFF) */
	idx = Block * GLOB_LLD_PAGES + Page;
	for (n = 0; n < PageCount; n++, idx++) {
		if (NULL == flash_memory[idx])
			memset(read_data, 0xFF, DeviceInfo.wPageDataSize);
		else
			memcpy(read_data, (u8 *)(flash_memory[idx]),
			       DeviceInfo.wPageDataSize);
		read_data += DeviceInfo.wPageDataSize;
	}

	return PASS;
}
#ifndef ELDORA
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Read_Page_Main_Spare
* Inputs:       Write Buffer
*               Address
*               Buffer size
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Read from flash main+spare area
*
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
				u16 Page, u16 PageCount)
{
	u32 idx;
	int n;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (Block >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "Read Page Main+Spare "
		       "Error: Block Address too big\n");
		return FAIL;
	}

	if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
		printk(KERN_ERR "Read Page Main+Spare "
		       "Error: Page number too big\n");
		return FAIL;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
		       "No. of pages %u block %u start page %u\n",
		       (unsigned int)PageCount,
		       (unsigned int)Block, (unsigned int)Page);

	/* Whole-page copy (data + spare); unbacked pages read as 0xFF */
	idx = Block * GLOB_LLD_PAGES + Page;
	for (n = 0; n < PageCount; n++, idx++) {
		if (NULL == flash_memory[idx])
			memset(read_data, 0xFF, DeviceInfo.wPageSize);
		else
			memcpy(read_data, (u8 *)(flash_memory[idx]),
			       DeviceInfo.wPageSize);
		read_data += DeviceInfo.wPageSize;
	}

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Write_Page_Main_Spare
* Inputs:       Write buffer
*               address
*               buffer length
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Write the buffer to main+spare area of flash
*
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
				 u16 Page, u16 page_count)
{
	u32 idx;
	u16 n;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (Block >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "Write Page Main + Spare "
		       "Error: Block Address too big\n");
		return FAIL;
	}

	if (Page + page_count > DeviceInfo.wPagesPerBlock) {
		printk(KERN_ERR "Write Page Main + Spare "
		       "Error: Page number too big\n");
		return FAIL;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
		       "No. of pages %u block %u start page %u\n",
		       (unsigned int)page_count,
		       (unsigned int)Block, (unsigned int)Page);

	/* Whole-page copy: data area plus spare area in one memcpy */
	idx = Block * GLOB_LLD_PAGES + Page;
	for (n = 0; n < page_count; n++, idx++) {
		if (NULL == flash_memory[idx]) {
			printk(KERN_ERR "Run out of memory!\n");
			return FAIL;
		}
		memcpy((u8 *)(flash_memory[idx]), write_data,
		       DeviceInfo.wPageSize);
		write_data += DeviceInfo.wPageSize;
	}

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Write_Page_Spare
* Inputs:       Write buffer
*               Address
*               buffer size
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Write the buffer in the spare area
*
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Fix vs. original: both error messages said "Read Page Spare Error"
 * in this *write* function (copy-paste from emu_Read_Page_Spare),
 * which sent anyone debugging a write failure to the wrong code path.
 *
 * NOTE(review): only the spare area of the single starting page is
 * written; PageCount is used solely for the bounds check.  This matches
 * the read counterpart — confirm it is intentional.
 */
u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
			    u16 Page, u16 PageCount)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (Block >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "Write Page Spare Error: "
		       "Block Address too big\n");
		return FAIL;
	}

	if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
		printk(KERN_ERR "Write Page Spare Error: "
		       "Page number too big\n");
		return FAIL;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Write Page Spare- "
		       "block %u page %u\n",
		       (unsigned int)Block, (unsigned int)Page);

	if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
		printk(KERN_ERR "Run out of memory!\n");
		return FAIL;
	}

	/* The spare area lives after the data area inside each page */
	memcpy((u8 *)(flash_memory[Block * GLOB_LLD_PAGES + Page] +
		      DeviceInfo.wPageDataSize), write_data,
	       (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Read_Page_Spare
* Inputs:       Write Buffer
*               Address
*               Buffer size
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Read data from the spare area
*
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 emu_Read_Page_Spare(u8 *write_data, u32 Block,
			   u16 Page, u16 PageCount)
{
	u8 *page;
	u16 spare_size;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (Block >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "Read Page Spare "
		       "Error: Block Address too big\n");
		return FAIL;
	}

	if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
		printk(KERN_ERR "Read Page Spare "
		       "Error: Page number too big\n");
		return FAIL;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
		       "block %u page %u\n",
		       (unsigned int)Block, (unsigned int)Page);

	/* Only the starting page's spare area is read; the spare bytes
	 * sit after the data area inside the page buffer */
	page = flash_memory[Block * GLOB_LLD_PAGES + Page];
	spare_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;

	if (NULL == page)
		memset(write_data, 0xFF, spare_size);
	else
		memcpy(write_data,
		       (u8 *)(page + DeviceInfo.wPageDataSize),
		       spare_size);

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Enable_Disable_Interrupts
* Inputs:       enable or disable
* Outputs:      none
* Description:  NOP
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/* The emulator has no interrupts; this stub only traces the call. */
void emu_Enable_Disable_Interrupts(u16 INT_ENABLE)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);
}
/* The emulated device never develops bad blocks: always report good (0). */
u16 emu_Get_Bad_Block(u32 block)
{
	return 0;
}
#if CMD_DMA
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Support for CDMA functions
************************************
* emu_CDMA_Flash_Init
* CDMA_process_data command (use LLD_CDMA)
* CDMA_MemCopy_CMD (use LLD_CDMA)
* emu_CDMA_execute all commands
* emu_CDMA_Event_Status
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Reset the whole PendingCMD descriptor table (MAX_DESCS data descriptors
 * plus MAX_CHANS per-channel dummy slots) to a known idle state before
 * CDMA emulation starts.  Status is set to 3 for every entry, matching
 * the original initializer.  Always returns PASS.
 */
u16 emu_CDMA_Flash_Init(void)
{
	u16 idx, chan;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (idx = 0; idx < MAX_DESCS + MAX_CHANS; idx++) {
		PendingCMD[idx].CMD = 0;
		PendingCMD[idx].Tag = 0;
		PendingCMD[idx].DataAddr = 0;
		PendingCMD[idx].Block = 0;
		PendingCMD[idx].Page = 0;
		PendingCMD[idx].PageCount = 0;
		PendingCMD[idx].DataDestAddr = 0;
		PendingCMD[idx].DataSrcAddr = 0;
		PendingCMD[idx].MemCopyByteCnt = 0;
		/* Clear the five ChanSync slots (same effect as the
		 * original's five explicit assignments to [0]..[4]). */
		for (chan = 0; chan < 5; chan++)
			PendingCMD[idx].ChanSync[chan] = 0;
		PendingCMD[idx].Status = 3;
	}

	return PASS;
}
static
void
emu_isr
(
int
irq
,
void
*
dev_id
)
{
/* TODO: ... */
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     CDMA_Execute_CMDs (emu_CDMA_Execute_CMDs)
* Inputs:       tag_count: the number of pending cmds to do
* Outputs:      PASS/FAIL
* Description:  execute each command in the pending CMD array.
*               Slots [0, totalUsedBanks) are filled with per-bank DUMMY
*               commands; the real commands live at [MAX_CHANS,
*               tag_count + MAX_CHANS) and are dispatched synchronously
*               to the emu_* page/erase routines.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 emu_CDMA_Execute_CMDs(u16 tag_count)
{
	u16 i, j;
	u8 CMD;		/* cmd parameter */
	u8 *data;
	u32 block;
	u16 page;
	u16 count;
	u16 status = PASS;	/* never set to FAIL below; per-command
				 * failures are reported via .Status */

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);
	nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
		       "Tag Count %u\n", tag_count);

	/* Seed one DUMMY command per used bank, pointed at that bank's
	 * first block, with all channel-sync words cleared. */
	for (i = 0; i < totalUsedBanks; i++) {
		PendingCMD[i].CMD = DUMMY_CMD;
		PendingCMD[i].Tag = 0xFF;
		PendingCMD[i].Block =
			(DeviceInfo.wTotalBlocks / totalUsedBanks) * i;

		for (j = 0; j <= MAX_CHANS; j++)
			PendingCMD[i].ChanSync[j] = 0;
	}

	CDMA_Execute_CMDs(tag_count);

	print_pending_cmds(tag_count);

	/* NOTE(review): this conditional block contains an unmatched '}'
	 * and will not compile when DEBUG_SYNC is non-zero — presumably
	 * pairs with an opening brace elsewhere under the same #if. */
#if DEBUG_SYNC
	}
	debug_sync_cnt++;
#endif

	/* Dispatch each real descriptor to the matching emulator routine */
	for (i = MAX_CHANS;
	     i < tag_count + MAX_CHANS; i++) {
		CMD = PendingCMD[i].CMD;
		data = PendingCMD[i].DataAddr;
		block = PendingCMD[i].Block;
		page = PendingCMD[i].Page;
		count = PendingCMD[i].PageCount;

		switch (CMD) {
		case ERASE_CMD:
			emu_Erase_Block(block);
			PendingCMD[i].Status = PASS;
			break;
		case WRITE_MAIN_CMD:
			emu_Write_Page_Main(data, block, page, count);
			PendingCMD[i].Status = PASS;
			break;
		case WRITE_MAIN_SPARE_CMD:
			emu_Write_Page_Main_Spare(data, block, page, count);
			PendingCMD[i].Status = PASS;
			break;
		case READ_MAIN_CMD:
			emu_Read_Page_Main(data, block, page, count);
			PendingCMD[i].Status = PASS;
			break;
		case MEMCOPY_CMD:
			memcpy(PendingCMD[i].DataDestAddr,
			       PendingCMD[i].DataSrcAddr,
			       PendingCMD[i].MemCopyByteCnt);
			/* fall through — shares the Status=PASS below */
		case DUMMY_CMD:
			PendingCMD[i].Status = PASS;
			break;
		default:
			PendingCMD[i].Status = FAIL;
			break;
		}
	}

	/*
	 * Temporary code to reset the PendingCMD array for basic testing.
	 * It should be done at the end of the event status function.
	 */
	for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
		PendingCMD[i].CMD = 0;
		PendingCMD[i].Tag = 0;
		PendingCMD[i].DataAddr = 0;
		PendingCMD[i].Block = 0;
		PendingCMD[i].Page = 0;
		PendingCMD[i].PageCount = 0;
		PendingCMD[i].DataDestAddr = 0;
		PendingCMD[i].DataSrcAddr = 0;
		PendingCMD[i].MemCopyByteCnt = 0;
		PendingCMD[i].ChanSync[0] = 0;
		PendingCMD[i].ChanSync[1] = 0;
		PendingCMD[i].ChanSync[2] = 0;
		PendingCMD[i].ChanSync[3] = 0;
		PendingCMD[i].ChanSync[4] = 0;
		PendingCMD[i].Status = CMD_NOT_DONE;
	}

	nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");

	emu_isr(0, 0); /* This is a null isr now. Need fill it in future */

	return status;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     emu_Event_Status (emu_CDMA_Event_Status)
* Inputs:       none
* Outputs:      Event_Status code
* Description:  This function can also be used to force errors.
*               Emulation always succeeds, so it returns EVENT_PASS
*               unconditionally.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 emu_CDMA_Event_Status(void)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	return EVENT_PASS;
}
#endif
/* CMD_DMA */
#endif
/* !ELDORA */
#endif
/* FLASH_EMU */
drivers/staging/spectra/lld_emu.h
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef _LLD_EMU_
#define _LLD_EMU_

#include "ffsport.h"
#include "ffsdefs.h"

/* prototypes: emulator API functions */

/* Device lifecycle and identification */
extern u16 emu_Flash_Reset(void);
extern u16 emu_Flash_Init(void);
extern int emu_Flash_Release(void);
extern u16 emu_Read_Device_ID(void);

/* Erase one block (block_addr is a block index) */
extern u16 emu_Erase_Block(u32 block_addr);

/* Page I/O: main area, main+spare, and spare-only variants.
 * All return PASS (0) on success, FAIL otherwise. */
extern u16 emu_Write_Page_Main(u8 *write_data, u32 Block, u16 Page,
			       u16 PageCount);
extern u16 emu_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
			      u16 PageCount);
extern u16 emu_Event_Status(void);
extern void emu_Enable_Disable_Interrupts(u16 INT_ENABLE);
extern u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block, u16 Page,
				     u16 PageCount);
extern u16 emu_Write_Page_Spare(u8 *write_data, u32 Block, u16 Page,
				u16 PageCount);
extern u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block, u16 Page,
				    u16 PageCount);
extern u16 emu_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
			       u16 PageCount);
extern u16 emu_Get_Bad_Block(u32 block);

/* CDMA emulation entry points (used when CMD_DMA is enabled) */
u16 emu_CDMA_Flash_Init(void);
u16 emu_CDMA_Execute_CMDs(u16 tag_count);
u16 emu_CDMA_Event_Status(void);

#endif /*_LLD_EMU_*/
drivers/staging/spectra/lld_mtd.c
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include "flash.h"
#include "ffsdefs.h"
#include "lld_emu.h"
#include "lld.h"
#if CMD_DMA
#include "lld_cdma.h"
#endif
#define GLOB_LLD_PAGES 64
#define GLOB_LLD_PAGE_SIZE (512+16)
#define GLOB_LLD_PAGE_DATA_SIZE 512
#define GLOB_LLD_BLOCKS 2048
#if CMD_DMA
#include "lld_cdma.h"
/* Bank bookkeeping used by the CDMA emulation paths */
u32 totalUsedBanks;
u32 valid_banks[MAX_CHANS];
#endif

/* The MTD device this translation layer sits on; set in mtd_Flash_Init */
static struct mtd_info *spectra_mtd;

/* MTD device number, supplied via the "mtddev" module parameter;
 * -1 means "not specified" and makes mtd_Flash_Init fail. */
static int mtddev = -1;
module_param(mtddev, int, 0);
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Flash_Init
* Inputs:       none
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Grabs the MTD device selected by the mtddev module
*               parameter and caches it in spectra_mtd.  Fails if the
*               parameter was never set or the device does not exist.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Flash_Init(void)
{
	if (mtddev == -1) {
		printk(KERN_ERR "No MTD device specified. Give mtddev parameter\n");
		return FAIL;
	}

	spectra_mtd = get_mtd_device(NULL, mtddev);
	if (!spectra_mtd) {
		printk(KERN_ERR "Failed to obtain MTD device #%d\n", mtddev);
		return FAIL;
	}

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Flash_Release
* Inputs:       none
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Releases the flash: drops the reference taken by
*               get_mtd_device and clears spectra_mtd.  Safe to call
*               when no device was ever acquired.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int mtd_Flash_Release(void)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);
	if (!spectra_mtd)
		return PASS;

	put_mtd_device(spectra_mtd);
	spectra_mtd = NULL;

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Read_Device_ID
* Inputs:       none
* Outputs:      PASS=1 FAIL=0
* Description:  Derives the geometry fields of the global DeviceInfo
*               structure from the underlying MTD device (block count,
*               page/spare sizes, pages per block, derived bit widths).
*               Requires mtd_Flash_Init to have succeeded first.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Read_Device_ID(void)
{
	uint64_t tmp;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (!spectra_mtd)
		return FAIL;

	DeviceInfo.wDeviceMaker = 0;
	DeviceInfo.wDeviceType = 8;
	DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
	/* 64-bit size / 32-bit erasesize via do_div (works on 32-bit hosts) */
	tmp = spectra_mtd->size;
	do_div(tmp, spectra_mtd->erasesize);
	DeviceInfo.wTotalBlocks = tmp;
	DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
	DeviceInfo.wPagesPerBlock = spectra_mtd->erasesize / spectra_mtd->writesize;
	/* "page size" here includes the OOB (spare) bytes */
	DeviceInfo.wPageSize = spectra_mtd->writesize + spectra_mtd->oobsize;
	DeviceInfo.wPageDataSize = spectra_mtd->writesize;
	DeviceInfo.wPageSpareSize = spectra_mtd->oobsize;
	DeviceInfo.wBlockSize = DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
	DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize *
		DeviceInfo.wPagesPerBlock;
	DeviceInfo.wDataBlockNum = (u32)(DeviceInfo.wSpectraEndBlock -
					 DeviceInfo.wSpectraStartBlock
					 + 1);
	/* MLC detection hard-wired off for now */
	DeviceInfo.MLCDevice = 0; //spectra_mtd->celltype & NAND_CI_CELLTYPE_MSK;
	DeviceInfo.nBitsInPageNumber =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
	DeviceInfo.nBitsInPageDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
	DeviceInfo.nBitsInBlockDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);

#if CMD_DMA
	/* CDMA emulation pretends four equally-sized banks exist */
	totalUsedBanks = 4;
	valid_banks[0] = 1;
	valid_banks[1] = 1;
	valid_banks[2] = 1;
	valid_banks[3] = 1;
#endif

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Flash_Reset
* Inputs:       none
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Reset the flash — a no-op for the MTD backend; the MTD
*               layer manages device state itself.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Flash_Reset(void)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	return PASS;
}
/*
 * Completion callback for the asynchronous MTD erase issued by
 * mtd_Erase_Block.  e->priv carries the address of the on-stack
 * completion that mtd_Erase_Block is waiting on.
 *
 * Made static: it is only referenced from this file, so it should not
 * leak into the kernel's global namespace.
 */
static void erase_callback(struct erase_info *e)
{
	complete((void *)e->priv);
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Erase_Block
* Inputs:       Address (block index, not a byte offset)
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Erase a block synchronously: submits an async MTD erase
*               and blocks on a completion signalled by erase_callback.
*
* Fixes: widen the block-index * erasesize multiply to 64 bits before
* assigning to the 64-bit erase.addr (the u32*u32 product could wrap on
* devices larger than 4 GiB), and print the u32 block index with %u.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Erase_Block(u32 block_add)
{
	struct erase_info erase;
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (block_add >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "mtd_Erase_Block error! "
		       "Too big block address: %u\n", block_add);
		return FAIL;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
		       (int)block_add);

	erase.mtd = spectra_mtd;
	erase.callback = erase_callback;
	/* Cast before multiplying: avoid 32-bit overflow on big devices */
	erase.addr = (uint64_t)block_add * spectra_mtd->erasesize;
	erase.len = spectra_mtd->erasesize;
	erase.priv = (unsigned long)&comp;

	ret = spectra_mtd->erase(spectra_mtd, &erase);
	if (!ret) {
		/* Erase was queued; wait for erase_callback to fire */
		wait_for_completion(&comp);
		if (erase.state != MTD_ERASE_DONE)
			ret = -EIO;
	}
	if (ret) {
		printk(KERN_WARNING "mtd_Erase_Block error! "
		       "erase of region [0x%llx, 0x%llx] failed\n",
		       erase.addr, erase.len);
		return FAIL;
	}

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Write_Page_Main
* Inputs:       Write buffer address pointer
*               Block number
*               Page number
*               Number of pages to process
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Write the data in the buffer to main area of flash,
*               one page per mtd->write call.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Write_Page_Main(u8 *write_data, u32 Block, u16 Page, u16 PageCount)
{
	size_t retlen;
	int ret = 0;

	if (Block >= DeviceInfo.wTotalBlocks)
		return FAIL;

	if (Page + PageCount > DeviceInfo.wPagesPerBlock)
		return FAIL;

	nand_dbg_print(NAND_DBG_DEBUG, "mtd_Write_Page_Main: "
		       "lba %u Page %u PageCount %u\n",
		       (unsigned int)Block,
		       (unsigned int)Page, (unsigned int)PageCount);

	while (PageCount) {
		/* NOTE(review): Block * erasesize is a u32 multiply and may
		 * wrap for byte offsets beyond 4 GiB — confirm device size. */
		ret = spectra_mtd->write(spectra_mtd,
					 (Block * spectra_mtd->erasesize) +
					 (Page * spectra_mtd->writesize),
					 DeviceInfo.wPageDataSize, &retlen, write_data);
		if (ret) {
			printk(KERN_ERR "%s failed %d\n", __func__, ret);
			return FAIL;
		}
		write_data += DeviceInfo.wPageDataSize;
		Page++;
		PageCount--;
	}

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Read_Page_Main
* Inputs:       Read buffer address pointer
*               Block number
*               Page number
*               Number of pages to process
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Read the data from the flash main area to the buffer,
*               one page per mtd->read call.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Read_Page_Main(u8 *read_data, u32 Block, u16 Page, u16 PageCount)
{
	size_t retlen;
	int ret = 0;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (Block >= DeviceInfo.wTotalBlocks)
		return FAIL;

	if (Page + PageCount > DeviceInfo.wPagesPerBlock)
		return FAIL;

	nand_dbg_print(NAND_DBG_DEBUG, "mtd_Read_Page_Main: "
		       "lba %u Page %u PageCount %u\n",
		       (unsigned int)Block,
		       (unsigned int)Page, (unsigned int)PageCount);

	while (PageCount) {
		ret = spectra_mtd->read(spectra_mtd,
					(Block * spectra_mtd->erasesize) +
					(Page * spectra_mtd->writesize),
					DeviceInfo.wPageDataSize, &retlen, read_data);
		if (ret) {
			printk(KERN_ERR "%s failed %d\n", __func__, ret);
			return FAIL;
		}
		read_data += DeviceInfo.wPageDataSize;
		Page++;
		PageCount--;
	}

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	return PASS;
}
#ifndef ELDORA
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Read_Page_Main_Spare
* Inputs:       Read Buffer
*               Address
*               Buffer size
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Read from flash main+spare area.  Per page: data bytes
*               go to the start of the caller's buffer slot, followed by
*               BTSIG_BYTES of OOB at offset wPageDataSize+BTSIG_OFFSET;
*               the buffer advances by wPageSize per page.
*
* Fix: Block is u32 — print it with %u instead of %d.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block, u16 Page, u16 PageCount)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (Block >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "Read Page Main+Spare "
		       "Error: Block Address too big\n");
		return FAIL;
	}

	if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
		printk(KERN_ERR "Read Page Main+Spare "
		       "Error: Page number %d+%d too big in block %u\n",
		       Page, PageCount, Block);
		return FAIL;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
		       "No. of pages %u block %u start page %u\n",
		       (unsigned int)PageCount,
		       (unsigned int)Block, (unsigned int)Page);

	while (PageCount) {
		struct mtd_oob_ops ops;
		int ret;

		ops.mode = MTD_OOB_AUTO;
		ops.datbuf = read_data;
		ops.len = DeviceInfo.wPageDataSize;
		ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
		ops.ooblen = BTSIG_BYTES;
		ops.ooboffs = 0;

		ret = spectra_mtd->read_oob(spectra_mtd,
					    (Block * spectra_mtd->erasesize) +
					    (Page * spectra_mtd->writesize),
					    &ops);
		if (ret) {
			printk(KERN_ERR "%s failed %d\n", __func__, ret);
			return FAIL;
		}

		read_data += DeviceInfo.wPageSize;
		Page++;
		PageCount--;
	}

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Write_Page_Main_Spare
* Inputs:       Write buffer
*               address
*               buffer length
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Write the buffer to main+spare area of flash, mirroring
*               the layout used by mtd_Read_Page_Main_Spare: wPageSize
*               bytes consumed per page, OOB taken from offset
*               wPageDataSize+BTSIG_OFFSET.
*
* Fix: Block is u32 — print it with %u instead of %d.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block, u16 Page,
			      u16 page_count)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (Block >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "Write Page Main + Spare "
		       "Error: Block Address too big\n");
		return FAIL;
	}

	if (Page + page_count > DeviceInfo.wPagesPerBlock) {
		printk(KERN_ERR "Write Page Main + Spare "
		       "Error: Page number %d+%d too big in block %u\n",
		       Page, page_count, Block);
		WARN_ON(1);
		return FAIL;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
		       "No. of pages %u block %u start page %u\n",
		       (unsigned int)page_count,
		       (unsigned int)Block, (unsigned int)Page);

	while (page_count) {
		struct mtd_oob_ops ops;
		int ret;

		ops.mode = MTD_OOB_AUTO;
		ops.datbuf = write_data;
		ops.len = DeviceInfo.wPageDataSize;
		ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
		ops.ooblen = BTSIG_BYTES;
		ops.ooboffs = 0;

		ret = spectra_mtd->write_oob(spectra_mtd,
					     (Block * spectra_mtd->erasesize) +
					     (Page * spectra_mtd->writesize),
					     &ops);
		if (ret) {
			printk(KERN_ERR "%s failed %d\n", __func__, ret);
			return FAIL;
		}

		write_data += DeviceInfo.wPageSize;
		Page++;
		page_count--;
	}

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Write_Page_Spare
* Inputs:       Write buffer
*               Address
*               buffer size
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Write the buffer in the spare area — NOT implemented
*               for the MTD backend; warns loudly and always fails.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block, u16 Page, u16 PageCount)
{
	WARN_ON(1);
	return FAIL;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Read_Page_Spare
* Inputs:       Read Buffer
*               Address
*               Buffer size
* Outputs:      PASS=0 (notice 0=ok here)
* Description:  Read data from the spare area: BTSIG_BYTES of OOB per
*               page via mtd->read_oob (no main-area data), while the
*               destination pointer still advances by a full wPageSize
*               per page.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page, u16 PageCount)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	if (Block >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "Read Page Spare "
		       "Error: Block Address too big\n");
		return FAIL;
	}

	if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
		printk(KERN_ERR "Read Page Spare "
		       "Error: Page number too big\n");
		return FAIL;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
		       "block %u page %u (%u pages)\n",
		       (unsigned int)Block, (unsigned int)Page, PageCount);

	while (PageCount) {
		struct mtd_oob_ops ops;
		int ret;

		/* OOB-only read: no data buffer attached */
		ops.mode = MTD_OOB_AUTO;
		ops.datbuf = NULL;
		ops.len = 0;
		ops.oobbuf = read_data;
		ops.ooblen = BTSIG_BYTES;
		ops.ooboffs = 0;

		ret = spectra_mtd->read_oob(spectra_mtd,
					    (Block * spectra_mtd->erasesize) +
					    (Page * spectra_mtd->writesize),
					    &ops);
		if (ret) {
			printk(KERN_ERR "%s failed %d\n", __func__, ret);
			return FAIL;
		}

		read_data += DeviceInfo.wPageSize;
		Page++;
		PageCount--;
	}

	return PASS;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Enable_Disable_Interrupts
* Inputs:       enable or disable
* Outputs:      none
* Description:  NOP — interrupts are handled by the MTD layer;
*               INT_ENABLE is ignored, only a trace line is emitted.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);
}
/* Bad-block query for the MTD backend: always reports good (0).
 * NOTE(review): real bad-block state could be queried from MTD
 * (block_isbad) — TODO confirm whether that is intentional. */
u16 mtd_Get_Bad_Block(u32 block)
{
	return 0;
}
#if CMD_DMA
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Support for CDMA functions
************************************
* mtd_CDMA_Flash_Init
* CDMA_process_data command (use LLD_CDMA)
* CDMA_MemCopy_CMD (use LLD_CDMA)
* mtd_CDMA_execute all commands
* mtd_CDMA_Event_Status
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Reset the PendingCMD descriptor table (MAX_DESCS descriptors plus
 * MAX_CHANS per-channel dummy slots) to a known idle state before CDMA
 * processing starts.  Status is set to 3 for every entry, matching the
 * original initializer.  Always returns PASS.
 */
u16 mtd_CDMA_Flash_Init(void)
{
	u16 idx, chan;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	for (idx = 0; idx < MAX_DESCS + MAX_CHANS; idx++) {
		PendingCMD[idx].CMD = 0;
		PendingCMD[idx].Tag = 0;
		PendingCMD[idx].DataAddr = 0;
		PendingCMD[idx].Block = 0;
		PendingCMD[idx].Page = 0;
		PendingCMD[idx].PageCount = 0;
		PendingCMD[idx].DataDestAddr = 0;
		PendingCMD[idx].DataSrcAddr = 0;
		PendingCMD[idx].MemCopyByteCnt = 0;
		/* Clear the five ChanSync slots (same effect as the
		 * original's five explicit assignments to [0]..[4]). */
		for (chan = 0; chan < 5; chan++)
			PendingCMD[idx].ChanSync[chan] = 0;
		PendingCMD[idx].Status = 3;
	}

	return PASS;
}
static
void
mtd_isr
(
int
irq
,
void
*
dev_id
)
{
/* TODO: ... */
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     CDMA_Execute_CMDs (mtd_CDMA_Execute_CMDs)
* Inputs:       tag_count: the number of pending cmds to do
* Outputs:      PASS/FAIL
* Description:  execute each command in the pending CMD array.
*               Slots [0, totalUsedBanks) are filled with per-bank DUMMY
*               commands; the real commands live at [MAX_CHANS,
*               tag_count + MAX_CHANS) and are dispatched synchronously
*               to the mtd_* page/erase routines.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_CDMA_Execute_CMDs(u16 tag_count)
{
	u16 i, j;
	u8 CMD;		/* cmd parameter */
	u8 *data;
	u32 block;
	u16 page;
	u16 count;
	u16 status = PASS;	/* never set to FAIL below; per-command
				 * failures are reported via .Status */

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);
	nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
		       "Tag Count %u\n", tag_count);

	/* Seed one DUMMY command per used bank, pointed at that bank's
	 * first block, with all channel-sync words cleared. */
	for (i = 0; i < totalUsedBanks; i++) {
		PendingCMD[i].CMD = DUMMY_CMD;
		PendingCMD[i].Tag = 0xFF;
		PendingCMD[i].Block =
			(DeviceInfo.wTotalBlocks / totalUsedBanks) * i;

		for (j = 0; j <= MAX_CHANS; j++)
			PendingCMD[i].ChanSync[j] = 0;
	}

	CDMA_Execute_CMDs(tag_count);

#ifdef VERBOSE
	print_pending_cmds(tag_count);
#endif

	/* NOTE(review): this conditional block contains an unmatched '}'
	 * and will not compile when DEBUG_SYNC is non-zero — presumably
	 * pairs with an opening brace elsewhere under the same #if. */
#if DEBUG_SYNC
	}
	debug_sync_cnt++;
#endif

	/* Dispatch each real descriptor to the matching MTD-backed routine */
	for (i = MAX_CHANS;
	     i < tag_count + MAX_CHANS; i++) {
		CMD = PendingCMD[i].CMD;
		data = PendingCMD[i].DataAddr;
		block = PendingCMD[i].Block;
		page = PendingCMD[i].Page;
		count = PendingCMD[i].PageCount;

		switch (CMD) {
		case ERASE_CMD:
			mtd_Erase_Block(block);
			PendingCMD[i].Status = PASS;
			break;
		case WRITE_MAIN_CMD:
			mtd_Write_Page_Main(data, block, page, count);
			PendingCMD[i].Status = PASS;
			break;
		case WRITE_MAIN_SPARE_CMD:
			mtd_Write_Page_Main_Spare(data, block, page, count);
			PendingCMD[i].Status = PASS;
			break;
		case READ_MAIN_CMD:
			mtd_Read_Page_Main(data, block, page, count);
			PendingCMD[i].Status = PASS;
			break;
		case MEMCOPY_CMD:
			memcpy(PendingCMD[i].DataDestAddr,
			       PendingCMD[i].DataSrcAddr,
			       PendingCMD[i].MemCopyByteCnt);
			/* fall through — shares the Status=PASS below */
		case DUMMY_CMD:
			PendingCMD[i].Status = PASS;
			break;
		default:
			PendingCMD[i].Status = FAIL;
			break;
		}
	}

	/*
	 * Temporary code to reset the PendingCMD array for basic testing.
	 * It should be done at the end of the event status function.
	 */
	for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
		PendingCMD[i].CMD = 0;
		PendingCMD[i].Tag = 0;
		PendingCMD[i].DataAddr = 0;
		PendingCMD[i].Block = 0;
		PendingCMD[i].Page = 0;
		PendingCMD[i].PageCount = 0;
		PendingCMD[i].DataDestAddr = 0;
		PendingCMD[i].DataSrcAddr = 0;
		PendingCMD[i].MemCopyByteCnt = 0;
		PendingCMD[i].ChanSync[0] = 0;
		PendingCMD[i].ChanSync[1] = 0;
		PendingCMD[i].ChanSync[2] = 0;
		PendingCMD[i].ChanSync[3] = 0;
		PendingCMD[i].ChanSync[4] = 0;
		PendingCMD[i].Status = CMD_NOT_DONE;
	}

	nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");

	mtd_isr(0, 0); /* This is a null isr now. Need fill it in future */

	return status;
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     mtd_Event_Status (mtd_CDMA_Event_Status)
* Inputs:       none
* Outputs:      Event_Status code
* Description:  This function can also be used to force errors.
*               Currently always reports EVENT_PASS.
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
u16 mtd_CDMA_Event_Status(void)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	return EVENT_PASS;
}
#endif
/* CMD_DMA */
#endif
/* !ELDORA */
drivers/staging/spectra/lld_mtd.h
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef _LLD_MTD_
#define _LLD_MTD_

#include "ffsport.h"
#include "ffsdefs.h"

/* prototypes: MTD API functions */

/* Device lifecycle and identification */
extern u16 mtd_Flash_Reset(void);
extern u16 mtd_Flash_Init(void);
extern int mtd_Flash_Release(void);
extern u16 mtd_Read_Device_ID(void);

/* Erase one block (block_addr is a block index) */
extern u16 mtd_Erase_Block(u32 block_addr);

/* Page I/O: main area, main+spare, and spare-only variants.
 * All return PASS (0) on success, FAIL otherwise. */
extern u16 mtd_Write_Page_Main(u8 *write_data, u32 Block, u16 Page,
			       u16 PageCount);
extern u16 mtd_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
			      u16 PageCount);
extern u16 mtd_Event_Status(void);
extern void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE);
extern u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block, u16 Page,
				     u16 PageCount);
extern u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block, u16 Page,
				u16 PageCount);
extern u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block, u16 Page,
				    u16 PageCount);
extern u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
			       u16 PageCount);
extern u16 mtd_Get_Bad_Block(u32 block);

/* CDMA entry points (used when CMD_DMA is enabled) */
u16 mtd_CDMA_Flash_Init(void);
u16 mtd_CDMA_Execute_CMDs(u16 tag_count);
u16 mtd_CDMA_Event_Status(void);

#endif /*_LLD_MTD_*/
drivers/staging/spectra/lld_nand.c
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include "lld.h"
#include "lld_nand.h"
#include "lld_cdma.h"
#include "spectraswconfig.h"
#include "flash.h"
#include "ffsdefs.h"
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include "nand_regs.h"
#define SPECTRA_NAND_NAME "nd"
#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
#define MAX_PAGES_PER_RW 128
#define INT_IDLE_STATE 0
#define INT_READ_PAGE_MAIN 0x01
#define INT_WRITE_PAGE_MAIN 0x02
#define INT_PIPELINE_READ_AHEAD 0x04
#define INT_PIPELINE_WRITE_AHEAD 0x08
#define INT_MULTI_PLANE_READ 0x10
#define INT_MULTI_PLANE_WRITE 0x11
/* Non-zero when ECC is enabled; also gates spare-flag-byte skipping in
 * NAND_Get_Bad_Block. */
static u32 enable_ecc;

struct mrst_nand_info info;

int totalUsedBanks;
u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];

/* ioremapped controller register window and flash data window */
void __iomem *FlashReg;
void __iomem *FlashMem;

/* Controller configuration register values written at init time.
 * NOTE(review): meaning of each slot is defined by the register layout
 * in nand_regs.h — confirm ordering there before editing. */
u16 conf_parameters[] = {
	0x0000,
	0x0000,
	0x01F4,
	0x01F4,
	0x01F4,
	0x01F4,
	0x0000,
	0x0000,
	0x0001,
	0x0000,
	0x0000,
	0x0000,
	0x0000,
	0x0040,
	0x0001,
	0x000A,
	0x000A,
	0x000A,
	0x0000,
	0x0000,
	0x0005,
	0x0012,
	0x000C
};
/*
 * Factory bad-block check: scan the spare areas of the first two and
 * last two pages of @block for any non-0xFF byte in the skip-byte
 * region (after the ECC flag bytes when ECC is on).
 *
 * Returns GOOD_BLOCK, DEFECTIVE_BLOCK, or READ_ERROR if a spare read
 * fails.
 */
u16 NAND_Get_Bad_Block(u32 block)
{
	u32 status = PASS;
	u32 flag_bytes = 0;
	u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
	u32 page, i;
	u8 *pReadSpareBuf = buf_get_bad_block;

	/* With ECC on, the first wNumPageSpareFlag spare bytes are
	 * controller flag bytes, not marker bytes — skip past them. */
	if (enable_ecc)
		flag_bytes = DeviceInfo.wNumPageSpareFlag;

	/* First two pages of the block */
	for (page = 0; page < 2; page++) {
		status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
		if (status != PASS)
			return READ_ERROR;
		for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
			if (pReadSpareBuf[i] != 0xff)
				return DEFECTIVE_BLOCK;
	}

	/* Last two pages of the block (wPagesPerBlock-1 and -2) */
	for (page = 1; page < 3; page++) {
		status = NAND_Read_Page_Spare(pReadSpareBuf, block,
			DeviceInfo.wPagesPerBlock - page, 1);
		if (status != PASS)
			return READ_ERROR;
		for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
			if (pReadSpareBuf[i] != 0xff)
				return DEFECTIVE_BLOCK;
	}

	return GOOD_BLOCK;
}
/*
 * Reset every flash bank through the controller's DEVICE_RESET register.
 * For each bank: clear stale reset-complete/timeout interrupt bits,
 * trigger the reset, busy-wait for either completion or timeout, then
 * clear the status bits again.  Always returns PASS (a timeout is only
 * logged as a warning).
 */
u16 NAND_Flash_Reset(void)
{
	u32 i;
	/* Per-bank interrupt bit masks and register offsets */
	u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
		INTR_STATUS1__RST_COMP,
		INTR_STATUS2__RST_COMP,
		INTR_STATUS3__RST_COMP};
	u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
		INTR_STATUS1__TIME_OUT,
		INTR_STATUS2__TIME_OUT,
		INTR_STATUS3__TIME_OUT};
	u32 intr_status[4] = {INTR_STATUS0,
		INTR_STATUS1,
		INTR_STATUS2,
		INTR_STATUS3};
	u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
		DEVICE_RESET__BANK1,
		DEVICE_RESET__BANK2,
		DEVICE_RESET__BANK3};

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	/* Acknowledge any stale reset-complete / timeout bits */
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
		iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
			  FlashReg + intr_status[i]);

	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
		iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
		/* Busy-wait for RST_COMP or TIME_OUT.
		 * NOTE(review): no upper bound — hangs forever if the
		 * controller never raises either bit. */
		while (!(ioread32(FlashReg + intr_status[i]) &
			 (intr_status_rst_comp[i] | intr_status_time_out[i])))
			;
		if (ioread32(FlashReg + intr_status[i]) &
		    intr_status_time_out[i])
			nand_dbg_print(NAND_DBG_WARN,
				       "NAND Reset operation timed out on bank %d\n",
				       i);
	}

	/* Acknowledge the bits we just consumed */
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
		iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
			  FlashReg + intr_status[i]);

	return PASS;
}
/*
 * Program the controller timing registers for ONFI timing mode @mode
 * (0-5).  The per-mode tables hold the ONFI minimum timings in ns
 * (Trea, Trp, Treh, ...); each derived register value is the parameter
 * converted to controller clock cycles (CLK_X ns per cycle, rounded up
 * via CEIL_DIV), then padded until the relevant timing constraints and
 * data-valid-window requirement hold.
 */
static void NAND_ONFi_Timing_Mode(u16 mode)
{
	u16 Trea[6] = {40, 30, 25, 20, 20, 16};
	u16 Trp[6] = {50, 25, 17, 15, 12, 10};
	u16 Treh[6] = {30, 15, 15, 10, 10, 7};
	u16 Trc[6] = {100, 50, 35, 30, 25, 20};
	u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
	u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
	u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
	u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
	u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
	u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
	u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
	u16 Tcs[6] = {70, 35, 25, 25, 20, 15};

	u16 TclsRising = 1;
	u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
	u16 dv_window = 0;
	u16 en_lo, en_hi;
	u16 acc_clks;
	u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	/* RE# low/high pulse widths in clocks */
	en_lo = CEIL_DIV(Trp[mode], CLK_X);
	en_hi = CEIL_DIV(Treh[mode], CLK_X);

#if ONFI_BLOOM_TIME
	if ((en_hi * CLK_X) < (Treh[mode] + 2))
		en_hi++;
#endif

	/* Stretch the low phase so a full cycle meets Trc */
	if ((en_lo + en_hi) * CLK_X < Trc[mode])
		en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);

	if ((en_lo + en_hi) < CLK_MULTI)
		en_lo += CLK_MULTI - en_lo - en_hi;

	/* Grow en_lo until the data-valid window is at least 8 ns.
	 * NOTE(review): no iteration cap — relies on the tables making
	 * this converge. */
	while (dv_window < 8) {
		data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
		data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
		data_invalid = data_invalid_rhoh < data_invalid_rloh ?
			data_invalid_rhoh : data_invalid_rloh;
		dv_window = data_invalid - Trea[mode];
		if (dv_window < 8)
			en_lo++;
	}

	/* Access clocks: Trea plus 3 ns of margin */
	acc_clks = CEIL_DIV(Trea[mode], CLK_X);

	while (((acc_clks * CLK_X) - Trea[mode]) < 3)
		acc_clks++;

	if ((data_invalid - acc_clks * CLK_X) < 2)
		nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
			       __FILE__, __LINE__);

	addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
	re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
	re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
	we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
	cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
	if (!TclsRising)
		cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
	if (cs_cnt == 0)
		cs_cnt = 1;

	if (Tcea[mode]) {
		while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
			cs_cnt++;
	}

#if MODE5_WORKAROUND
	if (mode == 5)
		acc_clks = 5;
#endif

	/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
	if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
		(ioread32(FlashReg + DEVICE_ID) == 0x88))
		acc_clks = 6;

	iowrite32(acc_clks, FlashReg + ACC_CLKS);
	iowrite32(re_2_we, FlashReg + RE_2_WE);
	iowrite32(re_2_re, FlashReg + RE_2_RE);
	iowrite32(we_2_re, FlashReg + WE_2_RE);
	iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
	iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
	iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
	iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
}
/*
 * Issue an indexed-addressing command to the controller: write the
 * command/address word to the FlashMem window, then the data word to
 * the data port at offset 0x10. The write order is required by the
 * hardware interface.
 */
static void index_addr(u32 address, u32 data)
{
	iowrite32(address, FlashMem);
	iowrite32(data, FlashMem + 0x10);
}
/*
 * Indexed-addressing read: write the command/address word to the
 * FlashMem window, then read the result from the data port at offset
 * 0x10 into *pdata. Order is required by the hardware interface.
 */
static void index_addr_read_data(u32 address, u32 *pdata)
{
	iowrite32(address, FlashMem);
	*pdata = ioread32(FlashMem + 0x10);
}
static
void
set_ecc_config
(
void
)
{
#if SUPPORT_8BITECC
if
((
ioread32
(
FlashReg
+
DEVICE_MAIN_AREA_SIZE
)
<
4096
)
||
(
ioread32
(
FlashReg
+
DEVICE_SPARE_AREA_SIZE
)
<=
128
))
iowrite32
(
8
,
FlashReg
+
ECC_CORRECTION
);
#endif
if
((
ioread32
(
FlashReg
+
ECC_CORRECTION
)
&
ECC_CORRECTION__VALUE
)
==
1
)
{
DeviceInfo
.
wECCBytesPerSector
=
4
;
DeviceInfo
.
wECCBytesPerSector
*=
DeviceInfo
.
wDevicesConnected
;
DeviceInfo
.
wNumPageSpareFlag
=
DeviceInfo
.
wPageSpareSize
-
DeviceInfo
.
wPageDataSize
/
(
ECC_SECTOR_SIZE
*
DeviceInfo
.
wDevicesConnected
)
*
DeviceInfo
.
wECCBytesPerSector
-
DeviceInfo
.
wSpareSkipBytes
;
}
else
{
DeviceInfo
.
wECCBytesPerSector
=
(
ioread32
(
FlashReg
+
ECC_CORRECTION
)
&
ECC_CORRECTION__VALUE
)
*
13
/
8
;
if
((
DeviceInfo
.
wECCBytesPerSector
)
%
2
==
0
)
DeviceInfo
.
wECCBytesPerSector
+=
2
;
else
DeviceInfo
.
wECCBytesPerSector
+=
1
;
DeviceInfo
.
wECCBytesPerSector
*=
DeviceInfo
.
wDevicesConnected
;
DeviceInfo
.
wNumPageSpareFlag
=
DeviceInfo
.
wPageSpareSize
-
DeviceInfo
.
wPageDataSize
/
(
ECC_SECTOR_SIZE
*
DeviceInfo
.
wDevicesConnected
)
*
DeviceInfo
.
wECCBytesPerSector
-
DeviceInfo
.
wSpareSkipBytes
;
}
}
/*
 * Query geometry and timing information from an ONFI-capable device.
 *
 * Resets banks 0-3 in sequence (each reset only attempted if the
 * previous bank's reset completed rather than timed out), clears the
 * timeout status bits, reads the ONFI feature/timing registers into
 * DeviceInfo, picks the highest supported timing mode, and reads the
 * 3rd ID byte to classify the device as MLC.
 *
 * Returns PASS, or FAIL when the device advertises no valid ONFI
 * timing mode.
 *
 * NOTE(review): the busy-wait loops below spin forever if the hardware
 * never raises RST_COMP or TIME_OUT.
 */
static u16 get_onfi_nand_para(void)
{
	int i;
	u16 blks_lun_l, blks_lun_h, n_of_luns;
	u32 blockperlun, id;

	iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);

	while (!((ioread32(FlashReg + INTR_STATUS0) &
		INTR_STATUS0__RST_COMP) |
		(ioread32(FlashReg + INTR_STATUS0) &
		INTR_STATUS0__TIME_OUT)))
		;

	if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
		iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
		while (!((ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) |
			(ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__TIME_OUT)))
			;

		if (ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) {
			iowrite32(DEVICE_RESET__BANK2,
				FlashReg + DEVICE_RESET);
			while (!((ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) |
				(ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__TIME_OUT)))
				;

			if (ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) {
				iowrite32(DEVICE_RESET__BANK3,
					FlashReg + DEVICE_RESET);
				while (!((ioread32(FlashReg + INTR_STATUS3) &
					INTR_STATUS3__RST_COMP) |
					(ioread32(FlashReg + INTR_STATUS3) &
					INTR_STATUS3__TIME_OUT)))
					;
			} else {
				printk(KERN_ERR "Getting a time out for bank 2!\n");
			}
		} else {
			printk(KERN_ERR "Getting a time out for bank 1!\n");
		}
	}

	/* Acknowledge any timeout bits left set on the four banks */
	iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
	iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
	iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
	iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);

	DeviceInfo.wONFIDevFeatures =
		ioread32(FlashReg + ONFI_DEVICE_FEATURES);
	DeviceInfo.wONFIOptCommands =
		ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
	DeviceInfo.wONFITimingMode =
		ioread32(FlashReg + ONFI_TIMING_MODE);
	DeviceInfo.wONFIPgmCacheTimingMode =
		ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);

	n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
	blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
	blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);

	blockperlun = (blks_lun_h << 16) | blks_lun_l;

	DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;

	if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
		ONFI_TIMING_MODE__VALUE))
		return FAIL;

	/* Highest supported timing mode wins; falls through to mode 0
	 * if only bit 0 is set */
	for (i = 5; i > 0; i--) {
		if (ioread32(FlashReg + ONFI_TIMING_MODE) &
			(0x01 << i))
			break;
	}

	NAND_ONFi_Timing_Mode(i);

	/* Read ID command (0x90, address 0) */
	index_addr(MODE_11 | 0, 0x90);
	index_addr(MODE_11 | 1, 0);

	/* Keep only the 3rd ID byte */
	for (i = 0; i < 3; i++)
		index_addr_read_data(MODE_11 | 2, &id);

	nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);

	DeviceInfo.MLCDevice = id & 0x0C;

	/* By now, all the ONFI devices we know support the page cache */
	/* rw feature. So here we enable the pipeline_rw_ahead feature */
	/* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
	/* iowrite32(1, FlashReg + CACHE_READ_ENABLE); */

	return PASS;
}
/*
 * Detect parameters of a Samsung NAND device from its 5 ID bytes and
 * compute DeviceInfo.wTotalBlocks from plane count/size encoded in ID
 * byte 4 and the block size from DEVICE_PARAM_1.
 */
static void get_samsung_nand_para(void)
{
	u8 no_of_planes;
	u32 blk_size;
	u64 plane_size, capacity;
	u32 id_bytes[5];
	int i;

	/* Read ID command (0x90, address 0), then fetch 5 ID bytes */
	index_addr((u32)(MODE_11 | 0), 0x90);
	index_addr((u32)(MODE_11 | 1), 0);
	for (i = 0; i < 5; i++)
		index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);

	nand_dbg_print(NAND_DBG_DEBUG,
		"ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
		id_bytes[0], id_bytes[1], id_bytes[2],
		id_bytes[3], id_bytes[4]);

	if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
		/* Set timing register values according to datasheet */
		iowrite32(5, FlashReg + ACC_CLKS);
		iowrite32(20, FlashReg + RE_2_WE);
		iowrite32(12, FlashReg + WE_2_RE);
		iowrite32(14, FlashReg + ADDR_2_DATA);
		iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
		iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
		iowrite32(2, FlashReg + CS_SETUP_CNT);
	}

	/* Plane count and per-plane size are encoded in ID byte 4;
	 * units of plane_size/blk_size per Samsung ID encoding --
	 * TODO confirm against datasheet */
	no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
	plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
	blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
	capacity = (u64)128 * plane_size * no_of_planes;

	DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
}
/*
 * Detect parameters of a Toshiba NAND device: patch the controller's
 * mis-reported spare area size, then obtain the total block count from
 * a scratch register (2^value), falling back to the default block
 * count when the mapping fails or the value is implausibly small.
 */
static void get_toshiba_nand_para(void)
{
	void __iomem *scratch_reg;
	u32 tmp;

	/* Workaround to fix a controller bug which reports a wrong */
	/* spare area size for some kind of Toshiba NAND device */
	if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
		(ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
		iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
		tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
			ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
#if SUPPORT_15BITECC
		iowrite32(15, FlashReg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, FlashReg + ECC_CORRECTION);
#endif
	}

	/* As Toshiba NAND can not provide it's block number, */
	/* so here we need user to provide the correct block */
	/* number in a scratch register before the Linux NAND */
	/* driver is loaded. If no valid value found in the scratch */
	/* register, then we use default block number value */
	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		/* Scratch register holds log2 of the block count */
		DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (DeviceInfo.wTotalBlocks < 512)
			DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
/*
 * Detect parameters of a Hynix NAND device. For known device IDs the
 * geometry registers are forced to datasheet values; unknown IDs keep
 * the controller defaults. The total block count is then read from a
 * scratch register (2^value), falling back to the driver default when
 * the mapping fails or the value is implausibly small.
 */
static void get_hynix_nand_para(void)
{
	void __iomem *scratch_reg;
	u32 main_size, spare_size;

	switch (DeviceInfo.wDeviceID) {
	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		iowrite32(128, FlashReg + PAGES_PER_BLOCK);
		iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
		main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
		spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
		iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
		iowrite32(0, FlashReg + DEVICE_WIDTH);
#if SUPPORT_15BITECC
		iowrite32(15, FlashReg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, FlashReg + ECC_CORRECTION);
#endif
		DeviceInfo.MLCDevice = 1;
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
			"Will use default parameter values instead.\n",
			DeviceInfo.wDeviceID);
	}

	/* Block count is user-provided via a scratch register, as with
	 * the Toshiba path */
	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		/* Scratch register holds log2 of the block count */
		DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (DeviceInfo.wTotalBlocks < 512)
			DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
/*
 * Probe each flash bank with a Read ID command and count the usable
 * banks in totalUsedBanks. Bank 0 is valid if it returns a non-zero
 * maker byte; other banks are valid only if their maker byte matches
 * bank 0's.
 *
 * NOTE(review): GLOB_valid_banks entries are only ever set to 1 here;
 * this relies on them being zero-initialized elsewhere.
 */
static void find_valid_banks(void)
{
	u32 id[LLD_MAX_FLASH_BANKS];
	int i;

	totalUsedBanks = 0;
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
		/* Read ID (0x90, address 0) on bank i; bank select is
		 * encoded in bits 24+ of the index address */
		index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
		index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
		index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);

		nand_dbg_print(NAND_DBG_DEBUG,
			"Return 1st ID for bank[%d]: %x\n", i, id[i]);

		if (i == 0) {
			if (id[i] & 0x0ff)
				GLOB_valid_banks[i] = 1;
		} else {
			/* Secondary banks must match bank 0's maker byte */
			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
				GLOB_valid_banks[i] = 1;
		}

		totalUsedBanks += GLOB_valid_banks[i];
	}

	nand_dbg_print(NAND_DBG_DEBUG,
		"totalUsedBanks: %d\n", totalUsedBanks);
}
/*
 * Determine the Spectra-managed block range.
 *
 * When the controller supports partitioning AND partition 1 belongs to
 * the Spectra source ID, the start/end blocks are taken from the
 * partition registers (computed against the per-bank block count before
 * it is scaled by totalUsedBanks, as the hardware encodes bank-relative
 * values). Otherwise the whole device from SPECTRA_START_BLOCK is used.
 *
 * Refactored: the original's inner-else and outer-else branches were
 * identical, and wDataBlockNum was computed the same way on every path;
 * the conditions are merged with && (preserving the short-circuit so
 * PERM_SRC_ID_1 is only read when the partition feature bit is set) and
 * the common tail is hoisted. Behavior is unchanged.
 */
static void detect_partition_feature(void)
{
	if ((ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) &&
		((ioread32(FlashReg + PERM_SRC_ID_1) &
		PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID)) {
		/* Partition bounds are (bank * blocks-per-bank) + offset;
		 * wTotalBlocks still holds the per-bank count here */
		DeviceInfo.wSpectraStartBlock =
			((ioread32(FlashReg + MIN_MAX_BANK_1) &
			MIN_MAX_BANK_1__MIN_VALUE) *
			DeviceInfo.wTotalBlocks) +
			(ioread32(FlashReg + MIN_BLK_ADDR_1) &
			MIN_BLK_ADDR_1__VALUE);

		DeviceInfo.wSpectraEndBlock =
			(((ioread32(FlashReg + MIN_MAX_BANK_1) &
			MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
			DeviceInfo.wTotalBlocks) +
			(ioread32(FlashReg + MAX_BLK_ADDR_1) &
			MAX_BLK_ADDR_1__VALUE);

		DeviceInfo.wTotalBlocks *= totalUsedBanks;

		if (DeviceInfo.wSpectraEndBlock >=
			DeviceInfo.wTotalBlocks) {
			DeviceInfo.wSpectraEndBlock =
				DeviceInfo.wTotalBlocks - 1;
		}
	} else {
		/* No usable partition info: use the whole device */
		DeviceInfo.wTotalBlocks *= totalUsedBanks;
		DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
		DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
	}

	DeviceInfo.wDataBlockNum = DeviceInfo.wSpectraEndBlock -
		DeviceInfo.wSpectraStartBlock + 1;
}
/*
 * Dump every DeviceInfo field to the debug log. Output order matters
 * only for log readability; no state is modified.
 */
static void dump_device_info(void)
{
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
		DeviceInfo.wDeviceMaker);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
		DeviceInfo.wDeviceID);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
		DeviceInfo.wDeviceType);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
		DeviceInfo.wSpectraStartBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
		DeviceInfo.wSpectraEndBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
		DeviceInfo.wTotalBlocks);
	nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
		DeviceInfo.wPagesPerBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
		DeviceInfo.wPageSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
		DeviceInfo.wPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
		DeviceInfo.wPageSpareSize);
	nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
		DeviceInfo.wNumPageSpareFlag);
	nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
		DeviceInfo.wECCBytesPerSector);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
		DeviceInfo.wBlockSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
		DeviceInfo.wBlockDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
		DeviceInfo.wDataBlockNum);
	nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
		DeviceInfo.bPlaneNum);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
		DeviceInfo.wDeviceMainAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
		DeviceInfo.wDeviceSpareAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
		DeviceInfo.wDevicesConnected);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
		DeviceInfo.wDeviceWidth);
	nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
		DeviceInfo.wHWRevision);
	nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
		DeviceInfo.wHWFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
		DeviceInfo.wONFIDevFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
		DeviceInfo.wONFIOptCommands);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
		DeviceInfo.wONFITimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
		DeviceInfo.wONFIPgmCacheTimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
		DeviceInfo.MLCDevice ? "Yes" : "No");
	nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
		DeviceInfo.wSpareSkipBytes);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
		DeviceInfo.nBitsInPageNumber);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
		DeviceInfo.nBitsInPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
		DeviceInfo.nBitsInBlockDataSize);
}
/*
 * Identify the attached NAND device and populate DeviceInfo.
 *
 * Dispatches to a vendor/ONFI-specific parameter routine based on the
 * manufacturer ID, then fills in geometry (page, spare, block sizes and
 * their derived bit widths), configures ECC, probes valid banks, and
 * detects the Spectra partition range.
 *
 * Returns PASS, or FAIL when an ONFI query fails or the plane count
 * read from NUMBER_OF_PLANES is not one of the encodings handled below.
 */
u16 NAND_Read_Device_ID(void)
{
	u16 status = PASS;
	u8 no_of_planes;

	nand_dbg_print(NAND_DBG_TRACE,
		"%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
	iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
	DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
	DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
	DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;

	if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (FAIL == get_onfi_nand_para())
			return FAIL;
	} else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para();
	} else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para();
	} else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para();
	} else {
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
			"acc_clks: %d, re_2_we: %d, we_2_re: %d,"
			"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
			ioread32(FlashReg + ACC_CLKS),
			ioread32(FlashReg + RE_2_WE),
			ioread32(FlashReg + WE_2_RE),
			ioread32(FlashReg + ADDR_2_DATA),
			ioread32(FlashReg + RDWR_EN_LO_CNT),
			ioread32(FlashReg + RDWR_EN_HI_CNT),
			ioread32(FlashReg + CS_SETUP_CNT));

	DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
	DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);

	DeviceInfo.wDeviceMainAreaSize =
		ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
	DeviceInfo.wDeviceSpareAreaSize =
		ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);

	DeviceInfo.wPageDataSize =
		ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);

	/* Note: When using the Micon 4K NAND device, the controller will report
	 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
	 * And if force set it to 218 bytes, the controller can not work
	 * correctly. So just let it be. But keep in mind that this bug may
	 * cause
	 * other problems in future. - Yunpeng 2008-10-10
	 */
	DeviceInfo.wPageSpareSize =
		ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);

	DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);

	/* Derived geometry */
	DeviceInfo.wPageSize =
		DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
	DeviceInfo.wBlockSize =
		DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
	DeviceInfo.wBlockDataSize =
		DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;

	DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
	DeviceInfo.wDeviceType =
		((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);

	DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);

	DeviceInfo.wSpareSkipBytes =
		ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
		DeviceInfo.wDevicesConnected;

	DeviceInfo.nBitsInPageNumber =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
	DeviceInfo.nBitsInPageDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
	DeviceInfo.nBitsInBlockDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);

	set_ecc_config();

	/* Plane count register encodes planes-1 for 1/2/4/8 planes */
	no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
		NUMBER_OF_PLANES__VALUE;
	switch (no_of_planes) {
	case 0:
	case 1:
	case 3:
	case 7:
		DeviceInfo.bPlaneNum = no_of_planes + 1;
		break;
	default:
		status = FAIL;
		break;
	}

	find_valid_banks();

	detect_partition_feature();

	dump_device_info();

	return status;
}
/*
 * Unlock the entire flash array: compute the last block's page-aligned
 * address and issue the unlock-range commands (0x10 = start, 0x11 =
 * end) via indexed addressing. Always returns PASS.
 */
u16 NAND_UnlockArrayAll(void)
{
	u64 start_addr, end_addr;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	start_addr = 0;
	/* Address of the last block, shifted into page-data units */
	end_addr = ((u64)DeviceInfo.wBlockSize *
		(DeviceInfo.wTotalBlocks - 1)) >>
		DeviceInfo.nBitsInPageDataSize;

	index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
	index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);

	return PASS;
}
/*
 * Globally enable (INT_ENABLE non-zero) or disable (zero) the flash
 * controller's interrupt output via the GLOBAL_INT_ENABLE register.
 */
void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	iowrite32(INT_ENABLE ? 1 : 0, FlashReg + GLOBAL_INT_ENABLE);
}
/*
 * Erase one flash block, busy-waiting for completion.
 *
 * The block number is split into a bank (block / blocks-per-bank) and a
 * bank-relative byte address; the erase command (data value 1) is then
 * issued via indexed addressing and the bank's interrupt status
 * register is polled for ERASE_COMP/ERASE_FAIL.
 *
 * Returns PASS on success, FAIL for an out-of-range block or a
 * hardware-reported erase failure.
 *
 * NOTE(review): the INTR_STATUS0__ERASE_* masks are applied to every
 * bank's status register -- assumes all four registers share the same
 * bit layout; confirm against the controller documentation.
 */
u16 NAND_Erase_Block(u32 block)
{
	u16 status = PASS;
	u64 flash_add;
	u16 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks /
		totalUsedBanks)) * DeviceInfo.wBlockDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (block >= DeviceInfo.wTotalBlocks)
		status = FAIL;

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];

		/* Clear any stale completion/failure bits first */
		iowrite32(INTR_STATUS0__ERASE_COMP |
			INTR_STATUS0__ERASE_FAIL,
			FlashReg + intr_status);

		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);

		while (!(ioread32(FlashReg + intr_status) &
			(INTR_STATUS0__ERASE_COMP |
			INTR_STATUS0__ERASE_FAIL)))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__ERASE_FAIL)
			status = FAIL;

		/* Acknowledge the completion/failure bits */
		iowrite32(INTR_STATUS0__ERASE_COMP |
			INTR_STATUS0__ERASE_FAIL,
			FlashReg + intr_status);
	}

	return status;
}
/*
 * Validate that 'block' is inside the device and that pages
 * [page, page + page_count) fit inside a single block.
 * Returns PASS when both hold, FAIL otherwise.
 */
static u32 Boundary_Check_Block_Page(u32 block,
	u16 page, u16 page_count)
{
	if (block >= DeviceInfo.wTotalBlocks)
		return FAIL;

	if (page + page_count > DeviceInfo.wPagesPerBlock)
		return FAIL;

	return PASS;
}
/*
 * Read the spare (OOB) area of one page into read_data.
 *
 * Issues a spare-area load (0x41), waits for LOAD_COMP, streams the
 * spare bytes out of the FlashMem data port into the shared
 * buf_read_page_spare buffer, then restores main-area access (0x42).
 * When ECC is enabled the trailing "spare flag" bytes are moved to the
 * front of the output buffer.
 *
 * Only page_count == 1 is supported. Returns PASS or FAIL.
 */
u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
	u16 page_count)
{
	u32 status = PASS;
	u32 i;
	u64 flash_add;
	u32 PageSpareSize = DeviceInfo.wPageSpareSize;
	u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u8 *page_spare = buf_read_page_spare;

	if (block >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "block too big: %d\n", (int)block);
		status = FAIL;
	}

	if (page >= DeviceInfo.wPagesPerBlock) {
		printk(KERN_ERR "page too big: %d\n", page);
		status = FAIL;
	}

	if (page_count > 1) {
		printk(KERN_ERR "page count too big: %d\n", page_count);
		status = FAIL;
	}

	/* Bank-relative byte address of the target page */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks /
		totalUsedBanks)) * DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		/* Write-back clears all pending status bits */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		/* 0x41: switch to spare-area access, then load the page */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			0x41);
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			0x2000 | page_count);

		while (!(ioread32(FlashReg + intr_status) &
			INTR_STATUS0__LOAD_COMP))
			;

		iowrite32((u32)(MODE_01 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			FlashMem);

		/* Drain the spare area, one 32-bit word at a time */
		for (i = 0; i < (PageSpareSize / 4); i++)
			*((u32 *)page_spare + i) =
				ioread32(FlashMem + 0x10);

		if (enable_ecc) {
			/* Move the trailing spare-flag bytes to the front */
			for (i = 0; i < spareFlagBytes; i++)
				read_data[i] =
					page_spare[PageSpareSize -
						spareFlagBytes + i];
			for (i = 0; i < (PageSpareSize - spareFlagBytes);
				i++)
				read_data[spareFlagBytes + i] =
					page_spare[i];
		} else {
			for (i = 0; i < PageSpareSize; i++)
				read_data[i] = page_spare[i];
		}

		/* 0x42: restore main-area access */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			0x42);
	}

	return status;
}
/* No use function. Should be removed later */
/*
 * Stub retained only to satisfy the LLD interface; spare-area writes
 * are not supported through this path. Logs an error and returns ERR
 * unconditionally. Parameters are ignored.
 */
u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
	u16 page_count)
{
	printk(KERN_ERR
		"Error! This function (NAND_Write_Page_Spare) should never"
		" be called!\n");
	return ERR;
}
/* op value: 0 - DDMA read; 1 - DDMA write */
/*
 * Set up and kick one DDMA transfer of 'numPages' pages between 'data'
 * and the flash page at 'flash_add' on 'flash_bank'.
 *
 * The four index_addr writes form the controller's multi-transaction
 * DMA command sequence (sub-command in bits 8..11 of the data word:
 * 1 = op/page-count, 2 = high half of the bus address, 3 = low half,
 * 4 = commit/burst setting). Exact encoding follows the controller's
 * command format -- confirm against the Spectra/Denali DMA docs before
 * changing any constant.
 */
static void ddma_trans(u8 *data, u64 flash_add,
			u32 flash_bank, int op, u32 numPages)
{
	u32 data_addr;

	/* Map virtual address to bus address for DDMA */
	data_addr = virt_to_bus(data);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)),
		(u16)(2 << 12) | (op << 8) | numPages);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
		(u16)(2 << 12) | (2 << 8) | 0);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		((u16)(0x0FFFF & data_addr) << 8)),
		(u16)(2 << 12) | (3 << 8) | 0);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(1 << 16) | (0x40 << 8)),
		(u16)(2 << 12) | (4 << 8) | 0);
}
/* If data in buf are all 0xff, then return 1; otherwise return 0 */
static int check_all_1(u8 *buf)
{
	int pos, idx, dumped;

	for (pos = 0; pos < DeviceInfo.wPageDataSize; pos++) {
		if (buf[pos] == 0xff)
			continue;

		/* Found a non-erased byte: log it plus up to the next
		 * eight bytes for diagnosis, then report failure */
		nand_dbg_print(NAND_DBG_WARN,
			"the first non-0xff data byte is: %d\n", pos);
		dumped = 0;
		for (idx = pos; idx < DeviceInfo.wPageDataSize; idx++) {
			nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[idx]);
			dumped++;
			if (dumped > 8)
				break;
		}
		nand_dbg_print(NAND_DBG_WARN, "\n");
		return 0;
	}

	return 1;
}
/*
 * Walk the controller's ECC error FIFO for 'bank' and apply in-buffer
 * corrections for the data read from (block, page) into 'buf'.
 *
 * Each iteration reads one error record (page, byte offset, sector,
 * device, fix info). Correctable errors are repaired by XORing the
 * byte mask into the affected position of 'buf'; an uncorrectable
 * record marks the whole read FAIL. The loop ends when the record's
 * LAST_ERR_INFO bit is set.
 *
 * Returns PASS if every reported error was corrected, FAIL otherwise.
 */
static int do_ecc_new(unsigned long bank, u8 *buf,
		u32 block, u16 page)
{
	int status = PASS;
	u16 err_page = 0;
	u16 err_byte;
	u8 err_sect;
	u8 err_dev;
	u16 err_fix_info;
	u16 err_addr;
	u32 ecc_sect_size;
	u8 *err_pos;
	u32 err_page_addr[4] = {ERR_PAGE_ADDR0, ERR_PAGE_ADDR1,
		ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};

	/* A logical ECC sector interleaves all connected devices */
	ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	do {
		err_page = ioread32(FlashReg + err_page_addr[bank]);
		err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
		err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
		err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
		err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
		err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
			>> 8);
		if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
			/* Uncorrectable error */
			nand_dbg_print(NAND_DBG_WARN,
				"%s, Line %d Uncorrectable ECC error "
				"when read block %d page %d."
				"PTN_INTR register: 0x%x "
				"err_page: %d, err_sect: %d, err_byte: %d, "
				"err_dev: %d, ecc_sect_size: %d, "
				"err_fix_info: 0x%x\n",
				__FILE__, __LINE__, block, page,
				ioread32(FlashReg + PTN_INTR),
				err_page, err_sect, err_byte, err_dev,
				ecc_sect_size, (u32)err_fix_info);

			/* Distinguish a blank (all-0xff) page from real
			 * corruption in the log */
			if (check_all_1(buf))
				nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
					"All 0xff!\n",
					__FILE__, __LINE__);
			else
				nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
					"Not all 0xff!\n",
					__FILE__, __LINE__);
			status = FAIL;
		} else {
			/* Correctable error */
			nand_dbg_print(NAND_DBG_WARN,
				"%s, Line %d Found ECC error "
				"when read block %d page %d."
				"err_page: %d, err_sect: %d, err_byte: %d, "
				"err_dev: %d, ecc_sect_size: %d, "
				"err_fix_info: 0x%x\n",
				__FILE__, __LINE__, block, page,
				err_page, err_sect, err_byte, err_dev,
				ecc_sect_size, (u32)err_fix_info);

			if (err_byte < ECC_SECTOR_SIZE) {
				/* Locate the faulty byte inside buf:
				 * page offset + sector offset + the
				 * device-interleaved byte position */
				err_pos = buf +
					(err_page - page) *
					DeviceInfo.wPageDataSize +
					err_sect * ecc_sect_size +
					err_byte *
					DeviceInfo.wDevicesConnected +
					err_dev;

				*err_pos ^= err_fix_info &
					ERR_CORRECTION_INFO__BYTEMASK;
			}
		}
	} while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));

	return status;
}
/*
 * Read the main (data) area of one or more pages into read_data,
 * polling (no interrupts) for completion.
 *
 * Multi-page requests are delegated in MAX_PAGES_PER_RW chunks to
 * NAND_Multiplane_Read or NAND_Pipeline_Read_Ahead_Polling. The
 * single-page path runs one DDMA read and, when ECC is enabled,
 * processes any reported ECC errors via do_ecc_new().
 *
 * Returns PASS or FAIL.
 *
 * Fix: the original guarded the combined acknowledge with
 *   ioread32(...) & INTR_STATUS0__ECC_TRANSACTION_DONE
 *                 & INTR_STATUS0__ECC_ERR
 * which ANDs two disjoint bit masks together and is therefore always
 * false, making the "both bits set" branch dead code. The check now
 * tests that both bits are set. (The final clear-all write below
 * masked the practical impact, but the condition as written could
 * never fire.)
 */
u16 NAND_Read_Page_Main_Polling(u8 *read_data,
		u32 block, u16 page, u16 page_count)
{
	u32 status = PASS;
	u64 flash_add;
	u32 intr_status = 0;
	u32 flash_bank;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u8 *read_data_l;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks /
		totalUsedBanks)) * DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;
	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	intr_status = intr_status_addresses[flash_bank];
	/* Write-back clears all pending status bits */
	iowrite32(ioread32(FlashReg + intr_status),
		FlashReg + intr_status);

	if (page_count > 1) {
		read_data_l = read_data;
		while (page_count > MAX_PAGES_PER_RW) {
			if (ioread32(FlashReg + MULTIPLANE_OPERATION))
				status = NAND_Multiplane_Read(read_data_l,
					block, page, MAX_PAGES_PER_RW);
			else
				status = NAND_Pipeline_Read_Ahead_Polling(
					read_data_l, block, page,
					MAX_PAGES_PER_RW);

			if (status == FAIL)
				return status;

			read_data_l += DeviceInfo.wPageDataSize *
				MAX_PAGES_PER_RW;
			page_count -= MAX_PAGES_PER_RW;
			page += MAX_PAGES_PER_RW;
		}
		if (ioread32(FlashReg + MULTIPLANE_OPERATION))
			status = NAND_Multiplane_Read(read_data_l,
				block, page, page_count);
		else
			status = NAND_Pipeline_Read_Ahead_Polling(
				read_data_l, block, page, page_count);

		return status;
	}

	/* Single-page DDMA read */
	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	iowrite32(ioread32(FlashReg + intr_status),
		FlashReg + intr_status);

	ddma_trans(read_data, flash_add, flash_bank, 0, 1);

	if (enable_ecc) {
		while (!(ioread32(FlashReg + intr_status) &
			(INTR_STATUS0__ECC_TRANSACTION_DONE |
			INTR_STATUS0__ECC_ERR)))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__ECC_ERR) {
			iowrite32(INTR_STATUS0__ECC_ERR,
				FlashReg + intr_status);
			status = do_ecc_new(flash_bank, read_data,
				block, page);
		}

		/* Acknowledge whichever of the two status bits remain */
		if ((ioread32(FlashReg + intr_status) &
			(INTR_STATUS0__ECC_TRANSACTION_DONE |
			INTR_STATUS0__ECC_ERR)) ==
			(INTR_STATUS0__ECC_TRANSACTION_DONE |
			INTR_STATUS0__ECC_ERR))
			iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
				INTR_STATUS0__ECC_ERR,
				FlashReg + intr_status);
		else if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__ECC_TRANSACTION_DONE)
			iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
				FlashReg + intr_status);
		else if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__ECC_ERR)
			iowrite32(INTR_STATUS0__ECC_ERR,
				FlashReg + intr_status);
	} else {
		while (!(ioread32(FlashReg + intr_status) &
			INTR_STATUS0__DMA_CMD_COMP))
			;
		iowrite32(INTR_STATUS0__DMA_CMD_COMP,
			FlashReg + intr_status);
	}

	iowrite32(ioread32(FlashReg + intr_status),
		FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);
	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
/*
 * Pipelined read-ahead of 'page_count' (>= 2) pages into read_data,
 * polling for completion.
 *
 * Starts one DDMA read for all pages, then loops on the bank's status
 * register. With ECC enabled, the transfer is finished once BOTH the
 * DMA-complete and ECC-transaction-done events have been seen (tracked
 * by ecc_done_OR_dma_comp); ECC errors are handled inline via
 * do_ecc_new(). Without ECC, DMA completion alone ends the loop.
 *
 * Returns PASS or FAIL (bounds violation, page_count < 2, or ECC
 * failure).
 */
u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
			u32 block, u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 ecc_done_OR_dma_comp;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	if (page_count < 2)
		status = FAIL;

	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks /
		totalUsedBanks)) * DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		/* Write-back clears all pending status bits */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(1, FlashReg + DMA_ENABLE);
		while (!(ioread32(FlashReg + DMA_ENABLE) &
			DMA_ENABLE__FLAG))
			;

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

		/* 0x42: main-area access */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			0x42);
		ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

		ecc_done_OR_dma_comp = 0;
		while (1) {
			if (enable_ecc) {
				while (!ioread32(FlashReg + intr_status))
					;

				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
					status = do_ecc_new(flash_bank,
						read_data, block, page);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP) {
					iowrite32(INTR_STATUS0__DMA_CMD_COMP,
						FlashReg + intr_status);

					/* Done only after the other event
					 * (ECC txn done) was also seen */
					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE) {
					iowrite32(
					INTR_STATUS0__ECC_TRANSACTION_DONE,
						FlashReg + intr_status);

					/* Done only after DMA completion
					 * was also seen */
					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				}
			} else {
				while (!(ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP))
					;

				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			}

			/* Clear all status bits except the three handled
			 * above (write-1-to-clear semantics assumed --
			 * confirm against the controller docs) */
			iowrite32((~INTR_STATUS0__ECC_ERR) &
				(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}

		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + DMA_ENABLE);

		while ((ioread32(FlashReg + DMA_ENABLE) &
			DMA_ENABLE__FLAG))
			;
	}
	return status;
}
/*
 * Read the main (data) area of one or more pages into @read_data.
 * Multi-page requests are delegated to NAND_Multiplane_Read or
 * NAND_Pipeline_Read_Ahead in MAX_PAGES_PER_RW chunks; a single page
 * is DMA-transferred here and completion is signalled by the ISR
 * through info.complete.
 * Returns PASS, FAIL, or ERR on completion timeout.
 */
u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
			   u16 page_count)
{
	u32 status = PASS;
	u64 flash_add;
	u32 intr_status = 0;
	u32 flash_bank;
	/* Per-bank interrupt status register offsets */
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;
	u8 *read_data_l;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* Byte address of the page within this bank's share of the blocks */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	/* Main area only: do not transfer spare bytes */
	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	intr_status = intr_status_addresses[flash_bank];
	/* Acknowledge any stale interrupt bits */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	if (page_count > 1) {
		/* Chunk large requests through the multi-page helpers */
		read_data_l = read_data;
		while (page_count > MAX_PAGES_PER_RW) {
			if (ioread32(FlashReg + MULTIPLANE_OPERATION))
				status = NAND_Multiplane_Read(read_data_l,
					block, page, MAX_PAGES_PER_RW);
			else
				status = NAND_Pipeline_Read_Ahead(
					read_data_l, block, page,
					MAX_PAGES_PER_RW);

			if (status == FAIL)
				return status;

			read_data_l += DeviceInfo.wPageDataSize *
					MAX_PAGES_PER_RW;
			page_count -= MAX_PAGES_PER_RW;
			page += MAX_PAGES_PER_RW;
		}
		/* Remaining pages (<= MAX_PAGES_PER_RW) */
		if (ioread32(FlashReg + MULTIPLANE_OPERATION))
			status = NAND_Multiplane_Read(read_data_l,
					block, page, page_count);
		else
			status = NAND_Pipeline_Read_Ahead(
					read_data_l, block, page, page_count);

		return status;
	}

	/* Single-page path: interrupt-driven DMA */
	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	/* Fill the mrst_nand_info structure */
	info.state = INT_READ_PAGE_MAIN;
	info.read_data = read_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	ddma_trans(read_data, flash_add, flash_bank, 0, 1);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */

	/* ISR (ddma_isr -> handle_nand_int_read) completes info.complete */
	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;
	}

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);

	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
/*
 * Convert the spare-area flag bytes from logical to physical layout:
 * when ECC is enabled, the flag bytes stored at the start of @data are
 * relocated to the end of the spare area (the controller's physical
 * placement).
 *
 * Bug fix: the loop previously ran "i++" with the condition "i >= 0",
 * so it never terminated and wrote far past the buffer. The copy must
 * walk backwards (i--): destination is above the source, so a backward
 * walk is also safe when the two regions overlap — matching the
 * analogous backward copy in Conv_Main_Spare_Data_Log2Phy_Format.
 */
void Conv_Spare_Data_Log2Phy_Format(u8 *data)
{
	int i;
	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	const u32 PageSpareSize = DeviceInfo.wPageSpareSize;

	if (enable_ecc) {
		for (i = spareFlagBytes - 1; i >= 0; i--)
			data[PageSpareSize - spareFlagBytes + i] = data[i];
	}
}
/*
 * Convert the spare-area flag bytes from physical to logical layout:
 * when ECC is enabled, move the flag bytes from the end of the spare
 * area (controller placement) to the start of @data. The source region
 * lies above the destination, so a forward copy is overlap-safe.
 */
void Conv_Spare_Data_Phy2Log_Format(u8 *data)
{
	int i;
	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
	const u32 src_off = PageSpareSize - spareFlagBytes;

	if (!enable_ecc)
		return;

	for (i = 0; i < spareFlagBytes; i++)
		data[i] = data[src_off + i];
}
/*
 * Convert @page_count pages in @data from logical layout
 * (main | flags | ECC) to the controller's physical layout, in place.
 * With ECC enabled, each ECC sector is shifted up to make room for its
 * ECC bytes, the flag bytes are moved after the last sector, and the
 * spare area is shifted by spareSkipBytes. All copies walk backwards
 * because destination addresses are above the sources (in-place,
 * overlapping moves) — do not change the loop directions.
 */
void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
{
	const u32 PageSize = DeviceInfo.wPageSize;
	const u32 PageDataSize = DeviceInfo.wPageDataSize;
	const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 eccSectorSize;
	u32 page_offset;
	int i, j;

	/* An ECC sector spans all devices connected in parallel */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
	if (enable_ecc) {
		/* Process pages last-to-first (in-place expansion) */
		while (page_count > 0) {
			page_offset = (page_count - 1) * PageSize;
			j = (DeviceInfo.wPageDataSize / eccSectorSize);
			/* Move flag bytes to their physical slot after
			 * the last (j-th) interleaved sector */
			for (i = spareFlagBytes - 1; i >= 0; i--)
				data[page_offset +
					(eccSectorSize + eccBytes) * j + i] =
					data[page_offset + PageDataSize + i];
			/* Spread sectors j-1..1 upward to interleave ECC
			 * bytes; sector 0 stays where it is */
			for (j--; j >= 1; j--) {
				for (i = eccSectorSize - 1; i >= 0; i--)
					data[page_offset +
					(eccSectorSize + eccBytes) * j + i] =
						data[page_offset +
						eccSectorSize * j + i];
			}
			/* Shift the spare area up by spareSkipBytes */
			for (i = (PageSize - spareSkipBytes) - 1;
				i >= PageDataSize; i--)
				data[page_offset + i + spareSkipBytes] =
					data[page_offset + i];
			page_count--;
		}
	}
}
/*
 * Inverse of Conv_Main_Spare_Data_Log2Phy_Format: convert @page_count
 * pages in @data from the controller's physical layout back to the
 * logical layout (main | flags | ECC), in place. Copies run forward
 * since destinations are below their sources.
 */
void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
{
	const u32 PageSize = DeviceInfo.wPageSize;
	const u32 PageDataSize = DeviceInfo.wPageDataSize;
	const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 eccSectorSize;
	u32 page_offset;
	int i, j;

	/* An ECC sector spans all devices connected in parallel */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
	if (enable_ecc) {
		while (page_count > 0) {
			page_offset = (page_count - 1) * PageSize;
			/* Undo the spareSkipBytes shift of the spare area */
			for (i = PageDataSize;
				i < PageSize - spareSkipBytes;
				i++)
				data[page_offset + i] =
					data[page_offset + i +
					spareSkipBytes];
			/* Compact sectors 1..n-1 back to contiguous main
			 * data, dropping the interleaved ECC bytes */
			for (j = 1;
				j < DeviceInfo.wPageDataSize / eccSectorSize;
				j++) {
				for (i = 0; i < eccSectorSize; i++)
					data[page_offset +
						eccSectorSize * j + i] =
						data[page_offset +
						(eccSectorSize + eccBytes) *
						j + i];
			}
			/* j now indexes past the last sector: recover the
			 * flag bytes stored there (relies on j's final
			 * value from the loop above) */
			for (i = 0; i < spareFlagBytes; i++)
				data[page_offset + PageDataSize + i] =
					data[page_offset +
					(eccSectorSize + eccBytes) * j + i];
			page_count--;
		}
	}
}
/* Un-tested function */
/*
 * Multiplane DMA read of @page_count pages into @read_data, polling
 * for completion. Same handshake as the pipeline polling read, but
 * with MULTIPLANE_OPERATION enabled around the transfer.
 * Returns PASS, or FAIL/ECC error status.
 */
u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
			    u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	/* Per-bank interrupt status register offsets */
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 ecc_done_OR_dma_comp;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	/* Byte address of the page within this bank's share of the blocks */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		/* Acknowledge stale interrupt bits */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
		iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);

		iowrite32(1, FlashReg + DMA_ENABLE);
		while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;

		/* 0x42: pipeline read-ahead command for this bank/address */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
		ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

		/*
		 * With ECC on, wait for both DMA completion and ECC
		 * transaction done (in either order) before leaving.
		 */
		ecc_done_OR_dma_comp = 0;
		while (1) {
			if (enable_ecc) {
				while (!ioread32(FlashReg + intr_status))
					;

				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
					status = do_ecc_new(flash_bank,
						read_data, block, page);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP) {
					iowrite32(INTR_STATUS0__DMA_CMD_COMP,
						FlashReg + intr_status);

					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE) {
					iowrite32(
					INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);

					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				}
			} else {
				while (!(ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP))
					;
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			}

			/* Clear any other (unexpected) pending bits */
			iowrite32((~INTR_STATUS0__ECC_ERR) &
				(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}

		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + DMA_ENABLE);

		while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;

		/* Leave multiplane mode */
		iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
	}

	return status;
}
/*
 * Interrupt-driven pipelined read of @page_count (>= 2) pages into
 * @read_data. Sets up info for the ISR, issues the 0x42 read-ahead
 * command plus the DMA descriptor, then blocks on info.complete.
 * Returns PASS, FAIL, or ERR on completion timeout.
 */
u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
				u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	/* Per-bank interrupt status register offsets */
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	if (page_count < 2)
		status = FAIL;	/* pipeline mode needs at least two pages */

	if (status != PASS)
		return status;

	/* Byte address of the page within this bank's share of the blocks */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];
	/* Acknowledge stale interrupt bits */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	/* Main area only: do not transfer spare bytes */
	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	/* Fill the mrst_nand_info structure */
	info.state = INT_PIPELINE_READ_AHEAD;
	info.read_data = read_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	/* 0x42: pipeline read-ahead command for this bank/address */
	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */

	/* ISR completes info.complete after handling the transfer */
	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;
	}

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);

	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
/*
 * Write the main (data) area of one or more pages from @write_data.
 * Multi-page requests are delegated to NAND_Multiplane_Write or
 * NAND_Pipeline_Write_Ahead in MAX_PAGES_PER_RW chunks; a single page
 * is DMA-transferred here with the ISR signalling info.complete.
 * Returns PASS, FAIL, or ERR on completion timeout.
 */
u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
			    u16 page_count)
{
	u32 status = PASS;
	u64 flash_add;
	u32 intr_status = 0;
	u32 flash_bank;
	/* Per-bank interrupt status register offsets */
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;
	u8 *write_data_l;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* Byte address of the page within this bank's share of the blocks */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];

	/* Main area only: do not transfer spare bytes */
	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	/* Clear any stale program-complete/fail status */
	iowrite32(INTR_STATUS0__PROGRAM_COMP |
		INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);

	if (page_count > 1) {
		/* Chunk large requests through the multi-page helpers */
		write_data_l = write_data;
		while (page_count > MAX_PAGES_PER_RW) {
			if (ioread32(FlashReg + MULTIPLANE_OPERATION))
				status = NAND_Multiplane_Write(write_data_l,
					block, page, MAX_PAGES_PER_RW);
			else
				status = NAND_Pipeline_Write_Ahead(
					write_data_l, block, page,
					MAX_PAGES_PER_RW);
			if (status == FAIL)
				return status;

			write_data_l += DeviceInfo.wPageDataSize *
					MAX_PAGES_PER_RW;
			page_count -= MAX_PAGES_PER_RW;
			page += MAX_PAGES_PER_RW;
		}
		/* Remaining pages (<= MAX_PAGES_PER_RW) */
		if (ioread32(FlashReg + MULTIPLANE_OPERATION))
			status = NAND_Multiplane_Write(write_data_l,
				block, page, page_count);
		else
			status = NAND_Pipeline_Write_Ahead(write_data_l,
				block, page, page_count);

		return status;
	}

	/* Single-page path: interrupt-driven DMA */
	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	/* Fill the mrst_nand_info structure */
	info.state = INT_WRITE_PAGE_MAIN;
	info.write_data = write_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	ddma_trans(write_data, flash_add, flash_bank, 1, 1);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */

	/* ISR (ddma_isr -> handle_nand_int_write) completes info.complete */
	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;
	}

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);
	while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
		;

	return status;
}
/*
 * Enable or disable hardware ECC: program the controller's ECC_ENABLE
 * register and keep the driver-global enable_ecc flag in sync so the
 * read/write paths pick the matching data layout.
 */
void NAND_ECC_Ctrl(int enable)
{
	const int on = enable ? 1 : 0;

	nand_dbg_print(NAND_DBG_WARN,
		       on ? "Will enable ECC in %s, Line %d, Function: %s\n"
			  : "Will disable ECC in %s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	iowrite32(on, FlashReg + ECC_ENABLE);
	enable_ecc = on;
}
/*
 * Write @page_count pages of main+spare data from @write_data using
 * programmed I/O (no DMA): each page is re-packed from logical layout
 * into the controller's physical layout in buf_write_page_main_spare,
 * then pushed 32 bits at a time through the FlashMem data port.
 * Returns PASS or FAIL (program failure reported by the controller).
 */
u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
				u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 i, j, page_num = 0;
	u32 PageSize = DeviceInfo.wPageSize;
	u32 PageDataSize = DeviceInfo.wPageDataSize;
	u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	u64 flash_add;
	u32 eccSectorSize;
	u32 flash_bank;
	u32 intr_status = 0;
	/* Per-bank interrupt status register offsets */
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u8 *page_main_spare = buf_write_page_main_spare;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	/* An ECC sector spans all devices connected in parallel */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	status = Boundary_Check_Block_Page(block, page, page_count);

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];

		/* Transfer spare area along with main data */
		iowrite32(1, FlashReg + TRANSFER_SPARE_REG);

		while ((status != FAIL) && (page_count > 0)) {
			/* Byte address of the current page in this bank */
			flash_add = (u64)(block %
			(DeviceInfo.wTotalBlocks / totalUsedBanks)) *
			DeviceInfo.wBlockDataSize +
			(u64)page * DeviceInfo.wPageDataSize;

			/* Acknowledge stale interrupt bits */
			iowrite32(ioread32(FlashReg + intr_status),
				FlashReg + intr_status);

			/* MODE_01: select the page for data-port access */
			iowrite32((u32)(MODE_01 | (flash_bank << 24) |
				(flash_add >>
				DeviceInfo.nBitsInPageDataSize)),
				FlashMem);

			if (enable_ecc) {
				/* Re-pack logical (main|flags|ECC) into the
				 * physical interleaved layout */
				for (j = 0;
				     j <
				     DeviceInfo.wPageDataSize / eccSectorSize;
				     j++) {
					for (i = 0; i < eccSectorSize; i++)
						page_main_spare[
						(eccSectorSize + eccBytes) *
						j + i] =
						write_data[
						eccSectorSize * j + i];

					for (i = 0; i < eccBytes; i++)
						page_main_spare[
						(eccSectorSize + eccBytes) *
						j + eccSectorSize + i] =
						write_data[PageDataSize +
						spareFlagBytes +
						eccBytes * j + i];
				}

				/* Flag bytes go after the last sector
				 * (j retains its final loop value) */
				for (i = 0; i < spareFlagBytes; i++)
					page_main_spare[
					(eccSectorSize + eccBytes) * j + i] =
					write_data[PageDataSize + i];

				/* Shift spare up by spareSkipBytes
				 * (backward walk: in-place overlap) */
				for (i = PageSize - 1;
				     i >= PageDataSize + spareSkipBytes;
				     i--)
					page_main_spare[i] =
					page_main_spare[i - spareSkipBytes];

				/* Fill the skipped gap with 0xff */
				for (i = PageDataSize;
				     i < PageDataSize + spareSkipBytes;
				     i++)
					page_main_spare[i] = 0xff;

				/* Push the packed page out 32 bits at a
				 * time through the data port */
				for (i = 0; i < PageSize / 4; i++)
					iowrite32(
					*((u32 *)page_main_spare + i),
					FlashMem + 0x10);
			} else {
				/* No ECC: raw page goes out unchanged */
				for (i = 0; i < PageSize / 4; i++)
					iowrite32(*((u32 *)write_data + i),
						FlashMem + 0x10);
			}

			/* Busy-wait for program complete or failure */
			while (!(ioread32(FlashReg + intr_status) &
				(INTR_STATUS0__PROGRAM_COMP |
				INTR_STATUS0__PROGRAM_FAIL)))
				;

			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__PROGRAM_FAIL)
				status = FAIL;

			iowrite32(ioread32(FlashReg + intr_status),
				FlashReg + intr_status);

			page_num++;
			page_count--;
			write_data += PageSize;
		}

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	}

	return status;
}
/*
 * Read @page_count pages of main+spare data into @read_data using
 * programmed I/O: each page is pulled 32 bits at a time from the
 * FlashMem data port into buf_read_page_main_spare and then unpacked
 * from the controller's physical layout into logical layout
 * (main | flags | ECC).
 * Returns PASS or an ECC/boundary failure status.
 */
u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
				 u16 page_count)
{
	u32 status = PASS;
	u32 i, j;
	u64 flash_add = 0;
	u32 PageSize = DeviceInfo.wPageSize;
	u32 PageDataSize = DeviceInfo.wPageDataSize;
	u32 PageSpareSize = DeviceInfo.wPageSpareSize;
	u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	u32 eccSectorSize;
	u32 flash_bank;
	u32 intr_status = 0;
	u8 *read_data_l = read_data;
	/* Per-bank interrupt status register offsets */
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u8 *page_main_spare = buf_read_page_main_spare;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	/* An ECC sector spans all devices connected in parallel */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	status = Boundary_Check_Block_Page(block, page, page_count);

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];

		/* Transfer spare area along with main data */
		iowrite32(1, FlashReg + TRANSFER_SPARE_REG);

		/* Acknowledge stale interrupt bits */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		while ((status != FAIL) && (page_count > 0)) {
			/* Byte address of the current page in this bank */
			flash_add = (u64)(block %
				(DeviceInfo.wTotalBlocks / totalUsedBanks))
				* DeviceInfo.wBlockDataSize +
				(u64)page * DeviceInfo.wPageDataSize;

			/* 0x43: load command; 0x2000|count: page count */
			index_addr((u32)(MODE_10 | (flash_bank << 24) |
				(flash_add >>
				DeviceInfo.nBitsInPageDataSize)), 0x43);
			index_addr((u32)(MODE_10 | (flash_bank << 24) |
				(flash_add >>
				DeviceInfo.nBitsInPageDataSize)),
				0x2000 | page_count);

			/* Busy-wait for page load into the controller */
			while (!(ioread32(FlashReg + intr_status) &
				INTR_STATUS0__LOAD_COMP))
				;

			/* MODE_01: select the page for data-port reads */
			iowrite32((u32)(MODE_01 | (flash_bank << 24) |
				(flash_add >>
				DeviceInfo.nBitsInPageDataSize)),
				FlashMem);

			/* Pull the whole raw page via the data port */
			for (i = 0; i < PageSize / 4; i++)
				*(((u32 *)page_main_spare) + i) =
					ioread32(FlashMem + 0x10);

			if (enable_ecc) {
				/* Undo the spareSkipBytes shift */
				for (i = PageDataSize;
				     i < PageSize - spareSkipBytes;
				     i++)
					page_main_spare[i] =
						page_main_spare[i +
						spareSkipBytes];

				/* De-interleave sectors and ECC bytes into
				 * logical layout */
				for (j = 0;
				     j <
				     DeviceInfo.wPageDataSize / eccSectorSize;
				     j++) {
					for (i = 0; i < eccSectorSize; i++)
						read_data_l[
						eccSectorSize * j + i] =
						page_main_spare[
						(eccSectorSize + eccBytes) *
						j + i];

					for (i = 0; i < eccBytes; i++)
						read_data_l[PageDataSize +
						spareFlagBytes +
						eccBytes * j + i] =
						page_main_spare[
						(eccSectorSize + eccBytes) *
						j + eccSectorSize + i];
				}

				/* Flag bytes live after the last sector
				 * (j retains its final loop value) */
				for (i = 0; i < spareFlagBytes; i++)
					read_data_l[PageDataSize + i] =
						page_main_spare[
						(eccSectorSize + eccBytes) *
						j + i];
			} else {
				/* No ECC: copy raw main+spare through */
				for (i = 0;
				     i < (PageDataSize + PageSpareSize);
				     i++)
					read_data_l[i] =
						page_main_spare[i];
			}

			if (enable_ecc) {
				/* Wait for ECC verdict on this page */
				while (!(ioread32(FlashReg + intr_status) &
					(INTR_STATUS0__ECC_TRANSACTION_DONE |
					INTR_STATUS0__ECC_ERR)))
					;

				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
					status = do_ecc_new(flash_bank,
						read_data, block, page);
				}

				/*
				 * NOTE(review): the condition below ANDs two
				 * distinct single-bit masks, which is always
				 * zero, so this branch can never be taken;
				 * the intent was presumably to test that
				 * BOTH bits are set — confirm against the
				 * controller datasheet before changing.
				 */
				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR |
					INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE) {
					iowrite32(
					INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
				}
			}

			page++;
			page_count--;
			read_data_l += PageSize;
		}
	}

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	/* 0x42: restore pipeline read-ahead command mode */
	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	return status;
}
/*
 * Interrupt-driven pipelined write of @page_count (>= 2) pages from
 * @write_data. Sets up info for the ISR, issues the 0x42 command plus
 * the DMA descriptor, then blocks on info.complete.
 * Returns PASS, FAIL, or ERR on completion timeout.
 */
u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
				 u16 page, u16 page_count)
{
	u16 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	/* Per-bank interrupt status register offsets */
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	if (page_count < 2)
		status = FAIL;	/* pipeline mode needs at least two pages */

	if (status != PASS)
		return status;

	/* Byte address of the page within this bank's share of the blocks */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];
	/* Acknowledge stale interrupt bits */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	/* Main area only: do not transfer spare bytes */
	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	/* Fill the mrst_nand_info structure */
	info.state = INT_PIPELINE_WRITE_AHEAD;
	info.write_data = write_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */

	/* ISR completes info.complete after handling the transfer */
	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;
	}

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);

	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
/* Un-tested function */
/*
 * Multiplane DMA write of @page_count pages from @write_data, polling
 * for completion. status2 latches any PROGRAM_FAIL seen before the
 * final DMA_CMD_COMP so the failure is not lost.
 * Returns PASS or FAIL.
 */
u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
			     u16 page_count)
{
	u16 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	/* Per-bank interrupt status register offsets */
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u16 status2 = PASS;
	u32 t;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* Byte address of the page within this bank's share of the blocks */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];
	/* Acknowledge stale interrupt bits */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	/* NOTE(review): TRANSFER_SPARE_REG was already cleared above;
	 * this second write looks redundant but is harmless */
	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);

	while (1) {
		/* Busy-wait for any interrupt bit */
		while (!ioread32(FlashReg + intr_status))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__DMA_CMD_COMP) {
			iowrite32(INTR_STATUS0__DMA_CMD_COMP,
				FlashReg + intr_status);
			status = PASS;
			/* Propagate any earlier program failure */
			if (status2 == FAIL)
				status = FAIL;
			break;
		} else if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__PROGRAM_FAIL) {
			status2 = FAIL;
			status = FAIL;
			t = ioread32(FlashReg + intr_status) &
				INTR_STATUS0__PROGRAM_FAIL;
			iowrite32(t, FlashReg + intr_status);
		} else {
			/* Clear any other (unexpected) pending bits */
			iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}
	}

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);

	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	/* Leave multiplane mode */
	iowrite32(0, FlashReg + MULTIPLANE_OPERATION);

	return status;
}
#if CMD_DMA
/*
 * Interrupt handler for command-DMA mode: verify the interrupt is
 * ours, mask further controller interrupts, collect the event status,
 * and wake the waiter blocked on dev->complete.
 */
static irqreturn_t cdma_isr(int irq, void *dev_id)
{
	struct mrst_nand_info *dev = dev_id;
	int first_failed_cmd;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	/* Shared IRQ line: bail out if this isn't our interrupt */
	if (!is_cdma_interrupt())
		return IRQ_NONE;

	/* Disable controller interrupts */
	iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
	GLOB_FTL_Event_Status(&first_failed_cmd);
	complete(&dev->complete);

	return IRQ_HANDLED;
}
#else
/*
 * Bottom half of a read interrupt (non-CMD_DMA mode), called from
 * ddma_isr: with ECC enabled, drain status bits until both DMA
 * completion and ECC transaction done have been seen, running ECC
 * correction on ECC_ERR. Result is left in dev->ret.
 */
static void handle_nand_int_read(struct mrst_nand_info *dev)
{
	/* Per-bank interrupt status register offsets */
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 intr_status;
	/* Tracks having seen one of DMA_CMD_COMP / ECC_TRANSACTION_DONE */
	u32 ecc_done_OR_dma_comp = 0;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	dev->ret = PASS;
	intr_status = intr_status_addresses[dev->flash_bank];

	while (1) {
		if (enable_ecc) {
			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__ECC_ERR) {
				iowrite32(INTR_STATUS0__ECC_ERR,
					FlashReg + intr_status);
				dev->ret = do_ecc_new(dev->flash_bank,
						dev->read_data,
						dev->block, dev->page);
			} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__DMA_CMD_COMP) {
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				if (1 == ecc_done_OR_dma_comp)
					break;
				ecc_done_OR_dma_comp = 1;
			} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__ECC_TRANSACTION_DONE) {
				iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);
				if (1 == ecc_done_OR_dma_comp)
					break;
				ecc_done_OR_dma_comp = 1;
			}
		} else {
			/* No ECC: DMA completion is the only exit */
			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__DMA_CMD_COMP) {
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			} else {
				printk(KERN_ERR "Illegal INTS "
					"(offset addr 0x%x) value: 0x%x\n",
					intr_status,
					ioread32(FlashReg + intr_status));
			}
		}

		/* Clear any other (unexpected) pending bits */
		iowrite32((~INTR_STATUS0__ECC_ERR) &
			(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
			(~INTR_STATUS0__DMA_CMD_COMP),
			FlashReg + intr_status);
	}
}
/*
 * Bottom half of a write interrupt (non-CMD_DMA mode), called from
 * ddma_isr: wait for DMA completion, latching any PROGRAM_FAIL seen
 * along the way into dev->ret.
 */
static void handle_nand_int_write(struct mrst_nand_info *dev)
{
	u32 intr_status;
	/* Per-bank interrupt status register offsets */
	u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};
	int status = PASS;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		       __FILE__, __LINE__, __func__);

	dev->ret = PASS;
	intr_status = intr[dev->flash_bank];

	while (1) {
		/* Busy-wait for any interrupt bit */
		while (!ioread32(FlashReg + intr_status))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__DMA_CMD_COMP) {
			iowrite32(INTR_STATUS0__DMA_CMD_COMP,
				FlashReg + intr_status);
			/* Report any program failure seen earlier */
			if (FAIL == status)
				dev->ret = FAIL;
			break;
		} else if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__PROGRAM_FAIL) {
			status = FAIL;
			iowrite32(INTR_STATUS0__PROGRAM_FAIL,
				FlashReg + intr_status);
		} else {
			/* Clear any other (unexpected) pending bits */
			iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}
	}
}
/*
 * Top-level interrupt handler for data-DMA mode: validate that the
 * interrupt belongs to this bank, dispatch to the read or write
 * bottom half based on dev->state, then wake the waiter on
 * dev->complete.
 */
static irqreturn_t ddma_isr(int irq, void *dev_id)
{
	struct mrst_nand_info *dev = dev_id;
	u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
	/* Per-bank interrupt status register offsets */
	u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};

	/* Events this driver actually handles */
	int_mask = INTR_STATUS0__DMA_CMD_COMP |
		INTR_STATUS0__ECC_TRANSACTION_DONE |
		INTR_STATUS0__ECC_ERR |
		INTR_STATUS0__PROGRAM_FAIL |
		INTR_STATUS0__ERASE_FAIL;

	ints0 = ioread32(FlashReg + INTR_STATUS0);
	ints1 = ioread32(FlashReg + INTR_STATUS1);
	ints2 = ioread32(FlashReg + INTR_STATUS2);
	ints3 = ioread32(FlashReg + INTR_STATUS3);

	ints_offset = intr[dev->flash_bank];

	nand_dbg_print(NAND_DBG_DEBUG,
		"INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
		"DMA_INTR: 0x%x, "
		"dev->state: 0x%x, dev->flash_bank: %d\n",
		ints0, ints1, ints2, ints3,
		ioread32(FlashReg + DMA_INTR),
		dev->state, dev->flash_bank);

	/* Not an event we care about on the active bank: clear and
	 * disown (shared IRQ line) */
	if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
		iowrite32(ints0, FlashReg + INTR_STATUS0);
		iowrite32(ints1, FlashReg + INTR_STATUS1);
		iowrite32(ints2, FlashReg + INTR_STATUS2);
		iowrite32(ints3, FlashReg + INTR_STATUS3);
		nand_dbg_print(NAND_DBG_WARN,
			"ddma_isr: Invalid interrupt for NAND controller. "
			"Ignore it\n");
		return IRQ_NONE;
	}

	switch (dev->state) {
	case INT_READ_PAGE_MAIN:
	case INT_PIPELINE_READ_AHEAD:
		/* Disable controller interrupts */
		iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
		handle_nand_int_read(dev);
		break;
	case INT_WRITE_PAGE_MAIN:
	case INT_PIPELINE_WRITE_AHEAD:
		iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
		handle_nand_int_write(dev);
		break;
	default:
		printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
			dev->state);
		return IRQ_NONE;
	}

	dev->state = INT_IDLE_STATE;

	complete(&dev->complete);
	return IRQ_HANDLED;
}
#endif
/* PCI IDs this driver binds to: Intel (0x8086) Moorestown NAND
 * controller (0x0809), any subsystem */
static const struct pci_device_id nand_pci_ids[] = {
	{
	 .vendor = 0x8086,
	 .device = 0x0809,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 },
	{ /* end: all zeroes */ }
};
static
int
nand_pci_probe
(
struct
pci_dev
*
dev
,
const
struct
pci_device_id
*
id
)
{
int
ret
=
-
ENODEV
;
unsigned
long
csr_base
;
unsigned
long
csr_len
;
struct
mrst_nand_info
*
pndev
=
&
info
;
nand_dbg_print
(
NAND_DBG_WARN
,
"%s, Line %d, Function: %s
\n
"
,
__FILE__
,
__LINE__
,
__func__
);
ret
=
pci_enable_device
(
dev
);
if
(
ret
)
{
printk
(
KERN_ERR
"Spectra: pci_enable_device failed.
\n
"
);
return
ret
;
}
pci_set_master
(
dev
);
pndev
->
dev
=
dev
;
csr_base
=
pci_resource_start
(
dev
,
0
);
if
(
!
csr_base
)
{
printk
(
KERN_ERR
"Spectra: pci_resource_start failed!
\n
"
);
return
-
ENODEV
;
}
csr_len
=
pci_resource_len
(
dev
,
0
);
if
(
!
csr_len
)
{
printk
(
KERN_ERR
"Spectra: pci_resource_len failed!
\n
"
);
return
-
ENODEV
;
}
ret
=
pci_request_regions
(
dev
,
SPECTRA_NAND_NAME
);
if
(
ret
)
{
printk
(
KERN_ERR
"Spectra: Unable to request "
"memory region
\n
"
);
goto
failed_req_csr
;
}
pndev
->
ioaddr
=
ioremap_nocache
(
csr_base
,
csr_len
);
if
(
!
pndev
->
ioaddr
)
{
printk
(
KERN_ERR
"Spectra: Unable to remap memory region
\n
"
);
ret
=
-
ENOMEM
;
goto
failed_remap_csr
;
}
nand_dbg_print
(
NAND_DBG_DEBUG
,
"Spectra: CSR 0x%08lx -> 0x%p (0x%lx)
\n
"
,
csr_base
,
pndev
->
ioaddr
,
csr_len
);
init_completion
(
&
pndev
->
complete
);
nand_dbg_print
(
NAND_DBG_DEBUG
,
"Spectra: IRQ %d
\n
"
,
dev
->
irq
);
#if CMD_DMA
if
(
request_irq
(
dev
->
irq
,
cdma_isr
,
IRQF_SHARED
,
SPECTRA_NAND_NAME
,
&
info
))
{
printk
(
KERN_ERR
"Spectra: Unable to allocate IRQ
\n
"
);
ret
=
-
ENODEV
;
iounmap
(
pndev
->
ioaddr
);
goto
failed_remap_csr
;
}
#else
if
(
request_irq
(
dev
->
irq
,
ddma_isr
,
IRQF_SHARED
,
SPECTRA_NAND_NAME
,
&
info
))
{
printk
(
KERN_ERR
"Spectra: Unable to allocate IRQ
\n
"
);
ret
=
-
ENODEV
;
iounmap
(
pndev
->
ioaddr
);
goto
failed_remap_csr
;
}
#endif
pci_set_drvdata
(
dev
,
pndev
);
return
0
;
failed_remap_csr:
pci_release_regions
(
dev
);
failed_req_csr:
return
ret
;
}
static
void
nand_pci_remove
(
struct
pci_dev
*
dev
)
{
struct
mrst_nand_info
*
pndev
=
pci_get_drvdata
(
dev
);
nand_dbg_print
(
NAND_DBG_WARN
,
"%s, Line %d, Function: %s
\n
"
,
__FILE__
,
__LINE__
,
__func__
);
#if CMD_DMA
free_irq
(
dev
->
irq
,
pndev
);
#endif
iounmap
(
pndev
->
ioaddr
);
pci_release_regions
(
dev
);
pci_disable_device
(
dev
);
}
/* Export the PCI ID table so udev/modpost can autoload this module. */
MODULE_DEVICE_TABLE(pci, nand_pci_ids);

/* PCI glue: binds nand_pci_probe()/nand_pci_remove() to the IDs above. */
static struct pci_driver nand_pci_driver = {
	.name = SPECTRA_NAND_NAME,
	.id_table = nand_pci_ids,
	.probe = nand_pci_probe,
	.remove = nand_pci_remove,
};
int
NAND_Flash_Init
(
void
)
{
int
retval
;
u32
int_mask
;
nand_dbg_print
(
NAND_DBG_TRACE
,
"%s, Line %d, Function: %s
\n
"
,
__FILE__
,
__LINE__
,
__func__
);
FlashReg
=
ioremap_nocache
(
GLOB_HWCTL_REG_BASE
,
GLOB_HWCTL_REG_SIZE
);
if
(
!
FlashReg
)
{
printk
(
KERN_ERR
"Spectra: ioremap_nocache failed!"
);
return
-
ENOMEM
;
}
nand_dbg_print
(
NAND_DBG_WARN
,
"Spectra: Remapped reg base address: "
"0x%p, len: %d
\n
"
,
FlashReg
,
GLOB_HWCTL_REG_SIZE
);
FlashMem
=
ioremap_nocache
(
GLOB_HWCTL_MEM_BASE
,
GLOB_HWCTL_MEM_SIZE
);
if
(
!
FlashMem
)
{
printk
(
KERN_ERR
"Spectra: ioremap_nocache failed!"
);
iounmap
(
FlashReg
);
return
-
ENOMEM
;
}
nand_dbg_print
(
NAND_DBG_WARN
,
"Spectra: Remapped flash base address: "
"0x%p, len: %d
\n
"
,
(
void
*
)
FlashMem
,
GLOB_HWCTL_MEM_SIZE
);
nand_dbg_print
(
NAND_DBG_DEBUG
,
"Dump timing register values:"
"acc_clks: %d, re_2_we: %d, we_2_re: %d,"
"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d
\n
"
,
ioread32
(
FlashReg
+
ACC_CLKS
),
ioread32
(
FlashReg
+
RE_2_WE
),
ioread32
(
FlashReg
+
WE_2_RE
),
ioread32
(
FlashReg
+
ADDR_2_DATA
),
ioread32
(
FlashReg
+
RDWR_EN_LO_CNT
),
ioread32
(
FlashReg
+
RDWR_EN_HI_CNT
),
ioread32
(
FlashReg
+
CS_SETUP_CNT
));
NAND_Flash_Reset
();
iowrite32
(
0
,
FlashReg
+
GLOBAL_INT_ENABLE
);
#if CMD_DMA
info
.
pcmds_num
=
0
;
info
.
flash_bank
=
0
;
info
.
cdma_num
=
0
;
int_mask
=
(
DMA_INTR__DESC_COMP_CHANNEL0
|
DMA_INTR__DESC_COMP_CHANNEL1
|
DMA_INTR__DESC_COMP_CHANNEL2
|
DMA_INTR__DESC_COMP_CHANNEL3
|
DMA_INTR__MEMCOPY_DESC_COMP
);
iowrite32
(
int_mask
,
FlashReg
+
DMA_INTR_EN
);
iowrite32
(
0xFFFF
,
FlashReg
+
DMA_INTR
);
int_mask
=
(
INTR_STATUS0__ECC_ERR
|
INTR_STATUS0__PROGRAM_FAIL
|
INTR_STATUS0__ERASE_FAIL
);
#else
int_mask
=
INTR_STATUS0__DMA_CMD_COMP
|
INTR_STATUS0__ECC_TRANSACTION_DONE
|
INTR_STATUS0__ECC_ERR
|
INTR_STATUS0__PROGRAM_FAIL
|
INTR_STATUS0__ERASE_FAIL
;
#endif
iowrite32
(
int_mask
,
FlashReg
+
INTR_EN0
);
iowrite32
(
int_mask
,
FlashReg
+
INTR_EN1
);
iowrite32
(
int_mask
,
FlashReg
+
INTR_EN2
);
iowrite32
(
int_mask
,
FlashReg
+
INTR_EN3
);
/* Clear all status bits */
iowrite32
(
0xFFFF
,
FlashReg
+
INTR_STATUS0
);
iowrite32
(
0xFFFF
,
FlashReg
+
INTR_STATUS1
);
iowrite32
(
0xFFFF
,
FlashReg
+
INTR_STATUS2
);
iowrite32
(
0xFFFF
,
FlashReg
+
INTR_STATUS3
);
iowrite32
(
0x0F
,
FlashReg
+
RB_PIN_ENABLED
);
iowrite32
(
CHIP_EN_DONT_CARE__FLAG
,
FlashReg
+
CHIP_ENABLE_DONT_CARE
);
/* Should set value for these registers when init */
iowrite32
(
0
,
FlashReg
+
TWO_ROW_ADDR_CYCLES
);
iowrite32
(
1
,
FlashReg
+
ECC_ENABLE
);
enable_ecc
=
1
;
retval
=
pci_register_driver
(
&
nand_pci_driver
);
if
(
retval
)
return
-
ENOMEM
;
return
PASS
;
}
/*
 * Module-exit counterpart of NAND_Flash_Init(): unregister the PCI
 * driver first (so no probe/remove callbacks can still run), then drop
 * the two MMIO mappings created at init.  The order matters.
 * Always returns 0.
 */
int nand_release_spectra(void)
{
	pci_unregister_driver(&nand_pci_driver);
	iounmap(FlashMem);
	iounmap(FlashReg);
	return 0;
}
drivers/staging/spectra/lld_nand.h
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef _LLD_NAND_
#define _LLD_NAND_

#ifdef ELDORA
#include "defs.h"
#else
#include "flash.h"
#include "ffsport.h"
#endif

/* Controller addressing-mode base values OR'ed into MMIO offsets. */
#define MODE_00 0x00000000
#define MODE_01 0x04000000
#define MODE_10 0x08000000
#define MODE_11 0x0C000000

/*
 * Configuration item indices (0..22).  NOTE(review): these look like
 * indices into a device-config table rather than register offsets —
 * confirm against the code that consumes them.
 */
#define DATA_TRANSFER_MODE 0
#define PROTECTION_PER_BLOCK 1
#define LOAD_WAIT_COUNT 2
#define PROGRAM_WAIT_COUNT 3
#define ERASE_WAIT_COUNT 4
#define INT_MONITOR_CYCLE_COUNT 5
#define READ_BUSY_PIN_ENABLED 6
#define MULTIPLANE_OPERATION_SUPPORT 7
#define PRE_FETCH_MODE 8
#define CE_DONT_CARE_SUPPORT 9
#define COPYBACK_SUPPORT 10
#define CACHE_WRITE_SUPPORT 11
#define CACHE_READ_SUPPORT 12
#define NUM_PAGES_IN_BLOCK 13
#define ECC_ENABLE_SELECT 14
#define WRITE_ENABLE_2_READ_ENABLE 15
#define ADDRESS_2_DATA 16
#define READ_ENABLE_2_WRITE_ENABLE 17
#define TWO_ROW_ADDRESS_CYCLES 18
#define MULTIPLANE_ADDRESS_RESTRICT 19
#define ACC_CLOCKS 20
#define READ_WRITE_ENABLE_LOW_COUNT 21
#define READ_WRITE_ENABLE_HIGH_COUNT 22

/* ECC is computed per 512-byte sector. */
#define ECC_SECTOR_SIZE 512

/* Maximum number of flash banks the low-level driver supports. */
#define LLD_MAX_FLASH_BANKS 4
/*
 * Per-device state for the Spectra (Moorestown) NAND PCI driver.
 * A single instance ("info") is shared between the PCI probe/remove
 * path and the command-DMA engine.
 */
struct mrst_nand_info {
	struct pci_dev *dev;		/* owning PCI device */
	u32 state;
	u32 flash_bank;			/* currently selected bank */
	u8 *read_data;			/* destination buffer of current read */
	u8 *write_data;			/* source buffer of current write */
	u32 block;			/* block address of current op */
	u16 page;			/* page address of current op */
	u32 use_dma;
	void __iomem *ioaddr;		/* Mapped io reg base address */
	int ret;			/* completion status of current op */
	u32 pcmds_num;			/* number of pending commands */
	struct pending_cmd *pcmds;	/* pending command queue */
	int cdma_num;			/* CDMA descriptor number in this chan */
	u8 *cdma_desc_buf;		/* CDMA descriptor table */
	u8 *memcp_desc_buf;		/* Memory copy descriptor table */
	dma_addr_t cdma_desc;		/* Mapped CDMA descriptor table */
	dma_addr_t memcp_desc;		/* Mapped memory copy descriptor table */
	struct completion complete;	/* signalled by the ISR on op done */
};
/*
 * Low-level NAND API.  Unless noted otherwise the u16-returning
 * functions use the driver's PASS/FAIL convention rather than errno.
 * (block, page, page_count) identify a physical flash location and
 * the number of consecutive pages to transfer.
 */

/* Driver lifetime */
int NAND_Flash_Init(void);
int nand_release_spectra(void);

/* Device-level operations */
u16 NAND_Flash_Reset(void);
u16 NAND_Read_Device_ID(void);
u16 NAND_Erase_Block(u32 flash_add);
u16 NAND_UnlockArrayAll(void);
u16 NAND_Get_Bad_Block(u32 block);

/* Main-area page I/O */
u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
				u16 page_count);
u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
				u16 page_count);

/* Main+spare and spare-only page I/O */
u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block, u16 page,
				u16 page_count);
u16 NAND_Write_Page_Spare(u8 *read_data, u32 block, u16 page,
				u16 page_count);
u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
				u16 page_count);
u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
				u16 page_count);

/* Interrupt and ECC control */
void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE);
void NAND_ECC_Ctrl(int enable);

/* Pipelined / multiplane variants */
u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block, u16 page,
				u16 page_count);
u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block, u16 page,
				u16 page_count);
u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
				u16 page_count);
u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
				u16 page_count);

/* Polling (non-interrupt) read paths */
u16 NAND_Read_Page_Main_Polling(u8 *read_data, u32 block, u16 page,
				u16 page_count);
u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data, u32 block, u16 page,
				u16 page_count);

/* In-place conversion between logical and physical spare-area layouts */
void Conv_Spare_Data_Log2Phy_Format(u8 *data);
void Conv_Spare_Data_Phy2Log_Format(u8 *data);
void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count);
void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count);

/* Mapped controller windows, defined in the .c file */
extern void __iomem *FlashReg;		/* register window */
extern void __iomem *FlashMem;		/* data window */
extern int totalUsedBanks;
extern u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];

#endif /*_LLD_NAND_*/
drivers/staging/spectra/nand_regs.h
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#define DEVICE_RESET 0x0
#define DEVICE_RESET__BANK0 0x0001
#define DEVICE_RESET__BANK1 0x0002
#define DEVICE_RESET__BANK2 0x0004
#define DEVICE_RESET__BANK3 0x0008
#define TRANSFER_SPARE_REG 0x10
#define TRANSFER_SPARE_REG__FLAG 0x0001
#define LOAD_WAIT_CNT 0x20
#define LOAD_WAIT_CNT__VALUE 0xffff
#define PROGRAM_WAIT_CNT 0x30
#define PROGRAM_WAIT_CNT__VALUE 0xffff
#define ERASE_WAIT_CNT 0x40
#define ERASE_WAIT_CNT__VALUE 0xffff
#define INT_MON_CYCCNT 0x50
#define INT_MON_CYCCNT__VALUE 0xffff
#define RB_PIN_ENABLED 0x60
#define RB_PIN_ENABLED__BANK0 0x0001
#define RB_PIN_ENABLED__BANK1 0x0002
#define RB_PIN_ENABLED__BANK2 0x0004
#define RB_PIN_ENABLED__BANK3 0x0008
#define MULTIPLANE_OPERATION 0x70
#define MULTIPLANE_OPERATION__FLAG 0x0001
#define MULTIPLANE_READ_ENABLE 0x80
#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
#define COPYBACK_DISABLE 0x90
#define COPYBACK_DISABLE__FLAG 0x0001
#define CACHE_WRITE_ENABLE 0xa0
#define CACHE_WRITE_ENABLE__FLAG 0x0001
#define CACHE_READ_ENABLE 0xb0
#define CACHE_READ_ENABLE__FLAG 0x0001
#define PREFETCH_MODE 0xc0
#define PREFETCH_MODE__PREFETCH_EN 0x0001
#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
#define CHIP_ENABLE_DONT_CARE 0xd0
#define CHIP_EN_DONT_CARE__FLAG 0x01
#define ECC_ENABLE 0xe0
#define ECC_ENABLE__FLAG 0x0001
#define GLOBAL_INT_ENABLE 0xf0
#define GLOBAL_INT_EN_FLAG 0x01
#define WE_2_RE 0x100
#define WE_2_RE__VALUE 0x003f
#define ADDR_2_DATA 0x110
#define ADDR_2_DATA__VALUE 0x003f
#define RE_2_WE 0x120
#define RE_2_WE__VALUE 0x003f
#define ACC_CLKS 0x130
#define ACC_CLKS__VALUE 0x000f
#define NUMBER_OF_PLANES 0x140
#define NUMBER_OF_PLANES__VALUE 0x0007
#define PAGES_PER_BLOCK 0x150
#define PAGES_PER_BLOCK__VALUE 0xffff
#define DEVICE_WIDTH 0x160
#define DEVICE_WIDTH__VALUE 0x0003
#define DEVICE_MAIN_AREA_SIZE 0x170
#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
#define DEVICE_SPARE_AREA_SIZE 0x180
#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
#define TWO_ROW_ADDR_CYCLES 0x190
#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
#define MULTIPLANE_ADDR_RESTRICT 0x1a0
#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
#define ECC_CORRECTION 0x1b0
#define ECC_CORRECTION__VALUE 0x001f
#define READ_MODE 0x1c0
#define READ_MODE__VALUE 0x000f
#define WRITE_MODE 0x1d0
#define WRITE_MODE__VALUE 0x000f
#define COPYBACK_MODE 0x1e0
#define COPYBACK_MODE__VALUE 0x000f
#define RDWR_EN_LO_CNT 0x1f0
#define RDWR_EN_LO_CNT__VALUE 0x001f
#define RDWR_EN_HI_CNT 0x200
#define RDWR_EN_HI_CNT__VALUE 0x001f
#define MAX_RD_DELAY 0x210
#define MAX_RD_DELAY__VALUE 0x000f
#define CS_SETUP_CNT 0x220
#define CS_SETUP_CNT__VALUE 0x001f
#define SPARE_AREA_SKIP_BYTES 0x230
#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
#define SPARE_AREA_MARKER 0x240
#define SPARE_AREA_MARKER__VALUE 0xffff
#define DEVICES_CONNECTED 0x250
#define DEVICES_CONNECTED__VALUE 0x0007
#define DIE_MASK 0x260
#define DIE_MASK__VALUE 0x00ff
#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
#define WRITE_PROTECT 0x280
#define WRITE_PROTECT__FLAG 0x0001
#define RE_2_RE 0x290
#define RE_2_RE__VALUE 0x003f
#define MANUFACTURER_ID 0x300
#define MANUFACTURER_ID__VALUE 0x00ff
#define DEVICE_ID 0x310
#define DEVICE_ID__VALUE 0x00ff
#define DEVICE_PARAM_0 0x320
#define DEVICE_PARAM_0__VALUE 0x00ff
#define DEVICE_PARAM_1 0x330
#define DEVICE_PARAM_1__VALUE 0x00ff
#define DEVICE_PARAM_2 0x340
#define DEVICE_PARAM_2__VALUE 0x00ff
#define LOGICAL_PAGE_DATA_SIZE 0x350
#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
#define LOGICAL_PAGE_SPARE_SIZE 0x360
#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
#define REVISION 0x370
#define REVISION__VALUE 0xffff
#define ONFI_DEVICE_FEATURES 0x380
#define ONFI_DEVICE_FEATURES__VALUE 0x003f
#define ONFI_OPTIONAL_COMMANDS 0x390
#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
#define ONFI_TIMING_MODE 0x3a0
#define ONFI_TIMING_MODE__VALUE 0x003f
#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
#define FEATURES 0x3f0
#define FEATURES__N_BANKS 0x0003
#define FEATURES__ECC_MAX_ERR 0x003c
#define FEATURES__DMA 0x0040
#define FEATURES__CMD_DMA 0x0080
#define FEATURES__PARTITION 0x0100
#define FEATURES__XDMA_SIDEBAND 0x0200
#define FEATURES__GPREG 0x0400
#define FEATURES__INDEX_ADDR 0x0800
#define TRANSFER_MODE 0x400
#define TRANSFER_MODE__VALUE 0x0003
#define INTR_STATUS0 0x410
#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
#define INTR_STATUS0__ECC_ERR 0x0002
#define INTR_STATUS0__DMA_CMD_COMP 0x0004
#define INTR_STATUS0__TIME_OUT 0x0008
#define INTR_STATUS0__PROGRAM_FAIL 0x0010
#define INTR_STATUS0__ERASE_FAIL 0x0020
#define INTR_STATUS0__LOAD_COMP 0x0040
#define INTR_STATUS0__PROGRAM_COMP 0x0080
#define INTR_STATUS0__ERASE_COMP 0x0100
#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_STATUS0__LOCKED_BLK 0x0400
#define INTR_STATUS0__UNSUP_CMD 0x0800
#define INTR_STATUS0__INT_ACT 0x1000
#define INTR_STATUS0__RST_COMP 0x2000
#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
#define INTR_STATUS0__PAGE_XFER_INC 0x8000
#define INTR_EN0 0x420
#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
#define INTR_EN0__ECC_ERR 0x0002
#define INTR_EN0__DMA_CMD_COMP 0x0004
#define INTR_EN0__TIME_OUT 0x0008
#define INTR_EN0__PROGRAM_FAIL 0x0010
#define INTR_EN0__ERASE_FAIL 0x0020
#define INTR_EN0__LOAD_COMP 0x0040
#define INTR_EN0__PROGRAM_COMP 0x0080
#define INTR_EN0__ERASE_COMP 0x0100
#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_EN0__LOCKED_BLK 0x0400
#define INTR_EN0__UNSUP_CMD 0x0800
#define INTR_EN0__INT_ACT 0x1000
#define INTR_EN0__RST_COMP 0x2000
#define INTR_EN0__PIPE_CMD_ERR 0x4000
#define INTR_EN0__PAGE_XFER_INC 0x8000
#define PAGE_CNT0 0x430
#define PAGE_CNT0__VALUE 0x00ff
#define ERR_PAGE_ADDR0 0x440
#define ERR_PAGE_ADDR0__VALUE 0xffff
#define ERR_BLOCK_ADDR0 0x450
#define ERR_BLOCK_ADDR0__VALUE 0xffff
#define INTR_STATUS1 0x460
#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
#define INTR_STATUS1__ECC_ERR 0x0002
#define INTR_STATUS1__DMA_CMD_COMP 0x0004
#define INTR_STATUS1__TIME_OUT 0x0008
#define INTR_STATUS1__PROGRAM_FAIL 0x0010
#define INTR_STATUS1__ERASE_FAIL 0x0020
#define INTR_STATUS1__LOAD_COMP 0x0040
#define INTR_STATUS1__PROGRAM_COMP 0x0080
#define INTR_STATUS1__ERASE_COMP 0x0100
#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_STATUS1__LOCKED_BLK 0x0400
#define INTR_STATUS1__UNSUP_CMD 0x0800
#define INTR_STATUS1__INT_ACT 0x1000
#define INTR_STATUS1__RST_COMP 0x2000
#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
#define INTR_STATUS1__PAGE_XFER_INC 0x8000
#define INTR_EN1 0x470
#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
#define INTR_EN1__ECC_ERR 0x0002
#define INTR_EN1__DMA_CMD_COMP 0x0004
#define INTR_EN1__TIME_OUT 0x0008
#define INTR_EN1__PROGRAM_FAIL 0x0010
#define INTR_EN1__ERASE_FAIL 0x0020
#define INTR_EN1__LOAD_COMP 0x0040
#define INTR_EN1__PROGRAM_COMP 0x0080
#define INTR_EN1__ERASE_COMP 0x0100
#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_EN1__LOCKED_BLK 0x0400
#define INTR_EN1__UNSUP_CMD 0x0800
#define INTR_EN1__INT_ACT 0x1000
#define INTR_EN1__RST_COMP 0x2000
#define INTR_EN1__PIPE_CMD_ERR 0x4000
#define INTR_EN1__PAGE_XFER_INC 0x8000
#define PAGE_CNT1 0x480
#define PAGE_CNT1__VALUE 0x00ff
#define ERR_PAGE_ADDR1 0x490
#define ERR_PAGE_ADDR1__VALUE 0xffff
#define ERR_BLOCK_ADDR1 0x4a0
#define ERR_BLOCK_ADDR1__VALUE 0xffff
#define INTR_STATUS2 0x4b0
#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
#define INTR_STATUS2__ECC_ERR 0x0002
#define INTR_STATUS2__DMA_CMD_COMP 0x0004
#define INTR_STATUS2__TIME_OUT 0x0008
#define INTR_STATUS2__PROGRAM_FAIL 0x0010
#define INTR_STATUS2__ERASE_FAIL 0x0020
#define INTR_STATUS2__LOAD_COMP 0x0040
#define INTR_STATUS2__PROGRAM_COMP 0x0080
#define INTR_STATUS2__ERASE_COMP 0x0100
#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_STATUS2__LOCKED_BLK 0x0400
#define INTR_STATUS2__UNSUP_CMD 0x0800
#define INTR_STATUS2__INT_ACT 0x1000
#define INTR_STATUS2__RST_COMP 0x2000
#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
#define INTR_STATUS2__PAGE_XFER_INC 0x8000
#define INTR_EN2 0x4c0
#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
#define INTR_EN2__ECC_ERR 0x0002
#define INTR_EN2__DMA_CMD_COMP 0x0004
#define INTR_EN2__TIME_OUT 0x0008
#define INTR_EN2__PROGRAM_FAIL 0x0010
#define INTR_EN2__ERASE_FAIL 0x0020
#define INTR_EN2__LOAD_COMP 0x0040
#define INTR_EN2__PROGRAM_COMP 0x0080
#define INTR_EN2__ERASE_COMP 0x0100
#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_EN2__LOCKED_BLK 0x0400
#define INTR_EN2__UNSUP_CMD 0x0800
#define INTR_EN2__INT_ACT 0x1000
#define INTR_EN2__RST_COMP 0x2000
#define INTR_EN2__PIPE_CMD_ERR 0x4000
#define INTR_EN2__PAGE_XFER_INC 0x8000
#define PAGE_CNT2 0x4d0
#define PAGE_CNT2__VALUE 0x00ff
#define ERR_PAGE_ADDR2 0x4e0
#define ERR_PAGE_ADDR2__VALUE 0xffff
#define ERR_BLOCK_ADDR2 0x4f0
#define ERR_BLOCK_ADDR2__VALUE 0xffff
#define INTR_STATUS3 0x500
#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
#define INTR_STATUS3__ECC_ERR 0x0002
#define INTR_STATUS3__DMA_CMD_COMP 0x0004
#define INTR_STATUS3__TIME_OUT 0x0008
#define INTR_STATUS3__PROGRAM_FAIL 0x0010
#define INTR_STATUS3__ERASE_FAIL 0x0020
#define INTR_STATUS3__LOAD_COMP 0x0040
#define INTR_STATUS3__PROGRAM_COMP 0x0080
#define INTR_STATUS3__ERASE_COMP 0x0100
#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_STATUS3__LOCKED_BLK 0x0400
#define INTR_STATUS3__UNSUP_CMD 0x0800
#define INTR_STATUS3__INT_ACT 0x1000
#define INTR_STATUS3__RST_COMP 0x2000
#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
#define INTR_STATUS3__PAGE_XFER_INC 0x8000
#define INTR_EN3 0x510
#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
#define INTR_EN3__ECC_ERR 0x0002
#define INTR_EN3__DMA_CMD_COMP 0x0004
#define INTR_EN3__TIME_OUT 0x0008
#define INTR_EN3__PROGRAM_FAIL 0x0010
#define INTR_EN3__ERASE_FAIL 0x0020
#define INTR_EN3__LOAD_COMP 0x0040
#define INTR_EN3__PROGRAM_COMP 0x0080
#define INTR_EN3__ERASE_COMP 0x0100
#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
#define INTR_EN3__LOCKED_BLK 0x0400
#define INTR_EN3__UNSUP_CMD 0x0800
#define INTR_EN3__INT_ACT 0x1000
#define INTR_EN3__RST_COMP 0x2000
#define INTR_EN3__PIPE_CMD_ERR 0x4000
#define INTR_EN3__PAGE_XFER_INC 0x8000
#define PAGE_CNT3 0x520
#define PAGE_CNT3__VALUE 0x00ff
#define ERR_PAGE_ADDR3 0x530
#define ERR_PAGE_ADDR3__VALUE 0xffff
#define ERR_BLOCK_ADDR3 0x540
#define ERR_BLOCK_ADDR3__VALUE 0xffff
#define DATA_INTR 0x550
#define DATA_INTR__WRITE_SPACE_AV 0x0001
#define DATA_INTR__READ_DATA_AV 0x0002
#define DATA_INTR_EN 0x560
#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
#define DATA_INTR_EN__READ_DATA_AV 0x0002
#define GPREG_0 0x570
#define GPREG_0__VALUE 0xffff
#define GPREG_1 0x580
#define GPREG_1__VALUE 0xffff
#define GPREG_2 0x590
#define GPREG_2__VALUE 0xffff
#define GPREG_3 0x5a0
#define GPREG_3__VALUE 0xffff
#define ECC_THRESHOLD 0x600
#define ECC_THRESHOLD__VALUE 0x03ff
#define ECC_ERROR_BLOCK_ADDRESS 0x610
#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
#define ECC_ERROR_PAGE_ADDRESS 0x620
#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
#define ECC_ERROR_ADDRESS 0x630
#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
#define ERR_CORRECTION_INFO 0x640
#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
#define DMA_ENABLE 0x700
#define DMA_ENABLE__FLAG 0x0001
#define IGNORE_ECC_DONE 0x710
#define IGNORE_ECC_DONE__FLAG 0x0001
#define DMA_INTR 0x720
#define DMA_INTR__TARGET_ERROR 0x0001
#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
#define DMA_INTR_EN 0x730
#define DMA_INTR_EN__TARGET_ERROR 0x0001
#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
#define TARGET_ERR_ADDR_LO 0x740
#define TARGET_ERR_ADDR_LO__VALUE 0xffff
#define TARGET_ERR_ADDR_HI 0x750
#define TARGET_ERR_ADDR_HI__VALUE 0xffff
#define CHNL_ACTIVE 0x760
#define CHNL_ACTIVE__CHANNEL0 0x0001
#define CHNL_ACTIVE__CHANNEL1 0x0002
#define CHNL_ACTIVE__CHANNEL2 0x0004
#define CHNL_ACTIVE__CHANNEL3 0x0008
#define ACTIVE_SRC_ID 0x800
#define ACTIVE_SRC_ID__VALUE 0x00ff
#define PTN_INTR 0x810
#define PTN_INTR__CONFIG_ERROR 0x0001
#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
#define PTN_INTR__REG_ACCESS_ERROR 0x0020
#define PTN_INTR_EN 0x820
#define PTN_INTR_EN__CONFIG_ERROR 0x0001
#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
#define PERM_SRC_ID_0 0x830
#define PERM_SRC_ID_0__SRCID 0x00ff
#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_0 0x840
#define MIN_BLK_ADDR_0__VALUE 0xffff
#define MAX_BLK_ADDR_0 0x850
#define MAX_BLK_ADDR_0__VALUE 0xffff
#define MIN_MAX_BANK_0 0x860
#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
#define PERM_SRC_ID_1 0x870
#define PERM_SRC_ID_1__SRCID 0x00ff
#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_1 0x880
#define MIN_BLK_ADDR_1__VALUE 0xffff
#define MAX_BLK_ADDR_1 0x890
#define MAX_BLK_ADDR_1__VALUE 0xffff
#define MIN_MAX_BANK_1 0x8a0
#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
#define PERM_SRC_ID_2 0x8b0
#define PERM_SRC_ID_2__SRCID 0x00ff
#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_2 0x8c0
#define MIN_BLK_ADDR_2__VALUE 0xffff
#define MAX_BLK_ADDR_2 0x8d0
#define MAX_BLK_ADDR_2__VALUE 0xffff
#define MIN_MAX_BANK_2 0x8e0
#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
#define PERM_SRC_ID_3 0x8f0
#define PERM_SRC_ID_3__SRCID 0x00ff
#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_3 0x900
#define MIN_BLK_ADDR_3__VALUE 0xffff
#define MAX_BLK_ADDR_3 0x910
#define MAX_BLK_ADDR_3__VALUE 0xffff
#define MIN_MAX_BANK_3 0x920
#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
#define PERM_SRC_ID_4 0x930
#define PERM_SRC_ID_4__SRCID 0x00ff
#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_4 0x940
#define MIN_BLK_ADDR_4__VALUE 0xffff
#define MAX_BLK_ADDR_4 0x950
#define MAX_BLK_ADDR_4__VALUE 0xffff
#define MIN_MAX_BANK_4 0x960
#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
#define PERM_SRC_ID_5 0x970
#define PERM_SRC_ID_5__SRCID 0x00ff
#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_5 0x980
#define MIN_BLK_ADDR_5__VALUE 0xffff
#define MAX_BLK_ADDR_5 0x990
#define MAX_BLK_ADDR_5__VALUE 0xffff
#define MIN_MAX_BANK_5 0x9a0
#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
#define PERM_SRC_ID_6 0x9b0
#define PERM_SRC_ID_6__SRCID 0x00ff
#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_6 0x9c0
#define MIN_BLK_ADDR_6__VALUE 0xffff
#define MAX_BLK_ADDR_6 0x9d0
#define MAX_BLK_ADDR_6__VALUE 0xffff
#define MIN_MAX_BANK_6 0x9e0
#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
#define PERM_SRC_ID_7 0x9f0
#define PERM_SRC_ID_7__SRCID 0x00ff
#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
#define MIN_BLK_ADDR_7 0xa00
#define MIN_BLK_ADDR_7__VALUE 0xffff
#define MAX_BLK_ADDR_7 0xa10
#define MAX_BLK_ADDR_7__VALUE 0xffff
#define MIN_MAX_BANK_7 0xa20
#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
drivers/staging/spectra/spectraswconfig.h
0 → 100644
View file @
178f16db
/*
* NAND Flash Controller Device Driver
* Copyright (c) 2009, Intel Corporation and its suppliers.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef _SPECTRASWCONFIG_
#define _SPECTRASWCONFIG_

/* NAND driver version */
#define GLOB_VERSION "driver version 20100311"

/***** Common Parameters *****/
#define RETRY_TIMES 3
#define READ_BADBLOCK_INFO 1
#define READBACK_VERIFY 0
#define AUTO_FORMAT_FLASH 0

/***** Cache Parameters *****/
#define CACHE_ITEM_NUM 128
#define BLK_NUM_FOR_L2_CACHE 16

/***** Block Table Parameters *****/
#define BLOCK_TABLE_INDEX 0

/***** Wear Leveling Parameters *****/
#define WEAR_LEVELING_GATE 0x10
#define WEAR_LEVELING_BLOCK_NUM 10
#define DEBUG_BNDRY 0

/***** Product Feature Support *****/
/*
 * Fix: the original used "#define FLASH_EMU defined(CONFIG_...)".
 * Having "defined" appear in an #if via macro expansion is undefined
 * behaviour (C99 6.10.1p4), so expand the Kconfig checks explicitly
 * into plain 0/1 macros, which work in both #if and C expressions.
 */
#ifdef CONFIG_SPECTRA_EMU
#define FLASH_EMU 1
#else
#define FLASH_EMU 0
#endif

#ifdef CONFIG_SPECTRA_MRST_HW
#define FLASH_NAND 1
#else
#define FLASH_NAND 0
#endif

#ifdef CONFIG_SPECTRA_MTD
#define FLASH_MTD 1
#else
#define FLASH_MTD 0
#endif

#ifdef CONFIG_SPECTRA_MRST_HW_DMA
#define CMD_DMA 1
#else
#define CMD_DMA 0
#endif

#define SPECTRA_PARTITION_ID 0

/* Enable this macro if the number of flash blocks is larger than 16K. */
#define SUPPORT_LARGE_BLOCKNUM 1

/**** Block Table and Reserved Block Parameters *****/
#define SPECTRA_START_BLOCK 3
#define NUM_FREE_BLOCKS_GATE 60

/**** Hardware Parameters ****/
#define GLOB_HWCTL_REG_BASE 0xFFA40000
#define GLOB_HWCTL_REG_SIZE 4096

#define GLOB_HWCTL_MEM_BASE 0xFFA48000
#define GLOB_HWCTL_MEM_SIZE 4096

/* KBV - Updated to LNW scratch register address */
#define SCRATCH_REG_ADDR 0xFF108018
#define SCRATCH_REG_SIZE 64

#define GLOB_HWCTL_DEFAULT_BLKS 2048

#define SUPPORT_15BITECC 1
#define SUPPORT_8BITECC 1

#define ONFI_BLOOM_TIME 0
#define MODE5_WORKAROUND 1

#endif /*_SPECTRASWCONFIG_*/
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment