Commit 6b2a4f7a authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (26 commits)
  mmc: SDHI should depend on SUPERH || ARCH_SHMOBILE
  mmc: tmio_mmc: Move some defines into a shared header
  mmc: tmio: support aggressive clock gating
  mmc: tmio: fix power-mode interpretation
  mmc: tmio: remove work-around for unmasked SDIO interrupts
  sh: fix SDHI IO address-range
  ARM: mach-shmobile: fix SDHI IO address-range
  mmc: tmio: only access registers above 0xff, if available
  mfd: remove now redundant sh_mobile_sdhi.h header
  sh: convert boards to use linux/mmc/sh_mobile_sdhi.h
  ARM: mach-shmobile: convert boards to use linux/mmc/sh_mobile_sdhi.h
  mmc: tmio: convert the SDHI MMC driver from MFD to a platform driver
  sh: ecovec: use the CONFIG_MMC_TMIO symbols instead of MFD
  mmc: tmio: split core functionality, DMA and MFD glue
  mmc: tmio: use PIO for short transfers
  mmc: tmio-mmc: Improve DMA stability on sh-mobile
  mmc: fix mmc_app_send_scr() for dma transfer
  mmc: sdhci-esdhc: enable esdhc on imx53
  mmc: sdhci-esdhc: use writel/readl as general APIs
  mmc: sdhci: add the abort CMDTYPE bits definition
  ...
parents eefbab59 022b4835
...@@ -24,9 +24,9 @@ ...@@ -24,9 +24,9 @@
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h> #include <linux/mfd/tmio.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/mtd.h> #include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h> #include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h> #include <linux/mtd/physmap.h>
...@@ -312,7 +312,7 @@ static struct resource sdhi0_resources[] = { ...@@ -312,7 +312,7 @@ static struct resource sdhi0_resources[] = {
[0] = { [0] = {
.name = "SDHI0", .name = "SDHI0",
.start = 0xe6850000, .start = 0xe6850000,
.end = 0xe68501ff, .end = 0xe68500ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
...@@ -345,7 +345,7 @@ static struct resource sdhi1_resources[] = { ...@@ -345,7 +345,7 @@ static struct resource sdhi1_resources[] = {
[0] = { [0] = {
.name = "SDHI1", .name = "SDHI1",
.start = 0xe6860000, .start = 0xe6860000,
.end = 0xe68601ff, .end = 0xe68600ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#include <linux/input.h> #include <linux/input.h>
#include <linux/input/sh_keysc.h> #include <linux/input/sh_keysc.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include <linux/mfd/sh_mobile_sdhi.h> #include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/gpio.h> #include <linux/gpio.h>
#include <mach/sh7377.h> #include <mach/sh7377.h>
#include <mach/common.h> #include <mach/common.h>
...@@ -205,7 +205,7 @@ static struct resource sdhi0_resources[] = { ...@@ -205,7 +205,7 @@ static struct resource sdhi0_resources[] = {
[0] = { [0] = {
.name = "SDHI0", .name = "SDHI0",
.start = 0xe6d50000, .start = 0xe6d50000,
.end = 0xe6d501ff, .end = 0xe6d500ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
...@@ -232,7 +232,7 @@ static struct resource sdhi1_resources[] = { ...@@ -232,7 +232,7 @@ static struct resource sdhi1_resources[] = {
[0] = { [0] = {
.name = "SDHI1", .name = "SDHI1",
.start = 0xe6d60000, .start = 0xe6d60000,
.end = 0xe6d601ff, .end = 0xe6d600ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
......
...@@ -32,10 +32,10 @@ ...@@ -32,10 +32,10 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/i2c.h> #include <linux/i2c.h>
#include <linux/leds.h> #include <linux/leds.h>
#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h> #include <linux/mfd/tmio.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h> #include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/mtd.h> #include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h> #include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h> #include <linux/mtd/physmap.h>
...@@ -690,7 +690,7 @@ static struct resource sdhi0_resources[] = { ...@@ -690,7 +690,7 @@ static struct resource sdhi0_resources[] = {
[0] = { [0] = {
.name = "SDHI0", .name = "SDHI0",
.start = 0xe6850000, .start = 0xe6850000,
.end = 0xe68501ff, .end = 0xe68500ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
...@@ -725,7 +725,7 @@ static struct resource sdhi1_resources[] = { ...@@ -725,7 +725,7 @@ static struct resource sdhi1_resources[] = {
[0] = { [0] = {
.name = "SDHI1", .name = "SDHI1",
.start = 0xe6860000, .start = 0xe6860000,
.end = 0xe68601ff, .end = 0xe68600ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
...@@ -768,7 +768,7 @@ static struct resource sdhi2_resources[] = { ...@@ -768,7 +768,7 @@ static struct resource sdhi2_resources[] = {
[0] = { [0] = {
.name = "SDHI2", .name = "SDHI2",
.start = 0xe6870000, .start = 0xe6870000,
.end = 0xe68701ff, .end = 0xe68700ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
......
...@@ -14,8 +14,8 @@ ...@@ -14,8 +14,8 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h> #include <linux/mtd/physmap.h>
#include <linux/mtd/sh_flctl.h> #include <linux/mtd/sh_flctl.h>
#include <linux/delay.h> #include <linux/delay.h>
...@@ -423,7 +423,7 @@ static struct resource sdhi0_cn3_resources[] = { ...@@ -423,7 +423,7 @@ static struct resource sdhi0_cn3_resources[] = {
[0] = { [0] = {
.name = "SDHI0", .name = "SDHI0",
.start = 0x04ce0000, .start = 0x04ce0000,
.end = 0x04ce01ff, .end = 0x04ce00ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
...@@ -453,7 +453,7 @@ static struct resource sdhi1_cn7_resources[] = { ...@@ -453,7 +453,7 @@ static struct resource sdhi1_cn7_resources[] = {
[0] = { [0] = {
.name = "SDHI1", .name = "SDHI1",
.start = 0x04cf0000, .start = 0x04cf0000,
.end = 0x04cf01ff, .end = 0x04cf00ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
......
...@@ -11,9 +11,9 @@ ...@@ -11,9 +11,9 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h> #include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h> #include <linux/mtd/physmap.h>
#include <linux/gpio.h> #include <linux/gpio.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
...@@ -464,7 +464,7 @@ static struct i2c_board_info ts_i2c_clients = { ...@@ -464,7 +464,7 @@ static struct i2c_board_info ts_i2c_clients = {
.irq = IRQ0, .irq = IRQ0,
}; };
#ifdef CONFIG_MFD_SH_MOBILE_SDHI #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
/* SDHI0 */ /* SDHI0 */
static void sdhi0_set_pwr(struct platform_device *pdev, int state) static void sdhi0_set_pwr(struct platform_device *pdev, int state)
{ {
...@@ -482,7 +482,7 @@ static struct resource sdhi0_resources[] = { ...@@ -482,7 +482,7 @@ static struct resource sdhi0_resources[] = {
[0] = { [0] = {
.name = "SDHI0", .name = "SDHI0",
.start = 0x04ce0000, .start = 0x04ce0000,
.end = 0x04ce01ff, .end = 0x04ce00ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
...@@ -522,7 +522,7 @@ static struct resource sdhi1_resources[] = { ...@@ -522,7 +522,7 @@ static struct resource sdhi1_resources[] = {
[0] = { [0] = {
.name = "SDHI1", .name = "SDHI1",
.start = 0x04cf0000, .start = 0x04cf0000,
.end = 0x04cf01ff, .end = 0x04cf00ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
...@@ -880,7 +880,7 @@ static struct platform_device *ecovec_devices[] __initdata = { ...@@ -880,7 +880,7 @@ static struct platform_device *ecovec_devices[] __initdata = {
&ceu0_device, &ceu0_device,
&ceu1_device, &ceu1_device,
&keysc_device, &keysc_device,
#ifdef CONFIG_MFD_SH_MOBILE_SDHI #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
&sdhi0_device, &sdhi0_device,
#if !defined(CONFIG_MMC_SH_MMCIF) #if !defined(CONFIG_MMC_SH_MMCIF)
&sdhi1_device, &sdhi1_device,
...@@ -1162,7 +1162,7 @@ static int __init arch_setup(void) ...@@ -1162,7 +1162,7 @@ static int __init arch_setup(void)
gpio_direction_input(GPIO_PTR5); gpio_direction_input(GPIO_PTR5);
gpio_direction_input(GPIO_PTR6); gpio_direction_input(GPIO_PTR6);
#ifdef CONFIG_MFD_SH_MOBILE_SDHI #if defined(CONFIG_MMC_TMIO) || defined(CONFIG_MMC_TMIO_MODULE)
/* enable SDHI0 on CN11 (needs DS2.4 set to ON) */ /* enable SDHI0 on CN11 (needs DS2.4 set to ON) */
gpio_request(GPIO_FN_SDHI0CD, NULL); gpio_request(GPIO_FN_SDHI0CD, NULL);
gpio_request(GPIO_FN_SDHI0WP, NULL); gpio_request(GPIO_FN_SDHI0WP, NULL);
......
...@@ -10,8 +10,8 @@ ...@@ -10,8 +10,8 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h> #include <linux/mfd/tmio.h>
#include <linux/mtd/physmap.h> #include <linux/mtd/physmap.h>
#include <linux/mtd/onenand.h> #include <linux/mtd/onenand.h>
...@@ -354,7 +354,7 @@ static struct resource kfr2r09_sh_sdhi0_resources[] = { ...@@ -354,7 +354,7 @@ static struct resource kfr2r09_sh_sdhi0_resources[] = {
[0] = { [0] = {
.name = "SDHI0", .name = "SDHI0",
.start = 0x04ce0000, .start = 0x04ce0000,
.end = 0x04ce01ff, .end = 0x04ce00ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
......
...@@ -12,8 +12,8 @@ ...@@ -12,8 +12,8 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/input.h> #include <linux/input.h>
#include <linux/input/sh_keysc.h> #include <linux/input/sh_keysc.h>
#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h> #include <linux/mtd/physmap.h>
#include <linux/mtd/nand.h> #include <linux/mtd/nand.h>
#include <linux/i2c.h> #include <linux/i2c.h>
...@@ -399,7 +399,7 @@ static struct resource sdhi_cn9_resources[] = { ...@@ -399,7 +399,7 @@ static struct resource sdhi_cn9_resources[] = {
[0] = { [0] = {
.name = "SDHI", .name = "SDHI",
.start = 0x04ce0000, .start = 0x04ce0000,
.end = 0x04ce01ff, .end = 0x04ce00ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
......
...@@ -14,8 +14,8 @@ ...@@ -14,8 +14,8 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mtd/physmap.h> #include <linux/mtd/physmap.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/smc91x.h> #include <linux/smc91x.h>
...@@ -456,7 +456,7 @@ static struct resource sdhi0_cn7_resources[] = { ...@@ -456,7 +456,7 @@ static struct resource sdhi0_cn7_resources[] = {
[0] = { [0] = {
.name = "SDHI0", .name = "SDHI0",
.start = 0x04ce0000, .start = 0x04ce0000,
.end = 0x04ce01ff, .end = 0x04ce00ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
...@@ -488,7 +488,7 @@ static struct resource sdhi1_cn8_resources[] = { ...@@ -488,7 +488,7 @@ static struct resource sdhi1_cn8_resources[] = {
[0] = { [0] = {
.name = "SDHI1", .name = "SDHI1",
.start = 0x04cf0000, .start = 0x04cf0000,
.end = 0x04cf01ff, .end = 0x04cf00ff,
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { [1] = {
......
...@@ -60,15 +60,6 @@ config MFD_ASIC3 ...@@ -60,15 +60,6 @@ config MFD_ASIC3
This driver supports the ASIC3 multifunction chip found on many This driver supports the ASIC3 multifunction chip found on many
PDAs (mainly iPAQ and HTC based ones) PDAs (mainly iPAQ and HTC based ones)
config MFD_SH_MOBILE_SDHI
bool "Support for SuperH Mobile SDHI"
depends on SUPERH || ARCH_SHMOBILE
select MFD_CORE
select TMIO_MMC_DMA
---help---
This driver supports the SDHI hardware block found in many
SuperH Mobile SoCs.
config MFD_DAVINCI_VOICECODEC config MFD_DAVINCI_VOICECODEC
tristate tristate
select MFD_CORE select MFD_CORE
...@@ -266,11 +257,6 @@ config MFD_TMIO ...@@ -266,11 +257,6 @@ config MFD_TMIO
bool bool
default n default n
config TMIO_MMC_DMA
bool
select DMA_ENGINE
select DMADEVICES
config MFD_T7L66XB config MFD_T7L66XB
bool "Support Toshiba T7L66XB" bool "Support Toshiba T7L66XB"
depends on ARM && HAVE_CLK depends on ARM && HAVE_CLK
......
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o
obj-$(CONFIG_MFD_SM501) += sm501.o obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o
obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o obj-$(CONFIG_HTC_EGPIO) += htc-egpio.o
obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
......
...@@ -1875,7 +1875,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write, ...@@ -1875,7 +1875,7 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
unsigned int tot_sz, int max_scatter) unsigned int tot_sz, int max_scatter)
{ {
unsigned int dev_addr, i, cnt, sz, ssz; unsigned int dev_addr, i, cnt, sz, ssz;
struct timespec ts1, ts2, ts; struct timespec ts1, ts2;
int ret; int ret;
sz = test->area.max_tfr; sz = test->area.max_tfr;
...@@ -1912,7 +1912,6 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write, ...@@ -1912,7 +1912,6 @@ static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
} }
getnstimeofday(&ts2); getnstimeofday(&ts2);
ts = timespec_sub(ts2, ts1);
mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
return 0; return 0;
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
* your option) any later version. * your option) any later version.
*/ */
#include <linux/slab.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
...@@ -252,6 +253,7 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) ...@@ -252,6 +253,7 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
struct mmc_command cmd; struct mmc_command cmd;
struct mmc_data data; struct mmc_data data;
struct scatterlist sg; struct scatterlist sg;
void *data_buf;
BUG_ON(!card); BUG_ON(!card);
BUG_ON(!card->host); BUG_ON(!card->host);
...@@ -263,6 +265,13 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) ...@@ -263,6 +265,13 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
if (err) if (err)
return err; return err;
/* dma onto stack is unsafe/nonportable, but callers to this
* routine normally provide temporary on-stack buffers ...
*/
data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
if (data_buf == NULL)
return -ENOMEM;
memset(&mrq, 0, sizeof(struct mmc_request)); memset(&mrq, 0, sizeof(struct mmc_request));
memset(&cmd, 0, sizeof(struct mmc_command)); memset(&cmd, 0, sizeof(struct mmc_command));
memset(&data, 0, sizeof(struct mmc_data)); memset(&data, 0, sizeof(struct mmc_data));
...@@ -280,12 +289,15 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) ...@@ -280,12 +289,15 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
data.sg = &sg; data.sg = &sg;
data.sg_len = 1; data.sg_len = 1;
sg_init_one(&sg, scr, 8); sg_init_one(&sg, data_buf, 8);
mmc_set_data_timeout(&data, card); mmc_set_data_timeout(&data, card);
mmc_wait_for_req(card->host, &mrq); mmc_wait_for_req(card->host, &mrq);
memcpy(scr, data_buf, sizeof(card->raw_scr));
kfree(data_buf);
if (cmd.error) if (cmd.error)
return cmd.error; return cmd.error;
if (data.error) if (data.error)
......
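The hunk above is the core of the mmc_app_send_scr() DMA fix: callers often pass an on-stack scr buffer, which must never be handed to a DMA engine, so the transfer now goes through a kmalloc'd bounce buffer and the result is copied back afterwards. A minimal sketch of that pattern follows; the do_transfer() helper is hypothetical and stands in for the mmc_request/mmc_wait_for_req() plumbing shown in the diff.

/* Sketch only: DMA-safe bounce buffer around a caller-supplied buffer.
 * do_transfer() is a hypothetical stand-in for the request machinery. */
static int read_scr_dma_safe(struct mmc_card *card, u32 *scr)
{
	void *data_buf;
	int err;

	/* The caller's scr may live on the stack: never map it for DMA. */
	data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	err = do_transfer(card, data_buf, sizeof(card->raw_scr));

	/* Copy back and free before acting on the transfer status. */
	memcpy(scr, data_buf, sizeof(card->raw_scr));
	kfree(data_buf);

	return err;
}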
...@@ -439,13 +439,25 @@ config MMC_SDRICOH_CS ...@@ -439,13 +439,25 @@ config MMC_SDRICOH_CS
To compile this driver as a module, choose M here: the To compile this driver as a module, choose M here: the
module will be called sdricoh_cs. module will be called sdricoh_cs.
config MMC_TMIO_CORE
tristate
config MMC_TMIO config MMC_TMIO
tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI depends on MFD_TMIO || MFD_ASIC3
select MMC_TMIO_CORE
help help
This provides support for the SD/MMC cell found in TC6393XB, This provides support for the SD/MMC cell found in TC6393XB,
T7L66XB and also HTC ASIC3 T7L66XB and also HTC ASIC3
config MMC_SDHI
tristate "SH-Mobile SDHI SD/SDIO controller support"
depends on SUPERH || ARCH_SHMOBILE
select MMC_TMIO_CORE
help
This provides support for the SDHI SD/SDIO controller found in
SuperH and ARM SH-Mobile SoCs
config MMC_CB710 config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support" tristate "ENE CB710 MMC/SD Interface support"
depends on PCI depends on PCI
......
...@@ -29,7 +29,13 @@ endif ...@@ -29,7 +29,13 @@ endif
obj-$(CONFIG_MMC_S3C) += s3cmci.o obj-$(CONFIG_MMC_S3C) += s3cmci.o
obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
tmio_mmc_core-y := tmio_mmc_pio.o
ifneq ($(CONFIG_MMC_SDHI),n)
tmio_mmc_core-y += tmio_mmc_dma.o
endif
obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
obj-$(CONFIG_MMC_DW) += dw_mmc.o obj-$(CONFIG_MMC_DW) += dw_mmc.o
......
...@@ -316,7 +316,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host) ...@@ -316,7 +316,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host)
/* Stop the IDMAC running */ /* Stop the IDMAC running */
temp = mci_readl(host, BMOD); temp = mci_readl(host, BMOD);
temp &= ~SDMMC_IDMAC_ENABLE; temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
mci_writel(host, BMOD, temp); mci_writel(host, BMOD, temp);
} }
...@@ -385,7 +385,7 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) ...@@ -385,7 +385,7 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
/* Enable the IDMAC */ /* Enable the IDMAC */
temp = mci_readl(host, BMOD); temp = mci_readl(host, BMOD);
temp |= SDMMC_IDMAC_ENABLE; temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
mci_writel(host, BMOD, temp); mci_writel(host, BMOD, temp);
/* Start it running */ /* Start it running */
......
...@@ -68,6 +68,12 @@ static struct variant_data variant_arm = { ...@@ -68,6 +68,12 @@ static struct variant_data variant_arm = {
.datalength_bits = 16, .datalength_bits = 16,
}; };
static struct variant_data variant_arm_extended_fifo = {
.fifosize = 128 * 4,
.fifohalfsize = 64 * 4,
.datalength_bits = 16,
};
static struct variant_data variant_u300 = { static struct variant_data variant_u300 = {
.fifosize = 16 * 4, .fifosize = 16 * 4,
.fifohalfsize = 8 * 4, .fifohalfsize = 8 * 4,
...@@ -1277,9 +1283,14 @@ static int mmci_resume(struct amba_device *dev) ...@@ -1277,9 +1283,14 @@ static int mmci_resume(struct amba_device *dev)
static struct amba_id mmci_ids[] = { static struct amba_id mmci_ids[] = {
{ {
.id = 0x00041180, .id = 0x00041180,
.mask = 0x000fffff, .mask = 0xff0fffff,
.data = &variant_arm, .data = &variant_arm,
}, },
{
.id = 0x01041180,
.mask = 0xff0fffff,
.data = &variant_arm_extended_fifo,
},
{ {
.id = 0x00041181, .id = 0x00041181,
.mask = 0x000fffff, .mask = 0x000fffff,
......
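Why widening the mask matters: the AMBA bus picks an entry from this table by masking the device's peripheral ID, roughly as in the simplified sketch below (assumed to mirror the core's lookup; it is not code from this patch). With the mask changed to 0xff0fffff the configuration byte (bits 31:24) now takes part in the comparison, so the 0x01041180 part is steered to variant_arm_extended_fifo, while the revision field (bits 23:20) is still ignored.

/* Sketch: matching a peripheral ID against an id/mask/data table
 * (simplified local types, assumed to mirror the AMBA core's lookup). */
struct amba_id_sketch {
	unsigned int id;
	unsigned int mask;
	void *data;
};

static const struct amba_id_sketch *
match_periphid(unsigned int periphid, const struct amba_id_sketch *table)
{
	for (; table->mask; table++)
		if ((periphid & table->mask) == table->id)
			return table;
	return NULL;
}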
...@@ -15,9 +15,11 @@ ...@@ -15,9 +15,11 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/irq.h>
#include <linux/gpio.h> #include <linux/gpio.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_gpio.h> #include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/spi/spi.h> #include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h> #include <linux/spi/mmc_spi.h>
#include <linux/mmc/core.h> #include <linux/mmc/core.h>
......
...@@ -16,14 +16,40 @@ ...@@ -16,14 +16,40 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/gpio.h> #include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include <linux/mmc/sdhci-pltfm.h> #include <linux/mmc/sdhci-pltfm.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <mach/hardware.h> #include <mach/hardware.h>
#include <mach/esdhc.h> #include <mach/esdhc.h>
#include "sdhci.h" #include "sdhci.h"
#include "sdhci-pltfm.h" #include "sdhci-pltfm.h"
#include "sdhci-esdhc.h" #include "sdhci-esdhc.h"
/* VENDOR SPEC register */
#define SDHCI_VENDOR_SPEC 0xC0
#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
#define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0)
/*
* The CMDTYPE of the CMD register (offset 0xE) should be set to
* "11" when the STOP CMD12 is issued on imx53 to abort one
* open ended multi-blk IO. Otherwise the TC INT wouldn't
* be generated.
* In exact block transfer, the controller doesn't complete the
* operations automatically as required at the end of the
* transfer and remains on hold if the abort command is not sent.
* As a result, the TC flag is not asserted and SW receives a timeout
* exception. Bit 1 of the Vendor Spec register is used to fix it.
*/
#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1)
struct pltfm_imx_data {
int flags;
u32 scratchpad;
};
static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)
{ {
void __iomem *base = host->ioaddr + (reg & ~0x3); void __iomem *base = host->ioaddr + (reg & ~0x3);
...@@ -34,10 +60,14 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i ...@@ -34,10 +60,14 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i
static u32 esdhc_readl_le(struct sdhci_host *host, int reg) static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
{ {
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
/* fake CARD_PRESENT flag on mx25/35 */ /* fake CARD_PRESENT flag on mx25/35 */
u32 val = readl(host->ioaddr + reg); u32 val = readl(host->ioaddr + reg);
if (unlikely(reg == SDHCI_PRESENT_STATE)) { if (unlikely((reg == SDHCI_PRESENT_STATE)
&& (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) {
struct esdhc_platform_data *boarddata = struct esdhc_platform_data *boarddata =
host->mmc->parent->platform_data; host->mmc->parent->platform_data;
...@@ -55,13 +85,26 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) ...@@ -55,13 +85,26 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
{ {
if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)
&& (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP)))
/* /*
* these interrupts won't work with a custom card_detect gpio * these interrupts won't work with a custom card_detect gpio
* (only applied to mx25/35) * (only applied to mx25/35)
*/ */
val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
&& (reg == SDHCI_INT_STATUS)
&& (val & SDHCI_INT_DATA_END))) {
u32 v;
v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK;
writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
}
writel(val, host->ioaddr + reg); writel(val, host->ioaddr + reg);
} }
...@@ -76,6 +119,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg) ...@@ -76,6 +119,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
{ {
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = pltfm_host->priv;
switch (reg) { switch (reg) {
case SDHCI_TRANSFER_MODE: case SDHCI_TRANSFER_MODE:
...@@ -83,10 +127,22 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) ...@@ -83,10 +127,22 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
* Postpone this write, we must do it together with a * Postpone this write, we must do it together with a
* command write that is down below. * command write that is down below.
*/ */
pltfm_host->scratchpad = val; if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)
&& (host->cmd->opcode == SD_IO_RW_EXTENDED)
&& (host->cmd->data->blocks > 1)
&& (host->cmd->data->flags & MMC_DATA_READ)) {
u32 v;
v = readl(host->ioaddr + SDHCI_VENDOR_SPEC);
v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK;
writel(v, host->ioaddr + SDHCI_VENDOR_SPEC);
}
imx_data->scratchpad = val;
return; return;
case SDHCI_COMMAND: case SDHCI_COMMAND:
writel(val << 16 | pltfm_host->scratchpad, if ((host->cmd->opcode == MMC_STOP_TRANSMISSION)
&& (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
val |= SDHCI_CMD_ABORTCMD;
writel(val << 16 | imx_data->scratchpad,
host->ioaddr + SDHCI_TRANSFER_MODE); host->ioaddr + SDHCI_TRANSFER_MODE);
return; return;
case SDHCI_BLOCK_SIZE: case SDHCI_BLOCK_SIZE:
...@@ -146,7 +202,9 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) ...@@ -146,7 +202,9 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
} }
static struct sdhci_ops sdhci_esdhc_ops = { static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le, .read_w = esdhc_readw_le,
.write_l = esdhc_writel_le,
.write_w = esdhc_writew_le, .write_w = esdhc_writew_le,
.write_b = esdhc_writeb_le, .write_b = esdhc_writeb_le,
.set_clock = esdhc_set_clock, .set_clock = esdhc_set_clock,
...@@ -168,6 +226,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd ...@@ -168,6 +226,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
struct clk *clk; struct clk *clk;
int err; int err;
struct pltfm_imx_data *imx_data;
clk = clk_get(mmc_dev(host->mmc), NULL); clk = clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) { if (IS_ERR(clk)) {
...@@ -177,7 +236,15 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd ...@@ -177,7 +236,15 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
clk_enable(clk); clk_enable(clk);
pltfm_host->clk = clk; pltfm_host->clk = clk;
if (cpu_is_mx35() || cpu_is_mx51()) imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
if (!imx_data) {
clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
return -ENOMEM;
}
pltfm_host->priv = imx_data;
if (!cpu_is_mx25())
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
if (cpu_is_mx25() || cpu_is_mx35()) { if (cpu_is_mx25() || cpu_is_mx35()) {
...@@ -187,6 +254,9 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd ...@@ -187,6 +254,9 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro;
} }
if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51()))
imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
if (boarddata) { if (boarddata) {
err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
if (err) { if (err) {
...@@ -214,8 +284,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd ...@@ -214,8 +284,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
goto no_card_detect_irq; goto no_card_detect_irq;
} }
sdhci_esdhc_ops.write_l = esdhc_writel_le; imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP;
sdhci_esdhc_ops.read_l = esdhc_readl_le;
/* Now we have a working card_detect again */ /* Now we have a working card_detect again */
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
} }
...@@ -227,6 +296,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd ...@@ -227,6 +296,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
no_card_detect_pin: no_card_detect_pin:
boarddata->cd_gpio = err; boarddata->cd_gpio = err;
not_supported: not_supported:
kfree(imx_data);
return 0; return 0;
} }
...@@ -234,6 +304,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host) ...@@ -234,6 +304,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host)
{ {
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
struct pltfm_imx_data *imx_data = pltfm_host->priv;
if (boarddata && gpio_is_valid(boarddata->wp_gpio)) if (boarddata && gpio_is_valid(boarddata->wp_gpio))
gpio_free(boarddata->wp_gpio); gpio_free(boarddata->wp_gpio);
...@@ -247,6 +318,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host) ...@@ -247,6 +318,7 @@ static void esdhc_pltfm_exit(struct sdhci_host *host)
clk_disable(pltfm_host->clk); clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk); clk_put(pltfm_host->clk);
kfree(imx_data);
} }
struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
......
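For reference, the CMDTYPE workaround described in the comment above boils down to setting bits 7:6 of the SDHCI command value (command type = abort, i.e. 0xC0) when CMD12 is issued on the affected parts, folded into the single 32-bit write that covers both the 16-bit transfer-mode and command registers. A condensed, hypothetical sketch of that path; the SKETCH_* constants restate values assumed from the surrounding diff.

/* Sketch only: where the abort CMDTYPE ends up on i.MX53.  xfer_mode is
 * the postponed TRANSFER MODE value, cmd_val the COMMAND register value. */
#define SKETCH_SDHCI_TRANSFER_MODE	0x0C	/* COMMAND follows at 0x0E */
#define SKETCH_SDHCI_CMD_ABORTCMD	0xC0	/* command type = 11b (abort) */

static void esdhc_write_command(void __iomem *ioaddr, u16 cmd_val,
				u16 xfer_mode, bool is_stop_cmd,
				bool multiblk_no_int)
{
	if (is_stop_cmd && multiblk_no_int)
		cmd_val |= SKETCH_SDHCI_CMD_ABORTCMD;

	/* One little-endian 32-bit write updates both 16-bit registers. */
	writel((u32)cmd_val << 16 | xfer_mode,
	       ioaddr + SKETCH_SDHCI_TRANSFER_MODE);
}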
...@@ -23,8 +23,7 @@ ...@@ -23,8 +23,7 @@
SDHCI_QUIRK_NONSTANDARD_CLOCK | \ SDHCI_QUIRK_NONSTANDARD_CLOCK | \
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \
SDHCI_QUIRK_PIO_NEEDS_DELAY | \ SDHCI_QUIRK_PIO_NEEDS_DELAY | \
SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \ SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
SDHCI_QUIRK_NO_CARD_NO_RESET)
#define ESDHC_SYSTEM_CONTROL 0x2c #define ESDHC_SYSTEM_CONTROL 0x2c
#define ESDHC_CLOCK_MASK 0x0000fff0 #define ESDHC_CLOCK_MASK 0x0000fff0
......
...@@ -74,7 +74,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) ...@@ -74,7 +74,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
struct sdhci_of_data sdhci_esdhc = { struct sdhci_of_data sdhci_esdhc = {
/* card detection could be handled via GPIO */ /* card detection could be handled via GPIO */
.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION, .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
| SDHCI_QUIRK_NO_CARD_NO_RESET,
.ops = { .ops = {
.read_l = sdhci_be32bs_readl, .read_l = sdhci_be32bs_readl,
.read_w = esdhc_readw, .read_w = esdhc_readw,
......
...@@ -1016,16 +1016,14 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev, ...@@ -1016,16 +1016,14 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
struct sdhci_pci_chip *chip; struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot; struct sdhci_pci_slot *slot;
u8 slots, rev, first_bar; u8 slots, first_bar;
int ret, i; int ret, i;
BUG_ON(pdev == NULL); BUG_ON(pdev == NULL);
BUG_ON(ent == NULL); BUG_ON(ent == NULL);
pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n",
(int)pdev->vendor, (int)pdev->device, (int)rev); (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots); ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);
if (ret) if (ret)
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
struct sdhci_pltfm_host { struct sdhci_pltfm_host {
struct clk *clk; struct clk *clk;
u32 scratchpad; /* to handle quirks across io-accessor calls */ void *priv; /* to handle quirks across io-accessor calls */
}; };
extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#define SDHCI_CMD_CRC 0x08 #define SDHCI_CMD_CRC 0x08
#define SDHCI_CMD_INDEX 0x10 #define SDHCI_CMD_INDEX 0x10
#define SDHCI_CMD_DATA 0x20 #define SDHCI_CMD_DATA 0x20
#define SDHCI_CMD_ABORTCMD 0xC0
#define SDHCI_CMD_RESP_NONE 0x00 #define SDHCI_CMD_RESP_NONE 0x00
#define SDHCI_CMD_RESP_LONG 0x01 #define SDHCI_CMD_RESP_LONG 0x01
......
...@@ -23,51 +23,30 @@ ...@@ -23,51 +23,30 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include <linux/mfd/core.h> #include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h> #include <linux/mfd/tmio.h>
#include <linux/mfd/sh_mobile_sdhi.h>
#include <linux/sh_dma.h> #include <linux/sh_dma.h>
#include "tmio_mmc.h"
struct sh_mobile_sdhi { struct sh_mobile_sdhi {
struct clk *clk; struct clk *clk;
struct tmio_mmc_data mmc_data; struct tmio_mmc_data mmc_data;
struct mfd_cell cell_mmc;
struct sh_dmae_slave param_tx; struct sh_dmae_slave param_tx;
struct sh_dmae_slave param_rx; struct sh_dmae_slave param_rx;
struct tmio_mmc_dma dma_priv; struct tmio_mmc_dma dma_priv;
}; };
static struct resource sh_mobile_sdhi_resources[] = { static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state)
{
.start = 0x000,
.end = 0x1ff,
.flags = IORESOURCE_MEM,
},
{
.start = 0,
.end = 0,
.flags = IORESOURCE_IRQ,
},
};
static struct mfd_cell sh_mobile_sdhi_cell = {
.name = "tmio-mmc",
.num_resources = ARRAY_SIZE(sh_mobile_sdhi_resources),
.resources = sh_mobile_sdhi_resources,
};
static void sh_mobile_sdhi_set_pwr(struct platform_device *tmio, int state)
{ {
struct platform_device *pdev = to_platform_device(tmio->dev.parent);
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
if (p && p->set_pwr) if (p && p->set_pwr)
p->set_pwr(pdev, state); p->set_pwr(pdev, state);
} }
static int sh_mobile_sdhi_get_cd(struct platform_device *tmio) static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
{ {
struct platform_device *pdev = to_platform_device(tmio->dev.parent);
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
if (p && p->get_cd) if (p && p->get_cd)
...@@ -81,20 +60,9 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) ...@@ -81,20 +60,9 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
struct sh_mobile_sdhi *priv; struct sh_mobile_sdhi *priv;
struct tmio_mmc_data *mmc_data; struct tmio_mmc_data *mmc_data;
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
struct resource *mem; struct tmio_mmc_host *host;
char clk_name[8]; char clk_name[8];
int ret, irq; int ret;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
dev_err(&pdev->dev, "missing MEM resource\n");
irq = platform_get_irq(pdev, 0);
if (irq < 0)
dev_err(&pdev->dev, "missing IRQ resource\n");
if (!mem || (irq < 0))
return -EINVAL;
priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
if (priv == NULL) { if (priv == NULL) {
...@@ -109,8 +77,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) ...@@ -109,8 +77,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk)) { if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
ret = PTR_ERR(priv->clk); ret = PTR_ERR(priv->clk);
kfree(priv); goto eclkget;
return ret;
} }
clk_enable(priv->clk); clk_enable(priv->clk);
...@@ -123,6 +90,15 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) ...@@ -123,6 +90,15 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->flags = p->tmio_flags; mmc_data->flags = p->tmio_flags;
mmc_data->ocr_mask = p->tmio_ocr_mask; mmc_data->ocr_mask = p->tmio_ocr_mask;
mmc_data->capabilities |= p->tmio_caps; mmc_data->capabilities |= p->tmio_caps;
if (p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
priv->param_tx.slave_id = p->dma_slave_tx;
priv->param_rx.slave_id = p->dma_slave_rx;
priv->dma_priv.chan_priv_tx = &priv->param_tx;
priv->dma_priv.chan_priv_rx = &priv->param_rx;
priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
mmc_data->dma = &priv->dma_priv;
}
} }
/* /*
...@@ -136,36 +112,30 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) ...@@ -136,36 +112,30 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
*/ */
mmc_data->flags |= TMIO_MMC_SDIO_IRQ; mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) { ret = tmio_mmc_host_probe(&host, pdev, mmc_data);
priv->param_tx.slave_id = p->dma_slave_tx; if (ret < 0)
priv->param_rx.slave_id = p->dma_slave_rx; goto eprobe;
priv->dma_priv.chan_priv_tx = &priv->param_tx;
priv->dma_priv.chan_priv_rx = &priv->param_rx;
priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
mmc_data->dma = &priv->dma_priv;
}
memcpy(&priv->cell_mmc, &sh_mobile_sdhi_cell, sizeof(priv->cell_mmc)); pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
priv->cell_mmc.mfd_data = mmc_data; (unsigned long)host->ctl, host->irq);
platform_set_drvdata(pdev, priv); return ret;
ret = mfd_add_devices(&pdev->dev, pdev->id,
&priv->cell_mmc, 1, mem, irq);
if (ret) {
clk_disable(priv->clk);
clk_put(priv->clk);
kfree(priv);
}
eprobe:
clk_disable(priv->clk);
clk_put(priv->clk);
eclkget:
kfree(priv);
return ret; return ret;
} }
static int sh_mobile_sdhi_remove(struct platform_device *pdev) static int sh_mobile_sdhi_remove(struct platform_device *pdev)
{ {
struct sh_mobile_sdhi *priv = platform_get_drvdata(pdev); struct mmc_host *mmc = platform_get_drvdata(pdev);
struct tmio_mmc_host *host = mmc_priv(mmc);
struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
mfd_remove_devices(&pdev->dev); tmio_mmc_host_remove(host);
clk_disable(priv->clk); clk_disable(priv->clk);
clk_put(priv->clk); clk_put(priv->clk);
kfree(priv); kfree(priv);
...@@ -198,3 +168,4 @@ module_exit(sh_mobile_sdhi_exit); ...@@ -198,3 +168,4 @@ module_exit(sh_mobile_sdhi_exit);
MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
MODULE_AUTHOR("Magnus Damm"); MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sh_mobile_sdhi");
/* /*
* linux/drivers/mmc/tmio_mmc.c * linux/drivers/mmc/host/tmio_mmc.c
* *
* Copyright (C) 2004 Ian Molton * Copyright (C) 2007 Ian Molton
* Copyright (C) 2007 Ian Molton * Copyright (C) 2004 Ian Molton
* *
* This program is free software; you can redistribute it and/or modify * This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as * it under the terms of the GNU General Public License version 2 as
...@@ -11,1182 +11,17 @@ ...@@ -11,1182 +11,17 @@
* Driver for the MMC / SD / SDIO cell found in: * Driver for the MMC / SD / SDIO cell found in:
* *
* TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
*
* This driver draws mainly on scattered spec sheets, Reverse engineering
* of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
* support). (Further 4 bit support from a later datasheet).
*
* TODO:
* Investigate using a workqueue for PIO transfers
* Eliminate FIXMEs
* SDIO support
* Better Power management
* Handle MMC errors better
* double buffer support
*
*/ */
#include <linux/delay.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/core.h> #include <linux/mfd/core.h>
#include <linux/mfd/tmio.h> #include <linux/mfd/tmio.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
#define CTL_STOP_INTERNAL_ACTION 0x08
#define CTL_XFER_BLK_COUNT 0xa
#define CTL_RESPONSE 0x0c
#define CTL_STATUS 0x1c
#define CTL_IRQ_MASK 0x20
#define CTL_SD_CARD_CLK_CTL 0x24
#define CTL_SD_XFER_LEN 0x26
#define CTL_SD_MEM_CARD_OPT 0x28
#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
#define CTL_SD_DATA_PORT 0x30
#define CTL_TRANSACTION_CTL 0x34
#define CTL_SDIO_STATUS 0x36
#define CTL_SDIO_IRQ_MASK 0x38
#define CTL_RESET_SD 0xe0
#define CTL_SDIO_REGS 0x100
#define CTL_CLK_AND_WAIT_CTL 0x138
#define CTL_RESET_SDIO 0x1e0
/* Definitions for values the CTRL_STATUS register can take. */
#define TMIO_STAT_CMDRESPEND 0x00000001
#define TMIO_STAT_DATAEND 0x00000004
#define TMIO_STAT_CARD_REMOVE 0x00000008
#define TMIO_STAT_CARD_INSERT 0x00000010
#define TMIO_STAT_SIGSTATE 0x00000020
#define TMIO_STAT_WRPROTECT 0x00000080
#define TMIO_STAT_CARD_REMOVE_A 0x00000100
#define TMIO_STAT_CARD_INSERT_A 0x00000200
#define TMIO_STAT_SIGSTATE_A 0x00000400
#define TMIO_STAT_CMD_IDX_ERR 0x00010000
#define TMIO_STAT_CRCFAIL 0x00020000
#define TMIO_STAT_STOPBIT_ERR 0x00040000
#define TMIO_STAT_DATATIMEOUT 0x00080000
#define TMIO_STAT_RXOVERFLOW 0x00100000
#define TMIO_STAT_TXUNDERRUN 0x00200000
#define TMIO_STAT_CMDTIMEOUT 0x00400000
#define TMIO_STAT_RXRDY 0x01000000
#define TMIO_STAT_TXRQ 0x02000000
#define TMIO_STAT_ILL_FUNC 0x20000000
#define TMIO_STAT_CMD_BUSY 0x40000000
#define TMIO_STAT_ILL_ACCESS 0x80000000
/* Definitions for values the CTRL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ 0x0001
#define TMIO_SDIO_STAT_EXPUB52 0x4000
#define TMIO_SDIO_STAT_EXWT 0x8000
#define TMIO_SDIO_MASK_ALL 0xc007
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
#define enable_mmc_irqs(host, i) \
do { \
u32 mask;\
mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
mask &= ~((i) & TMIO_MASK_IRQ); \
sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
} while (0)
#define disable_mmc_irqs(host, i) \
do { \
u32 mask;\
mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
mask |= ((i) & TMIO_MASK_IRQ); \
sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
} while (0)
#define ack_mmc_irqs(host, i) \
do { \
sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
} while (0)
/* This is arbitrary, just noone needed any higher alignment yet */
#define MAX_ALIGN 4
struct tmio_mmc_host {
void __iomem *ctl;
unsigned long bus_shift;
struct mmc_command *cmd;
struct mmc_request *mrq;
struct mmc_data *data;
struct mmc_host *mmc;
int irq;
unsigned int sdio_irq_enabled;
/* Callbacks for clock / power control */
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
/* pio related stuff */
struct scatterlist *sg_ptr;
struct scatterlist *sg_orig;
unsigned int sg_len;
unsigned int sg_off;
struct platform_device *pdev;
/* DMA support */
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
#ifdef CONFIG_TMIO_MMC_DMA
u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
struct scatterlist bounce_sg;
#endif
/* Track lost interrupts */
struct delayed_work delayed_reset_work;
spinlock_t lock;
unsigned long last_req_ts;
};
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
return readw(host->ctl + (addr << host->bus_shift));
}
static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
readsw(host->ctl + (addr << host->bus_shift), buf, count);
}
static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
return readw(host->ctl + (addr << host->bus_shift)) |
readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
writew(val, host->ctl + (addr << host->bus_shift));
}
static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
writesw(host->ctl + (addr << host->bus_shift), buf, count);
}
static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
writew(val, host->ctl + (addr << host->bus_shift));
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
host->sg_len = data->sg_len;
host->sg_ptr = data->sg;
host->sg_orig = data->sg;
host->sg_off = 0;
}
static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
host->sg_ptr = sg_next(host->sg_ptr);
host->sg_off = 0;
return --host->sg_len;
}
static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
local_irq_save(*flags);
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
static void tmio_mmc_kunmap_atomic(struct scatterlist *sg, unsigned long *flags, void *virt)
{
kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
#ifdef CONFIG_MMC_DEBUG
#define STATUS_TO_TEXT(a, status, i) \
do { \
if (status & TMIO_STAT_##a) { \
if (i++) \
printk(" | "); \
printk(#a); \
} \
} while (0)
void pr_debug_status(u32 status)
{
int i = 0;
printk(KERN_DEBUG "status: %08x = ", status);
STATUS_TO_TEXT(CARD_REMOVE, status, i);
STATUS_TO_TEXT(CARD_INSERT, status, i);
STATUS_TO_TEXT(SIGSTATE, status, i);
STATUS_TO_TEXT(WRPROTECT, status, i);
STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
STATUS_TO_TEXT(CARD_INSERT_A, status, i);
STATUS_TO_TEXT(SIGSTATE_A, status, i);
STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
STATUS_TO_TEXT(STOPBIT_ERR, status, i);
STATUS_TO_TEXT(ILL_FUNC, status, i);
STATUS_TO_TEXT(CMD_BUSY, status, i);
STATUS_TO_TEXT(CMDRESPEND, status, i);
STATUS_TO_TEXT(DATAEND, status, i);
STATUS_TO_TEXT(CRCFAIL, status, i);
STATUS_TO_TEXT(DATATIMEOUT, status, i);
STATUS_TO_TEXT(CMDTIMEOUT, status, i);
STATUS_TO_TEXT(RXOVERFLOW, status, i);
STATUS_TO_TEXT(TXUNDERRUN, status, i);
STATUS_TO_TEXT(RXRDY, status, i);
STATUS_TO_TEXT(TXRQ, status, i);
STATUS_TO_TEXT(ILL_ACCESS, status, i);
printk("\n");
}
#else
#define pr_debug_status(s) do { } while (0)
#endif
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
if (enable) {
host->sdio_irq_enabled = 1;
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
} else {
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
host->sdio_irq_enabled = 0;
}
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
u32 clk = 0, clock;
if (new_clock) {
for (clock = host->mmc->f_min, clk = 0x80000080;
new_clock >= (clock<<1); clk >>= 1)
clock <<= 1;
clk |= 0x100;
}
if (host->set_clk_div)
host->set_clk_div(host->pdev, (clk>>22) & 1);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
/*
* Testing on sh-mobile showed that SDIO IRQs are unmasked when
* CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
* device IRQ here and restore the SDIO IRQ mask before
* re-enabling the device IRQ.
*/
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
disable_irq(host->irq);
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
msleep(10);
if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
enable_irq(host->irq);
}
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
/* see comment in tmio_mmc_clk_stop above */
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
disable_irq(host->irq);
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10);
if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
enable_irq(host->irq);
}
}
static void reset(struct tmio_mmc_host *host)
{
/* FIXME - should we set stop clock reg here */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
msleep(10);
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
msleep(10);
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
delayed_reset_work.work);
struct mmc_request *mrq;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
mrq = host->mrq;
/* request already finished */
if (!mrq
|| time_is_after_jiffies(host->last_req_ts +
msecs_to_jiffies(2000))) {
spin_unlock_irqrestore(&host->lock, flags);
return;
}
dev_warn(&host->pdev->dev,
"timeout waiting for hardware interrupt (CMD%u)\n",
mrq->cmd->opcode);
if (host->data)
host->data->error = -ETIMEDOUT;
else if (host->cmd)
host->cmd->error = -ETIMEDOUT;
else
mrq->cmd->error = -ETIMEDOUT;
host->cmd = NULL;
host->data = NULL;
host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
reset(host);
mmc_request_done(host->mmc, mrq);
}
static void
tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
struct mmc_request *mrq = host->mrq;
if (!mrq)
return;
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
cancel_delayed_work(&host->delayed_reset_work);
mmc_request_done(host->mmc, mrq);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
* types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD 0x0040
#define RESP_NONE 0x0300
#define RESP_R1 0x0400
#define RESP_R1B 0x0500
#define RESP_R2 0x0600
#define RESP_R3 0x0700
#define DATA_PRESENT 0x0800
#define TRANSFER_READ 0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD 0x4000
static int
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
struct mmc_data *data = host->data;
int c = cmd->opcode;
/* Command 12 is handled by hardware */
if (cmd->opcode == 12 && !cmd->arg) {
sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
return 0;
}
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE: c |= RESP_NONE; break;
case MMC_RSP_R1: c |= RESP_R1; break;
case MMC_RSP_R1B: c |= RESP_R1B; break;
case MMC_RSP_R2: c |= RESP_R2; break;
case MMC_RSP_R3: c |= RESP_R3; break;
default:
pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
return -EINVAL;
}
host->cmd = cmd;
/* FIXME - this seems to be ok commented out but the spec suggest this bit
* should be set when issuing app commands.
* if(cmd->flags & MMC_FLAG_ACMD)
* c |= APP_CMD;
*/
if (data) {
c |= DATA_PRESENT;
if (data->blocks > 1) {
sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
c |= TRANSFER_MULTI;
}
if (data->flags & MMC_DATA_READ)
c |= TRANSFER_READ;
}
enable_mmc_irqs(host, TMIO_MASK_CMD);
/* Fire off the command */
sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
sd_ctrl_write16(host, CTL_SD_CMD, c);
return 0;
}
/*
* This chip always returns (at least?) as much data as you ask for.
* I'm unsure what happens if you ask for less than a block. This should be
* looked into to ensure that a funny length read doesnt hose the controller.
*/
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
void *sg_virt;
unsigned short *buf;
unsigned int count;
unsigned long flags;
if (!data) {
pr_debug("Spurious PIO IRQ\n");
return;
}
sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
buf = (unsigned short *)(sg_virt + host->sg_off);
count = host->sg_ptr->length - host->sg_off;
if (count > data->blksz)
count = data->blksz;
pr_debug("count: %08x offset: %08x flags %08x\n",
count, host->sg_off, data->flags);
/* Transfer the data */
if (data->flags & MMC_DATA_READ)
sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
else
sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
host->sg_off += count;
tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
if (host->sg_off == host->sg_ptr->length)
tmio_mmc_next_sg(host);
return;
}
/* needs to be called with host->lock held */
static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
struct mmc_command *stop;
host->data = NULL;
if (!data) {
dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
return;
}
stop = data->stop;
/* FIXME - return correct transfer count on errors */
if (!data->error)
data->bytes_xfered = data->blocks * data->blksz;
else
data->bytes_xfered = 0;
pr_debug("Completed data request\n");
/*
* FIXME: other drivers allow an optional stop command of any given type
* which we dont do, as the chip can auto generate them.
* Perhaps we can be smarter about when to use auto CMD12 and
* only issue the auto request when we know this is the desired
* stop command, allowing fallback to the stop command the
* upper layers expect. For now, we do what works.
*/
if (data->flags & MMC_DATA_READ) {
if (!host->chan_rx)
disable_mmc_irqs(host, TMIO_MASK_READOP);
else
tmio_check_bounce_buffer(host);
dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
host->mrq);
} else {
if (!host->chan_tx)
disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
host->mrq);
}
if (stop) {
if (stop->opcode == 12 && !stop->arg)
sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
else
BUG();
}
tmio_mmc_finish_request(host);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data;
spin_lock(&host->lock);
data = host->data;
if (!data)
goto out;
if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
/*
* Has all data been written out yet? Testing on SuperH showed,
* that in most cases the first interrupt comes already with the
* BUSY status bit clear, but on some operations, like mount or
* in the beginning of a write / sync / umount, there is one
* DATAEND interrupt with the BUSY bit set, in this cases
* waiting for one more interrupt fixes the problem.
*/
if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
disable_mmc_irqs(host, TMIO_STAT_DATAEND);
tasklet_schedule(&host->dma_complete);
}
} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
disable_mmc_irqs(host, TMIO_STAT_DATAEND);
tasklet_schedule(&host->dma_complete);
} else {
tmio_mmc_do_data_irq(host);
}
out:
spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
unsigned int stat)
{
struct mmc_command *cmd = host->cmd;
int i, addr;
spin_lock(&host->lock);
if (!host->cmd) {
pr_debug("Spurious CMD irq\n");
goto out;
}
host->cmd = NULL;
/* This controller is sicker than the PXA one. Not only do we need to
* drop the top 8 bits of the first response word, we also need to
* modify the order of the response for short response command types.
*/
for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
cmd->resp[i] = sd_ctrl_read32(host, addr);
if (cmd->flags & MMC_RSP_136) {
cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
cmd->resp[3] <<= 8;
} else if (cmd->flags & MMC_RSP_R3) {
cmd->resp[0] = cmd->resp[3];
}
if (stat & TMIO_STAT_CMDTIMEOUT)
cmd->error = -ETIMEDOUT;
else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
cmd->error = -EILSEQ;
/* If there is data to handle, we enable data IRQs here, and
* we will ultimately finish the request in the data_end handler.
* If there's no data or we encountered an error, finish now.
*/
if (host->data && !cmd->error) {
if (host->data->flags & MMC_DATA_READ) {
if (!host->chan_rx)
enable_mmc_irqs(host, TMIO_MASK_READOP);
} else {
if (!host->chan_tx)
enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
else
tasklet_schedule(&host->dma_issue);
}
} else {
tmio_mmc_finish_request(host);
}
out:
spin_unlock(&host->lock);
return;
}
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
unsigned int ireg, irq_mask, status;
unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
pr_debug("MMC IRQ begin\n");
status = sd_ctrl_read32(host, CTL_STATUS);
irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
ireg = status & TMIO_MASK_IRQ & ~irq_mask;
sdio_ireg = 0;
if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
if (sdio_ireg && !host->sdio_irq_enabled) {
pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
sdio_status, sdio_irq_mask, sdio_ireg);
tmio_mmc_enable_sdio_irq(host->mmc, 0);
goto out;
}
if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(host->mmc);
if (sdio_ireg)
goto out;
}
pr_debug_status(status);
pr_debug_status(ireg);
if (!ireg) {
disable_mmc_irqs(host, status & ~irq_mask);
pr_warning("tmio_mmc: Spurious irq, disabling! "
"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
pr_debug_status(status);
goto out;
}
while (ireg) {
/* Card insert / remove attempts */
if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
TMIO_STAT_CARD_REMOVE);
mmc_detect_change(host->mmc, msecs_to_jiffies(100));
}
/* CRC and other errors */
/* if (ireg & TMIO_STAT_ERR_IRQ)
* handled |= tmio_error_irq(host, irq, stat);
*/
/* Command completion */
if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
ack_mmc_irqs(host,
TMIO_STAT_CMDRESPEND |
TMIO_STAT_CMDTIMEOUT);
tmio_mmc_cmd_irq(host, status);
}
/* Data transfer */
if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
tmio_mmc_pio_irq(host);
}
/* Data transfer completion */
if (ireg & TMIO_STAT_DATAEND) {
ack_mmc_irqs(host, TMIO_STAT_DATAEND);
tmio_mmc_data_irq(host);
}
/* Check status - keep going until we've handled it all */
status = sd_ctrl_read32(host, CTL_STATUS);
irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
ireg = status & TMIO_MASK_IRQ & ~irq_mask;
pr_debug("Status at end of loop: %08x\n", status);
pr_debug_status(status);
}
pr_debug("MMC IRQ end\n");
out:
return IRQ_HANDLED;
}
#ifdef CONFIG_TMIO_MMC_DMA
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
{
if (host->sg_ptr == &host->bounce_sg) {
unsigned long flags;
void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
}
}
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
/* Switch DMA mode on or off - SuperH specific? */
sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
#endif
}
static void tmio_dma_complete(void *arg)
{
struct tmio_mmc_host *host = arg;
dev_dbg(&host->pdev->dev, "Command completed\n");
if (!host->data)
dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
else
enable_mmc_irqs(host, TMIO_STAT_DATAEND);
}
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
aligned = false;
if (sg_tmp->length & align) {
multiple = false;
break;
}
}
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
align >= MAX_ALIGN)) || !multiple) {
ret = -EINVAL;
goto pio;
}
/* The single sg element may be unaligned; use our bounce buffer in that case */
if (!aligned) {
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
host->sg_ptr = &host->bounce_sg;
sg = host->sg_ptr;
}
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (desc) {
desc->callback = tmio_dma_complete;
desc->callback_param = host;
cookie = dmaengine_submit(desc);
dma_async_issue_pending(chan);
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
if (ret >= 0)
ret = -EIO;
host->chan_rx = NULL;
dma_release_channel(chan);
/* Free the Tx channel too */
chan = host->chan_tx;
if (chan) {
host->chan_tx = NULL;
dma_release_channel(chan);
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
tmio_mmc_enable_dma(host, false);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
desc, cookie, host->sg_len);
}
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
aligned = false;
if (sg_tmp->length & align) {
multiple = false;
break;
}
}
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
align >= MAX_ALIGN)) || !multiple) {
ret = -EINVAL;
goto pio;
}
/* The single sg element may be unaligned; use our bounce buffer in that case */
if (!aligned) {
unsigned long flags;
void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
host->sg_ptr = &host->bounce_sg;
sg = host->sg_ptr;
}
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (desc) {
desc->callback = tmio_dma_complete;
desc->callback_param = host;
cookie = dmaengine_submit(desc);
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
if (ret >= 0)
ret = -EIO;
host->chan_tx = NULL;
dma_release_channel(chan);
/* Free the Rx channel too */
chan = host->chan_rx;
if (chan) {
host->chan_rx = NULL;
dma_release_channel(chan);
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
tmio_mmc_enable_dma(host, false);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
desc, cookie);
}
static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
if (data->flags & MMC_DATA_READ) {
if (host->chan_rx)
tmio_mmc_start_dma_rx(host);
} else {
if (host->chan_tx)
tmio_mmc_start_dma_tx(host);
}
}
static void tmio_issue_tasklet_fn(unsigned long priv)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
struct dma_chan *chan = host->chan_tx;
dma_async_issue_pending(chan);
}
static void tmio_tasklet_fn(unsigned long arg)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
if (!host->data)
goto out;
if (host->data->flags & MMC_DATA_READ)
dma_unmap_sg(host->chan_rx->device->dev,
host->sg_ptr, host->sg_len,
DMA_FROM_DEVICE);
else
dma_unmap_sg(host->chan_tx->device->dev,
host->sg_ptr, host->sg_len,
DMA_TO_DEVICE);
tmio_mmc_do_data_irq(host);
out:
spin_unlock_irqrestore(&host->lock, flags);
}
/* It might be necessary to make the filter MFD-specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
chan->private = arg;
return true;
}
static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (pdata->dma) {
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
pdata->dma->chan_priv_tx);
dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
if (!host->chan_tx)
return;
host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
pdata->dma->chan_priv_rx);
dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
if (!host->chan_rx) {
dma_release_channel(host->chan_tx);
host->chan_tx = NULL;
return;
}
tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);
tmio_mmc_enable_dma(host, true);
}
}
static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
if (host->chan_tx) {
struct dma_chan *chan = host->chan_tx;
host->chan_tx = NULL;
dma_release_channel(chan);
}
if (host->chan_rx) {
struct dma_chan *chan = host->chan_rx;
host->chan_rx = NULL;
dma_release_channel(chan);
}
}
#else
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
{
}
static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
}
static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
host->chan_tx = NULL;
host->chan_rx = NULL;
}
static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}
#endif
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
struct mmc_data *data)
{
struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
data->blksz, data->blocks);
/* Some hardware cannot perform 2 byte requests in 4 bit mode */
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
pr_err("%s: %d byte block unsupported in 4 bit mode\n",
mmc_hostname(host->mmc), data->blksz);
return -EINVAL;
}
}
tmio_mmc_init_sg(host, data);
host->data = data;
/* Set transfer length / blocksize */
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
tmio_mmc_start_dma(host, data);
return 0;
}
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
int ret;
if (host->mrq)
pr_debug("request not null\n");
host->last_req_ts = jiffies;
wmb();
host->mrq = mrq;
if (mrq->data) {
ret = tmio_mmc_start_data(host, mrq->data);
if (ret)
goto fail;
}
ret = tmio_mmc_start_command(host, mrq->cmd);
if (!ret) {
schedule_delayed_work(&host->delayed_reset_work,
msecs_to_jiffies(2000));
return;
}
fail:
host->mrq = NULL;
mrq->cmd->error = ret;
mmc_request_done(mmc, mrq);
}
/* Set MMC clock / power.
* Note: This controller uses a simple divider scheme, therefore it cannot
* run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
* MMC won't run that fast, it has to be clocked at 12MHz, which is the next
* slowest setting.
*/
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
if (ios->clock)
tmio_mmc_set_clock(host, ios->clock);
/* Power sequence - OFF -> ON -> UP */
switch (ios->power_mode) {
case MMC_POWER_OFF: /* power down SD bus */
if (host->set_pwr)
host->set_pwr(host->pdev, 0);
tmio_mmc_clk_stop(host);
break;
case MMC_POWER_ON: /* power up SD bus */
if (host->set_pwr)
host->set_pwr(host->pdev, 1);
break;
case MMC_POWER_UP: /* start bus clock */
tmio_mmc_clk_start(host);
break;
}
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
break;
case MMC_BUS_WIDTH_4:
sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
break;
}
/* Let things settle. Delay taken from the WinCE driver. */
udelay(140);
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 0 : 1;
}
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = mfd_get_data(host->pdev);
if (!pdata->get_cd)
return -ENOSYS;
else
return pdata->get_cd(host->pdev);
}
static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
.get_cd = tmio_mmc_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
#ifdef CONFIG_PM #ifdef CONFIG_PM
static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
...@@ -1227,138 +62,54 @@ static int tmio_mmc_resume(struct platform_device *dev) ...@@ -1227,138 +62,54 @@ static int tmio_mmc_resume(struct platform_device *dev)
#define tmio_mmc_resume NULL #define tmio_mmc_resume NULL
#endif #endif
static int __devinit tmio_mmc_probe(struct platform_device *dev) static int __devinit tmio_mmc_probe(struct platform_device *pdev)
{ {
const struct mfd_cell *cell = mfd_get_cell(dev); const struct mfd_cell *cell = mfd_get_cell(pdev);
struct tmio_mmc_data *pdata; struct tmio_mmc_data *pdata;
struct resource *res_ctl;
struct tmio_mmc_host *host; struct tmio_mmc_host *host;
struct mmc_host *mmc;
int ret = -EINVAL; int ret = -EINVAL;
u32 irq_mask = TMIO_MASK_CMD;
if (dev->num_resources != 2) if (pdev->num_resources != 2)
goto out; goto out;
res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); pdata = mfd_get_data(pdev);
if (!res_ctl)
goto out;
pdata = mfd_get_data(dev);
if (!pdata || !pdata->hclk) if (!pdata || !pdata->hclk)
goto out; goto out;
ret = -ENOMEM;
mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev);
if (!mmc)
goto out;
host = mmc_priv(mmc);
host->mmc = mmc;
host->pdev = dev;
platform_set_drvdata(dev, mmc);
host->set_pwr = pdata->set_pwr;
host->set_clk_div = pdata->set_clk_div;
/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
host->bus_shift = resource_size(res_ctl) >> 10;
host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
if (!host->ctl)
goto host_free;
mmc->ops = &tmio_mmc_ops;
mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->f_max = pdata->hclk;
mmc->f_min = mmc->f_max / 512;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
if (pdata->ocr_mask)
mmc->ocr_avail = pdata->ocr_mask;
else
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/* Tell the MFD core we are ready to be enabled */ /* Tell the MFD core we are ready to be enabled */
if (cell->enable) { if (cell->enable) {
ret = cell->enable(dev); ret = cell->enable(pdev);
if (ret) if (ret)
goto unmap_ctl; goto out;
} }
tmio_mmc_clk_stop(host); ret = tmio_mmc_host_probe(&host, pdev, pdata);
reset(host);
ret = platform_get_irq(dev, 0);
if (ret >= 0)
host->irq = ret;
else
goto cell_disable;
disable_mmc_irqs(host, TMIO_MASK_ALL);
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
tmio_mmc_enable_sdio_irq(mmc, 0);
ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
if (ret) if (ret)
goto cell_disable; goto cell_disable;
spin_lock_init(&host->lock);
/* Init delayed work for request timeouts */
INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
/* See if we also get DMA */
tmio_mmc_request_dma(host, pdata);
mmc_add_host(mmc);
pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
(unsigned long)host->ctl, host->irq); (unsigned long)host->ctl, host->irq);
/* Unmask the IRQs we want to know about */
if (!host->chan_rx)
irq_mask |= TMIO_MASK_READOP;
if (!host->chan_tx)
irq_mask |= TMIO_MASK_WRITEOP;
enable_mmc_irqs(host, irq_mask);
return 0; return 0;
cell_disable: cell_disable:
if (cell->disable) if (cell->disable)
cell->disable(dev); cell->disable(pdev);
unmap_ctl:
iounmap(host->ctl);
host_free:
mmc_free_host(mmc);
out: out:
return ret; return ret;
} }
static int __devexit tmio_mmc_remove(struct platform_device *dev) static int __devexit tmio_mmc_remove(struct platform_device *pdev)
{ {
const struct mfd_cell *cell = mfd_get_cell(dev); const struct mfd_cell *cell = mfd_get_cell(pdev);
struct mmc_host *mmc = platform_get_drvdata(dev); struct mmc_host *mmc = platform_get_drvdata(pdev);
platform_set_drvdata(dev, NULL); platform_set_drvdata(pdev, NULL);
if (mmc) { if (mmc) {
struct tmio_mmc_host *host = mmc_priv(mmc); tmio_mmc_host_remove(mmc_priv(mmc));
mmc_remove_host(mmc);
cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
free_irq(host->irq, host);
if (cell->disable) if (cell->disable)
cell->disable(dev); cell->disable(pdev);
iounmap(host->ctl);
mmc_free_host(mmc);
} }
return 0; return 0;
......
/*
* linux/drivers/mmc/host/tmio_mmc.h
*
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Driver for the MMC / SD / SDIO cell found in:
*
* TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
*/
#ifndef TMIO_MMC_H
#define TMIO_MMC_H
#include <linux/highmem.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
/* Definitions for values the CTRL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ 0x0001
#define TMIO_SDIO_STAT_EXPUB52 0x4000
#define TMIO_SDIO_STAT_EXWT 0x8000
#define TMIO_SDIO_MASK_ALL 0xc007
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
struct tmio_mmc_data;
struct tmio_mmc_host {
void __iomem *ctl;
unsigned long bus_shift;
struct mmc_command *cmd;
struct mmc_request *mrq;
struct mmc_data *data;
struct mmc_host *mmc;
int irq;
unsigned int sdio_irq_enabled;
/* Callbacks for clock / power control */
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
/* pio related stuff */
struct scatterlist *sg_ptr;
struct scatterlist *sg_orig;
unsigned int sg_len;
unsigned int sg_off;
struct platform_device *pdev;
struct tmio_mmc_data *pdata;
/* DMA support */
bool force_pio;
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
struct scatterlist bounce_sg;
u8 *bounce_buf;
/* Track lost interrupts */
struct delayed_work delayed_reset_work;
spinlock_t lock;
unsigned long last_req_ts;
};
int tmio_mmc_host_probe(struct tmio_mmc_host **host,
struct platform_device *pdev,
struct tmio_mmc_data *pdata);
void tmio_mmc_host_remove(struct tmio_mmc_host *host);
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
{
local_irq_save(*flags);
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
unsigned long *flags, void *virt)
{
kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
void tmio_mmc_release_dma(struct tmio_mmc_host *host);
#else
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
}
static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
host->chan_tx = NULL;
host->chan_rx = NULL;
}
static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}
#endif
#endif
/*
* linux/drivers/mmc/tmio_mmc_dma.c
*
* Copyright (C) 2010-2011 Guennadi Liakhovetski
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* DMA function for TMIO MMC implementations
*/
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include "tmio_mmc.h"
#define TMIO_MMC_MIN_DMA_LEN 8
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
/* Switch DMA mode on or off - SuperH specific? */
writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
#endif
}
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
struct tmio_mmc_data *pdata = host->pdata;
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
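/*
 * The DMA engine needs every sg element to respect the controller's
 * alignment. A single unaligned element can still be handled through
 * the bounce buffer below; anything else falls back to PIO.
 */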
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
aligned = false;
if (sg_tmp->length & align) {
multiple = false;
break;
}
}
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;
}
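/* Short transfers are not worth setting up DMA for; flag force_pio so the
 * PIO path services this request instead. */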
if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
host->force_pio = true;
return;
}
tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);
/* The single sg element may be unaligned; use our bounce buffer in that case */
if (!aligned) {
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
host->sg_ptr = &host->bounce_sg;
sg = host->sg_ptr;
}
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_FROM_DEVICE, DMA_CTRL_ACK);
if (desc) {
cookie = dmaengine_submit(desc);
if (cookie < 0) {
desc = NULL;
ret = cookie;
}
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
if (ret >= 0)
ret = -EIO;
host->chan_rx = NULL;
dma_release_channel(chan);
/* Free the Tx channel too */
chan = host->chan_tx;
if (chan) {
host->chan_tx = NULL;
dma_release_channel(chan);
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
tmio_mmc_enable_dma(host, false);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
desc, cookie, host->sg_len);
}
static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
struct tmio_mmc_data *pdata = host->pdata;
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
aligned = false;
if (sg_tmp->length & align) {
multiple = false;
break;
}
}
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;
}
if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
host->force_pio = true;
return;
}
tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);
/* The single sg element may be unaligned; use our bounce buffer in that case */
if (!aligned) {
unsigned long flags;
void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
host->sg_ptr = &host->bounce_sg;
sg = host->sg_ptr;
}
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
DMA_TO_DEVICE, DMA_CTRL_ACK);
if (desc) {
cookie = dmaengine_submit(desc);
if (cookie < 0) {
desc = NULL;
ret = cookie;
}
}
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, cookie, host->mrq);
pio:
if (!desc) {
/* DMA failed, fall back to PIO */
if (ret >= 0)
ret = -EIO;
host->chan_tx = NULL;
dma_release_channel(chan);
/* Free the Rx channel too */
chan = host->chan_rx;
if (chan) {
host->chan_rx = NULL;
dma_release_channel(chan);
}
dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret);
tmio_mmc_enable_dma(host, false);
}
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
desc, cookie);
}
void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
if (data->flags & MMC_DATA_READ) {
if (host->chan_rx)
tmio_mmc_start_dma_rx(host);
} else {
if (host->chan_tx)
tmio_mmc_start_dma_tx(host);
}
}
static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
struct dma_chan *chan = NULL;
spin_lock_irq(&host->lock);
if (host && host->data) {
if (host->data->flags & MMC_DATA_READ)
chan = host->chan_rx;
else
chan = host->chan_tx;
}
spin_unlock_irq(&host->lock);
tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
if (chan)
dma_async_issue_pending(chan);
}
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
spin_lock_irq(&host->lock);
if (!host->data)
goto out;
if (host->data->flags & MMC_DATA_READ)
dma_unmap_sg(host->chan_rx->device->dev,
host->sg_ptr, host->sg_len,
DMA_FROM_DEVICE);
else
dma_unmap_sg(host->chan_tx->device->dev,
host->sg_ptr, host->sg_len,
DMA_TO_DEVICE);
tmio_mmc_do_data_irq(host);
out:
spin_unlock_irq(&host->lock);
}
/* It might be necessary to make the filter MFD-specific */
static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
{
dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
chan->private = arg;
return true;
}
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (pdata->dma) {
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
pdata->dma->chan_priv_tx);
dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
host->chan_tx);
if (!host->chan_tx)
return;
host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
pdata->dma->chan_priv_rx);
dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
host->chan_rx);
if (!host->chan_rx)
goto ereqrx;
host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!host->bounce_buf)
goto ebouncebuf;
tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
tmio_mmc_enable_dma(host, true);
return;
ebouncebuf:
dma_release_channel(host->chan_rx);
host->chan_rx = NULL;
ereqrx:
dma_release_channel(host->chan_tx);
host->chan_tx = NULL;
return;
}
}
void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
if (host->chan_tx) {
struct dma_chan *chan = host->chan_tx;
host->chan_tx = NULL;
dma_release_channel(chan);
}
if (host->chan_rx) {
struct dma_chan *chan = host->chan_rx;
host->chan_rx = NULL;
dma_release_channel(chan);
}
if (host->bounce_buf) {
free_pages((unsigned long)host->bounce_buf, 0);
host->bounce_buf = NULL;
}
}
/*
* linux/drivers/mmc/host/tmio_mmc_pio.c
*
* Copyright (C) 2011 Guennadi Liakhovetski
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Driver for the MMC / SD / SDIO IP found in:
*
* TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
*
* This driver draws mainly on scattered spec sheets, Reverse engineering
* of the toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
* support). (Further 4 bit support from a later datasheet).
*
* TODO:
* Investigate using a workqueue for PIO transfers
* Eliminate FIXMEs
* SDIO support
* Better Power management
* Handle MMC errors better
* double buffer support
*
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include "tmio_mmc.h"
static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
return readw(host->ctl + (addr << host->bus_shift));
}
static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
readsw(host->ctl + (addr << host->bus_shift), buf, count);
}
static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
return readw(host->ctl + (addr << host->bus_shift)) |
readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
writew(val, host->ctl + (addr << host->bus_shift));
}
static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
writesw(host->ctl + (addr << host->bus_shift), buf, count);
}
static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
writew(val, host->ctl + (addr << host->bus_shift));
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) | (i & TMIO_MASK_IRQ);
sd_ctrl_write32(host, CTL_IRQ_MASK, mask);
}
static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
sd_ctrl_write32(host, CTL_STATUS, ~i);
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
host->sg_len = data->sg_len;
host->sg_ptr = data->sg;
host->sg_orig = data->sg;
host->sg_off = 0;
}
static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
host->sg_ptr = sg_next(host->sg_ptr);
host->sg_off = 0;
return --host->sg_len;
}
#ifdef CONFIG_MMC_DEBUG
#define STATUS_TO_TEXT(a, status, i) \
do { \
if (status & TMIO_STAT_##a) { \
if (i++) \
printk(" | "); \
printk(#a); \
} \
} while (0)
static void pr_debug_status(u32 status)
{
int i = 0;
printk(KERN_DEBUG "status: %08x = ", status);
STATUS_TO_TEXT(CARD_REMOVE, status, i);
STATUS_TO_TEXT(CARD_INSERT, status, i);
STATUS_TO_TEXT(SIGSTATE, status, i);
STATUS_TO_TEXT(WRPROTECT, status, i);
STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
STATUS_TO_TEXT(CARD_INSERT_A, status, i);
STATUS_TO_TEXT(SIGSTATE_A, status, i);
STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
STATUS_TO_TEXT(STOPBIT_ERR, status, i);
STATUS_TO_TEXT(ILL_FUNC, status, i);
STATUS_TO_TEXT(CMD_BUSY, status, i);
STATUS_TO_TEXT(CMDRESPEND, status, i);
STATUS_TO_TEXT(DATAEND, status, i);
STATUS_TO_TEXT(CRCFAIL, status, i);
STATUS_TO_TEXT(DATATIMEOUT, status, i);
STATUS_TO_TEXT(CMDTIMEOUT, status, i);
STATUS_TO_TEXT(RXOVERFLOW, status, i);
STATUS_TO_TEXT(TXUNDERRUN, status, i);
STATUS_TO_TEXT(RXRDY, status, i);
STATUS_TO_TEXT(TXRQ, status, i);
STATUS_TO_TEXT(ILL_ACCESS, status, i);
printk("\n");
}
#else
#define pr_debug_status(s) do { } while (0)
#endif
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
if (enable) {
host->sdio_irq_enabled = 1;
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
} else {
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
host->sdio_irq_enabled = 0;
}
}
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{
u32 clk = 0, clock;
if (new_clock) {
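/*
 * Starting from f_min (f_max / 512), keep doubling the candidate rate
 * while the doubled rate does not exceed the requested clock; each
 * doubling shifts the divider bit in 'clk' one place right. Bit 0x100
 * is the clock-enable bit.
 */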
for (clock = host->mmc->f_min, clk = 0x80000080;
new_clock >= (clock<<1); clk >>= 1)
clock <<= 1;
clk |= 0x100;
}
if (host->set_clk_div)
host->set_clk_div(host->pdev, (clk>>22) & 1);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
/* implicit BUG_ON(!res) */
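/*
 * CTL_CLK_AND_WAIT_CTL (0x138) sits above 0xff and is only available on
 * variants that provide the larger register window, hence the
 * resource-size check.
 */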
if (resource_size(res) > 0x100) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
msleep(10);
}
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
}
static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10);
/* implicit BUG_ON(!res) */
if (resource_size(res) > 0x100) {
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10);
}
}
static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0);
/* FIXME - should we set stop clock reg here */
sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
/* implicit BUG_ON(!res) */
if (resource_size(res) > 0x100)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
msleep(10);
sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
if (resource_size(res) > 0x100)
sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
msleep(10);
}
static void tmio_mmc_reset_work(struct work_struct *work)
{
struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
delayed_reset_work.work);
struct mmc_request *mrq;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
mrq = host->mrq;
/* request already finished */
if (!mrq
|| time_is_after_jiffies(host->last_req_ts +
msecs_to_jiffies(2000))) {
spin_unlock_irqrestore(&host->lock, flags);
return;
}
dev_warn(&host->pdev->dev,
"timeout waiting for hardware interrupt (CMD%u)\n",
mrq->cmd->opcode);
if (host->data)
host->data->error = -ETIMEDOUT;
else if (host->cmd)
host->cmd->error = -ETIMEDOUT;
else
mrq->cmd->error = -ETIMEDOUT;
host->cmd = NULL;
host->data = NULL;
host->mrq = NULL;
host->force_pio = false;
spin_unlock_irqrestore(&host->lock, flags);
tmio_mmc_reset(host);
mmc_request_done(host->mmc, mrq);
}
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
struct mmc_request *mrq = host->mrq;
if (!mrq)
return;
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
host->force_pio = false;
cancel_delayed_work(&host->delayed_reset_work);
mmc_request_done(host->mmc, mrq);
}
/* These are the bitmasks the tmio chip requires to implement the MMC response
* types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD 0x0040
#define RESP_NONE 0x0300
#define RESP_R1 0x0400
#define RESP_R1B 0x0500
#define RESP_R2 0x0600
#define RESP_R3 0x0700
#define DATA_PRESENT 0x0800
#define TRANSFER_READ 0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD 0x4000
static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
{
struct mmc_data *data = host->data;
int c = cmd->opcode;
/* Command 12 is handled by hardware */
if (cmd->opcode == 12 && !cmd->arg) {
sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
return 0;
}
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE: c |= RESP_NONE; break;
case MMC_RSP_R1: c |= RESP_R1; break;
case MMC_RSP_R1B: c |= RESP_R1B; break;
case MMC_RSP_R2: c |= RESP_R2; break;
case MMC_RSP_R3: c |= RESP_R3; break;
default:
pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
return -EINVAL;
}
host->cmd = cmd;
/* FIXME - this seems to be OK commented out, but the spec suggests this bit
* should be set when issuing app commands.
* if(cmd->flags & MMC_FLAG_ACMD)
* c |= APP_CMD;
*/
if (data) {
c |= DATA_PRESENT;
if (data->blocks > 1) {
sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100);
c |= TRANSFER_MULTI;
}
if (data->flags & MMC_DATA_READ)
c |= TRANSFER_READ;
}
tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);
/* Fire off the command */
sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg);
sd_ctrl_write16(host, CTL_SD_CMD, c);
return 0;
}
/*
* This chip always returns (at least?) as much data as you ask for.
* I'm unsure what happens if you ask for less than a block. This should be
* looked into to ensure that a funny-length read doesn't hose the controller.
*/
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
void *sg_virt;
unsigned short *buf;
unsigned int count;
unsigned long flags;
if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
pr_err("PIO IRQ in DMA mode!\n");
return;
} else if (!data) {
pr_debug("Spurious PIO IRQ\n");
return;
}
sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
buf = (unsigned short *)(sg_virt + host->sg_off);
count = host->sg_ptr->length - host->sg_off;
if (count > data->blksz)
count = data->blksz;
pr_debug("count: %08x offset: %08x flags %08x\n",
count, host->sg_off, data->flags);
/* Transfer the data */
if (data->flags & MMC_DATA_READ)
sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
else
sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
host->sg_off += count;
tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);
if (host->sg_off == host->sg_ptr->length)
tmio_mmc_next_sg(host);
return;
}
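/* After a DMA read through the bounce buffer, copy the received data back
 * into the caller's original sg entry. */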
static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
if (host->sg_ptr == &host->bounce_sg) {
unsigned long flags;
void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
}
}
/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data = host->data;
struct mmc_command *stop;
host->data = NULL;
if (!data) {
dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
return;
}
stop = data->stop;
/* FIXME - return correct transfer count on errors */
if (!data->error)
data->bytes_xfered = data->blocks * data->blksz;
else
data->bytes_xfered = 0;
pr_debug("Completed data request\n");
/*
* FIXME: other drivers allow an optional stop command of any given type
* which we don't do, as the chip can auto-generate them.
* Perhaps we can be smarter about when to use auto CMD12 and
* only issue the auto request when we know this is the desired
* stop command, allowing fallback to the stop command the
* upper layers expect. For now, we do what works.
*/
if (data->flags & MMC_DATA_READ) {
if (host->chan_rx && !host->force_pio)
tmio_mmc_check_bounce_buffer(host);
dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
host->mrq);
} else {
dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
host->mrq);
}
if (stop) {
if (stop->opcode == 12 && !stop->arg)
sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
else
BUG();
}
tmio_mmc_finish_request(host);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{
struct mmc_data *data;
spin_lock(&host->lock);
data = host->data;
if (!data)
goto out;
if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
/*
* Has all data been written out yet? Testing on SuperH showed
* that in most cases the first interrupt already arrives with the
* BUSY status bit clear, but on some operations, like mount or
* at the beginning of a write / sync / umount, there is one
* DATAEND interrupt with the BUSY bit set; in those cases
* waiting for one more interrupt fixes the problem.
*/
if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
tasklet_schedule(&host->dma_complete);
}
} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
tasklet_schedule(&host->dma_complete);
} else {
tmio_mmc_do_data_irq(host);
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
}
out:
spin_unlock(&host->lock);
}
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
unsigned int stat)
{
struct mmc_command *cmd = host->cmd;
int i, addr;
spin_lock(&host->lock);
if (!host->cmd) {
pr_debug("Spurious CMD irq\n");
goto out;
}
host->cmd = NULL;
/* This controller is sicker than the PXA one. Not only do we need to
* drop the top 8 bits of the first response word, we also need to
* modify the order of the response for short response command types.
*/
for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
cmd->resp[i] = sd_ctrl_read32(host, addr);
if (cmd->flags & MMC_RSP_136) {
cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
cmd->resp[3] <<= 8;
} else if (cmd->flags & MMC_RSP_R3) {
cmd->resp[0] = cmd->resp[3];
}
if (stat & TMIO_STAT_CMDTIMEOUT)
cmd->error = -ETIMEDOUT;
else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC)
cmd->error = -EILSEQ;
/* If there is data to handle, we enable data IRQs here, and
* we will ultimately finish the request in the data_end handler.
* If there's no data or we encountered an error, finish now.
*/
if (host->data && !cmd->error) {
if (host->data->flags & MMC_DATA_READ) {
if (host->force_pio || !host->chan_rx)
tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
else
tasklet_schedule(&host->dma_issue);
} else {
if (host->force_pio || !host->chan_tx)
tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
else
tasklet_schedule(&host->dma_issue);
}
} else {
tmio_mmc_finish_request(host);
}
out:
spin_unlock(&host->lock);
}
static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
struct tmio_mmc_data *pdata = host->pdata;
unsigned int ireg, irq_mask, status;
unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
pr_debug("MMC IRQ begin\n");
status = sd_ctrl_read32(host, CTL_STATUS);
irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
ireg = status & TMIO_MASK_IRQ & ~irq_mask;
sdio_ireg = 0;
if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
if (sdio_ireg && !host->sdio_irq_enabled) {
pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
sdio_status, sdio_irq_mask, sdio_ireg);
tmio_mmc_enable_sdio_irq(host->mmc, 0);
goto out;
}
if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(host->mmc);
if (sdio_ireg)
goto out;
}
pr_debug_status(status);
pr_debug_status(ireg);
if (!ireg) {
tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);
pr_warning("tmio_mmc: Spurious irq, disabling! "
"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
pr_debug_status(status);
goto out;
}
while (ireg) {
/* Card insert / remove attempts */
if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
TMIO_STAT_CARD_REMOVE);
mmc_detect_change(host->mmc, msecs_to_jiffies(100));
}
/* CRC and other errors */
/* if (ireg & TMIO_STAT_ERR_IRQ)
* handled |= tmio_error_irq(host, irq, stat);
*/
/* Command completion */
if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
tmio_mmc_ack_mmc_irqs(host,
TMIO_STAT_CMDRESPEND |
TMIO_STAT_CMDTIMEOUT);
tmio_mmc_cmd_irq(host, status);
}
/* Data transfer */
if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
tmio_mmc_pio_irq(host);
}
/* Data transfer completion */
if (ireg & TMIO_STAT_DATAEND) {
tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
tmio_mmc_data_irq(host);
}
/* Check status - keep going until we've handled it all */
status = sd_ctrl_read32(host, CTL_STATUS);
irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
ireg = status & TMIO_MASK_IRQ & ~irq_mask;
pr_debug("Status at end of loop: %08x\n", status);
pr_debug_status(status);
}
pr_debug("MMC IRQ end\n");
out:
return IRQ_HANDLED;
}
static int tmio_mmc_start_data(struct tmio_mmc_host *host,
struct mmc_data *data)
{
struct tmio_mmc_data *pdata = host->pdata;
pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
data->blksz, data->blocks);
/* Some hardware cannot perform 2 byte requests in 4 bit mode */
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;
if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
pr_err("%s: %d byte block unsupported in 4 bit mode\n",
mmc_hostname(host->mmc), data->blksz);
return -EINVAL;
}
}
tmio_mmc_init_sg(host, data);
host->data = data;
/* Set transfer length / blocksize */
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
tmio_mmc_start_dma(host, data);
return 0;
}
/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
int ret;
if (host->mrq)
pr_debug("request not null\n");
host->last_req_ts = jiffies;
wmb();
host->mrq = mrq;
if (mrq->data) {
ret = tmio_mmc_start_data(host, mrq->data);
if (ret)
goto fail;
}
ret = tmio_mmc_start_command(host, mrq->cmd);
if (!ret) {
schedule_delayed_work(&host->delayed_reset_work,
msecs_to_jiffies(2000));
return;
}
fail:
host->mrq = NULL;
host->force_pio = false;
mrq->cmd->error = ret;
mmc_request_done(mmc, mrq);
}
/* Set MMC clock / power.
* Note: This controller uses a simple divider scheme, therefore it cannot
* run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
* MMC won't run that fast, it has to be clocked at 12MHz, which is the next
* slowest setting.
*/
static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
if (ios->clock)
tmio_mmc_set_clock(host, ios->clock);
/* Power sequence - OFF -> UP -> ON */
if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
/* power down SD bus */
if (ios->power_mode == MMC_POWER_OFF && host->set_pwr)
host->set_pwr(host->pdev, 0);
tmio_mmc_clk_stop(host);
} else if (ios->power_mode == MMC_POWER_UP) {
/* power up SD bus */
if (host->set_pwr)
host->set_pwr(host->pdev, 1);
} else {
/* start bus clock */
tmio_mmc_clk_start(host);
}
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
break;
case MMC_BUS_WIDTH_4:
sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
break;
}
/* Let things settle. Delay taken from the WinCE driver. */
udelay(140);
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
}
static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
if (!pdata->get_cd)
return -ENOSYS;
else
return pdata->get_cd(host->pdev);
}
static const struct mmc_host_ops tmio_mmc_ops = {
.request = tmio_mmc_request,
.set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro,
.get_cd = tmio_mmc_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
struct platform_device *pdev,
struct tmio_mmc_data *pdata)
{
struct tmio_mmc_host *_host;
struct mmc_host *mmc;
struct resource *res_ctl;
int ret;
u32 irq_mask = TMIO_MASK_CMD;
res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res_ctl)
return -EINVAL;
mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev);
if (!mmc)
return -ENOMEM;
_host = mmc_priv(mmc);
_host->pdata = pdata;
_host->mmc = mmc;
_host->pdev = pdev;
platform_set_drvdata(pdev, mmc);
_host->set_pwr = pdata->set_pwr;
_host->set_clk_div = pdata->set_clk_div;
/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
_host->bus_shift = resource_size(res_ctl) >> 10;
_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl));
if (!_host->ctl) {
ret = -ENOMEM;
goto host_free;
}
mmc->ops = &tmio_mmc_ops;
mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->f_max = pdata->hclk;
mmc->f_min = mmc->f_max / 512;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
if (pdata->ocr_mask)
mmc->ocr_avail = pdata->ocr_mask;
else
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
tmio_mmc_clk_stop(_host);
tmio_mmc_reset(_host);
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto unmap_ctl;
_host->irq = ret;
tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
tmio_mmc_enable_sdio_irq(mmc, 0);
ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED |
IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host);
if (ret)
goto unmap_ctl;
spin_lock_init(&_host->lock);
/* Init delayed work for request timeouts */
INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
mmc_add_host(mmc);
/* Unmask the IRQs we want to know about */
if (!_host->chan_rx)
irq_mask |= TMIO_MASK_READOP;
if (!_host->chan_tx)
irq_mask |= TMIO_MASK_WRITEOP;
tmio_mmc_enable_mmc_irqs(_host, irq_mask);
*host = _host;
return 0;
unmap_ctl:
iounmap(_host->ctl);
host_free:
mmc_free_host(mmc);
return ret;
}
EXPORT_SYMBOL(tmio_mmc_host_probe);
void tmio_mmc_host_remove(struct tmio_mmc_host *host)
{
mmc_remove_host(host->mmc);
cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
free_irq(host->irq, host);
iounmap(host->ctl);
mmc_free_host(host->mmc);
}
EXPORT_SYMBOL(tmio_mmc_host_remove);
MODULE_LICENSE("GPL v2");
...@@ -1087,14 +1087,13 @@ static int __devinit via_sd_probe(struct pci_dev *pcidev, ...@@ -1087,14 +1087,13 @@ static int __devinit via_sd_probe(struct pci_dev *pcidev,
struct mmc_host *mmc; struct mmc_host *mmc;
struct via_crdr_mmc_host *sdhost; struct via_crdr_mmc_host *sdhost;
u32 base, len; u32 base, len;
u8 rev, gatt; u8 gatt;
int ret; int ret;
pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev);
pr_info(DRV_NAME pr_info(DRV_NAME
": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n", ": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n",
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
(int)rev); (int)pcidev->revision);
ret = pci_enable_device(pcidev); ret = pci_enable_device(pcidev);
if (ret) if (ret)
......
/*
* include/linux/mmc/tmio.h
*
* Copyright (C) 2007 Ian Molton
* Copyright (C) 2004 Ian Molton
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Driver for the MMC / SD / SDIO cell found in:
*
* TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
*/
#ifndef _LINUX_MMC_TMIO_H_
#define _LINUX_MMC_TMIO_H_
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
#define CTL_STOP_INTERNAL_ACTION 0x08
#define CTL_XFER_BLK_COUNT 0xa
#define CTL_RESPONSE 0x0c
#define CTL_STATUS 0x1c
#define CTL_IRQ_MASK 0x20
#define CTL_SD_CARD_CLK_CTL 0x24
#define CTL_SD_XFER_LEN 0x26
#define CTL_SD_MEM_CARD_OPT 0x28
#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
#define CTL_SD_DATA_PORT 0x30
#define CTL_TRANSACTION_CTL 0x34
#define CTL_SDIO_STATUS 0x36
#define CTL_SDIO_IRQ_MASK 0x38
#define CTL_RESET_SD 0xe0
#define CTL_SDIO_REGS 0x100
#define CTL_CLK_AND_WAIT_CTL 0x138
#define CTL_RESET_SDIO 0x1e0
/* Definitions for values the CTRL_STATUS register can take. */
#define TMIO_STAT_CMDRESPEND 0x00000001
#define TMIO_STAT_DATAEND 0x00000004
#define TMIO_STAT_CARD_REMOVE 0x00000008
#define TMIO_STAT_CARD_INSERT 0x00000010
#define TMIO_STAT_SIGSTATE 0x00000020
#define TMIO_STAT_WRPROTECT 0x00000080
#define TMIO_STAT_CARD_REMOVE_A 0x00000100
#define TMIO_STAT_CARD_INSERT_A 0x00000200
#define TMIO_STAT_SIGSTATE_A 0x00000400
#define TMIO_STAT_CMD_IDX_ERR 0x00010000
#define TMIO_STAT_CRCFAIL 0x00020000
#define TMIO_STAT_STOPBIT_ERR 0x00040000
#define TMIO_STAT_DATATIMEOUT 0x00080000
#define TMIO_STAT_RXOVERFLOW 0x00100000
#define TMIO_STAT_TXUNDERRUN 0x00200000
#define TMIO_STAT_CMDTIMEOUT 0x00400000
#define TMIO_STAT_RXRDY 0x01000000
#define TMIO_STAT_TXRQ 0x02000000
#define TMIO_STAT_ILL_FUNC 0x20000000
#define TMIO_STAT_CMD_BUSY 0x40000000
#define TMIO_STAT_ILL_ACCESS 0x80000000
#define TMIO_BBS 512 /* Boot block size */
#endif /* _LINUX_MMC_TMIO_H_ */