Commit 2442d310 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/drzeus/mmc: (32 commits)
  mmc: tifm: replace kmap with page_address
  mmc: sdhci: fix voltage ocr
  mmc: sdhci: replace kmap with page_address
  mmc: wbsd: replace kmap with page_address
  mmc: handle pci_enable_device() return value in sdhci
  mmc: Proper unclaim in mmc_block
  mmc: change wbsd mailing list
  mmc: Graceful fallback for fancy features
  mmc: Handle wbsd's stupid command list
  mmc: Allow host drivers to specify max block count
  mmc: Allow host drivers to specify a max block size
  tifm_sd: add suspend and resume functionality
  tifm_core: add suspend/resume infrastructure for tifm devices
  tifm_7xx1: prettify
  tifm_7xx1: recognize device 0xac8f as supported
  tifm_7xx1: switch from workqueue to kthread
  tifm_7xx1: Merge media insert and media remove functions
  tifm_7xx1: simplify eject function
  Add dummy_signal_irq function to save check in ISR
  Remove unused return value from signal_irq callback
  ...
parents 02aedd69 f9d429a2
......@@ -3647,7 +3647,7 @@ S: Maintained
W83L51xD SD/MMC CARD INTERFACE DRIVER
P: Pierre Ossman
M: drzeus-wbsd@drzeus.cx
L: wbsd-devel@list.drzeus.cx
L: linux-kernel@vger.kernel.org
W: http://projects.drzeus.cx/wbsd
S: Maintained
......
This diff is collapsed.
......@@ -14,7 +14,7 @@
#include <linux/idr.h>
#define DRIVER_NAME "tifm_core"
#define DRIVER_VERSION "0.6"
#define DRIVER_VERSION "0.7"
static DEFINE_IDR(tifm_adapter_idr);
static DEFINE_SPINLOCK(tifm_adapter_lock);
......@@ -60,10 +60,41 @@ static int tifm_uevent(struct device *dev, char **envp, int num_envp,
return 0;
}
#ifdef CONFIG_PM
static int tifm_device_suspend(struct device *dev, pm_message_t state)
{
struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
struct tifm_driver *drv = fm_dev->drv;
if (drv && drv->suspend)
return drv->suspend(fm_dev, state);
return 0;
}
static int tifm_device_resume(struct device *dev)
{
struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
struct tifm_driver *drv = fm_dev->drv;
if (drv && drv->resume)
return drv->resume(fm_dev);
return 0;
}
#else
#define tifm_device_suspend NULL
#define tifm_device_resume NULL
#endif /* CONFIG_PM */
static struct bus_type tifm_bus_type = {
.name = "tifm",
.match = tifm_match,
.uevent = tifm_uevent,
.suspend = tifm_device_suspend,
.resume = tifm_device_resume
};
static void tifm_free(struct class_device *cdev)
......@@ -71,8 +102,6 @@ static void tifm_free(struct class_device *cdev)
struct tifm_adapter *fm = container_of(cdev, struct tifm_adapter, cdev);
kfree(fm->sockets);
if (fm->wq)
destroy_workqueue(fm->wq);
kfree(fm);
}
......@@ -101,7 +130,8 @@ void tifm_free_adapter(struct tifm_adapter *fm)
}
EXPORT_SYMBOL(tifm_free_adapter);
int tifm_add_adapter(struct tifm_adapter *fm)
int tifm_add_adapter(struct tifm_adapter *fm,
int (*mediathreadfn)(void *data))
{
int rc;
......@@ -113,10 +143,10 @@ int tifm_add_adapter(struct tifm_adapter *fm)
spin_unlock(&tifm_adapter_lock);
if (!rc) {
snprintf(fm->cdev.class_id, BUS_ID_SIZE, "tifm%u", fm->id);
strncpy(fm->wq_name, fm->cdev.class_id, KOBJ_NAME_LEN);
fm->media_switcher = kthread_create(mediathreadfn,
fm, "tifm/%u", fm->id);
fm->wq = create_singlethread_workqueue(fm->wq_name);
if (fm->wq)
if (!IS_ERR(fm->media_switcher))
return class_device_add(&fm->cdev);
spin_lock(&tifm_adapter_lock);
......@@ -141,27 +171,27 @@ EXPORT_SYMBOL(tifm_remove_adapter);
void tifm_free_device(struct device *dev)
{
struct tifm_dev *fm_dev = container_of(dev, struct tifm_dev, dev);
if (fm_dev->wq)
destroy_workqueue(fm_dev->wq);
kfree(fm_dev);
}
EXPORT_SYMBOL(tifm_free_device);
struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id)
static void tifm_dummy_signal_irq(struct tifm_dev *sock,
unsigned int sock_irq_status)
{
return;
}
struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm)
{
struct tifm_dev *dev = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL);
if (dev) {
spin_lock_init(&dev->lock);
snprintf(dev->wq_name, KOBJ_NAME_LEN, "tifm%u:%u", fm->id, id);
dev->wq = create_singlethread_workqueue(dev->wq_name);
if (!dev->wq) {
kfree(dev);
return NULL;
}
dev->dev.parent = fm->dev;
dev->dev.bus = &tifm_bus_type;
dev->dev.release = tifm_free_device;
dev->signal_irq = tifm_dummy_signal_irq;
}
return dev;
}
......@@ -219,6 +249,7 @@ static int tifm_device_remove(struct device *dev)
struct tifm_driver *drv = fm_dev->drv;
if (drv) {
fm_dev->signal_irq = tifm_dummy_signal_irq;
if (drv->remove)
drv->remove(fm_dev);
fm_dev->drv = NULL;
......@@ -233,6 +264,8 @@ int tifm_register_driver(struct tifm_driver *drv)
drv->driver.bus = &tifm_bus_type;
drv->driver.probe = tifm_device_probe;
drv->driver.remove = tifm_device_remove;
drv->driver.suspend = tifm_device_suspend;
drv->driver.resume = tifm_device_resume;
return driver_register(&drv->driver);
}
......
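The bus-level suspend/resume hooks added above simply forward to the client driver's callbacks when they are set, and report success otherwise. The following standalone mock illustrates that dispatch pattern; the structs and the int "state" argument are simplified stand-ins for the real tifm_dev/tifm_driver/pm_message_t types, not the kernel API itself.

#include <stdio.h>

/* Simplified stand-ins for the kernel's tifm_dev / tifm_driver. */
struct tifm_dev;

struct tifm_driver {
	int (*suspend)(struct tifm_dev *dev, int state);
	int (*resume)(struct tifm_dev *dev);
};

struct tifm_dev {
	struct tifm_driver *drv;
	const char *name;
};

/* Mirrors tifm_device_suspend(): forward to the driver if it has a hook. */
static int bus_suspend(struct tifm_dev *dev, int state)
{
	if (dev->drv && dev->drv->suspend)
		return dev->drv->suspend(dev, state);
	return 0;	/* driver has no PM support: treat as success */
}

static int sd_suspend(struct tifm_dev *dev, int state)
{
	printf("%s: suspending (event %d)\n", dev->name, state);
	return 0;
}

int main(void)
{
	struct tifm_driver sd_drv = { .suspend = sd_suspend };
	struct tifm_dev with_pm = { .drv = &sd_drv, .name = "tifm_sd" };
	struct tifm_dev without_pm = { .drv = NULL, .name = "tifm_ms" };

	printf("with_pm:    %d\n", bus_suspend(&with_pm, 3));
	printf("without_pm: %d\n", bus_suspend(&without_pm, 3));
	return 0;
}

Drivers that never fill in the new callbacks keep working unchanged, since the bus falls back to returning 0.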
......@@ -823,6 +823,9 @@ static int __init at91_mci_probe(struct platform_device *pdev)
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->caps = MMC_CAP_BYTEBLOCK;
mmc->max_blk_size = 4095;
mmc->max_blk_count = mmc->max_req_size;
host = mmc_priv(mmc);
host->mmc = mmc;
host->buffer = NULL;
......
......@@ -152,8 +152,9 @@ static inline int au1xmmc_card_inserted(struct au1xmmc_host *host)
? 1 : 0;
}
static inline int au1xmmc_card_readonly(struct au1xmmc_host *host)
static int au1xmmc_card_readonly(struct mmc_host *mmc)
{
struct au1xmmc_host *host = mmc_priv(mmc);
return (bcsr->status & au1xmmc_card_table[host->id].wpstatus)
? 1 : 0;
}
......@@ -193,6 +194,8 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
u32 mmccmd = (cmd->opcode << SD_CMD_CI_SHIFT);
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE:
break;
case MMC_RSP_R1:
mmccmd |= SD_CMD_RT_1;
break;
......@@ -205,6 +208,10 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,
case MMC_RSP_R3:
mmccmd |= SD_CMD_RT_3;
break;
default:
printk(KERN_INFO "au1xmmc: unhandled response type %02x\n",
mmc_resp_type(cmd));
return MMC_ERR_INVALID;
}
switch(cmd->opcode) {
......@@ -878,6 +885,7 @@ static void au1xmmc_init_dma(struct au1xmmc_host *host)
static const struct mmc_host_ops au1xmmc_ops = {
.request = au1xmmc_request,
.set_ios = au1xmmc_set_ios,
.get_ro = au1xmmc_card_readonly,
};
static int __devinit au1xmmc_probe(struct platform_device *pdev)
......@@ -914,6 +922,9 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE;
mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
mmc->max_blk_size = 2048;
mmc->max_blk_count = 512;
mmc->ocr_avail = AU1XMMC_OCR;
host = mmc_priv(mmc);
......
......@@ -958,8 +958,10 @@ static int imxmci_probe(struct platform_device *pdev)
/* MMC core transfer sizes tunable parameters */
mmc->max_hw_segs = 64;
mmc->max_phys_segs = 64;
mmc->max_sectors = 64; /* default 1 << (PAGE_CACHE_SHIFT - 9) */
mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
mmc->max_blk_size = 2048;
mmc->max_blk_count = 65535;
host = mmc_priv(mmc);
host->mmc = mmc;
......
......@@ -103,11 +103,16 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mmc_hostname(host), mrq->cmd->opcode,
mrq->cmd->arg, mrq->cmd->flags);
WARN_ON(host->card_busy == NULL);
WARN_ON(!host->claimed);
mrq->cmd->error = 0;
mrq->cmd->mrq = mrq;
if (mrq->data) {
BUG_ON(mrq->data->blksz > host->max_blk_size);
BUG_ON(mrq->data->blocks > host->max_blk_count);
BUG_ON(mrq->data->blocks * mrq->data->blksz >
host->max_req_size);
mrq->cmd->data = mrq->data;
mrq->data->error = 0;
mrq->data->mrq = mrq;
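The three new host fields bound a data transfer in different dimensions: bytes per block, blocks per request, and total bytes per request, which is what the BUG_ON checks above enforce. A small sketch (hypothetical helper, PAGE_CACHE_SIZE assumed to be 4096 for the worked numbers) of how many blocks of a given size fit within all three limits:

#include <stdio.h>

/* Hypothetical helper: largest block count that satisfies all three
 * host limits for a given block size, mirroring the BUG_ON checks in
 * mmc_start_request(). */
static unsigned int max_blocks_for(unsigned int blksz,
				   unsigned int max_blk_size,
				   unsigned int max_blk_count,
				   unsigned int max_req_size)
{
	unsigned int blocks;

	if (blksz == 0 || blksz > max_blk_size)
		return 0;			/* block size not supported */

	blocks = max_req_size / blksz;		/* total-bytes limit */
	if (blocks > max_blk_count)
		blocks = max_blk_count;		/* per-request block limit */
	return blocks;
}

int main(void)
{
	/* Core defaults from mmc_alloc_host(): 512-byte blocks, one page
	 * per request (4096 bytes assumed here). */
	printf("%u blocks\n", max_blocks_for(512, 512, 4096 / 512, 4096));
	return 0;
}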
......@@ -157,7 +162,7 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries
{
struct mmc_request mrq;
BUG_ON(host->card_busy == NULL);
BUG_ON(!host->claimed);
memset(&mrq, 0, sizeof(struct mmc_request));
......@@ -195,7 +200,7 @@ int mmc_wait_for_app_cmd(struct mmc_host *host, unsigned int rca,
int i, err;
BUG_ON(host->card_busy == NULL);
BUG_ON(!host->claimed);
BUG_ON(retries < 0);
err = MMC_ERR_INVALID;
......@@ -289,7 +294,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card,
else
limit_us = 100000;
if (timeout_us > limit_us) {
/*
* SDHC cards always use these fixed values.
*/
if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
data->timeout_ns = limit_us * 1000;
data->timeout_clks = 0;
}
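Block-addressed (SDHC) cards report fixed, meaningless timeout fields, so the computed value is ignored and the class limit is always applied to them; other cards are only clamped when they exceed it. A minimal sketch of that clamping, using the 100 ms limit visible in this hunk as an illustrative value:

#include <stdio.h>

/* Sketch of the clamp added above: block-addressed (SDHC) cards always
 * take the fixed limit, other cards only when their computed timeout
 * exceeds it. limit_us comes from earlier in mmc_set_data_timeout(). */
static unsigned int effective_timeout_ns(unsigned int timeout_us,
					 unsigned int limit_us,
					 int blockaddr)
{
	if (timeout_us > limit_us || blockaddr)
		return limit_us * 1000;		/* clamp to the limit */
	return timeout_us * 1000;
}

int main(void)
{
	printf("SD 1.x, 30ms computed: %u ns\n",
	       effective_timeout_ns(30000, 100000, 0));	/* kept */
	printf("SDHC,   30ms computed: %u ns\n",
	       effective_timeout_ns(30000, 100000, 1));	/* clamped */
	return 0;
}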
......@@ -320,14 +328,14 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card)
spin_lock_irqsave(&host->lock, flags);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (host->card_busy == NULL)
if (!host->claimed)
break;
spin_unlock_irqrestore(&host->lock, flags);
schedule();
spin_lock_irqsave(&host->lock, flags);
}
set_current_state(TASK_RUNNING);
host->card_busy = card;
host->claimed = 1;
spin_unlock_irqrestore(&host->lock, flags);
remove_wait_queue(&host->wq, &wait);
......@@ -353,10 +361,10 @@ void mmc_release_host(struct mmc_host *host)
{
unsigned long flags;
BUG_ON(host->card_busy == NULL);
BUG_ON(!host->claimed);
spin_lock_irqsave(&host->lock, flags);
host->card_busy = NULL;
host->claimed = 0;
spin_unlock_irqrestore(&host->lock, flags);
wake_up(&host->wq);
......@@ -372,7 +380,7 @@ static inline void mmc_set_ios(struct mmc_host *host)
mmc_hostname(host), ios->clock, ios->bus_mode,
ios->power_mode, ios->chip_select, ios->vdd,
ios->bus_width);
host->ops->set_ios(host, ios);
}
......@@ -381,7 +389,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
int err;
struct mmc_command cmd;
BUG_ON(host->card_busy == NULL);
BUG_ON(!host->claimed);
if (host->card_selected == card)
return MMC_ERR_NONE;
......@@ -588,34 +596,65 @@ static void mmc_decode_csd(struct mmc_card *card)
if (mmc_card_sd(card)) {
csd_struct = UNSTUFF_BITS(resp, 126, 2);
if (csd_struct != 0) {
switch (csd_struct) {
case 0:
m = UNSTUFF_BITS(resp, 115, 4);
e = UNSTUFF_BITS(resp, 112, 3);
csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
m = UNSTUFF_BITS(resp, 99, 4);
e = UNSTUFF_BITS(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
e = UNSTUFF_BITS(resp, 47, 3);
m = UNSTUFF_BITS(resp, 62, 12);
csd->capacity = (1 + m) << (e + 2);
csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
break;
case 1:
/*
* This is a block-addressed SDHC card. Most
* interesting fields are unused and have fixed
* values. To avoid getting tripped by buggy cards,
* we assume those fixed values ourselves.
*/
mmc_card_set_blockaddr(card);
csd->tacc_ns = 0; /* Unused */
csd->tacc_clks = 0; /* Unused */
m = UNSTUFF_BITS(resp, 99, 4);
e = UNSTUFF_BITS(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
m = UNSTUFF_BITS(resp, 48, 22);
csd->capacity = (1 + m) << 10;
csd->read_blkbits = 9;
csd->read_partial = 0;
csd->write_misalign = 0;
csd->read_misalign = 0;
csd->r2w_factor = 4; /* Unused */
csd->write_blkbits = 9;
csd->write_partial = 0;
break;
default:
printk("%s: unrecognised CSD structure version %d\n",
mmc_hostname(card->host), csd_struct);
mmc_card_set_bad(card);
return;
}
m = UNSTUFF_BITS(resp, 115, 4);
e = UNSTUFF_BITS(resp, 112, 3);
csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
m = UNSTUFF_BITS(resp, 99, 4);
e = UNSTUFF_BITS(resp, 96, 3);
csd->max_dtr = tran_exp[e] * tran_mant[m];
csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
e = UNSTUFF_BITS(resp, 47, 3);
m = UNSTUFF_BITS(resp, 62, 12);
csd->capacity = (1 + m) << (e + 2);
csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
} else {
/*
* We only understand CSD structure v1.1 and v1.2.
......@@ -848,6 +887,41 @@ static int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
return err;
}
static int mmc_send_if_cond(struct mmc_host *host, u32 ocr, int *rsd2)
{
struct mmc_command cmd;
int err, sd2;
static const u8 test_pattern = 0xAA;
/*
* To support SD 2.0 cards, we must always invoke SD_SEND_IF_COND
* before SD_APP_OP_COND. This command will harmlessly fail for
* SD 1.0 cards.
*/
cmd.opcode = SD_SEND_IF_COND;
cmd.arg = ((ocr & 0xFF8000) != 0) << 8 | test_pattern;
cmd.flags = MMC_RSP_R7 | MMC_CMD_BCR;
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err == MMC_ERR_NONE) {
if ((cmd.resp[0] & 0xFF) == test_pattern) {
sd2 = 1;
} else {
sd2 = 0;
err = MMC_ERR_FAILED;
}
} else {
/*
* Treat errors as an SD 1.0 card.
*/
sd2 = 0;
err = MMC_ERR_NONE;
}
if (rsd2)
*rsd2 = sd2;
return err;
}
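Worked example of the SD_SEND_IF_COND (CMD8) argument built above: for any host OCR with bits set in the 2.7-3.6 V range, the voltage-supplied field becomes 1, so the argument is 0x1AA, and an SD 2.0 card must echo the 0xAA check pattern in its response. The OCR value below is illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int ocr = 0x00300000;	/* e.g. MMC_VDD_32_33 | MMC_VDD_33_34 */
	unsigned int resp0 = 0x1AA;	/* hypothetical R7 response word */
	unsigned int arg;

	/* Same expression as in mmc_send_if_cond() above. */
	arg = ((ocr & 0xFF8000) != 0) << 8 | 0xAA;
	printf("CMD8 arg = 0x%03X\n", arg);	/* prints 0x1AA */

	/* The card is treated as SD 2.0 only if it echoes the pattern. */
	printf("pattern echoed: %s\n",
	       (resp0 & 0xFF) == 0xAA ? "yes (SD 2.0)" : "no");
	return 0;
}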
/*
* Discover cards by requesting their CID. If this command
* times out, it is not an error; there are no further cards
......@@ -1018,7 +1092,8 @@ static void mmc_process_ext_csds(struct mmc_host *host)
mmc_wait_for_req(host, &mrq);
if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
mmc_card_set_dead(card);
printk("%s: unable to read EXT_CSD, performance "
"might suffer.\n", mmc_hostname(card->host));
continue;
}
......@@ -1034,7 +1109,6 @@ static void mmc_process_ext_csds(struct mmc_host *host)
printk("%s: card is mmc v4 but doesn't support "
"any high-speed modes.\n",
mmc_hostname(card->host));
mmc_card_set_bad(card);
continue;
}
......@@ -1215,7 +1289,9 @@ static void mmc_read_switch_caps(struct mmc_host *host)
mmc_wait_for_req(host, &mrq);
if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
mmc_card_set_dead(card);
printk("%s: unable to read switch capabilities, "
"performance might suffer.\n",
mmc_hostname(card->host));
continue;
}
......@@ -1247,12 +1323,8 @@ static void mmc_read_switch_caps(struct mmc_host *host)
mmc_wait_for_req(host, &mrq);
if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE) {
mmc_card_set_dead(card);
continue;
}
if ((status[16] & 0xF) != 1) {
if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE ||
(status[16] & 0xF) != 1) {
printk(KERN_WARNING "%s: Problem switching card "
"into high-speed mode!\n",
mmc_hostname(host));
......@@ -1334,6 +1406,10 @@ static void mmc_setup(struct mmc_host *host)
mmc_power_up(host);
mmc_idle_cards(host);
err = mmc_send_if_cond(host, host->ocr_avail, NULL);
if (err != MMC_ERR_NONE) {
return;
}
err = mmc_send_app_op_cond(host, 0, &ocr);
/*
......@@ -1386,10 +1462,21 @@ static void mmc_setup(struct mmc_host *host)
* all get the idea that they should be ready for CMD2.
* (My SanDisk card seems to need this.)
*/
if (host->mode == MMC_MODE_SD)
mmc_send_app_op_cond(host, host->ocr, NULL);
else
if (host->mode == MMC_MODE_SD) {
int err, sd2;
err = mmc_send_if_cond(host, host->ocr, &sd2);
if (err == MMC_ERR_NONE) {
/*
* If SD_SEND_IF_COND indicates an SD 2.0
* compliant card, set bit 30 of the OCR to
* indicate that we can handle block-addressed
* SDHC cards.
*/
mmc_send_app_op_cond(host, host->ocr | (sd2 << 30), NULL);
}
} else {
mmc_send_op_cond(host, host->ocr, NULL);
}
mmc_discover_cards(host);
......@@ -1519,8 +1606,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
*/
host->max_hw_segs = 1;
host->max_phys_segs = 1;
host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
host->max_seg_size = PAGE_CACHE_SIZE;
host->max_req_size = PAGE_CACHE_SIZE;
host->max_blk_size = 512;
host->max_blk_count = PAGE_CACHE_SIZE / 512;
}
return host;
......
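The CSD structure v2 decode earlier in this file computes capacity as (C_SIZE + 1) << 10 blocks, with read_blkbits fixed at 9 (512-byte blocks). A small worked example with a hypothetical C_SIZE value, showing how that maps to a card size:

#include <stdio.h>

int main(void)
{
	unsigned int c_size = 7551;	/* hypothetical 22-bit C_SIZE field */
	unsigned int capacity;		/* in 512-byte blocks (read_blkbits = 9) */
	unsigned long long bytes;

	capacity = (1 + c_size) << 10;		/* as in mmc_decode_csd() */
	bytes = (unsigned long long)capacity << 9;

	printf("%u blocks = %llu bytes (~%llu MiB)\n",
	       capacity, bytes, bytes >> 20);	/* ~3776 MiB for this C_SIZE */
	return 0;
}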
......@@ -237,13 +237,17 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.mrq.cmd = &brq.cmd;
brq.mrq.data = &brq.data;
brq.cmd.arg = req->sector << 9;
brq.cmd.arg = req->sector;
if (!mmc_card_blockaddr(card))
brq.cmd.arg <<= 9;
brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
brq.data.blksz = 1 << md->block_bits;
brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
brq.stop.opcode = MMC_STOP_TRANSMISSION;
brq.stop.arg = 0;
brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
if (brq.data.blocks > card->host->max_blk_count)
brq.data.blocks = card->host->max_blk_count;
mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
......@@ -375,9 +379,10 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
spin_unlock_irq(&md->lock);
}
flush_queue:
mmc_card_release_host(card);
flush_queue:
spin_lock_irq(&md->lock);
while (ret) {
ret = end_that_request_chunk(req, 0,
......@@ -494,6 +499,10 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
struct mmc_command cmd;
int err;
/* Block-addressed cards ignore MMC_SET_BLOCKLEN. */
if (mmc_card_blockaddr(card))
return 0;
mmc_card_claim_host(card);
cmd.opcode = MMC_SET_BLOCKLEN;
cmd.arg = 1 << md->block_bits;
......
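Byte- versus block-addressing in the request setup above: standard cards take a byte offset (sector << 9) as the command argument, while block-addressed SDHC cards take the sector number directly. A minimal illustration of that difference:

#include <stdio.h>

/* Mirrors the argument setup in mmc_blk_issue_rq(): block-addressed
 * (SDHC) cards take the sector number, others a byte offset. */
static unsigned int mmc_cmd_arg(unsigned int sector, int blockaddr)
{
	unsigned int arg = sector;

	if (!blockaddr)
		arg <<= 9;	/* convert 512-byte sectors to bytes */
	return arg;
}

int main(void)
{
	printf("sector 2048, byte-addressed:  0x%08X\n", mmc_cmd_arg(2048, 0));
	printf("sector 2048, block-addressed: 0x%08X\n", mmc_cmd_arg(2048, 1));
	return 0;
}

Since the argument is a 32-bit value, byte addressing runs out at 4 GiB, which is why high-capacity cards switch to block addressing.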
......@@ -147,7 +147,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
blk_queue_prep_rq(mq->queue, mmc_prep_request);
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_sectors(mq->queue, host->max_sectors);
blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
......
......@@ -199,7 +199,7 @@ void mmc_init_card(struct mmc_card *card, struct mmc_host *host)
memset(card, 0, sizeof(struct mmc_card));
card->host = host;
device_initialize(&card->dev);
card->dev.parent = mmc_dev(host);
card->dev.parent = mmc_classdev(host);
card->dev.bus = &mmc_bus_type;
card->dev.release = mmc_release_card;
}
......
......@@ -524,15 +524,24 @@ static int mmci_probe(struct amba_device *dev, void *id)
/*
* Since we only have a 16-bit data length register, we must
* ensure that we don't exceed 2^16-1 bytes in a single request.
* Choose 64 (512-byte) sectors as the limit.
*/
mmc->max_sectors = 64;
mmc->max_req_size = 65535;
/*
* Set the maximum segment size. Since we aren't doing DMA
* (yet) we are only limited by the data length register.
*/
mmc->max_seg_size = mmc->max_sectors << 9;
mmc->max_seg_size = mmc->max_req_size;
/*
* Block size can be up to 2048 bytes, but must be a power of two.
*/
mmc->max_blk_size = 2048;
/*
* No limit on the number of blocks transferred.
*/
mmc->max_blk_count = mmc->max_req_size;
spin_lock_init(&host->lock);
......
......@@ -1099,8 +1099,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
*/
mmc->max_phys_segs = 32;
mmc->max_hw_segs = 32;
mmc->max_sectors = 256; /* NBLK max 11-bits, OMAP also limited by DMA */
mmc->max_seg_size = mmc->max_sectors * 512;
mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */
mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
if (host->power_pin >= 0) {
if ((ret = omap_request_gpio(host->power_pin)) != 0) {
......
......@@ -450,6 +450,16 @@ static int pxamci_probe(struct platform_device *pdev)
*/
mmc->max_seg_size = PAGE_SIZE;
/*
* Block length register is 10 bits.
*/
mmc->max_blk_size = 1023;
/*
* Block count register is 16 bits.
*/
mmc->max_blk_count = 65535;
host = mmc_priv(mmc);
host->mmc = mmc;
host->dma = -1;
......
......@@ -37,6 +37,7 @@ static unsigned int debug_quirks = 0;
#define SDHCI_QUIRK_FORCE_DMA (1<<1)
/* Controller doesn't like some resets when there is no card inserted. */
#define SDHCI_QUIRK_NO_CARD_NO_RESET (1<<2)
#define SDHCI_QUIRK_SINGLE_POWER_WRITE (1<<3)
static const struct pci_device_id pci_ids[] __devinitdata = {
{
......@@ -65,6 +66,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.driver_data = SDHCI_QUIRK_FORCE_DMA,
},
{
.vendor = PCI_VENDOR_ID_ENE,
.device = PCI_DEVICE_ID_ENE_CB712_SD,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = SDHCI_QUIRK_SINGLE_POWER_WRITE,
},
{ /* Generic SD host controller */
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
},
......@@ -197,15 +206,9 @@ static void sdhci_deactivate_led(struct sdhci_host *host)
* *
\*****************************************************************************/
static inline char* sdhci_kmap_sg(struct sdhci_host* host)
static inline char* sdhci_sg_to_buffer(struct sdhci_host* host)
{
host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ);
return host->mapped_sg + host->cur_sg->offset;
}
static inline void sdhci_kunmap_sg(struct sdhci_host* host)
{
kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
return page_address(host->cur_sg->page) + host->cur_sg->offset;
}
static inline int sdhci_next_sg(struct sdhci_host* host)
......@@ -240,7 +243,7 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
chunk_remain = 0;
data = 0;
buffer = sdhci_kmap_sg(host) + host->offset;
buffer = sdhci_sg_to_buffer(host) + host->offset;
while (blksize) {
if (chunk_remain == 0) {
......@@ -264,16 +267,13 @@ static void sdhci_read_block_pio(struct sdhci_host *host)
}
if (host->remain == 0) {
sdhci_kunmap_sg(host);
if (sdhci_next_sg(host) == 0) {
BUG_ON(blksize != 0);
return;
}
buffer = sdhci_kmap_sg(host);
buffer = sdhci_sg_to_buffer(host);
}
}
sdhci_kunmap_sg(host);
}
static void sdhci_write_block_pio(struct sdhci_host *host)
......@@ -290,7 +290,7 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
data = 0;
bytes = 0;
buffer = sdhci_kmap_sg(host) + host->offset;
buffer = sdhci_sg_to_buffer(host) + host->offset;
while (blksize) {
size = min(host->size, host->remain);
......@@ -314,16 +314,13 @@ static void sdhci_write_block_pio(struct sdhci_host *host)
}
if (host->remain == 0) {
sdhci_kunmap_sg(host);
if (sdhci_next_sg(host) == 0) {
BUG_ON(blksize != 0);
return;
}
buffer = sdhci_kmap_sg(host);
buffer = sdhci_sg_to_buffer(host);
}
}
sdhci_kunmap_sg(host);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
......@@ -372,7 +369,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)
/* Sanity checks */
BUG_ON(data->blksz * data->blocks > 524288);
BUG_ON(data->blksz > host->max_block);
BUG_ON(data->blksz > host->mmc->max_blk_size);
BUG_ON(data->blocks > 65535);
/* timeout in us */
......@@ -674,10 +671,17 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
if (host->power == power)
return;
writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
if (power == (unsigned short)-1)
if (power == (unsigned short)-1) {
writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
goto out;
}
/*
* Spec says that we should clear the power reg before setting
* a new value. Some controllers don't seem to like this though.
*/
if (!(host->chip->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
writeb(0, host->ioaddr + SDHCI_POWER_CONTROL);
pwr = SDHCI_POWER_ON;
......@@ -1109,7 +1113,9 @@ static int sdhci_resume (struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_enable_device(pdev);
ret = pci_enable_device(pdev);
if (ret)
return ret;
for (i = 0;i < chip->num_slots;i++) {
if (!chip->hosts[i])
......@@ -1274,15 +1280,6 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
if (caps & SDHCI_TIMEOUT_CLK_UNIT)
host->timeout_clk *= 1000;
host->max_block = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
if (host->max_block >= 3) {
printk(KERN_ERR "%s: Invalid maximum block size.\n",
host->slot_descr);
ret = -ENODEV;
goto unmap;
}
host->max_block = 512 << host->max_block;
/*
* Set host parameters.
*/
......@@ -1294,9 +1291,9 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
mmc->ocr_avail = 0;
if (caps & SDHCI_CAN_VDD_330)
mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
else if (caps & SDHCI_CAN_VDD_300)
if (caps & SDHCI_CAN_VDD_300)
mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
else if (caps & SDHCI_CAN_VDD_180)
if (caps & SDHCI_CAN_VDD_180)
mmc->ocr_avail |= MMC_VDD_17_18|MMC_VDD_18_19;
if ((host->max_clk > 25000000) && !(caps & SDHCI_CAN_DO_HISPD)) {
......@@ -1326,15 +1323,33 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
/*
* Maximum number of sectors in one transfer. Limited by DMA boundary
* size (512KiB), which means (512 KiB/512=) 1024 entries.
* size (512KiB).
*/
mmc->max_sectors = 1024;
mmc->max_req_size = 524288;
/*
* Maximum segment size. Could be one segment with the maximum number
* of sectors.
* of bytes.
*/
mmc->max_seg_size = mmc->max_req_size;
/*
* Maximum block size. This varies from controller to controller and
* is specified in the capabilities register.
*/
mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
if (mmc->max_blk_size >= 3) {
printk(KERN_ERR "%s: Invalid maximum block size.\n",
host->slot_descr);
ret = -ENODEV;
goto unmap;
}
mmc->max_blk_size = 512 << mmc->max_blk_size;
/*
* Maximum block count.
*/
mmc->max_seg_size = mmc->max_sectors * 512;
mmc->max_blk_count = 65535;
/*
* Init tasklets.
......
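The maximum block size moves from the private host structure to mmc->max_blk_size but is still decoded from the capabilities register: a 2-bit field selects 512, 1024 or 2048 bytes, and 3 is reserved, which the probe rejects. A hedged decoder mirroring that logic; the mask and shift constants are the usual sdhci.h values and are assumptions here, since that header is not part of this hunk.

#include <stdio.h>

#define SDHCI_MAX_BLOCK_MASK	0x00030000	/* assumed register layout */
#define SDHCI_MAX_BLOCK_SHIFT	16

/* Returns the maximum block size in bytes, or 0 for the reserved encoding
 * that sdhci_probe_slot() refuses with -ENODEV. */
static unsigned int sdhci_max_blk_size(unsigned int caps)
{
	unsigned int field;

	field = (caps & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT;
	if (field >= 3)
		return 0;		/* invalid/reserved encoding */
	return 512 << field;
}

int main(void)
{
	printf("field 0 -> %u bytes\n", sdhci_max_blk_size(0x00000000));
	printf("field 2 -> %u bytes\n", sdhci_max_blk_size(0x00020000));
	printf("field 3 -> %u (reserved)\n", sdhci_max_blk_size(0x00030000));
	return 0;
}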
......@@ -174,7 +174,6 @@ struct sdhci_host {
unsigned int max_clk; /* Max possible freq (MHz) */
unsigned int timeout_clk; /* Timeout freq (KHz) */
unsigned int max_block; /* Max block size (bytes) */
unsigned int clock; /* Current clock (MHz) */
unsigned short power; /* Current voltage */
......@@ -184,7 +183,6 @@ struct sdhci_host {
struct mmc_data *data; /* Current data request */
struct scatterlist *cur_sg; /* We're working on this */
char *mapped_sg; /* This is where it's mapped */
int num_sg; /* Entries left */
int offset; /* Offset into current sg */
int remain; /* Bytes left in current */
......
This diff is collapsed.
/*
* linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
*
* Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
* Copyright (C) 2004-2006 Pierre Ossman, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -272,16 +272,9 @@ static inline int wbsd_next_sg(struct wbsd_host *host)
return host->num_sg;
}
static inline char *wbsd_kmap_sg(struct wbsd_host *host)
static inline char *wbsd_sg_to_buffer(struct wbsd_host *host)
{
host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
host->cur_sg->offset;
return host->mapped_sg;
}
static inline void wbsd_kunmap_sg(struct wbsd_host *host)
{
kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
return page_address(host->cur_sg->page) + host->cur_sg->offset;
}
static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
......@@ -302,12 +295,11 @@ static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
* we do not transfer too much.
*/
for (i = 0; i < len; i++) {
sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
sgbuf = page_address(sg[i].page) + sg[i].offset;
if (size < sg[i].length)
memcpy(dmabuf, sgbuf, size);
else
memcpy(dmabuf, sgbuf, sg[i].length);
kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
dmabuf += sg[i].length;
if (size < sg[i].length)
......@@ -347,7 +339,7 @@ static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
* we do not transfer too much.
*/
for (i = 0; i < len; i++) {
sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
sgbuf = page_address(sg[i].page) + sg[i].offset;
if (size < sg[i].length)
memcpy(sgbuf, dmabuf, size);
else
......@@ -497,7 +489,7 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
if (data->bytes_xfered == host->size)
return;
buffer = wbsd_kmap_sg(host) + host->offset;
buffer = wbsd_sg_to_buffer(host) + host->offset;
/*
* Drain the fifo. This has a tendency to loop longer
......@@ -526,17 +518,13 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
/*
* Transfer done?
*/
if (data->bytes_xfered == host->size) {
wbsd_kunmap_sg(host);
if (data->bytes_xfered == host->size)
return;
}
/*
* End of scatter list entry?
*/
if (host->remain == 0) {
wbsd_kunmap_sg(host);
/*
* Get next entry. Check if last.
*/
......@@ -554,13 +542,11 @@ static void wbsd_empty_fifo(struct wbsd_host *host)
return;
}
buffer = wbsd_kmap_sg(host);
buffer = wbsd_sg_to_buffer(host);
}
}
}
wbsd_kunmap_sg(host);
/*
* This is a very dirty hack to solve a
* hardware problem. The chip doesn't trigger
......@@ -583,7 +569,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
if (data->bytes_xfered == host->size)
return;
buffer = wbsd_kmap_sg(host) + host->offset;
buffer = wbsd_sg_to_buffer(host) + host->offset;
/*
* Fill the fifo. This has a tendency to loop longer
......@@ -612,17 +598,13 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
/*
* Transfer done?
*/
if (data->bytes_xfered == host->size) {
wbsd_kunmap_sg(host);
if (data->bytes_xfered == host->size)
return;
}
/*
* End of scatter list entry?
*/
if (host->remain == 0) {
wbsd_kunmap_sg(host);
/*
* Get next entry. Check if last.
*/
......@@ -640,13 +622,11 @@ static void wbsd_fill_fifo(struct wbsd_host *host)
return;
}
buffer = wbsd_kmap_sg(host);
buffer = wbsd_sg_to_buffer(host);
}
}
}
wbsd_kunmap_sg(host);
/*
* The controller stops sending interrupts for
* 'FIFO empty' under certain conditions. So we
......@@ -909,6 +889,45 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
* transferred.
*/
if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
/*
* The hardware is so delightfully stupid that it has a list
* of "data" commands. If a command isn't on this list, it'll
* just go back to the idle state and won't send any data
* interrupts.
*/
switch (cmd->opcode) {
case 11:
case 17:
case 18:
case 20:
case 24:
case 25:
case 26:
case 27:
case 30:
case 42:
case 56:
break;
/* ACMDs. We don't keep track of state, so we just treat them
* like any other command. */
case 51:
break;
default:
#ifdef CONFIG_MMC_DEBUG
printk(KERN_WARNING "%s: Data command %d is not "
"supported by this controller.\n",
mmc_hostname(host->mmc), cmd->opcode);
#endif
cmd->data->error = MMC_ERR_INVALID;
if (cmd->data->stop)
wbsd_send_command(host, cmd->data->stop);
goto done;
};
/*
* Dirty fix for hardware bug.
*/
......@@ -1343,16 +1362,27 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
mmc->max_phys_segs = 128;
/*
* Maximum number of sectors in one transfer. Also limited by 64kB
* buffer.
* Maximum request size. Also limited by 64KiB buffer.
*/
mmc->max_sectors = 128;
mmc->max_req_size = 65536;
/*
* Maximum segment size. Could be one segment with the maximum number
* of segments.
* of bytes.
*/
mmc->max_seg_size = mmc->max_req_size;
/*
* Maximum block size. We have 12 bits (= 4095) but have to subtract
* space for CRC. So the maximum is 4095 - 4*2 = 4087.
*/
mmc->max_blk_size = 4087;
/*
* Maximum block count. There is no real limit so the maximum
* request size will be the only restriction.
*/
mmc->max_seg_size = mmc->max_sectors * 512;
mmc->max_blk_count = mmc->max_req_size;
dev_set_drvdata(dev, mmc);
......
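The controller's fixed list of recognised data commands, handled by the new switch in wbsd_request() above, can also be read as a simple predicate. A standalone sketch of the same whitelist (hypothetical helper name, same opcodes as the patch):

#include <stdio.h>

/* Opcodes the W83L51xD treats as data commands; anything else never
 * raises data interrupts (see the switch in wbsd_request()). */
static int wbsd_is_data_command(unsigned int opcode)
{
	switch (opcode) {
	case 11: case 17: case 18: case 20:
	case 24: case 25: case 26: case 27:
	case 30: case 42: case 51: case 56:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("CMD18 (READ_MULTIPLE_BLOCK): %d\n", wbsd_is_data_command(18));
	printf("CMD8  (SEND_IF_COND):        %d\n", wbsd_is_data_command(8));
	return 0;
}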
......@@ -154,7 +154,6 @@ struct wbsd_host
struct scatterlist* cur_sg; /* Current SG entry */
unsigned int num_sg; /* Number of entries left */
void* mapped_sg; /* vaddr of mapped sg */
unsigned int offset; /* Offset into current entry */
unsigned int remain; /* Data left in current entry */
......
......@@ -71,6 +71,7 @@ struct mmc_card {
#define MMC_STATE_SDCARD (1<<3) /* is an SD card */
#define MMC_STATE_READONLY (1<<4) /* card is read-only */
#define MMC_STATE_HIGHSPEED (1<<5) /* card is in high speed mode */
#define MMC_STATE_BLOCKADDR (1<<6) /* card uses block-addressing */
u32 raw_cid[4]; /* raw card CID */
u32 raw_csd[4]; /* raw card CSD */
u32 raw_scr[2]; /* raw card SCR */
......@@ -87,6 +88,7 @@ struct mmc_card {
#define mmc_card_sd(c) ((c)->state & MMC_STATE_SDCARD)
#define mmc_card_readonly(c) ((c)->state & MMC_STATE_READONLY)
#define mmc_card_highspeed(c) ((c)->state & MMC_STATE_HIGHSPEED)
#define mmc_card_blockaddr(c) ((c)->state & MMC_STATE_BLOCKADDR)
#define mmc_card_set_present(c) ((c)->state |= MMC_STATE_PRESENT)
#define mmc_card_set_dead(c) ((c)->state |= MMC_STATE_DEAD)
......@@ -94,6 +96,7 @@ struct mmc_card {
#define mmc_card_set_sd(c) ((c)->state |= MMC_STATE_SDCARD)
#define mmc_card_set_readonly(c) ((c)->state |= MMC_STATE_READONLY)
#define mmc_card_set_highspeed(c) ((c)->state |= MMC_STATE_HIGHSPEED)
#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
#define mmc_card_name(c) ((c)->cid.prod_name)
#define mmc_card_id(c) ((c)->dev.bus_id)
......
......@@ -92,8 +92,10 @@ struct mmc_host {
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
unsigned short max_hw_segs; /* see blk_queue_max_hw_segments */
unsigned short max_phys_segs; /* see blk_queue_max_phys_segments */
unsigned short max_sectors; /* see blk_queue_max_sectors */
unsigned short unused;
unsigned int max_req_size; /* maximum number of bytes in one req */
unsigned int max_blk_size; /* maximum size of one mmc block */
unsigned int max_blk_count; /* maximum number of blocks in one req */
/* private data */
struct mmc_ios ios; /* current io bus settings */
......@@ -106,8 +108,9 @@ struct mmc_host {
struct list_head cards; /* devices attached to this host */
wait_queue_head_t wq;
spinlock_t lock; /* card_busy lock */
struct mmc_card *card_busy; /* the MMC card claiming host */
spinlock_t lock; /* claimed lock */
unsigned int claimed:1; /* host exclusively claimed */
struct mmc_card *card_selected; /* the selected MMC card */
struct delayed_work detect;
......@@ -126,6 +129,7 @@ static inline void *mmc_priv(struct mmc_host *host)
}
#define mmc_dev(x) ((x)->parent)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) ((x)->class_dev.bus_id)
extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
......
......@@ -43,6 +43,7 @@ struct mmc_command {
#define MMC_RSP_R2 (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC)
#define MMC_RSP_R3 (MMC_RSP_PRESENT)
#define MMC_RSP_R6 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define MMC_RSP_R7 (MMC_RSP_PRESENT|MMC_RSP_CRC|MMC_RSP_OPCODE)
#define mmc_resp_type(cmd) ((cmd)->flags & (MMC_RSP_PRESENT|MMC_RSP_136|MMC_RSP_CRC|MMC_RSP_BUSY|MMC_RSP_OPCODE))
......
......@@ -79,9 +79,12 @@
#define MMC_GEN_CMD 56 /* adtc [0] RD/WR R1 */
/* SD commands type argument response */
/* class 8 */
/* class 0 */
/* This is basically the same command as for MMC with some quirks. */
#define SD_SEND_RELATIVE_ADDR 3 /* bcr R6 */
#define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */
/* class 10 */
#define SD_SWITCH 6 /* adtc [31:0] See below R1 */
/* Application commands */
......@@ -114,6 +117,14 @@
* [3:0] Function group 1
*/
/*
* SD_SEND_IF_COND argument format:
*
* [31:12] Reserved (0)
* [11:8] Host Voltage Supply Flags
* [7:0] Check Pattern (0xAA)
*/
/*
MMC status in R1
Type
......
......@@ -735,9 +735,11 @@
#define PCI_DEVICE_ID_TI_TVP4020 0x3d07
#define PCI_DEVICE_ID_TI_4450 0x8011
#define PCI_DEVICE_ID_TI_XX21_XX11 0x8031
#define PCI_DEVICE_ID_TI_XX21_XX11_FM 0x8033
#define PCI_DEVICE_ID_TI_XX21_XX11_SD 0x8034
#define PCI_DEVICE_ID_TI_X515 0x8036
#define PCI_DEVICE_ID_TI_XX12 0x8039
#define PCI_DEVICE_ID_TI_XX12_FM 0x803b
#define PCI_DEVICE_ID_TI_1130 0xac12
#define PCI_DEVICE_ID_TI_1031 0xac13
#define PCI_DEVICE_ID_TI_1131 0xac15
......@@ -765,6 +767,7 @@
#define PCI_DEVICE_ID_TI_1510 0xac56
#define PCI_DEVICE_ID_TI_X620 0xac8d
#define PCI_DEVICE_ID_TI_X420 0xac8e
#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
#define PCI_VENDOR_ID_SONY 0x104d
......@@ -1971,6 +1974,7 @@
#define PCI_DEVICE_ID_TOPIC_TP560 0x0000
#define PCI_VENDOR_ID_ENE 0x1524
#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550
#define PCI_DEVICE_ID_ENE_1211 0x1211
#define PCI_DEVICE_ID_ENE_1225 0x1225
#define PCI_DEVICE_ID_ENE_1410 0x1410
......
......@@ -17,7 +17,7 @@
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/kthread.h>
/* Host registers (relative to pci base address): */
enum {
......@@ -62,11 +62,10 @@ enum {
#define TIFM_IRQ_ENABLE 0x80000000
#define TIFM_IRQ_SOCKMASK 0x00000001
#define TIFM_IRQ_CARDMASK 0x00000100
#define TIFM_IRQ_FIFOMASK 0x00010000
#define TIFM_IRQ_SOCKMASK(x) (x)
#define TIFM_IRQ_CARDMASK(x) ((x) << 8)
#define TIFM_IRQ_FIFOMASK(x) ((x) << 16)
#define TIFM_IRQ_SETALL 0xffffffff
#define TIFM_IRQ_SETALLSOCK 0x0000000f
#define TIFM_CTRL_LED 0x00000040
#define TIFM_CTRL_FAST_CLK 0x00000100
......@@ -89,10 +88,9 @@ struct tifm_dev {
char __iomem *addr;
spinlock_t lock;
tifm_media_id media_id;
char wq_name[KOBJ_NAME_LEN];
struct workqueue_struct *wq;
unsigned int socket_id;
unsigned int (*signal_irq)(struct tifm_dev *sock,
void (*signal_irq)(struct tifm_dev *sock,
unsigned int sock_irq_status);
struct tifm_driver *drv;
......@@ -103,24 +101,23 @@ struct tifm_driver {
tifm_media_id *id_table;
int (*probe)(struct tifm_dev *dev);
void (*remove)(struct tifm_dev *dev);
int (*suspend)(struct tifm_dev *dev,
pm_message_t state);
int (*resume)(struct tifm_dev *dev);
struct device_driver driver;
};
struct tifm_adapter {
char __iomem *addr;
unsigned int irq_status;
unsigned int insert_mask;
unsigned int remove_mask;
spinlock_t lock;
unsigned int irq_status;
unsigned int socket_change_set;
wait_queue_head_t change_set_notify;
unsigned int id;
unsigned int max_sockets;
char wq_name[KOBJ_NAME_LEN];
unsigned int inhibit_new_cards;
struct workqueue_struct *wq;
struct work_struct media_inserter;
struct work_struct media_remover;
unsigned int num_sockets;
struct tifm_dev **sockets;
struct task_struct *media_switcher;
struct class_device cdev;
struct device *dev;
......@@ -130,9 +127,9 @@ struct tifm_adapter {
struct tifm_adapter *tifm_alloc_adapter(void);
void tifm_free_device(struct device *dev);
void tifm_free_adapter(struct tifm_adapter *fm);
int tifm_add_adapter(struct tifm_adapter *fm);
int tifm_add_adapter(struct tifm_adapter *fm, int (*mediathreadfn)(void *data));
void tifm_remove_adapter(struct tifm_adapter *fm);
struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id);
struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm);
int tifm_register_driver(struct tifm_driver *drv);
void tifm_unregister_driver(struct tifm_driver *drv);
void tifm_eject(struct tifm_dev *sock);
......
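The socket-mask constants change from fixed single-bit defines to macros taking a socket set, so one register write can cover several sockets. A worked example of building a combined interrupt mask for sockets 0 and 2 using the macros from this header; the resulting value is just arithmetic, whether a given write is appropriate remains the driver's concern.

#include <stdio.h>

#define TIFM_IRQ_ENABLE		0x80000000
#define TIFM_IRQ_SOCKMASK(x)	(x)
#define TIFM_IRQ_CARDMASK(x)	((x) << 8)
#define TIFM_IRQ_FIFOMASK(x)	((x) << 16)

int main(void)
{
	unsigned int sockets = (1 << 0) | (1 << 2);	/* sockets 0 and 2 */
	unsigned int mask = TIFM_IRQ_ENABLE
			    | TIFM_IRQ_SOCKMASK(sockets)
			    | TIFM_IRQ_CARDMASK(sockets)
			    | TIFM_IRQ_FIFOMASK(sockets);

	printf("irq mask = 0x%08X\n", mask);	/* prints 0x80050505 */
	return 0;
}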