Commit 55db890a authored by Pierre Ossman

mmc: Allow host drivers to specify max block count

Many controllers have an upper limit on the number of blocks that can be
transferred in one request. Allow the host drivers to specify this and make
sure we avoid hitting this limit.

Also change the max_sectors field to avoid confusion. This makes it map
less directly to the block layer limits, but as they didn't apply directly
on MMC cards anyway, this isn't a great loss.
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
parent fe4a3c7a
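
The limits introduced here live as plain fields on struct mmc_host, alongside the existing max_blk_size. Below is a minimal sketch of how a host driver's probe routine might fill them in; the helper name and the concrete register widths are made up for illustration, while the field names come from the include/linux/mmc/host.h hunk at the end of this diff.

#include <linux/mmc/host.h>

/*
 * Sketch only: a hypothetical host driver translating its hardware
 * limits into the fields added by this patch.  The concrete values
 * are invented; real drivers derive them from their registers.
 */
static void example_set_transfer_limits(struct mmc_host *mmc)
{
	mmc->max_blk_size  = 2048;	/* largest single data block */
	mmc->max_blk_count = 65535;	/* e.g. a 16-bit block count register */
	mmc->max_req_size  = mmc->max_blk_size * mmc->max_blk_count;
					/* total bytes in one request */
	mmc->max_seg_size  = mmc->max_req_size;
}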
@@ -824,6 +824,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
 	mmc->caps = MMC_CAP_BYTEBLOCK;
 	mmc->max_blk_size = 4095;
+	mmc->max_blk_count = mmc->max_req_size;
 	host = mmc_priv(mmc);
 	host->mmc = mmc;
@@ -923,6 +923,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
 	mmc->max_phys_segs = AU1XMMC_DESCRIPTOR_COUNT;
 	mmc->max_blk_size = 2048;
+	mmc->max_blk_count = 512;
 	mmc->ocr_avail = AU1XMMC_OCR;
@@ -958,9 +958,10 @@ static int imxmci_probe(struct platform_device *pdev)
 	/* MMC core transfer sizes tunable parameters */
 	mmc->max_hw_segs = 64;
 	mmc->max_phys_segs = 64;
-	mmc->max_sectors = 64;		/* default 1 << (PAGE_CACHE_SHIFT - 9) */
 	mmc->max_seg_size = 64*512;	/* default PAGE_CACHE_SIZE */
+	mmc->max_req_size = 64*512;	/* default PAGE_CACHE_SIZE */
 	mmc->max_blk_size = 2048;
+	mmc->max_blk_count = 65535;
 	host = mmc_priv(mmc);
 	host->mmc = mmc;
@@ -109,6 +109,9 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
 	mrq->cmd->mrq = mrq;
 	if (mrq->data) {
 		BUG_ON(mrq->data->blksz > host->max_blk_size);
+		BUG_ON(mrq->data->blocks > host->max_blk_count);
+		BUG_ON(mrq->data->blocks * mrq->data->blksz >
+			host->max_req_size);
 		mrq->cmd->data = mrq->data;
 		mrq->data->error = 0;
@@ -1605,10 +1608,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
 		 */
 		host->max_hw_segs = 1;
 		host->max_phys_segs = 1;
-		host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		host->max_seg_size = PAGE_CACHE_SIZE;
+		host->max_req_size = PAGE_CACHE_SIZE;
 		host->max_blk_size = 512;
+		host->max_blk_count = PAGE_CACHE_SIZE / 512;
 	}
 	return host;
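
With these defaults in place, mmc_start_request() now sanity-checks every data transfer against all three limits. The BUG_ON()s above are the patch's actual checks; the helper below is only an illustrative restatement of the same conditions, and is not part of the kernel.

/*
 * Illustrative only: the three conditions mmc_start_request() now
 * enforces with BUG_ON() before a data request is started.
 */
static int mmc_limits_ok(unsigned int blksz, unsigned int blocks,
			 unsigned int max_blk_size,
			 unsigned int max_blk_count,
			 unsigned int max_req_size)
{
	return blksz <= max_blk_size &&		/* one block fits the controller */
	       blocks <= max_blk_count &&	/* block count register not exceeded */
	       blocks * blksz <= max_req_size;	/* whole transfer fits one request */
}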
@@ -242,10 +242,12 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.cmd.arg <<= 9;
 		brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
 		brq.data.blksz = 1 << md->block_bits;
-		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
 		brq.stop.opcode = MMC_STOP_TRANSMISSION;
 		brq.stop.arg = 0;
 		brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
+		if (brq.data.blocks > card->host->max_blk_count)
+			brq.data.blocks = card->host->max_blk_count;
 		mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);
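
In the block driver the request is no longer assumed to fit: the block count is clamped to the host's max_blk_count, and the surrounding do/while loop in mmc_blk_issue_rq() issues whatever remains on later passes. A worked example of the clamp, written as plain user-space C rather than kernel code, with values borrowed from the tifm_sd hunk further down:

#include <stdio.h>

int main(void)
{
	unsigned int nr_sectors = 1024;		/* a 512 KiB request from the block layer */
	unsigned int block_bits = 9;		/* 512-byte card blocks */
	unsigned int max_blk_count = 256;	/* tifm_sd's DMA block counter limit */
	unsigned int blocks = nr_sectors >> (block_bits - 9);

	if (blocks > max_blk_count)
		blocks = max_blk_count;

	/* Prints 256: the first pass moves 256 blocks (128 KiB); the
	 * remaining 768 sectors are issued on later iterations. */
	printf("blocks this pass: %u\n", blocks);
	return 0;
}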
@@ -147,7 +147,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	blk_queue_bounce_limit(mq->queue, limit);
-	blk_queue_max_sectors(mq->queue, host->max_sectors);
+	blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
 	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
 	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
 	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
@@ -524,21 +524,25 @@ static int mmci_probe(struct amba_device *dev, void *id)
 	/*
 	 * Since we only have a 16-bit data length register, we must
 	 * ensure that we don't exceed 2^16-1 bytes in a single request.
-	 * Choose 64 (512-byte) sectors as the limit.
 	 */
-	mmc->max_sectors = 64;
+	mmc->max_req_size = 65535;
 	/*
 	 * Set the maximum segment size. Since we aren't doing DMA
 	 * (yet) we are only limited by the data length register.
 	 */
-	mmc->max_seg_size = mmc->max_sectors << 9;
+	mmc->max_seg_size = mmc->max_req_size;
 	/*
 	 * Block size can be up to 2048 bytes, but must be a power of two.
 	 */
 	mmc->max_blk_size = 2048;
+	/*
+	 * No limit on the number of blocks transferred.
+	 */
+	mmc->max_blk_count = mmc->max_req_size;
 	spin_lock_init(&host->lock);
 	writel(0, host->base + MMCIMASK0);
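
Because mmc_queue.c (above) now derives the block layer limit from max_req_size, MMCI's 16-bit data length register works out to 65535 / 512 = 127 sectors per request, roughly double the 64 sectors that were hard-coded before.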
@@ -1100,8 +1100,9 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
 	mmc->max_phys_segs = 32;
 	mmc->max_hw_segs = 32;
 	mmc->max_blk_size = 2048;	/* BLEN is 11 bits (+1) */
-	mmc->max_sectors = 256;		/* NBLK max 11-bits, OMAP also limited by DMA */
-	mmc->max_seg_size = mmc->max_sectors * 512;
+	mmc->max_blk_count = 2048;	/* NBLK is 11 bits (+1) */
+	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+	mmc->max_seg_size = mmc->max_req_size;
 	if (host->power_pin >= 0) {
 		if ((ret = omap_request_gpio(host->power_pin)) != 0) {
@@ -455,6 +455,11 @@ static int pxamci_probe(struct platform_device *pdev)
 	 */
 	mmc->max_blk_size = 1023;
+	/*
+	 * Block count register is 16 bits.
+	 */
+	mmc->max_blk_count = 65535;
 	host = mmc_priv(mmc);
 	host->mmc = mmc;
 	host->dma = -1;
@@ -1333,15 +1333,15 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
 	/*
 	 * Maximum number of sectors in one transfer. Limited by DMA boundary
-	 * size (512KiB), which means (512 KiB/512=) 1024 entries.
+	 * size (512KiB).
 	 */
-	mmc->max_sectors = 1024;
+	mmc->max_req_size = 524288;
 	/*
 	 * Maximum segment size. Could be one segment with the maximum number
-	 * of sectors.
+	 * of bytes.
 	 */
-	mmc->max_seg_size = mmc->max_sectors * 512;
+	mmc->max_seg_size = mmc->max_req_size;
 	/*
 	 * Maximum block size. This varies from controller to controller and
@@ -1356,6 +1356,11 @@ static int __devinit sdhci_probe_slot(struct pci_dev *pdev, int slot)
 	}
 	mmc->max_blk_size = 512 << mmc->max_blk_size;
+	/*
+	 * Maximum block count.
+	 */
+	mmc->max_blk_count = 65535;
 	/*
 	 * Init tasklets.
 	 */
@@ -885,10 +885,13 @@ static int tifm_sd_probe(struct tifm_dev *sock)
 	mmc->f_max = 24000000;
 	mmc->max_hw_segs = 1;
 	mmc->max_phys_segs = 1;
-	mmc->max_sectors = 127;
-	//2k maximum hw block length
-	mmc->max_seg_size = mmc->max_sectors << 11;
+	// limited by DMA counter - it's safer to stick with
+	// block counter has 11 bits though
+	mmc->max_blk_count = 256;
+	// 2k maximum hw block length
 	mmc->max_blk_size = 2048;
+	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+	mmc->max_seg_size = mmc->max_req_size;
 	sock->signal_irq = tifm_sd_signal_irq;
 	rc = tifm_sd_initialize_host(host);
@@ -1343,16 +1343,15 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
 	mmc->max_phys_segs = 128;
 	/*
-	 * Maximum number of sectors in one transfer. Also limited by 64kB
-	 * buffer.
+	 * Maximum request size. Also limited by 64KiB buffer.
 	 */
-	mmc->max_sectors = 128;
+	mmc->max_req_size = 65536;
 	/*
 	 * Maximum segment size. Could be one segment with the maximum number
-	 * of segments.
+	 * of bytes.
 	 */
-	mmc->max_seg_size = mmc->max_sectors * 512;
+	mmc->max_seg_size = mmc->max_req_size;
 	/*
 	 * Maximum block size. We have 12 bits (= 4095) but have to subtract
@@ -1360,6 +1359,12 @@ static int __devinit wbsd_alloc_mmc(struct device *dev)
 	 */
 	mmc->max_blk_size = 4087;
+	/*
+	 * Maximum block count. There is no real limit so the maximum
+	 * request size will be the only restriction.
+	 */
+	mmc->max_blk_count = mmc->max_req_size;
 	dev_set_drvdata(dev, mmc);
 	return 0;
@@ -92,9 +92,10 @@ struct mmc_host {
 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
 	unsigned short		max_hw_segs;	/* see blk_queue_max_hw_segments */
 	unsigned short		max_phys_segs;	/* see blk_queue_max_phys_segments */
-	unsigned short		max_sectors;	/* see blk_queue_max_sectors */
 	unsigned short		unused;
+	unsigned int		max_req_size;	/* maximum number of bytes in one req */
 	unsigned int		max_blk_size;	/* maximum size of one mmc block */
+	unsigned int		max_blk_count;	/* maximum number of blocks in one req */
 	/* private data */
 	struct mmc_ios		ios;		/* current io bus settings */