Commit f3eb0aaa authored by Pierre Ossman

mmc_block: inform block layer about sector count restriction

Make sure we consider the maximum block count when we tell the block
layer about the maximum sector count. That way we don't have to chop
up the request ourselves.
Signed-off-by: Pierre Ossman <drzeus@drzeus.cx>
parent 6501ff60
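
In short: the driver used to clamp brq.data.blocks to the host's max_blk_count inside the request handler and then trim the scatterlist to match. After this commit it advertises the restriction to the block layer once, at queue-init time, so oversized requests simply never arrive. A minimal sketch of that approach, assuming the mmc_host fields shown in the diff below (the helper name is hypothetical, not part of the commit):

    #include <linux/blkdev.h>
    #include <linux/kernel.h>
    #include <linux/mmc/host.h>

    /* Hypothetical helper: report the host's per-command block limit
     * to the block layer so requests are already capped when they
     * reach the driver (512-byte sectors assumed, as in the diff). */
    static void example_set_queue_limits(struct request_queue *q,
                                         struct mmc_host *host)
    {
        blk_queue_max_sectors(q,
            min(host->max_blk_count, host->max_req_size / 512));
    }
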
@@ -215,8 +215,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request brq;
-	int ret = 1, data_size, i;
-	struct scatterlist *sg;
+	int ret = 1;

 	mmc_claim_host(card->host);
@@ -237,8 +236,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.stop.arg = 0;
 		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
-		if (brq.data.blocks > card->host->max_blk_count)
-			brq.data.blocks = card->host->max_blk_count;

 		if (brq.data.blocks > 1) {
 			/* SPI multiblock writes terminate using a special
@@ -270,24 +267,6 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		mmc_queue_bounce_pre(mq);

-		/*
-		 * Adjust the sg list so it is the same size as the
-		 * request.
-		 */
-		if (brq.data.blocks !=
-		    (req->nr_sectors >> (md->block_bits - 9))) {
-			data_size = brq.data.blocks * brq.data.blksz;
-
-			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-				data_size -= sg->length;
-				if (data_size <= 0) {
-					sg->length += data_size;
-					i++;
-					break;
-				}
-			}
-			brq.data.sg_len = i;
-		}
-
 		mmc_wait_for_req(card->host, &brq.mrq);

 		mmc_queue_bounce_post(mq);
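
Why dropping the trimming loop is safe, as a hedged aside (the assertion below is illustrative only, not part of the commit): once the queue-setup hunks that follow cap the queue's sector count at host->max_blk_count, brq.data.blocks can never exceed the host limit, so the clamp removed above never fires and the scatterlist always matches the request exactly.

    /* Illustrative only: with the queue limits below in place, this
     * condition can never trigger inside mmc_blk_issue_rq(), which is
     * what makes the removal above safe. */
    WARN_ON(brq.data.blocks > card->host->max_blk_count);
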
@@ -142,12 +142,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 			bouncesz = host->max_req_size;
 		if (bouncesz > host->max_seg_size)
 			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;

 		if (bouncesz > 512) {
 			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
 			if (!mq->bounce_buf) {
-				printk(KERN_WARNING "%s: unable to allocate "
-					"bounce buffer\n", mmc_card_name(card));
+				printk(KERN_WARNING "%s: unable to "
+					"allocate bounce buffer\n",
+					mmc_card_name(card));
 			}
 		}

 		if (mq->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
@@ -175,7 +182,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	if (!mq->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
+		blk_queue_max_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
 		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
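
A worked example of the new sizing, as a standalone sketch (the function and the sample host values are hypothetical; the arithmetic mirrors the two hunks above):

    #include <stdio.h>

    /* Mirrors the bounce-buffer sizing above: the buffer may not
     * exceed the host's maximum request size, segment size, or
     * per-command block count (512 bytes per block). */
    static unsigned int bounce_size(unsigned int dflt,
                                    unsigned int max_req_size,
                                    unsigned int max_seg_size,
                                    unsigned int max_blk_count)
    {
        unsigned int bouncesz = dflt;

        if (bouncesz > max_req_size)
            bouncesz = max_req_size;
        if (bouncesz > max_seg_size)
            bouncesz = max_seg_size;
        if (bouncesz > max_blk_count * 512)
            bouncesz = max_blk_count * 512;
        return bouncesz;
    }

    int main(void)
    {
        /* hypothetical host: 128 KiB requests, 64 KiB segments,
         * at most 64 blocks per command, 64 KiB default bounce size */
        unsigned int bouncesz = bounce_size(65536, 131072, 65536, 64);

        /* bounced path: the queue limit comes from the buffer size */
        printf("bouncesz = %u, max sectors = %u\n",
               bouncesz, bouncesz / 512);

        /* non-bounced path: min(max_blk_count, max_req_size / 512) */
        printf("direct max sectors = %u\n",
               64 < 131072 / 512 ? 64 : 131072 / 512);
        return 0;
    }

Here the per-command block count is the binding limit: 64 blocks * 512 bytes caps the bounce buffer at 32768 bytes, and both paths end up advertising 64 sectors to the block layer.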