Commit 38c38cb7 authored by Yoshihiro Shimoda's avatar Yoshihiro Shimoda Committed by Christoph Hellwig

mmc: queue: use bigger segments if DMA MAP layer can merge the segments

When the max_segs of an mmc host is smaller than 512, the mmc
subsystem tries to use 512 segments if the DMA mapping layer can
merge the segments, and the mmc subsystem then exposes this
capability to the block layer via blk_queue_can_use_dma_map_merging().
Signed-off-by: default avatarYoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Reviewed-by: default avatarUlf Hansson <ulf.hansson@linaro.org>
Reviewed-by: default avatarSimon Horman <horms+renesas@verge.net.au>
Signed-off-by: default avatarChristoph Hellwig <hch@lst.de>
parent 45147fb5
...@@ -21,6 +21,8 @@ ...@@ -21,6 +21,8 @@
#include "card.h" #include "card.h"
#include "host.h" #include "host.h"
#define MMC_DMA_MAP_MERGE_SEGMENTS 512
static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq) static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{ {
/* Allow only 1 DCMD at a time */ /* Allow only 1 DCMD at a time */
...@@ -193,6 +195,12 @@ static void mmc_queue_setup_discard(struct request_queue *q, ...@@ -193,6 +195,12 @@ static void mmc_queue_setup_discard(struct request_queue *q,
blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
} }
/*
 * Return the number of scatterlist segments to use for this host: the
 * enlarged MMC_DMA_MAP_MERGE_SEGMENTS when the DMA mapping layer can
 * merge segments for this device, otherwise the host's native limit.
 */
static unsigned int mmc_get_max_segments(struct mmc_host *host)
{
	if (host->can_dma_map_merge)
		return MMC_DMA_MAP_MERGE_SEGMENTS;

	return host->max_segs;
}
/** /**
* mmc_init_request() - initialize the MMC-specific per-request data * mmc_init_request() - initialize the MMC-specific per-request data
* @q: the request queue * @q: the request queue
...@@ -206,7 +214,7 @@ static int __mmc_init_request(struct mmc_queue *mq, struct request *req, ...@@ -206,7 +214,7 @@ static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
struct mmc_card *card = mq->card; struct mmc_card *card = mq->card;
struct mmc_host *host = card->host; struct mmc_host *host = card->host;
mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp); mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), gfp);
if (!mq_rq->sg) if (!mq_rq->sg)
return -ENOMEM; return -ENOMEM;
...@@ -362,12 +370,22 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) ...@@ -362,12 +370,22 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH); blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
blk_queue_max_hw_sectors(mq->queue, blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512)); min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs); if (host->can_dma_map_merge)
WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
mmc_dev(host)),
"merging was advertised but not possible");
blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
if (mmc_card_mmc(card)) if (mmc_card_mmc(card))
block_size = card->ext_csd.data_sector_size; block_size = card->ext_csd.data_sector_size;
blk_queue_logical_block_size(mq->queue, block_size); blk_queue_logical_block_size(mq->queue, block_size);
/*
 * When blk_queue_can_use_dma_map_merging() succeeds, it calls
 * blk_queue_virt_boundary() on the queue, so the mmc core must not
 * also call blk_queue_max_segment_size() in that case.
 */
if (!host->can_dma_map_merge)
blk_queue_max_segment_size(mq->queue, blk_queue_max_segment_size(mq->queue,
round_down(host->max_seg_size, block_size)); round_down(host->max_seg_size, block_size));
...@@ -418,6 +436,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card) ...@@ -418,6 +436,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
mq->tag_set.cmd_size = sizeof(struct mmc_queue_req); mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
mq->tag_set.driver_data = mq; mq->tag_set.driver_data = mq;
/*
 * blk_mq_alloc_tag_set() invokes .init_request() of mmc_mq_ops, so
 * host->can_dma_map_merge must be set before that call so that
 * mmc_get_max_segments() returns the correct segment count.
 */
if (host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
dma_get_merge_boundary(mmc_dev(host)))
host->can_dma_map_merge = 1;
else
host->can_dma_map_merge = 0;
ret = blk_mq_alloc_tag_set(&mq->tag_set); ret = blk_mq_alloc_tag_set(&mq->tag_set);
if (ret) if (ret)
return ret; return ret;
......
...@@ -396,6 +396,7 @@ struct mmc_host { ...@@ -396,6 +396,7 @@ struct mmc_host {
unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
unsigned int use_blk_mq:1; /* use blk-mq */ unsigned int use_blk_mq:1; /* use blk-mq */
unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */ unsigned int retune_crc_disable:1; /* don't trigger retune upon crc */
unsigned int can_dma_map_merge:1; /* merging can be used */
int rescan_disable; /* disable card detection */ int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */ int rescan_entered; /* used with nonremovable devices */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment