Commit 2220eedf authored by Konstantin Dorfman's avatar Konstantin Dorfman Committed by Chris Ball

mmc: fix async request mechanism for sequential read scenarios

When current request is running on the bus and if next request fetched
by mmcqd is NULL, mmc context (mmcqd thread) gets blocked until the
current request completes. This means that if new request comes in while
the mmcqd thread is blocked, this new request can not be prepared in
parallel to current ongoing request. This may result in delaying the new
request execution and increase its latency.

This change allows the MMC thread to be woken up on new request arrival.
Now once the MMC thread is woken up, a new request can be fetched and
prepared in parallel to the current running request which means this new
request can be started immediately after the current running request
completes.

With this change read throughput is improved by 16%.
Signed-off-by: default avatarKonstantin Dorfman <kdorfman@codeaurora.org>
Reviewed-by: default avatarSeungwon Jeon <tgih.jun@samsung.com>
Signed-off-by: default avatarChris Ball <cjb@laptop.org>
parent 369d321e
...@@ -113,17 +113,6 @@ struct mmc_blk_data { ...@@ -113,17 +113,6 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock); static DEFINE_MUTEX(open_lock);
enum mmc_blk_status {
MMC_BLK_SUCCESS = 0,
MMC_BLK_PARTIAL,
MMC_BLK_CMD_ERR,
MMC_BLK_RETRY,
MMC_BLK_ABORT,
MMC_BLK_DATA_ERR,
MMC_BLK_ECC_ERR,
MMC_BLK_NOMEDIUM,
};
module_param(perdev_minors, int, 0444); module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
...@@ -1364,8 +1353,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) ...@@ -1364,8 +1353,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
} else } else
areq = NULL; areq = NULL;
areq = mmc_start_req(card->host, areq, (int *) &status); areq = mmc_start_req(card->host, areq, (int *) &status);
if (!areq) if (!areq) {
if (status == MMC_BLK_NEW_REQUEST)
mq->flags |= MMC_QUEUE_NEW_REQUEST;
return 0; return 0;
}
mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
brq = &mq_rq->brq; brq = &mq_rq->brq;
...@@ -1438,6 +1430,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) ...@@ -1438,6 +1430,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
break; break;
case MMC_BLK_NOMEDIUM: case MMC_BLK_NOMEDIUM:
goto cmd_abort; goto cmd_abort;
default:
pr_err("%s: Unhandled return value (%d)",
req->rq_disk->disk_name, status);
goto cmd_abort;
} }
if (ret) { if (ret) {
...@@ -1472,6 +1468,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) ...@@ -1472,6 +1468,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
int ret; int ret;
struct mmc_blk_data *md = mq->data; struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card; struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
unsigned long flags;
if (req && !mq->mqrq_prev->req) if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */ /* claim host only for the first request */
...@@ -1486,6 +1484,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) ...@@ -1486,6 +1484,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
goto out; goto out;
} }
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
if (req && req->cmd_flags & REQ_DISCARD) { if (req && req->cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */ /* complete ongoing async transfer before issuing discard */
if (card->host->areq) if (card->host->areq)
...@@ -1501,11 +1500,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) ...@@ -1501,11 +1500,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
mmc_blk_issue_rw_rq(mq, NULL); mmc_blk_issue_rw_rq(mq, NULL);
ret = mmc_blk_issue_flush(mq, req); ret = mmc_blk_issue_flush(mq, req);
} else { } else {
if (!req && host->areq) {
spin_lock_irqsave(&host->context_info.lock, flags);
host->context_info.is_waiting_last_req = true;
spin_unlock_irqrestore(&host->context_info.lock, flags);
}
ret = mmc_blk_issue_rw_rq(mq, req); ret = mmc_blk_issue_rw_rq(mq, req);
} }
out: out:
if (!req) if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST))
/* release host only when there are no more requests */ /* release host only when there are no more requests */
mmc_release_host(card->host); mmc_release_host(card->host);
return ret; return ret;
......
...@@ -22,7 +22,6 @@ ...@@ -22,7 +22,6 @@
#define MMC_QUEUE_BOUNCESZ 65536 #define MMC_QUEUE_BOUNCESZ 65536
#define MMC_QUEUE_SUSPENDED (1 << 0)
#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH) #define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
...@@ -72,6 +71,10 @@ static int mmc_queue_thread(void *d) ...@@ -72,6 +71,10 @@ static int mmc_queue_thread(void *d)
set_current_state(TASK_RUNNING); set_current_state(TASK_RUNNING);
cmd_flags = req ? req->cmd_flags : 0; cmd_flags = req ? req->cmd_flags : 0;
mq->issue_fn(mq, req); mq->issue_fn(mq, req);
if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
continue; /* fetch again */
}
/* /*
* Current request becomes previous request * Current request becomes previous request
...@@ -113,6 +116,8 @@ static void mmc_request_fn(struct request_queue *q) ...@@ -113,6 +116,8 @@ static void mmc_request_fn(struct request_queue *q)
{ {
struct mmc_queue *mq = q->queuedata; struct mmc_queue *mq = q->queuedata;
struct request *req; struct request *req;
unsigned long flags;
struct mmc_context_info *cntx;
if (!mq) { if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) { while ((req = blk_fetch_request(q)) != NULL) {
...@@ -122,7 +127,20 @@ static void mmc_request_fn(struct request_queue *q) ...@@ -122,7 +127,20 @@ static void mmc_request_fn(struct request_queue *q)
return; return;
} }
if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) cntx = &mq->card->host->context_info;
if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
/*
* New MMC request arrived when MMC thread may be
* blocked on the previous request to be complete
* with no current request fetched
*/
spin_lock_irqsave(&cntx->lock, flags);
if (cntx->is_waiting_last_req) {
cntx->is_new_req = true;
wake_up_interruptible(&cntx->wait);
}
spin_unlock_irqrestore(&cntx->lock, flags);
} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
wake_up_process(mq->thread); wake_up_process(mq->thread);
} }
......
...@@ -27,6 +27,9 @@ struct mmc_queue { ...@@ -27,6 +27,9 @@ struct mmc_queue {
struct task_struct *thread; struct task_struct *thread;
struct semaphore thread_sem; struct semaphore thread_sem;
unsigned int flags; unsigned int flags;
#define MMC_QUEUE_SUSPENDED (1 << 0)
#define MMC_QUEUE_NEW_REQUEST (1 << 1)
int (*issue_fn)(struct mmc_queue *, struct request *); int (*issue_fn)(struct mmc_queue *, struct request *);
void *data; void *data;
struct request_queue *queue; struct request_queue *queue;
......
...@@ -321,6 +321,7 @@ int mmc_add_card(struct mmc_card *card) ...@@ -321,6 +321,7 @@ int mmc_add_card(struct mmc_card *card)
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card); mmc_add_card_debugfs(card);
#endif #endif
mmc_init_context_info(card->host);
ret = device_add(&card->dev); ret = device_add(&card->dev);
if (ret) if (ret)
......
...@@ -319,11 +319,44 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception) ...@@ -319,11 +319,44 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
} }
EXPORT_SYMBOL(mmc_start_bkops); EXPORT_SYMBOL(mmc_start_bkops);
/*
* mmc_wait_data_done() - done callback for data request
* @mrq: done data request
*
* Wakes up mmc context, passed as a callback to host controller driver
*/
static void mmc_wait_data_done(struct mmc_request *mrq)
{
mrq->host->context_info.is_done_rcv = true;
wake_up_interruptible(&mrq->host->context_info.wait);
}
static void mmc_wait_done(struct mmc_request *mrq) static void mmc_wait_done(struct mmc_request *mrq)
{ {
complete(&mrq->completion); complete(&mrq->completion);
} }
/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when request is completed by the card.
 * Starts data mmc request execution.
 *
 * Returns 0 on success, -ENOMEDIUM if the card has been removed.
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	mrq->done = mmc_wait_data_done;
	mrq->host = host;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		/*
		 * Signal completion immediately, mirroring what
		 * __mmc_start_req() does with complete() in the same
		 * situation: otherwise a context blocked waiting for
		 * is_done_rcv on this request would sleep forever.
		 */
		mmc_wait_data_done(mrq);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);

	return 0;
}
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{ {
init_completion(&mrq->completion); init_completion(&mrq->completion);
...@@ -337,6 +370,62 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) ...@@ -337,6 +370,62 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
return 0; return 0;
} }
/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 * @next_req: the next async request queued behind @mrq, or NULL if the
 *            caller has nothing further to issue
 *
 * Blocks MMC context till host controller will ack end of data request
 * execution or new request notification arrives from the block layer.
 * Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
				      struct mmc_request *mrq,
				      struct mmc_async_req *next_req)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	int err;
	unsigned long flags;

	while (1) {
		/*
		 * Two possible wakeup sources: the host driver's done
		 * callback (is_done_rcv) or the block layer announcing a
		 * newly arrived request (is_new_req, set in mmc_request_fn).
		 */
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));
		spin_lock_irqsave(&context_info->lock, flags);
		/* We are awake; the block layer no longer needs to prod us. */
		context_info->is_waiting_last_req = false;
		spin_unlock_irqrestore(&context_info->lock, flags);
		/*
		 * NOTE(review): is_done_rcv/is_new_req are read and cleared
		 * outside context_info->lock here; this relies on the
		 * wait-queue wakeup ordering — confirm against the locking
		 * scheme used by the setters.
		 */
		if (context_info->is_done_rcv) {
			/* Completion wins over a concurrent new-request ping. */
			context_info->is_done_rcv = false;
			context_info->is_new_req = false;
			cmd = mrq->cmd;
			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				/* No (more) retries: hand off to err_check. */
				err = host->areq->err_check(host->card,
						host->areq);
				break; /* return err */
			} else {
				/* Retry the same request on the host directly. */
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				host->ops->request(host, mrq);
				continue; /* wait for done/new event again */
			}
		} else if (context_info->is_new_req) {
			context_info->is_new_req = false;
			if (!next_req) {
				/*
				 * Nothing queued behind @mrq: bail out so the
				 * caller can fetch and prepare the new request
				 * while @mrq is still running.
				 */
				err = MMC_BLK_NEW_REQUEST;
				break; /* return err */
			}
			/* A next request is already prepared; keep waiting. */
		}
	}
	return err;
}
static void mmc_wait_for_req_done(struct mmc_host *host, static void mmc_wait_for_req_done(struct mmc_host *host,
struct mmc_request *mrq) struct mmc_request *mrq)
{ {
...@@ -426,8 +515,17 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host, ...@@ -426,8 +515,17 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
mmc_pre_req(host, areq->mrq, !host->areq); mmc_pre_req(host, areq->mrq, !host->areq);
if (host->areq) { if (host->areq) {
mmc_wait_for_req_done(host, host->areq->mrq); err = mmc_wait_for_data_req_done(host, host->areq->mrq,
err = host->areq->err_check(host->card, host->areq); areq);
if (err == MMC_BLK_NEW_REQUEST) {
if (error)
*error = err;
/*
* The previous request was not completed,
* nothing to return
*/
return NULL;
}
/* /*
* Check BKOPS urgency for each R1 response * Check BKOPS urgency for each R1 response
*/ */
...@@ -439,7 +537,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host, ...@@ -439,7 +537,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
} }
if (!err && areq) if (!err && areq)
start_err = __mmc_start_req(host, areq->mrq); start_err = __mmc_start_data_req(host, areq->mrq);
if (host->areq) if (host->areq)
mmc_post_req(host, host->areq->mrq, 0); mmc_post_req(host, host->areq->mrq, 0);
...@@ -2581,6 +2679,23 @@ int mmc_pm_notify(struct notifier_block *notify_block, ...@@ -2581,6 +2679,23 @@ int mmc_pm_notify(struct notifier_block *notify_block,
} }
#endif #endif
/**
* mmc_init_context_info() - init synchronization context
* @host: mmc host
*
* Init struct context_info needed to implement asynchronous
* request mechanism, used by mmc core, host driver and mmc requests
* supplier.
*/
void mmc_init_context_info(struct mmc_host *host)
{
spin_lock_init(&host->context_info.lock);
host->context_info.is_new_req = false;
host->context_info.is_done_rcv = false;
host->context_info.is_waiting_last_req = false;
init_waitqueue_head(&host->context_info.wait);
}
static int __init mmc_init(void) static int __init mmc_init(void)
{ {
int ret; int ret;
......
...@@ -76,5 +76,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host); ...@@ -76,5 +76,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
void mmc_add_card_debugfs(struct mmc_card *card); void mmc_add_card_debugfs(struct mmc_card *card);
void mmc_remove_card_debugfs(struct mmc_card *card); void mmc_remove_card_debugfs(struct mmc_card *card);
void mmc_init_context_info(struct mmc_host *host);
#endif #endif
...@@ -187,6 +187,18 @@ struct sdio_func_tuple; ...@@ -187,6 +187,18 @@ struct sdio_func_tuple;
#define SDIO_MAX_FUNCS 7 #define SDIO_MAX_FUNCS 7
/*
 * Status codes returned by the MMC core's request error checking
 * (areq->err_check) and consumed by the block driver's issue path.
 */
enum mmc_blk_status {
	MMC_BLK_SUCCESS = 0,
	MMC_BLK_PARTIAL,
	MMC_BLK_CMD_ERR,
	MMC_BLK_RETRY,
	MMC_BLK_ABORT,
	MMC_BLK_DATA_ERR,
	MMC_BLK_ECC_ERR,
	MMC_BLK_NOMEDIUM,
	/*
	 * The wait for the previous request was interrupted because a new
	 * request arrived from the block layer with no next request
	 * prepared; the previous request is still running on the bus.
	 */
	MMC_BLK_NEW_REQUEST,
};
/* The number of MMC physical partitions. These consist of: /* The number of MMC physical partitions. These consist of:
* boot partitions (2), general purpose partitions (4) in MMC v4.4. * boot partitions (2), general purpose partitions (4) in MMC v4.4.
*/ */
......
...@@ -120,6 +120,7 @@ struct mmc_data { ...@@ -120,6 +120,7 @@ struct mmc_data {
s32 host_cookie; /* host private data */ s32 host_cookie; /* host private data */
}; };
struct mmc_host;
struct mmc_request { struct mmc_request {
struct mmc_command *sbc; /* SET_BLOCK_COUNT for multiblock */ struct mmc_command *sbc; /* SET_BLOCK_COUNT for multiblock */
struct mmc_command *cmd; struct mmc_command *cmd;
...@@ -128,9 +129,9 @@ struct mmc_request { ...@@ -128,9 +129,9 @@ struct mmc_request {
struct completion completion; struct completion completion;
void (*done)(struct mmc_request *);/* completion function */ void (*done)(struct mmc_request *);/* completion function */
struct mmc_host *host;
}; };
struct mmc_host;
struct mmc_card; struct mmc_card;
struct mmc_async_req; struct mmc_async_req;
......
...@@ -170,6 +170,22 @@ struct mmc_slot { ...@@ -170,6 +170,22 @@ struct mmc_slot {
void *handler_priv; void *handler_priv;
}; };
/**
 * struct mmc_context_info - synchronization details for mmc context
 * @is_done_rcv:	wake up reason was done request
 * @is_new_req:		wake up reason was new request
 * @is_waiting_last_req: mmc context waiting for single running request
 * @wait:		wait queue
 * @lock:		lock to protect data fields
 */
struct mmc_context_info {
	bool is_done_rcv;
	bool is_new_req;
	bool is_waiting_last_req;
	wait_queue_head_t wait;
	spinlock_t lock;
};
struct regulator; struct regulator;
struct mmc_supply { struct mmc_supply {
...@@ -331,6 +347,7 @@ struct mmc_host { ...@@ -331,6 +347,7 @@ struct mmc_host {
struct dentry *debugfs_root; struct dentry *debugfs_root;
struct mmc_async_req *areq; /* active async req */ struct mmc_async_req *areq; /* active async req */
struct mmc_context_info context_info; /* async synchronization info */
#ifdef CONFIG_FAIL_MMC_REQUEST #ifdef CONFIG_FAIL_MMC_REQUEST
struct fault_attr fail_mmc_request; struct fault_attr fail_mmc_request;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment