Commit 10f21df4 authored by Adrian Hunter, committed by Ulf Hansson

mmc: block: blk-mq: Add support for direct completion

For blk-mq, add support for completing requests directly in the ->done
callback. That means that error handling and urgent background operations
must be handled by recovery_work in that case.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
parent 8ee82bda
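
Background for the diff that follows: a host controller driver opts in to direct completion by advertising the new MMC_CAP_DONE_COMPLETE capability. The sketch below is not part of this patch; the driver name and probe routine are hypothetical, and only the capability flag and the mmc_alloc_host()/mmc_add_host()/mmc_free_host() core API are real.

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

/* Hypothetical host driver opting in to direct completion. */
static int foo_mmc_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;
        int ret;

        mmc = mmc_alloc_host(0, &pdev->dev);
        if (!mmc)
                return -ENOMEM;

        /*
         * With MMC_CAP_DONE_COMPLETE, the blk-mq path completes read/write
         * requests directly in the ->done callback; errors and urgent
         * background operations are then handled by recovery_work.
         */
        mmc->caps |= MMC_CAP_DONE_COMPLETE;

        ret = mmc_add_host(mmc);
        if (ret)
                mmc_free_host(mmc);
        return ret;
}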
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
@@ -2131,6 +2131,22 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
         }
 }
 
+static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
+{
+        mmc_blk_eval_resp_error(brq);
+
+        return brq->sbc.error || brq->cmd.error || brq->stop.error ||
+               brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
+}
+
+static inline void mmc_blk_rw_reset_success(struct mmc_queue *mq,
+                                            struct request *req)
+{
+        int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+
+        mmc_blk_reset_success(mq->blkdata, type);
+}
+
 static void mmc_blk_mq_complete_rq(struct mmc_queue *mq, struct request *req)
 {
         struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
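
For reference, the CMD_ERRORS mask consulted by mmc_blk_rq_error() above is a pre-existing define in block.c that collects the R1 card-status error bits (the R1_* bits come from include/linux/mmc/mmc.h); at the time of this patch it reads roughly as follows:

#define CMD_ERRORS                                                      \
        (R1_OUT_OF_RANGE |      /* Command argument out of range */     \
         R1_ADDRESS_ERROR |     /* Misaligned address */                \
         R1_BLOCK_LEN_ERROR |   /* Transferred block length incorrect */\
         R1_WP_VIOLATION |      /* Tried to write to protected block */ \
         R1_CARD_IS_LOCKED |    /* Card is locked */                    \
         R1_CC_ERROR |          /* Card controller error */             \
         R1_ERROR)              /* General/unknown error */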
@@ -2213,14 +2229,43 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
 
         mmc_post_req(host, mrq, 0);
 
-        blk_mq_complete_request(req);
+        /*
+         * Block layer timeouts race with completions which means the normal
+         * completion path cannot be used during recovery.
+         */
+        if (mq->in_recovery)
+                mmc_blk_mq_complete_rq(mq, req);
+        else
+                blk_mq_complete_request(req);
 
         mmc_blk_mq_dec_in_flight(mq, req);
 }
 
+void mmc_blk_mq_recovery(struct mmc_queue *mq)
+{
+        struct request *req = mq->recovery_req;
+        struct mmc_host *host = mq->card->host;
+        struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+
+        mq->recovery_req = NULL;
+        mq->rw_wait = false;
+
+        if (mmc_blk_rq_error(&mqrq->brq)) {
+                mmc_retune_hold_now(host);
+                mmc_blk_mq_rw_recovery(mq, req);
+        }
+
+        mmc_blk_urgent_bkops(mq, mqrq);
+
+        mmc_blk_mq_post_req(mq, req);
+}
+
 static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
                                          struct request **prev_req)
 {
+        if (mmc_host_done_complete(mq->card->host))
+                return;
+
         mutex_lock(&mq->complete_lock);
 
         if (!mq->complete_req)
@@ -2254,29 +2299,56 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
         struct request *req = mmc_queue_req_to_req(mqrq);
         struct request_queue *q = req->q;
         struct mmc_queue *mq = q->queuedata;
+        struct mmc_host *host = mq->card->host;
         unsigned long flags;
-        bool waiting;
 
-        /*
-         * We cannot complete the request in this context, so record that there
-         * is a request to complete, and that a following request does not need
-         * to wait (although it does need to complete complete_req first).
-         */
-        spin_lock_irqsave(q->queue_lock, flags);
-        mq->complete_req = req;
-        mq->rw_wait = false;
-        waiting = mq->waiting;
-        spin_unlock_irqrestore(q->queue_lock, flags);
+        if (!mmc_host_done_complete(host)) {
+                bool waiting;
 
-        /*
-         * If 'waiting' then the waiting task will complete this request,
-         * otherwise queue a work to do it. Note that complete_work may still
-         * race with the dispatch of a following request.
-         */
-        if (waiting)
+                /*
+                 * We cannot complete the request in this context, so record
+                 * that there is a request to complete, and that a following
+                 * request does not need to wait (although it does need to
+                 * complete complete_req first).
+                 */
+                spin_lock_irqsave(q->queue_lock, flags);
+                mq->complete_req = req;
+                mq->rw_wait = false;
+                waiting = mq->waiting;
+                spin_unlock_irqrestore(q->queue_lock, flags);
+
+                /*
+                 * If 'waiting' then the waiting task will complete this
+                 * request, otherwise queue a work to do it. Note that
+                 * complete_work may still race with the dispatch of a following
+                 * request.
+                 */
+                if (waiting)
+                        wake_up(&mq->wait);
+                else
+                        kblockd_schedule_work(&mq->complete_work);
+
+                return;
+        }
+
+        /* Take the recovery path for errors or urgent background operations */
+        if (mmc_blk_rq_error(&mqrq->brq) ||
+            mmc_blk_urgent_bkops_needed(mq, mqrq)) {
+                spin_lock_irqsave(q->queue_lock, flags);
+                mq->recovery_needed = true;
+                mq->recovery_req = req;
+                spin_unlock_irqrestore(q->queue_lock, flags);
                 wake_up(&mq->wait);
-        else
-                kblockd_schedule_work(&mq->complete_work);
+                schedule_work(&mq->recovery_work);
+                return;
+        }
+
+        mmc_blk_rw_reset_success(mq, req);
+
+        mq->rw_wait = false;
+        wake_up(&mq->wait);
+
+        mmc_blk_mq_post_req(mq, req);
 }
 
 static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
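
For orientation: mmc_blk_mq_req_done() above is the mrq->done callback that the core invokes from mmc_request_done(), typically out of the host driver's interrupt or completion handling. Below is a hypothetical host IRQ handler showing how control arrives here; only mmc_request_done() and the irq types are real kernel API, everything named foo_* is invented for illustration.

#include <linux/interrupt.h>
#include <linux/mmc/host.h>

struct foo_mmc_priv {
        struct mmc_host *mmc;
        struct mmc_request *mrq;
};

static irqreturn_t foo_mmc_irq(int irq, void *dev_id)
{
        struct foo_mmc_priv *priv = dev_id;
        struct mmc_request *mrq = priv->mrq;

        priv->mrq = NULL;

        /*
         * mmc_request_done() ends up calling mrq->done(), which for blk-mq
         * read/write requests is mmc_blk_mq_req_done(): with
         * MMC_CAP_DONE_COMPLETE set it completes the request directly, or
         * hands it to recovery_work on error or urgent background operations.
         */
        mmc_request_done(priv->mmc, mrq);

        return IRQ_HANDLED;
}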
@@ -2286,11 +2358,16 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
         bool done;
 
         /*
-         * Wait while there is another request in progress. Also indicate that
-         * there is a request waiting to start.
+         * Wait while there is another request in progress, but not if recovery
+         * is needed. Also indicate whether there is a request waiting to start.
          */
         spin_lock_irqsave(q->queue_lock, flags);
-        done = !mq->rw_wait;
+        if (mq->recovery_needed) {
+                *err = -EBUSY;
+                done = true;
+        } else {
+                done = !mq->rw_wait;
+        }
         mq->waiting = !done;
         spin_unlock_irqrestore(q->queue_lock, flags);
@@ -2334,10 +2411,12 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
         if (prev_req)
                 mmc_blk_mq_post_req(mq, prev_req);
 
-        if (err) {
+        if (err)
                 mq->rw_wait = false;
+
+        /* Release re-tuning here where there is no synchronization required */
+        if (err || mmc_host_done_complete(host))
                 mmc_retune_release(host);
-        }
 
 out_post_req:
         if (err)
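
A note on the re-tuning change above: for done-complete hosts the release can no longer happen in the ->done path, so it moves to the issue path, where no synchronization is required. In the patch itself the hold is taken in mmc_blk_mq_recovery() and released in mmc_blk_mq_issue_rw_rq(); the hypothetical helper below merely illustrates the hold/release bracketing using the same two calls that appear in the diff:

/* Hypothetical illustration of the re-tuning hold/release bracketing. */
static void foo_recover_with_retune_held(struct mmc_queue *mq,
                                         struct request *req)
{
        struct mmc_host *host = mq->card->host;

        mmc_retune_hold_now(host);      /* block re-tuning while recovering */
        mmc_blk_mq_rw_recovery(mq, req);
        mmc_retune_release(host);       /* allow re-tuning again afterwards */
}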
diff --git a/drivers/mmc/core/block.h b/drivers/mmc/core/block.h
@@ -13,6 +13,7 @@ enum mmc_issued;
 
 enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
 void mmc_blk_mq_complete(struct request *req);
+void mmc_blk_mq_recovery(struct mmc_queue *mq);
 
 struct work_struct;
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
@@ -41,6 +41,11 @@ static inline int mmc_host_cmd23(struct mmc_host *host)
         return host->caps & MMC_CAP_CMD23;
 }
 
+static inline bool mmc_host_done_complete(struct mmc_host *host)
+{
+        return host->caps & MMC_CAP_DONE_COMPLETE;
+}
+
 static inline int mmc_boot_partition_access(struct mmc_host *host)
 {
         return !(host->caps2 & MMC_CAP2_BOOTPART_NOACC);
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
@@ -165,7 +165,10 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
 
         mq->in_recovery = true;
 
-        mmc_blk_cqe_recovery(mq);
+        if (mq->use_cqe)
+                mmc_blk_cqe_recovery(mq);
+        else
+                mmc_blk_mq_recovery(mq);
 
         mq->in_recovery = false;
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
@@ -103,6 +103,7 @@ struct mmc_queue {
         bool                    waiting;
         struct work_struct      recovery_work;
         wait_queue_head_t       wait;
+        struct request          *recovery_req;
         struct request          *complete_req;
         struct mutex            complete_lock;
         struct work_struct      complete_work;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
@@ -324,6 +324,7 @@ struct mmc_host {
 #define MMC_CAP_DRIVER_TYPE_A   (1 << 23)       /* Host supports Driver Type A */
 #define MMC_CAP_DRIVER_TYPE_C   (1 << 24)       /* Host supports Driver Type C */
 #define MMC_CAP_DRIVER_TYPE_D   (1 << 25)       /* Host supports Driver Type D */
+#define MMC_CAP_DONE_COMPLETE   (1 << 27)       /* RW reqs can be completed within mmc_request_done() */
 #define MMC_CAP_CD_WAKE         (1 << 28)       /* Enable card detect wake */
 #define MMC_CAP_CMD_DURING_TFR  (1 << 29)       /* Commands during data transfer */
 #define MMC_CAP_CMD23           (1 << 30)       /* CMD23 supported. */