Commit 7eb43d53 authored by Adrian Hunter, committed by Ulf Hansson

mmc: block: blk-mq: Stop using legacy recovery

There are only a few things the recovery needs to do. Primarily, it just
needs to:
	Determine the number of bytes transferred
	Get the card back to transfer state
	Determine whether to retry

There are also a couple of additional features:
	Reset the card before the last retry
	Read one sector at a time

The legacy code spent much effort analyzing command errors, but commands
fail fast, so it is simpler just to give all command errors the same number
of retries.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
parent 6b7a363d
...@@ -1557,9 +1557,11 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) ...@@ -1557,9 +1557,11 @@ static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq)
} }
} }
static enum mmc_blk_status __mmc_blk_err_check(struct mmc_card *card, static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
struct mmc_queue_req *mq_mrq) struct mmc_async_req *areq)
{ {
struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
areq);
struct mmc_blk_request *brq = &mq_mrq->brq; struct mmc_blk_request *brq = &mq_mrq->brq;
struct request *req = mmc_queue_req_to_req(mq_mrq); struct request *req = mmc_queue_req_to_req(mq_mrq);
int need_retune = card->host->need_retune; int need_retune = card->host->need_retune;
...@@ -1665,15 +1667,6 @@ static enum mmc_blk_status __mmc_blk_err_check(struct mmc_card *card, ...@@ -1665,15 +1667,6 @@ static enum mmc_blk_status __mmc_blk_err_check(struct mmc_card *card,
return MMC_BLK_SUCCESS; return MMC_BLK_SUCCESS;
} }
static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
struct mmc_async_req *areq)
{
struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
areq);
return __mmc_blk_err_check(card, mq_mrq);
}
static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
int disable_multi, bool *do_rel_wr_p, int disable_multi, bool *do_rel_wr_p,
bool *do_data_tag_p) bool *do_data_tag_p)
...@@ -1999,8 +1992,39 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, ...@@ -1999,8 +1992,39 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
} }
#define MMC_MAX_RETRIES 5 #define MMC_MAX_RETRIES 5
#define MMC_DATA_RETRIES 2
#define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1) #define MMC_NO_RETRIES (MMC_MAX_RETRIES + 1)
/*
 * Send CMD12 (STOP_TRANSMISSION) to return the card to transfer state.
 * @card:    the card to stop
 * @timeout: busy timeout programmed into cmd.busy_timeout
 *           (NOTE(review): presumably milliseconds, as the caller derives it
 *           from mmc_blk_data_timeout_ms() — confirm)
 * Returns the result of mmc_wait_for_cmd() (0 on success, negative errno
 * otherwise); the core is asked to retry the command up to 5 times.
 */
static int mmc_blk_send_stop(struct mmc_card *card, unsigned int timeout)
{
struct mmc_command cmd = {
.opcode = MMC_STOP_TRANSMISSION,
/* Valid in both SPI and native modes; AC = addressed command, no data */
.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC,
/* Some hosts wait for busy anyway, so provide a busy timeout */
.busy_timeout = timeout,
};
return mmc_wait_for_cmd(card->host, &cmd, 5);
}
/*
 * Recovery helper: try to get the card back to "tran" (transfer) state.
 * Sends STOP_TRANSMISSION, then polls the card with card_busy_detect()
 * for up to the request's data timeout. Re-tuning is held across the
 * whole sequence (mmc_retune_hold_now/mmc_retune_release) so a re-tune
 * cannot be inserted between the recovery commands.
 *
 * The return value of mmc_blk_send_stop() is intentionally ignored —
 * NOTE(review): presumably because card_busy_detect() will report the
 * card's final state regardless — confirm against card_busy_detect().
 * Returns the result of card_busy_detect() (0 if the card is ready).
 */
static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_blk_request *brq = &mqrq->brq;
/* Timeout scaled to this request's data transfer size/speed */
unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data);
int err;
mmc_retune_hold_now(card->host);
mmc_blk_send_stop(card, timeout);
err = card_busy_detect(card, timeout, false, req, NULL);
mmc_retune_release(card->host);
return err;
}
#define MMC_READ_SINGLE_RETRIES 2 #define MMC_READ_SINGLE_RETRIES 2
/* Single sector read during recovery */ /* Single sector read during recovery */
...@@ -2012,7 +2036,6 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) ...@@ -2012,7 +2036,6 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
struct mmc_host *host = card->host; struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK; blk_status_t error = BLK_STS_OK;
int retries = 0; int retries = 0;
unsigned int timeout = mmc_blk_data_timeout_ms(host, mrq->data);
do { do {
u32 status; u32 status;
...@@ -2027,12 +2050,8 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) ...@@ -2027,12 +2050,8 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
goto error_exit; goto error_exit;
if (!mmc_host_is_spi(host) && if (!mmc_host_is_spi(host) &&
R1_CURRENT_STATE(status) != R1_STATE_TRAN) { !mmc_blk_in_tran_state(status)) {
u32 stop_status = 0; err = mmc_blk_fix_state(card, req);
bool gen_err = false;
err = send_stop(card, timeout, req, &gen_err,
&stop_status);
if (err) if (err)
goto error_exit; goto error_exit;
} }
...@@ -2062,22 +2081,45 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req) ...@@ -2062,22 +2081,45 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
mqrq->retries = MMC_MAX_RETRIES - 1; mqrq->retries = MMC_MAX_RETRIES - 1;
} }
static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
return !!brq->mrq.sbc;
}
static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
{
return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
}
/*
* Check for errors the host controller driver might not have seen such as
* response mode errors or invalid card state.
*/
static bool mmc_blk_status_error(struct request *req, u32 status)
{ {
int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_blk_request *brq = &mqrq->brq; struct mmc_blk_request *brq = &mqrq->brq;
struct mmc_blk_data *md = mq->blkdata; struct mmc_queue *mq = req->q->queuedata;
struct mmc_card *card = mq->card; u32 stop_err_bits;
static enum mmc_blk_status status;
brq->retune_retry_done = mqrq->retries; if (mmc_host_is_spi(mq->card->host))
return 0;
status = __mmc_blk_err_check(card, mqrq); stop_err_bits = mmc_blk_stop_err_bits(brq);
mmc_retune_release(card->host); return brq->cmd.resp[0] & CMD_ERRORS ||
brq->stop.resp[0] & stop_err_bits ||
status & stop_err_bits ||
(rq_data_dir(req) == WRITE && !mmc_blk_in_tran_state(status));
}
/* static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq)
{
return !brq->sbc.error && !brq->cmd.error &&
!(brq->cmd.resp[0] & CMD_ERRORS);
}
/*
* Requests are completed by mmc_blk_mq_complete_rq() which sets simple * Requests are completed by mmc_blk_mq_complete_rq() which sets simple
* policy: * policy:
* 1. A request that has transferred at least some data is considered * 1. A request that has transferred at least some data is considered
...@@ -2093,107 +2135,93 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) ...@@ -2093,107 +2135,93 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
* 3. try to reset the card * 3. try to reset the card
* 4. read one sector at a time * 4. read one sector at a time
*/ */
switch (status) { static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
case MMC_BLK_SUCCESS: {
case MMC_BLK_PARTIAL: int type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
/* Reset success, and accept bytes_xfered */ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
mmc_blk_reset_success(md, type); struct mmc_blk_request *brq = &mqrq->brq;
break; struct mmc_blk_data *md = mq->blkdata;
case MMC_BLK_CMD_ERR: struct mmc_card *card = mq->card;
/* u32 status;
* For SD cards, get bytes written, but do not accept
* bytes_xfered if that fails. For MMC cards accept
* bytes_xfered. Then try to reset. If reset fails then
* error out the remaining request, otherwise retry
* once (N.B mmc_blk_reset() will not succeed twice in a
* row).
*/
if (mmc_card_sd(card)) {
u32 blocks; u32 blocks;
int err; int err;
err = mmc_sd_num_wr_blocks(card, &blocks);
if (err)
brq->data.bytes_xfered = 0;
else
brq->data.bytes_xfered = blocks << 9;
}
if (mmc_blk_reset(md, card->host, type))
mqrq->retries = MMC_NO_RETRIES;
else
mqrq->retries = MMC_MAX_RETRIES - 1;
break;
case MMC_BLK_RETRY:
/* /*
* Do not accept bytes_xfered, but retry up to 5 times, * Some errors the host driver might not have seen. Set the number of
* otherwise same as abort. * bytes transferred to zero in that case.
*/ */
err = __mmc_send_status(card, &status, 0);
if (err || mmc_blk_status_error(req, status))
brq->data.bytes_xfered = 0; brq->data.bytes_xfered = 0;
if (mqrq->retries < MMC_MAX_RETRIES)
break; mmc_retune_release(card->host);
/* Fall through */
case MMC_BLK_ABORT:
/* /*
* Do not accept bytes_xfered, but try to reset. If * Try again to get the status. This also provides an opportunity for
* reset succeeds, try once more, otherwise error out * re-tuning.
* the request.
*/ */
brq->data.bytes_xfered = 0; if (err)
if (mmc_blk_reset(md, card->host, type)) err = __mmc_send_status(card, &status, 0);
mqrq->retries = MMC_NO_RETRIES;
else
mqrq->retries = MMC_MAX_RETRIES - 1;
break;
case MMC_BLK_DATA_ERR: {
int err;
/* /*
* Do not accept bytes_xfered, but try to reset. If * Nothing more to do after the number of bytes transferred has been
* reset succeeds, try once more. If reset fails with * updated and there is no card.
* ENODEV which means the partition is wrong, then error */
* out the request. Otherwise attempt to read one sector if (err && mmc_detect_card_removed(card->host))
* at a time. return;
/* Try to get back to "tran" state */
if (!mmc_host_is_spi(mq->card->host) &&
(err || !mmc_blk_in_tran_state(status)))
err = mmc_blk_fix_state(mq->card, req);
/*
* Special case for SD cards where the card might record the number of
* blocks written.
*/ */
if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) &&
rq_data_dir(req) == WRITE) {
if (mmc_sd_num_wr_blocks(card, &blocks))
brq->data.bytes_xfered = 0; brq->data.bytes_xfered = 0;
err = mmc_blk_reset(md, card->host, type); else
if (!err) { brq->data.bytes_xfered = blocks << 9;
mqrq->retries = MMC_MAX_RETRIES - 1;
break;
} }
if (err == -ENODEV) {
/* Reset if the card is in a bad state */
if (!mmc_host_is_spi(mq->card->host) &&
err && mmc_blk_reset(md, card->host, type)) {
pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
mqrq->retries = MMC_NO_RETRIES; mqrq->retries = MMC_NO_RETRIES;
break; return;
}
/* Fall through */
} }
case MMC_BLK_ECC_ERR:
/* /*
* Do not accept bytes_xfered. If reading more than one * If anything was done, just return and if there is anything remaining
* sector, try reading one sector at a time. * on the request it will get requeued.
*/ */
brq->data.bytes_xfered = 0; if (brq->data.bytes_xfered)
return;
/* Reset before last retry */
if (mqrq->retries + 1 == MMC_MAX_RETRIES)
mmc_blk_reset(md, card->host, type);
/* Command errors fail fast, so use all MMC_MAX_RETRIES */
if (brq->sbc.error || brq->cmd.error)
return;
/* Reduce the remaining retries for data errors */
if (mqrq->retries < MMC_MAX_RETRIES - MMC_DATA_RETRIES) {
mqrq->retries = MMC_MAX_RETRIES - MMC_DATA_RETRIES;
return;
}
/* FIXME: Missing single sector read for large sector size */ /* FIXME: Missing single sector read for large sector size */
if (brq->data.blocks > 1 && !mmc_large_sector(card)) { if (!mmc_large_sector(card) && rq_data_dir(req) == READ &&
/* Redo read one sector at a time */ brq->data.blocks > 1) {
pr_warn("%s: retrying using single block read\n", /* Read one sector at a time */
req->rq_disk->disk_name);
mmc_blk_read_single(mq, req); mmc_blk_read_single(mq, req);
} else { return;
mqrq->retries = MMC_NO_RETRIES;
}
break;
case MMC_BLK_NOMEDIUM:
/* Do not accept bytes_xfered. Error out the request */
brq->data.bytes_xfered = 0;
mqrq->retries = MMC_NO_RETRIES;
break;
default:
/* Do not accept bytes_xfered. Error out the request */
brq->data.bytes_xfered = 0;
mqrq->retries = MMC_NO_RETRIES;
pr_err("%s: Unhandled return value (%d)",
req->rq_disk->disk_name, status);
break;
} }
} }
...@@ -2205,16 +2233,6 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) ...@@ -2205,16 +2233,6 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
} }
static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq)
{
return !!brq->mrq.sbc;
}
static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq)
{
return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
}
static int mmc_blk_card_busy(struct mmc_card *card, struct request *req) static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
{ {
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment