Commit 81196976 authored by Adrian Hunter, committed by Ulf Hansson

mmc: block: Add blk-mq support

Define and use a blk-mq queue. Discards and flushes are processed
synchronously, but reads and writes asynchronously. In order to support
slow DMA unmapping, DMA unmapping is not done until after the next request
is started. That means the request is not completed until then. If there is
no next request then the completion is done by queued work.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
parent c3d53d0d
This diff is collapsed. (The collapsed file is drivers/mmc/core/block.c, which implements the blk-mq issue and completion helpers declared in block.h below.)
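Since the block.c changes are not shown, here is a minimal sketch of the deferred-completion scheme the commit message describes: start the next transfer first, then unmap and complete the previous request, so a slow DMA unmap overlaps with the new transfer. The sketch_* helpers are hypothetical; mmc_post_req() is the existing core hook that performs the unmap (made available earlier in this series), and complete_req, complete_lock and complete_work are the new struct mmc_queue fields added in queue.h below.

/*
 * Sketch only, not the commit's code. sketch_start_transfer() stands in
 * for the real issue path in block.c.
 */
static void sketch_post_and_complete(struct mmc_queue *mq,
				     struct request *prev)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(prev);

	mmc_post_req(mq->card->host, &mqrq->brq.mrq, 0); /* slow DMA unmap */
	blk_mq_complete_request(prev);      /* only now end the request */
}

static void sketch_issue_rw(struct mmc_queue *mq, struct request *next)
{
	sketch_start_transfer(mq, next);    /* hypothetical helper */

	/*
	 * The previous request stayed uncompleted (and DMA-mapped) until
	 * here, so its unmap overlaps with the transfer just started.
	 */
	mutex_lock(&mq->complete_lock);
	if (mq->complete_req) {
		sketch_post_and_complete(mq, mq->complete_req);
		mq->complete_req = NULL;
	}
	mutex_unlock(&mq->complete_lock);
}

/*
 * With no further request to piggy-back on, queued work finishes the
 * last one. This is also why mmc_cleanup_queue() below flushes
 * mq->complete_work.
 */
static void sketch_complete_work(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    complete_work);

	mutex_lock(&mq->complete_lock);
	if (mq->complete_req) {
		sketch_post_and_complete(mq, mq->complete_req);
		mq->complete_req = NULL;
	}
	mutex_unlock(&mq->complete_lock);
}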
drivers/mmc/core/block.h
@@ -7,4 +7,13 @@ struct request;
void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
enum mmc_issued;
enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req);
void mmc_blk_mq_complete(struct request *req);
struct work_struct;
void mmc_blk_mq_complete_work(struct work_struct *work);
#endif
drivers/mmc/core/queue.c
@@ -22,6 +22,7 @@
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"
/*
* Prepare a MMC request. This just filters out odd stuff.
@@ -34,10 +35,25 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
return BLKPREP_KILL;
req->rq_flags |= RQF_DONTPREP;
req_to_mmc_queue_req(req)->retries = 0;
return BLKPREP_OK;
}
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
return MMC_ISSUE_ASYNC;
return MMC_ISSUE_SYNC;
}
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
bool reserved)
{
return BLK_EH_RESET_TIMER;
}
static int mmc_queue_thread(void *d)
{
struct mmc_queue *mq = d;
@@ -154,11 +170,10 @@ static void mmc_queue_setup_discard(struct request_queue *q,
* @req: the request
* @gfp: memory allocation policy
*/
static int mmc_init_request(struct request_queue *q, struct request *req,
gfp_t gfp)
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
gfp_t gfp)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
struct mmc_queue *mq = q->queuedata;
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
@@ -169,6 +184,12 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
return 0;
}
static int mmc_init_request(struct request_queue *q, struct request *req,
gfp_t gfp)
{
return __mmc_init_request(q->queuedata, req, gfp);
}
static void mmc_exit_request(struct request_queue *q, struct request *req)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
@@ -177,6 +198,112 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
mq_rq->sg = NULL;
}
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
unsigned int hctx_idx, unsigned int numa_node)
{
return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}
static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
unsigned int hctx_idx)
{
struct mmc_queue *mq = set->driver_data;
mmc_exit_request(mq->queue, req);
}
/*
* We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
* will not be dispatched in parallel.
*/
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *req = bd->rq;
struct request_queue *q = req->q;
struct mmc_queue *mq = q->queuedata;
struct mmc_card *card = mq->card;
enum mmc_issue_type issue_type;
enum mmc_issued issued;
bool get_card;
int ret;
if (mmc_card_removed(mq->card)) {
req->rq_flags |= RQF_QUIET;
return BLK_STS_IOERR;
}
issue_type = mmc_issue_type(mq, req);
spin_lock_irq(q->queue_lock);
switch (issue_type) {
case MMC_ISSUE_ASYNC:
break;
default:
/*
* Timeouts are handled by mmc core, and we don't have a host
* API to abort requests, so we can't handle the timeout anyway.
* However, when the timeout happens, blk_mq_complete_request()
* no longer works (to stop the request disappearing under us).
* To avoid racing with that, set a large timeout.
*/
req->timeout = 600 * HZ;
break;
}
mq->in_flight[issue_type] += 1;
get_card = (mmc_tot_in_flight(mq) == 1);
spin_unlock_irq(q->queue_lock);
if (!(req->rq_flags & RQF_DONTPREP)) {
req_to_mmc_queue_req(req)->retries = 0;
req->rq_flags |= RQF_DONTPREP;
}
if (get_card)
mmc_get_card(card, &mq->ctx);
blk_mq_start_request(req);
issued = mmc_blk_mq_issue_rq(mq, req);
switch (issued) {
case MMC_REQ_BUSY:
ret = BLK_STS_RESOURCE;
break;
case MMC_REQ_FAILED_TO_START:
ret = BLK_STS_IOERR;
break;
default:
ret = BLK_STS_OK;
break;
}
if (issued != MMC_REQ_STARTED) {
bool put_card = false;
spin_lock_irq(q->queue_lock);
mq->in_flight[issue_type] -= 1;
if (mmc_tot_in_flight(mq) == 0)
put_card = true;
spin_unlock_irq(q->queue_lock);
if (put_card)
mmc_put_card(card, &mq->ctx);
}
return ret;
}
static const struct blk_mq_ops mmc_mq_ops = {
.queue_rq = mmc_mq_queue_rq,
.init_request = mmc_mq_init_request,
.exit_request = mmc_mq_exit_request,
.complete = mmc_blk_mq_complete,
.timeout = mmc_mq_timed_out,
};
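The .complete handler named here, mmc_blk_mq_complete(), lives in the collapsed block.c and runs once blk_mq_complete_request() fires for a request (see the sketch after the commit header). A minimal placeholder consistent with the declarations in block.h might look like the following; the retry handling is only gestured at via the retries field this commit adds to struct mmc_queue_req.

/* Sketch only; the real handler in block.c also drives retries. */
void mmc_blk_mq_complete(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);

	/* End the request with a status based on how much data moved. */
	if (mqrq->brq.data.bytes_xfered)
		blk_mq_end_request(req, BLK_STS_OK);
	else
		blk_mq_end_request(req, BLK_STS_IOERR);
}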
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -198,6 +325,70 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
/* Initialize thread_sem even if it is not used */
sema_init(&mq->thread_sem, 1);
INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
mutex_init(&mq->complete_lock);
init_waitqueue_head(&mq->wait);
}
static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
const struct blk_mq_ops *mq_ops, spinlock_t *lock)
{
int ret;
memset(&mq->tag_set, 0, sizeof(mq->tag_set));
mq->tag_set.ops = mq_ops;
mq->tag_set.queue_depth = q_depth;
mq->tag_set.numa_node = NUMA_NO_NODE;
mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
BLK_MQ_F_BLOCKING;
mq->tag_set.nr_hw_queues = 1;
mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
mq->tag_set.driver_data = mq;
ret = blk_mq_alloc_tag_set(&mq->tag_set);
if (ret)
return ret;
mq->queue = blk_mq_init_queue(&mq->tag_set);
if (IS_ERR(mq->queue)) {
ret = PTR_ERR(mq->queue);
goto free_tag_set;
}
mq->queue->queue_lock = lock;
mq->queue->queuedata = mq;
return 0;
free_tag_set:
blk_mq_free_tag_set(&mq->tag_set);
return ret;
}
/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64
static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
spinlock_t *lock)
{
int q_depth;
int ret;
q_depth = MMC_QUEUE_DEPTH;
ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
if (ret)
return ret;
blk_queue_rq_timeout(mq->queue, 60 * HZ);
mmc_setup_queue(mq, card);
return 0;
}
/**
@@ -216,6 +407,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
int ret = -ENOMEM;
mq->card = card;
if (mmc_host_use_blk_mq(host))
return mmc_mq_init(mq, card, lock);
mq->queue = blk_alloc_queue(GFP_KERNEL);
if (!mq->queue)
return -ENOMEM;
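The two lines dispatching to mmc_mq_init() are the only entry-point change: when mmc_host_use_blk_mq() (declared in host.h, newly included in queue.c above) reports true, the legacy request-thread path is bypassed entirely. That predicate is not part of this diff; presumably it reduces to a per-host flag along these lines (an assumption, shown for orientation only):

static inline bool mmc_host_use_blk_mq(struct mmc_host *host)
{
	/* Assumed: host->use_blk_mq is set elsewhere in the series,
	 * e.g. from a module parameter or Kconfig default. */
	return host->use_blk_mq;
}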
@@ -251,11 +446,70 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
return ret;
}
static void mmc_mq_queue_suspend(struct mmc_queue *mq)
{
blk_mq_quiesce_queue(mq->queue);
/*
* The host remains claimed while there are outstanding requests, so
* simply claiming and releasing here ensures there are none.
*/
mmc_claim_host(mq->card->host);
mmc_release_host(mq->card->host);
}
static void mmc_mq_queue_resume(struct mmc_queue *mq)
{
blk_mq_unquiesce_queue(mq->queue);
}
static void __mmc_queue_suspend(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
if (!mq->suspended) {
mq->suspended |= true;
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
down(&mq->thread_sem);
}
}
static void __mmc_queue_resume(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
if (mq->suspended) {
mq->suspended = false;
up(&mq->thread_sem);
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
}
void mmc_cleanup_queue(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
if (q->mq_ops) {
/*
* The legacy code handled the possibility of being suspended,
* so do that here too.
*/
if (blk_queue_quiesced(q))
blk_mq_unquiesce_queue(q);
goto out_cleanup;
}
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
@@ -268,8 +522,16 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
out_cleanup:
blk_cleanup_queue(q);
/*
* A request can be completed before the next request, potentially
* leaving a complete_work with nothing to do. Such a work item might
* still be queued at this point. Flush it.
*/
flush_work(&mq->complete_work);
mq->card = NULL;
}
@@ -284,17 +546,11 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
void mmc_queue_suspend(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
if (!mq->suspended) {
mq->suspended |= true;
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
down(&mq->thread_sem);
}
if (q->mq_ops)
mmc_mq_queue_suspend(mq);
else
__mmc_queue_suspend(mq);
}
/**
@@ -304,17 +560,11 @@ void mmc_queue_suspend(struct mmc_queue *mq)
void mmc_queue_resume(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
if (mq->suspended) {
mq->suspended = false;
up(&mq->thread_sem);
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
if (q->mq_ops)
mmc_mq_queue_resume(mq);
else
__mmc_queue_resume(mq);
}
/*
drivers/mmc/core/queue.h
@@ -8,6 +8,19 @@
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
enum mmc_issued {
MMC_REQ_STARTED,
MMC_REQ_BUSY,
MMC_REQ_FAILED_TO_START,
MMC_REQ_FINISHED,
};
enum mmc_issue_type {
MMC_ISSUE_SYNC,
MMC_ISSUE_ASYNC,
MMC_ISSUE_MAX,
};
static inline struct mmc_queue_req *req_to_mmc_queue_req(struct request *rq)
{
return blk_mq_rq_to_pdu(rq);
@@ -57,12 +70,15 @@ struct mmc_queue_req {
int drv_op_result;
void *drv_op_data;
unsigned int ioc_count;
int retries;
};
struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
struct mmc_ctx ctx;
struct blk_mq_tag_set tag_set;
bool suspended;
bool asleep;
struct mmc_blk_data *blkdata;
@@ -74,6 +90,14 @@ struct mmc_queue {
* associated mmc_queue_req data.
*/
int qcnt;
int in_flight[MMC_ISSUE_MAX];
bool rw_wait;
bool waiting;
wait_queue_head_t wait;
struct request *complete_req;
struct mutex complete_lock;
struct work_struct complete_work;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -84,4 +108,12 @@ extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req);
static inline int mmc_tot_in_flight(struct mmc_queue *mq)
{
return mq->in_flight[MMC_ISSUE_SYNC] +
mq->in_flight[MMC_ISSUE_ASYNC];
}
#endif
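The new in_flight[] counters and the mmc_tot_in_flight() helper imply a matching decrement on the completion side, again in the collapsed block.c. A sketch of that bookkeeping, mirroring the get_card logic shown in mmc_mq_queue_rq() above (the helper name is hypothetical):

static void sketch_dec_in_flight(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;
	bool put_card;

	spin_lock_irqsave(q->queue_lock, flags);
	mq->in_flight[mmc_issue_type(mq, req)] -= 1;
	put_card = (mmc_tot_in_flight(mq) == 0);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Release the host claim when the last request finishes... */
	if (put_card)
		mmc_put_card(mq->card, &mq->ctx);

	/* ...and wake an issuer sleeping on mq->wait for the previous
	 * read/write to complete. */
	wake_up(&mq->wait);
}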