Commit 0fbfd125 authored by Adrian Hunter, committed by Ulf Hansson

mmc: block: Remove code no longer needed after the switch to blk-mq

Remove code no longer needed after the switch to blk-mq.
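For orientation, the removed code is the whole legacy request_fn-based dispatch path: the mmc_queue_thread() kthread, mmc_request_fn(), the thread_sem semaphore, the qcnt in-flight counter, and the suspended/asleep flags. None of this is needed once requests are dispatched through blk-mq, where the block core calls the driver's .queue_rq() callback for each request instead of waking a driver-owned thread. A minimal sketch of that generic blk-mq pattern (hypothetical names, not code from this driver):

#include <linux/blk-mq.h>

/*
 * Minimal sketch of blk-mq dispatch (hypothetical names, not taken
 * from this driver): the block core invokes .queue_rq() directly,
 * so no fetch-and-dispatch kthread or request_fn is required.
 */
static blk_status_t sketch_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct request *req = bd->rq;

        blk_mq_start_request(req);
        /* hand the request to the hardware here */
        return BLK_STS_OK;
}

static const struct blk_mq_ops sketch_mq_ops = {
        .queue_rq = sketch_queue_rq,
};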
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
parent 1bec43a3
(Part of this commit's diff is collapsed and is not shown below.)
drivers/mmc/core/block.h
@@ -5,8 +5,6 @@
 struct mmc_queue;
 struct request;
 
-void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req);
-
 void mmc_blk_cqe_recovery(struct mmc_queue *mq);
 
 enum mmc_issued;
...
drivers/mmc/core/queue.c
@@ -24,22 +24,6 @@
 #include "card.h"
 #include "host.h"
 
-/*
- * Prepare a MMC request. This just filters out odd stuff.
- */
-static int mmc_prep_request(struct request_queue *q, struct request *req)
-{
-        struct mmc_queue *mq = q->queuedata;
-
-        if (mq && mmc_card_removed(mq->card))
-                return BLKPREP_KILL;
-
-        req->rq_flags |= RQF_DONTPREP;
-        req_to_mmc_queue_req(req)->retries = 0;
-
-        return BLKPREP_OK;
-}
-
 static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
 {
         /* Allow only 1 DCMD at a time */
@@ -181,86 +165,6 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
         blk_mq_run_hw_queues(q, true);
 }
 
-static int mmc_queue_thread(void *d)
-{
-        struct mmc_queue *mq = d;
-        struct request_queue *q = mq->queue;
-        struct mmc_context_info *cntx = &mq->card->host->context_info;
-
-        current->flags |= PF_MEMALLOC;
-
-        down(&mq->thread_sem);
-        do {
-                struct request *req;
-
-                spin_lock_irq(q->queue_lock);
-                set_current_state(TASK_INTERRUPTIBLE);
-                req = blk_fetch_request(q);
-                mq->asleep = false;
-                cntx->is_waiting_last_req = false;
-                cntx->is_new_req = false;
-                if (!req) {
-                        /*
-                         * Dispatch queue is empty so set flags for
-                         * mmc_request_fn() to wake us up.
-                         */
-                        if (mq->qcnt)
-                                cntx->is_waiting_last_req = true;
-                        else
-                                mq->asleep = true;
-                }
-                spin_unlock_irq(q->queue_lock);
-
-                if (req || mq->qcnt) {
-                        set_current_state(TASK_RUNNING);
-                        mmc_blk_issue_rq(mq, req);
-                        cond_resched();
-                } else {
-                        if (kthread_should_stop()) {
-                                set_current_state(TASK_RUNNING);
-                                break;
-                        }
-                        up(&mq->thread_sem);
-                        schedule();
-                        down(&mq->thread_sem);
-                }
-        } while (1);
-        up(&mq->thread_sem);
-
-        return 0;
-}
-
-/*
- * Generic MMC request handler. This is called for any queue on a
- * particular host. When the host is not busy, we look for a request
- * on any queue on this host, and attempt to issue it. This may
- * not be the queue we were asked to process.
- */
-static void mmc_request_fn(struct request_queue *q)
-{
-        struct mmc_queue *mq = q->queuedata;
-        struct request *req;
-        struct mmc_context_info *cntx;
-
-        if (!mq) {
-                while ((req = blk_fetch_request(q)) != NULL) {
-                        req->rq_flags |= RQF_QUIET;
-                        __blk_end_request_all(req, BLK_STS_IOERR);
-                }
-                return;
-        }
-
-        cntx = &mq->card->host->context_info;
-
-        if (cntx->is_waiting_last_req) {
-                cntx->is_new_req = true;
-                wake_up_interruptible(&cntx->wait);
-        }
-
-        if (mq->asleep)
-                wake_up_process(mq->thread);
-}
-
 static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
 {
         struct scatterlist *sg;
@@ -311,12 +215,6 @@ static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
         return 0;
 }
 
-static int mmc_init_request(struct request_queue *q, struct request *req,
-                            gfp_t gfp)
-{
-        return __mmc_init_request(q->queuedata, req, gfp);
-}
-
 static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
         struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
@@ -469,9 +367,6 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
         blk_queue_max_segments(mq->queue, host->max_segs);
         blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
-        /* Initialize thread_sem even if it is not used */
-        sema_init(&mq->thread_sem, 1);
-
         INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
         INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
@@ -559,51 +454,15 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                    spinlock_t *lock, const char *subname)
 {
         struct mmc_host *host = card->host;
-        int ret = -ENOMEM;
 
         mq->card = card;
         mq->use_cqe = host->cqe_enabled;
 
-        if (mq->use_cqe || mmc_host_use_blk_mq(host))
-                return mmc_mq_init(mq, card, lock);
-
-        mq->queue = blk_alloc_queue(GFP_KERNEL);
-        if (!mq->queue)
-                return -ENOMEM;
-        mq->queue->queue_lock = lock;
-        mq->queue->request_fn = mmc_request_fn;
-        mq->queue->init_rq_fn = mmc_init_request;
-        mq->queue->exit_rq_fn = mmc_exit_request;
-        mq->queue->cmd_size = sizeof(struct mmc_queue_req);
-        mq->queue->queuedata = mq;
-        mq->qcnt = 0;
-        ret = blk_init_allocated_queue(mq->queue);
-        if (ret) {
-                blk_cleanup_queue(mq->queue);
-                return ret;
-        }
-
-        blk_queue_prep_rq(mq->queue, mmc_prep_request);
-
-        mmc_setup_queue(mq, card);
-
-        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
-                host->index, subname ? subname : "");
-        if (IS_ERR(mq->thread)) {
-                ret = PTR_ERR(mq->thread);
-                goto cleanup_queue;
-        }
-
-        return 0;
-
-cleanup_queue:
-        blk_cleanup_queue(mq->queue);
-        return ret;
+        return mmc_mq_init(mq, card, lock);
 }
 
-static void mmc_mq_queue_suspend(struct mmc_queue *mq)
+void mmc_queue_suspend(struct mmc_queue *mq)
 {
         blk_mq_quiesce_queue(mq->queue);
@@ -615,71 +474,22 @@ static void mmc_mq_queue_suspend(struct mmc_queue *mq)
         mmc_release_host(mq->card->host);
 }
 
-static void mmc_mq_queue_resume(struct mmc_queue *mq)
+void mmc_queue_resume(struct mmc_queue *mq)
 {
         blk_mq_unquiesce_queue(mq->queue);
 }
 
-static void __mmc_queue_suspend(struct mmc_queue *mq)
-{
-        struct request_queue *q = mq->queue;
-        unsigned long flags;
-
-        if (!mq->suspended) {
-                mq->suspended |= true;
-
-                spin_lock_irqsave(q->queue_lock, flags);
-                blk_stop_queue(q);
-                spin_unlock_irqrestore(q->queue_lock, flags);
-
-                down(&mq->thread_sem);
-        }
-}
-
-static void __mmc_queue_resume(struct mmc_queue *mq)
-{
-        struct request_queue *q = mq->queue;
-        unsigned long flags;
-
-        if (mq->suspended) {
-                mq->suspended = false;
-
-                up(&mq->thread_sem);
-
-                spin_lock_irqsave(q->queue_lock, flags);
-                blk_start_queue(q);
-                spin_unlock_irqrestore(q->queue_lock, flags);
-        }
-}
-
 void mmc_cleanup_queue(struct mmc_queue *mq)
 {
         struct request_queue *q = mq->queue;
-        unsigned long flags;
 
-        if (q->mq_ops) {
-                /*
-                 * The legacy code handled the possibility of being suspended,
-                 * so do that here too.
-                 */
-                if (blk_queue_quiesced(q))
-                        blk_mq_unquiesce_queue(q);
-                goto out_cleanup;
-        }
-
-        /* Make sure the queue isn't suspended, as that will deadlock */
-        mmc_queue_resume(mq);
-
-        /* Then terminate our worker thread */
-        kthread_stop(mq->thread);
-
-        /* Empty the queue */
-        spin_lock_irqsave(q->queue_lock, flags);
-        q->queuedata = NULL;
-        blk_start_queue(q);
-        spin_unlock_irqrestore(q->queue_lock, flags);
-
-out_cleanup:
+        /*
+         * The legacy code handled the possibility of being suspended,
+         * so do that here too.
+         */
+        if (blk_queue_quiesced(q))
+                blk_mq_unquiesce_queue(q);
+
         blk_cleanup_queue(q);
 
         /*
@@ -692,38 +502,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
         mq->card = NULL;
 }
 
-/**
- * mmc_queue_suspend - suspend a MMC request queue
- * @mq: MMC queue to suspend
- *
- * Stop the block request queue, and wait for our thread to
- * complete any outstanding requests. This ensures that we
- * won't suspend while a request is being processed.
- */
-void mmc_queue_suspend(struct mmc_queue *mq)
-{
-        struct request_queue *q = mq->queue;
-
-        if (q->mq_ops)
-                mmc_mq_queue_suspend(mq);
-        else
-                __mmc_queue_suspend(mq);
-}
-
-/**
- * mmc_queue_resume - resume a previously suspended MMC request queue
- * @mq: MMC queue to resume
- */
-void mmc_queue_resume(struct mmc_queue *mq)
-{
-        struct request_queue *q = mq->queue;
-
-        if (q->mq_ops)
-                mmc_mq_queue_resume(mq);
-        else
-                __mmc_queue_resume(mq);
-}
-
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
...
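Note that mmc_queue_suspend() and mmc_queue_resume() above now reduce to blk-mq quiescing, which replaces the legacy blk_stop_queue()/thread_sem handshake. A short sketch of those generic primitives (shown for orientation, not taken from this driver):

#include <linux/blk-mq.h>

/*
 * Sketch: while a queue is quiesced the block core makes no further
 * .queue_rq() calls, and blk_mq_quiesce_queue() waits for dispatches
 * already in progress to finish before returning.
 */
static void sketch_suspend(struct request_queue *q)
{
        blk_mq_quiesce_queue(q);
}

static void sketch_resume(struct request_queue *q)
{
        blk_mq_unquiesce_queue(q);
}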
drivers/mmc/core/queue.h
@@ -34,7 +34,6 @@ static inline struct request *mmc_queue_req_to_req(struct mmc_queue_req *mqr)
         return blk_mq_rq_from_pdu(mqr);
 }
 
-struct task_struct;
 struct mmc_blk_data;
 struct mmc_blk_ioc_data;
@@ -44,7 +43,6 @@ struct mmc_blk_request {
         struct mmc_command      cmd;
         struct mmc_command      stop;
         struct mmc_data         data;
-        int                     retune_retry_done;
 };
 
 /**
@@ -66,7 +64,6 @@ enum mmc_drv_op {
 struct mmc_queue_req {
         struct mmc_blk_request  brq;
         struct scatterlist      *sg;
-        struct mmc_async_req    areq;
         enum mmc_drv_op         drv_op;
         int                     drv_op_result;
         void                    *drv_op_data;
@@ -76,22 +73,10 @@
 struct mmc_queue {
         struct mmc_card         *card;
-        struct task_struct      *thread;
-        struct semaphore        thread_sem;
         struct mmc_ctx          ctx;
         struct blk_mq_tag_set   tag_set;
-        bool                    suspended;
-        bool                    asleep;
         struct mmc_blk_data     *blkdata;
         struct request_queue    *queue;
-        /*
-         * FIXME: this counter is not a very reliable way of keeping
-         * track of how many requests that are ongoing. Switch to just
-         * letting the block core keep track of requests and per-request
-         * associated mmc_queue_req data.
-         */
-        int                     qcnt;
-
         int                     in_flight[MMC_ISSUE_MAX];
         unsigned int            cqe_busy;
 #define MMC_CQE_DCMD_BUSY       BIT(0)
...
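The fields dropped from struct mmc_queue were all bookkeeping for the legacy thread, and the removed FIXME is resolved exactly as it suggested: blk-mq itself tracks outstanding requests and embeds the per-request mmc_queue_req as a payload sized by tag_set.cmd_size. A minimal sketch of that per-request-data pattern (generic blk-mq usage with hypothetical names):

#include <linux/blk-mq.h>

/*
 * Sketch: with set->cmd_size = sizeof(struct sketch_pdu), blk-mq
 * allocates the driver's per-request data alongside each request,
 * so the driver needs no qcnt-style counter of its own.
 */
struct sketch_pdu {
        int retries;
};

static struct sketch_pdu *sketch_req_to_pdu(struct request *req)
{
        /* the pdu lives immediately after the request structure */
        return blk_mq_rq_to_pdu(req);
}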