Commit 4accf5fc authored by Christoph Hellwig, committed by Jens Axboe

block: pass an explicit gfp_t to get_request

blk_old_get_request already has it at hand, and in blk_queue_bio, which
is the fast path, it is constant.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ff005a06
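
For readers skimming the diff below: the change hoists the gfp computation out of __get_request() and into its callers. What follows is a minimal userspace sketch of that pattern, not part of the commit; the BLK_MQ_REQ_NOWAIT and GFP values here are hypothetical stand-ins, not the kernel's definitions.

    #include <stdio.h>

    typedef unsigned int gfp_t;

    /* Hypothetical stand-in values; the kernel's real definitions differ. */
    #define BLK_MQ_REQ_NOWAIT	(1u << 0)
    #define GFP_ATOMIC		0x1u
    #define __GFP_DIRECT_RECLAIM	0x2u

    /* Before: the callee re-derives the allocation mask from flags on
     * every call, even when the caller already knows the answer. */
    static void get_request_before(unsigned int flags)
    {
    	gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
    			 __GFP_DIRECT_RECLAIM;
    	printf("before: gfp_mask=%#x\n", gfp_mask);
    }

    /* After: the caller passes the mask explicitly, the way
     * blk_old_get_request forwards the gfp_mask it was given and
     * blk_queue_bio passes the constant __GFP_DIRECT_RECLAIM. */
    static void get_request_after(unsigned int flags, gfp_t gfp)
    {
    	(void)flags;	/* no longer consulted for the allocation mask */
    	printf("after: gfp=%#x\n", gfp);
    }

    int main(void)
    {
    	get_request_before(BLK_MQ_REQ_NOWAIT);	/* derives GFP_ATOMIC */
    	get_request_after(0, __GFP_DIRECT_RECLAIM);
    	return 0;
    }

The per-call saving is tiny, but blk_queue_bio() is the legacy submission fast path, so the conditional disappears from every bio submitted through it.
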
block/blk-core.c:

@@ -1332,6 +1332,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
  * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
  * @flags: BLQ_MQ_REQ_* flags
+ * @gfp_mask: allocator flags
  *
  * Get a free request from @q.  This function may fail under memory
  * pressure or if @q is dead.
@@ -1341,7 +1342,7 @@ int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *__get_request(struct request_list *rl, unsigned int op,
-		struct bio *bio, blk_mq_req_flags_t flags)
+		struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp_mask)
 {
 	struct request_queue *q = rl->q;
 	struct request *rq;
@@ -1350,8 +1351,6 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
 	struct io_cq *icq = NULL;
 	const bool is_sync = op_is_sync(op);
 	int may_queue;
-	gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC :
-			 __GFP_DIRECT_RECLAIM;
 	req_flags_t rq_flags = RQF_ALLOCED;
 
 	lockdep_assert_held(q->queue_lock);
@@ -1515,6 +1514,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
  * @op: operation and flags
  * @bio: bio to allocate request for (can be %NULL)
  * @flags: BLK_MQ_REQ_* flags.
+ * @gfp: allocator flags
  *
  * Get a free request from @q.  If %BLK_MQ_REQ_NOWAIT is set in @flags,
  * this function keeps retrying under memory pressure and fails iff @q is dead.
@@ -1524,7 +1524,7 @@ static struct request *__get_request(struct request_list *rl, unsigned int op,
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
 static struct request *get_request(struct request_queue *q, unsigned int op,
-		struct bio *bio, blk_mq_req_flags_t flags)
+		struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp)
 {
 	const bool is_sync = op_is_sync(op);
 	DEFINE_WAIT(wait);
@@ -1536,7 +1536,7 @@ static struct request *get_request(struct request_queue *q, unsigned int op,
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
-	rq = __get_request(rl, op, bio, flags);
+	rq = __get_request(rl, op, bio, flags, gfp);
 	if (!IS_ERR(rq))
 		return rq;
@@ -1590,7 +1590,7 @@ static struct request *blk_old_get_request(struct request_queue *q,
 	if (ret)
 		return ERR_PTR(ret);
 	spin_lock_irq(q->queue_lock);
-	rq = get_request(q, op, NULL, flags);
+	rq = get_request(q, op, NULL, flags, gfp_mask);
 	if (IS_ERR(rq)) {
 		spin_unlock_irq(q->queue_lock);
 		blk_queue_exit(q);
@@ -2056,7 +2056,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 * Returns with the queue unlocked.
 	 */
 	blk_queue_enter_live(q);
-	req = get_request(q, bio->bi_opf, bio, 0);
+	req = get_request(q, bio->bi_opf, bio, 0, __GFP_DIRECT_RECLAIM);
 	if (IS_ERR(req)) {
 		blk_queue_exit(q);
 		__wbt_done(q->rq_wb, wb_acct);

drivers/scsi/scsi_error.c:

@@ -1933,10 +1933,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 	struct request *req;
 	struct scsi_request *rq;
 
-	/*
-	 * blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
-	 * request becomes available
-	 */
 	req = blk_get_request(sdev->request_queue, REQ_OP_SCSI_IN, 0);
 	if (IS_ERR(req))
 		return;