Commit 2383ffc4 authored by Linus Torvalds

Merge tag 'block-6.5-2023-08-19' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "Main thing here is the fix for the regression in flush handling which
  caused IO hangs/stalls for a few reporters. Hopefully that should all
  be sorted out now. Outside of that, just a few minor fixes for issues
  that were introduced in this cycle"

* tag 'block-6.5-2023-08-19' of git://git.kernel.dk/linux:
  blk-mq: release scheduler resource when request completes
  blk-crypto: dynamically allocate fallback profile
  blk-cgroup: hold queue_lock when removing blkg->q_node
  drivers/rnbd: restore sysfs interface to rnbd-client
parents aa9ea98c e5c0ca13
block/blk-cgroup.c:

@@ -136,7 +136,9 @@ static void blkg_free_workfn(struct work_struct *work)
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 	if (blkg->parent)
 		blkg_put(blkg->parent);
+	spin_lock_irq(&q->queue_lock);
 	list_del_init(&blkg->q_node);
+	spin_unlock_irq(&q->queue_lock);
 	mutex_unlock(&q->blkcg_mutex);
 	blk_put_queue(q);
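The hunk above takes q->queue_lock around the unlink, presumably because other paths walk the same blkg list under that lock, so removing a node without it could race with a concurrent walker. The same unlink-under-the-readers'-lock idiom, reduced to a minimal self-contained userspace sketch (pthreads; node_del_init() and walker() are invented names, not kernel code):

#include <pthread.h>
#include <stdio.h>

/* Toy circular doubly-linked list, in the spirit of list_del_init(). */
struct node { struct node *prev, *next; int id; };

static struct node head = { &head, &head, -1 };
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void node_add(struct node *n)
{
	pthread_mutex_lock(&list_lock);
	n->next = head.next;
	n->prev = &head;
	head.next->prev = n;
	head.next = n;
	pthread_mutex_unlock(&list_lock);
}

/* The point of the fix: unlink under the same lock the walkers take. */
static void node_del_init(struct node *n)
{
	pthread_mutex_lock(&list_lock);
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;	/* re-init so a later delete is harmless */
	pthread_mutex_unlock(&list_lock);
}

static void *walker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&list_lock);
	for (struct node *n = head.next; n != &head; n = n->next)
		printf("saw node %d\n", n->id);
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };
	pthread_t t;

	node_add(&a);
	node_add(&b);
	pthread_create(&t, NULL, walker, NULL);
	node_del_init(&a);	/* safe even if the walker runs concurrently */
	pthread_join(t, NULL);
	return 0;
}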
block/blk-crypto-fallback.c:

@@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot {
 	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
 } *blk_crypto_keyslots;
 
-static struct blk_crypto_profile blk_crypto_fallback_profile;
+static struct blk_crypto_profile *blk_crypto_fallback_profile;
 static struct workqueue_struct *blk_crypto_wq;
 static mempool_t *blk_crypto_bounce_page_pool;
 static struct bio_set crypto_bio_split;
@@ -292,7 +292,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
 	 * this bio's algorithm and key.
 	 */
-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
 					bc->bc_key, &slot);
 	if (blk_st != BLK_STS_OK) {
 		src_bio->bi_status = blk_st;
@@ -395,7 +395,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
 	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
 	 * this bio's algorithm and key.
 	 */
-	blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
 					bc->bc_key, &slot);
 	if (blk_st != BLK_STS_OK) {
 		bio->bi_status = blk_st;
@@ -499,7 +499,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
 		return false;
 	}
 
-	if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
+	if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
 					&bc->bc_key->crypto_cfg)) {
 		bio->bi_status = BLK_STS_NOTSUPP;
 		return false;
@@ -526,7 +526,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
 
 int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
 {
-	return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
+	return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
 }
 
 static bool blk_crypto_fallback_inited;
@@ -534,7 +534,6 @@ static int blk_crypto_fallback_init(void)
 {
 	int i;
 	int err;
-	struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
 
 	if (blk_crypto_fallback_inited)
 		return 0;
@@ -545,18 +544,27 @@ static int blk_crypto_fallback_init(void)
 	if (err)
 		goto out;
 
-	err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
-	if (err)
+	/* Dynamic allocation is needed because of lockdep_register_key(). */
+	blk_crypto_fallback_profile =
+		kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
+	if (!blk_crypto_fallback_profile) {
+		err = -ENOMEM;
 		goto fail_free_bioset;
+	}
+
+	err = blk_crypto_profile_init(blk_crypto_fallback_profile,
+				      blk_crypto_num_keyslots);
+	if (err)
+		goto fail_free_profile;
 	err = -ENOMEM;
 
-	profile->ll_ops = blk_crypto_fallback_ll_ops;
-	profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+	blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
+	blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
 
 	/* All blk-crypto modes have a crypto API fallback. */
 	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
-		profile->modes_supported[i] = 0xFFFFFFFF;
-	profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+		blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
+	blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
 
 	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
 					WQ_UNBOUND | WQ_HIGHPRI |
@@ -597,7 +605,9 @@ static int blk_crypto_fallback_init(void)
 fail_free_wq:
 	destroy_workqueue(blk_crypto_wq);
 fail_destroy_profile:
-	blk_crypto_profile_destroy(profile);
+	blk_crypto_profile_destroy(blk_crypto_fallback_profile);
+fail_free_profile:
+	kfree(blk_crypto_fallback_profile);
 fail_free_bioset:
 	bioset_exit(&crypto_bio_split);
 out:
block/blk-mq.c:

@@ -681,6 +681,21 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
+static void blk_mq_finish_request(struct request *rq)
+{
+	struct request_queue *q = rq->q;
+
+	if (rq->rq_flags & RQF_USE_SCHED) {
+		q->elevator->type->ops.finish_request(rq);
+		/*
+		 * For postflush request that may need to be
+		 * completed twice, we should clear this flag
+		 * to avoid double finish_request() on the rq.
+		 */
+		rq->rq_flags &= ~RQF_USE_SCHED;
+	}
+}
+
 static void __blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -707,9 +722,7 @@ void blk_mq_free_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
-	if ((rq->rq_flags & RQF_USE_SCHED) &&
-	    q->elevator->type->ops.finish_request)
-		q->elevator->type->ops.finish_request(rq);
+	blk_mq_finish_request(rq);
 
 	if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq)))
 		laptop_io_completion(q->disk->bdi);
@@ -1020,6 +1033,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 	if (blk_mq_need_time_stamp(rq))
 		__blk_mq_end_request_acct(rq, ktime_get_ns());
 
+	blk_mq_finish_request(rq);
+
 	if (rq->end_io) {
 		rq_qos_done(rq->q, rq);
 		if (rq->end_io(rq, error) == RQ_END_IO_FREE)
@@ -1074,6 +1089,8 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
 		if (iob->need_ts)
 			__blk_mq_end_request_acct(rq, now);
 
+		blk_mq_finish_request(rq);
+
 		rq_qos_done(rq->q, rq);
 
 		/*
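The core of the flush-handling fix above is that a postflush request can be completed twice, so the new blk_mq_finish_request() clears RQF_USE_SCHED after the first pass to keep the elevator's finish_request() from running a second time. Reduced to a self-contained sketch of that clear-the-flag-on-first-finish idiom (plain userspace C with invented demo_* names, not kernel code):

#include <stdio.h>

#define DEMO_USE_SCHED 0x1	/* stand-in for RQF_USE_SCHED */

struct demo_request {
	unsigned int flags;
	int finish_calls;	/* how often the scheduler hook ran */
};

/* Scheduler hook: must run at most once per request. */
static void demo_sched_finish(struct demo_request *rq)
{
	rq->finish_calls++;
}

/*
 * Mirrors the idea of blk_mq_finish_request(): finish once, then drop
 * the flag so a second completion of the same request is a no-op.
 */
static void demo_finish_request(struct demo_request *rq)
{
	if (rq->flags & DEMO_USE_SCHED) {
		demo_sched_finish(rq);
		rq->flags &= ~DEMO_USE_SCHED;
	}
}

int main(void)
{
	struct demo_request rq = { .flags = DEMO_USE_SCHED };

	/* A "postflush"-like request may be completed twice ... */
	demo_finish_request(&rq);
	demo_finish_request(&rq);

	/* ... but the scheduler hook still ran exactly once. */
	printf("finish_calls = %d\n", rq.finish_calls);	/* prints 1 */
	return 0;
}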
block/elevator.c:

@@ -499,6 +499,9 @@ void elv_unregister_queue(struct request_queue *q)
 int elv_register(struct elevator_type *e)
 {
+	/* finish request is mandatory */
+	if (WARN_ON_ONCE(!e->ops.finish_request))
+		return -EINVAL;
 	/* insert_requests and dispatch_request are mandatory */
 	if (WARN_ON_ONCE(!e->ops.insert_requests || !e->ops.dispatch_request))
 		return -EINVAL;
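With the unconditional q->elevator->type->ops.finish_request(rq) call in blk_mq_finish_request(), elv_register() now rejects an elevator that leaves finish_request unset. The same register-time validation of a mandatory ops entry, as a self-contained sketch (plain userspace C with invented demo_* names, not the kernel API):

#include <errno.h>
#include <stdio.h>

struct demo_ops {
	void (*insert)(int item);
	int  (*dispatch)(void);
	void (*finish)(int item);	/* treated as mandatory */
};

/*
 * Reject an ops table that leaves a mandatory hook unset, the same way
 * elv_register() now bails out when ops.finish_request is missing.
 */
static int demo_register(const struct demo_ops *ops)
{
	if (!ops->insert || !ops->dispatch || !ops->finish) {
		fprintf(stderr, "mandatory op missing\n");
		return -EINVAL;
	}
	return 0;
}

static void noop_insert(int item) { (void)item; }
static int  noop_dispatch(void)   { return -1; }

int main(void)
{
	/* Missing .finish: registration is refused with -EINVAL. */
	struct demo_ops broken = {
		.insert	  = noop_insert,
		.dispatch = noop_dispatch,
	};

	printf("register -> %d\n", demo_register(&broken));
	return 0;
}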
drivers/block/rnbd/rnbd-clt-sysfs.c:

@@ -25,7 +25,7 @@
 static struct device *rnbd_dev;
 
 static const struct class rnbd_dev_class = {
-	.name = "rnbd_client",
+	.name = "rnbd-client",
 };
 
 static struct kobject *rnbd_devs_kobj;