Commit d589ae0d authored by Linus Torvalds

Merge tag 'for-5.18/block-2022-04-01' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Either fixes or a few additions that got missed in the initial merge
  window pull. In detail:

   - List iterator fix to avoid leaking value post loop (Jakob)

   - Off-by-one fix in the minor count (Christophe)

   - Fix for a regression in how io priority setting works for an
     exiting task (Jiri)

   - Fix a regression in this merge window with blkg_free() being called
     in an inappropriate context (Ming)

   - Misc fixes (Ming, Tom)"

* tag 'for-5.18/block-2022-04-01' of git://git.kernel.dk/linux-block:
  blk-wbt: remove wbt_track stub
  block: use dedicated list iterator variable
  block: Fix the maximum minor value is blk_alloc_ext_minor()
  block: restore the old set_task_ioprio() behaviour wrt PF_EXITING
  block: avoid calling blkg_free() in atomic context
  lib/sbitmap: allocate sb->map via kvzalloc_node
parents 3b1509f2 8d7829eb
@@ -65,19 +65,12 @@ static bool blkcg_policy_enabled(struct request_queue *q,
 	return pol && test_bit(pol->plid, q->blkcg_pols);
 }
 
-/**
- * blkg_free - free a blkg
- * @blkg: blkg to free
- *
- * Free @blkg which may be partially allocated.
- */
-static void blkg_free(struct blkcg_gq *blkg)
+static void blkg_free_workfn(struct work_struct *work)
 {
+	struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+					     free_work);
 	int i;
 
-	if (!blkg)
-		return;
-
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (blkg->pd[i])
 			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
@@ -89,6 +82,25 @@ static void blkg_free(struct blkcg_gq *blkg)
 	kfree(blkg);
 }
 
+/**
+ * blkg_free - free a blkg
+ * @blkg: blkg to free
+ *
+ * Free @blkg which may be partially allocated.
+ */
+static void blkg_free(struct blkcg_gq *blkg)
+{
+	if (!blkg)
+		return;
+
+	/*
+	 * Both ->pd_free_fn() and request queue's release handler may
+	 * sleep, so free us by scheduling one work func
+	 */
+	INIT_WORK(&blkg->free_work, blkg_free_workfn);
+	schedule_work(&blkg->free_work);
+}
+
 static void __blkg_release(struct rcu_head *rcu)
 {
 	struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);

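The comment in the new blkg_free() is the heart of this fix: the final blkg reference can be dropped from atomic (RCU callback) context, while ->pd_free_fn() and the request queue release handler may sleep, so the actual teardown is deferred to a workqueue. A minimal sketch of the same defer-to-work pattern, with hypothetical names (struct foo, foo_free), not the blk-cgroup code itself:

	#include <linux/workqueue.h>
	#include <linux/slab.h>

	struct foo {
		struct work_struct free_work;
		/* ... payload whose release path may need to sleep ... */
	};

	static void foo_free_workfn(struct work_struct *work)
	{
		struct foo *f = container_of(work, struct foo, free_work);

		/* Runs in process context: calling functions that sleep is fine. */
		kfree(f);
	}

	static void foo_free(struct foo *f)
	{
		if (!f)
			return;
		/* Safe from atomic context: only queue the real work. */
		INIT_WORK(&f->free_work, foo_free_workfn);
		schedule_work(&f->free_work);
	}
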
@@ -280,7 +280,6 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 
 		task_lock(task);
 		if (task->flags & PF_EXITING) {
-			err = -ESRCH;
 			kmem_cache_free(iocontext_cachep, ioc);
 			goto out;
 		}
@@ -292,7 +291,7 @@ int set_task_ioprio(struct task_struct *task, int ioprio)
 	task->io_context->ioprio = ioprio;
 out:
 	task_unlock(task);
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(set_task_ioprio);

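Dropping the -ESRCH restores the pre-regression behaviour: set_task_ioprio() reports success when the target task is already exiting rather than failing the call. That matters for callers that apply a priority across several tasks and should not abort because one of them is going away. An illustrative caller sketch, kernel context assumed (tasks[] and n are hypothetical):

	int apply_ioprio(struct task_struct **tasks, int n, int ioprio)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			ret = set_task_ioprio(tasks[i], ioprio);
			if (ret)
				return ret;	/* with the regression, an exiting task tripped this */
		}
		return 0;
	}
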
@@ -4462,21 +4462,28 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
 	return true;
 }
 
-static void blk_mq_elv_switch_back(struct list_head *head,
-		struct request_queue *q)
+static struct blk_mq_qe_pair *blk_lookup_qe_pair(struct list_head *head,
+		struct request_queue *q)
 {
 	struct blk_mq_qe_pair *qe;
-	struct elevator_type *t = NULL;
 
 	list_for_each_entry(qe, head, node)
-		if (qe->q == q) {
-			t = qe->type;
-			break;
-		}
+		if (qe->q == q)
+			return qe;
 
-	if (!t)
-		return;
+	return NULL;
+}
+
+static void blk_mq_elv_switch_back(struct list_head *head,
+		struct request_queue *q)
+{
+	struct blk_mq_qe_pair *qe;
+	struct elevator_type *t;
+
+	qe = blk_lookup_qe_pair(head, q);
+	if (!qe)
+		return;
+	t = qe->type;
 	list_del(&qe->node);
 	kfree(qe);

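This is the list-iterator fix from the pull request: after list_for_each_entry() runs to completion without a break, the iterator no longer points at a real element (it is computed from the list head), so using it past the loop leaks a bogus value. The refactor confines the iterator to a small lookup helper and returns the match explicitly. A generic sketch of the safe form (struct item, "key" and the field names are illustrative):

	#include <linux/list.h>

	struct item {
		struct list_head node;
		int key;
	};

	/* Only dereference the iterator inside the loop body; report a miss
	 * with NULL instead of letting the caller touch the iterator. */
	static struct item *lookup(struct list_head *head, int key)
	{
		struct item *pos;

		list_for_each_entry(pos, head, node)
			if (pos->key == key)
				return pos;	/* safe: match found while iterating */

		return NULL;			/* caller checks for "not found" */
	}
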
@@ -101,9 +101,6 @@ u64 wbt_default_latency_nsec(struct request_queue *);
 
 #else
 
-static inline void wbt_track(struct request *rq, enum wbt_flags flags)
-{
-}
 static inline int wbt_init(struct request_queue *q)
 {
 	return -EINVAL;

@@ -335,7 +335,7 @@ int blk_alloc_ext_minor(void)
 {
 	int idx;
 
-	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT, GFP_KERNEL);
+	idx = ida_alloc_range(&ext_devt_ida, 0, NR_EXT_DEVT - 1, GFP_KERNEL);
 	if (idx == -ENOSPC)
 		return -EBUSY;
 	return idx;

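The one-character change is an off-by-one fix: ida_alloc_range() treats its max argument as inclusive, so passing NR_EXT_DEVT allowed one more minor than intended. A generic sketch of the semantics (my_ida and N are illustrative):

	#include <linux/idr.h>

	#define N 128	/* illustrative capacity: at most N IDs, 0..N-1 */

	static DEFINE_IDA(my_ida);

	int alloc_id(void)
	{
		/* ida_alloc_range(ida, min, max, gfp) returns an ID in
		 * [min, max], max inclusive, so the upper bound is N - 1. */
		int idx = ida_alloc_range(&my_ida, 0, N - 1, GFP_KERNEL);

		if (idx == -ENOSPC)	/* all N IDs are in use */
			return -EBUSY;
		return idx;
	}
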
@@ -95,7 +95,10 @@ struct blkcg_gq {
 	spinlock_t			async_bio_lock;
 	struct bio_list			async_bios;
-	struct work_struct		async_bio_work;
+	union {
+		struct work_struct	async_bio_work;
+		struct work_struct	free_work;
+	};
 
 	atomic_t			use_delay;
 	atomic64_t			delay_nsec;

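Placing free_work in a union with async_bio_work keeps struct blkcg_gq at its previous size; the two work items are presumably never queued at the same time, since the free path only runs once the blkg is being torn down. The general form of the trick, with illustrative names:

	#include <linux/workqueue.h>

	struct obj {
		union {
			struct work_struct io_work;	/* used while the object is live */
			struct work_struct free_work;	/* used only during teardown */
		};
	};
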
@@ -174,7 +174,7 @@ static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
 
 static inline void sbitmap_free(struct sbitmap *sb)
 {
 	free_percpu(sb->alloc_hint);
-	kfree(sb->map);
+	kvfree(sb->map);
 	sb->map = NULL;
 }

@@ -110,7 +110,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 		sb->alloc_hint = NULL;
 	}
 
-	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
+	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
 	if (!sb->map) {
 		free_percpu(sb->alloc_hint);
 		return -ENOMEM;

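sb->map scales with the sbitmap depth, and a large kcalloc_node() can fail when physically contiguous memory is fragmented; kvzalloc_node() falls back to vmalloc for big sizes, and the matching sbitmap_free() change above uses kvfree(), which releases either kind of allocation. A small illustrative sketch of the pairing (alloc_map/free_map are hypothetical helpers):

	#include <linux/slab.h>

	/* Allocate a possibly-large, zeroed array on a given NUMA node.
	 * kvzalloc_node() uses kmalloc for small sizes and falls back to
	 * vmalloc for large ones. */
	static unsigned long *alloc_map(unsigned int nr, int node)
	{
		return kvzalloc_node(nr * sizeof(unsigned long), GFP_KERNEL, node);
	}

	static void free_map(unsigned long *map)
	{
		kvfree(map);	/* never plain kfree() for kvmalloc-family memory */
	}
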