Commit 5dfcc139 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'block-5.6-2020-03-07' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Here are a few fixes that should go into this release. This contains:

   - Revert of a bad bcache patch from this merge window

   - Removed unused function (Daniel)

   - Fixup for the blktrace fix from Jan from this release (Cengiz)

   - Fix of deeper level bfqq overwrite in BFQ (Carlo)"

* tag 'block-5.6-2020-03-07' of git://git.kernel.dk/linux-block:
  block, bfq: fix overwrite of bfq_group pointer in bfq_find_set_group()
  blktrace: fix dereference after null check
  Revert "bcache: ignore pending signals when creating gc and allocator thread"
  block: Remove unused kblockd_schedule_work_on()
parents 6f784a31 14afc593
...@@ -610,12 +610,13 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, ...@@ -610,12 +610,13 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
*/ */
entity = &bfqg->entity; entity = &bfqg->entity;
for_each_entity(entity) { for_each_entity(entity) {
bfqg = container_of(entity, struct bfq_group, entity); struct bfq_group *curr_bfqg = container_of(entity,
if (bfqg != bfqd->root_group) { struct bfq_group, entity);
parent = bfqg_parent(bfqg); if (curr_bfqg != bfqd->root_group) {
parent = bfqg_parent(curr_bfqg);
if (!parent) if (!parent)
parent = bfqd->root_group; parent = bfqd->root_group;
bfq_group_set_parent(bfqg, parent); bfq_group_set_parent(curr_bfqg, parent);
} }
} }
......
...@@ -1663,12 +1663,6 @@ int kblockd_schedule_work(struct work_struct *work) ...@@ -1663,12 +1663,6 @@ int kblockd_schedule_work(struct work_struct *work)
} }
EXPORT_SYMBOL(kblockd_schedule_work); EXPORT_SYMBOL(kblockd_schedule_work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work)
{
return queue_work_on(cpu, kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work_on);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
unsigned long delay) unsigned long delay)
{ {
......
...@@ -67,7 +67,6 @@ ...@@ -67,7 +67,6 @@
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/random.h> #include <linux/random.h>
#include <linux/sched/signal.h>
#include <trace/events/bcache.h> #include <trace/events/bcache.h>
#define MAX_OPEN_BUCKETS 128 #define MAX_OPEN_BUCKETS 128
...@@ -734,21 +733,8 @@ int bch_open_buckets_alloc(struct cache_set *c) ...@@ -734,21 +733,8 @@ int bch_open_buckets_alloc(struct cache_set *c)
int bch_cache_allocator_start(struct cache *ca) int bch_cache_allocator_start(struct cache *ca)
{ {
struct task_struct *k; struct task_struct *k = kthread_run(bch_allocator_thread,
ca, "bcache_allocator");
/*
* In case previous btree check operation occupies too many
* system memory for bcache btree node cache, and the
* registering process is selected by OOM killer. Here just
* ignore the SIGKILL sent by OOM killer if there is, to
* avoid kthread_run() being failed by pending signals. The
* bcache registering process will exit after the registration
* done.
*/
if (signal_pending(current))
flush_signals(current);
k = kthread_run(bch_allocator_thread, ca, "bcache_allocator");
if (IS_ERR(k)) if (IS_ERR(k))
return PTR_ERR(k); return PTR_ERR(k);
......
...@@ -34,7 +34,6 @@ ...@@ -34,7 +34,6 @@
#include <linux/random.h> #include <linux/random.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/sched/clock.h> #include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/rculist.h> #include <linux/rculist.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <trace/events/bcache.h> #include <trace/events/bcache.h>
...@@ -1914,18 +1913,6 @@ static int bch_gc_thread(void *arg) ...@@ -1914,18 +1913,6 @@ static int bch_gc_thread(void *arg)
int bch_gc_thread_start(struct cache_set *c) int bch_gc_thread_start(struct cache_set *c)
{ {
/*
* In case previous btree check operation occupies too many
* system memory for bcache btree node cache, and the
* registering process is selected by OOM killer. Here just
* ignore the SIGKILL sent by OOM killer if there is, to
* avoid kthread_run() being failed by pending signals. The
* bcache registering process will exit after the registration
* done.
*/
if (signal_pending(current))
flush_signals(current);
c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc"); c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
return PTR_ERR_OR_ZERO(c->gc_thread); return PTR_ERR_OR_ZERO(c->gc_thread);
} }
......
...@@ -1494,7 +1494,6 @@ static inline void put_dev_sector(Sector p) ...@@ -1494,7 +1494,6 @@ static inline void put_dev_sector(Sector p)
} }
int kblockd_schedule_work(struct work_struct *work); int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \ #define MODULE_ALIAS_BLOCKDEV(major,minor) \
......
...@@ -1896,8 +1896,11 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, ...@@ -1896,8 +1896,11 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
} }
ret = 0; ret = 0;
if (bt == NULL) if (bt == NULL) {
ret = blk_trace_setup_queue(q, bdev); ret = blk_trace_setup_queue(q, bdev);
bt = rcu_dereference_protected(q->blk_trace,
lockdep_is_held(&q->blk_trace_mutex));
}
if (ret == 0) { if (ret == 0) {
if (attr == &dev_attr_act_mask) if (attr == &dev_attr_act_mask)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment