Commit d7ad0581 authored by Linus Torvalds

Merge tag 'block-6.9-20240412' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - MD pull request via Song:
       - UAF fix (Yu)

 - Avoid out-of-bounds shift in blk-iocost (Rik)

 - Fix for q->blkg_list corruption (Ming)

 - Relax virt boundary mask/size segment checking (Ming)

* tag 'block-6.9-20240412' of git://git.kernel.dk/linux:
  block: fix that blk_time_get_ns() doesn't update time after schedule
  block: allow device to have both virt_boundary_mask and max segment size
  block: fix q->blkg_list corruption during disk rebind
  blk-iocost: avoid out of bounds shift
  raid1: fix use-after-free for original bio in raid1_write_request()
parents c7adbe2e 3ec48489
block/blk-cgroup.c:

@@ -1409,6 +1409,12 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
 	return 0;
 }
 
+void blkg_init_queue(struct request_queue *q)
+{
+	INIT_LIST_HEAD(&q->blkg_list);
+	mutex_init(&q->blkcg_mutex);
+}
+
 int blkcg_init_disk(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
@@ -1416,9 +1422,6 @@ int blkcg_init_disk(struct gendisk *disk)
 	bool preloaded;
 	int ret;
 
-	INIT_LIST_HEAD(&q->blkg_list);
-	mutex_init(&q->blkcg_mutex);
-
 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
 	if (!new_blkg)
 		return -ENOMEM;
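The corruption fixed here came from blkcg_init_disk() re-running INIT_LIST_HEAD() on q->blkg_list each time a disk was bound to an existing request_queue; any blkgs still linked from the previous binding were orphaned mid-list. Moving the init into queue allocation runs it exactly once per queue lifetime. Below is a standalone sketch (plain C, not kernel code) of how re-initializing a non-empty list head corrupts it:

```c
/*
 * Standalone sketch (not kernel code) of why re-running list-head
 * initialization corrupts a non-empty list. Mirrors what happened when
 * blkcg_init_disk() re-ran INIT_LIST_HEAD(&q->blkg_list) on disk rebind
 * while blkgs from the previous binding were still linked.
 */
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void init_list_head(struct list_head *h)
{
        h->next = h;
        h->prev = h;
}

static void list_add(struct list_head *entry, struct list_head *head)
{
        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
}

int main(void)
{
        struct list_head head, a;

        init_list_head(&head);
        list_add(&a, &head);            /* list: head <-> a */

        init_list_head(&head);          /* "rebind": head now claims to be empty */

        /*
         * 'a' still points at head, but head no longer points at 'a'.
         * A later list_del(&a) would rewrite head's pointers through the
         * stale links -- the q->blkg_list corruption in miniature.
         */
        printf("head thinks empty: %d\n", head.next == &head);  /* prints 1 */
        printf("a still linked:    %d\n", a.prev == &head);     /* prints 1 */
        return 0;
}
```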
block/blk-cgroup.h:

@@ -189,6 +189,7 @@ struct blkcg_policy {
 extern struct blkcg blkcg_root;
 extern bool blkcg_debug_stats;
 
+void blkg_init_queue(struct request_queue *q);
 int blkcg_init_disk(struct gendisk *disk);
 void blkcg_exit_disk(struct gendisk *disk);
@@ -482,6 +483,7 @@ struct blkcg {
 };
 
 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline void blkg_init_queue(struct request_queue *q) { }
 static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
 static inline void blkcg_exit_disk(struct gendisk *disk) { }
 static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
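The header side follows the usual kernel config-stub pattern: a real declaration when blk-cgroup is built in, and an empty static inline otherwise, so blk_alloc_queue() can call blkg_init_queue() unconditionally. A minimal sketch of the pattern, with the hypothetical FEATURE_X standing in for CONFIG_BLK_CGROUP:

```c
/*
 * Sketch of the config-stub pattern the header change uses: one real
 * declaration when the feature is built in, one empty static inline
 * otherwise, so callers need no #ifdef of their own.
 * FEATURE_X and feature_init_queue() are hypothetical names.
 */
struct request_queue;

#ifdef FEATURE_X
void feature_init_queue(struct request_queue *q);       /* defined in feature.c */
#else
static inline void feature_init_queue(struct request_queue *q) { }     /* no-op, compiles away */
#endif
```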
block/blk-core.c:

@@ -442,6 +442,8 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
 	init_waitqueue_head(&q->mq_freeze_wq);
 	mutex_init(&q->mq_freeze_lock);
 
+	blkg_init_queue(q);
+
 	/*
 	 * Init percpu_ref in atomic mode so that it's faster to shutdown.
 	 * See blk_register_queue() for details.
@@ -1195,6 +1197,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
 	if (unlikely(!rq_list_empty(plug->cached_rq)))
 		blk_mq_free_plug_rqs(plug);
 
+	plug->cur_ktime = 0;
 	current->flags &= ~PF_BLOCK_TS;
 }
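The second hunk is the blk_time_get_ns() fix: the plug caches a timestamp so hot paths avoid repeated clock reads, and that cached value has to be dropped when the task may have slept, or every later read returns stale time. A standalone sketch of the cache-and-invalidate pattern, with clock_ns() as a stand-in for ktime_get_ns():

```c
/*
 * Standalone sketch of the cached-timestamp pattern behind
 * blk_time_get_ns(): read the clock once per plug, reuse the cached
 * value, and invalidate it (cur_ktime = 0) when the task may sleep.
 * Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct plug {
        uint64_t cur_ktime;     /* 0 means "no cached time" */
};

static uint64_t clock_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static uint64_t plug_time_get_ns(struct plug *p)
{
        if (!p->cur_ktime)
                p->cur_ktime = clock_ns();      /* read the hardware clock once */
        return p->cur_ktime;                    /* cheap cached reads after that */
}

static void plug_flush(struct plug *p)
{
        /* the fix: without this, time reads after a sleep stay stale */
        p->cur_ktime = 0;
}

int main(void)
{
        struct plug p = { 0 };
        uint64_t t1 = plug_time_get_ns(&p);
        uint64_t t2 = plug_time_get_ns(&p);     /* same cached value as t1 */
        plug_flush(&p);                         /* e.g. on schedule() */
        uint64_t t3 = plug_time_get_ns(&p);     /* fresh value again */
        printf("%d %d\n", t1 == t2, t3 >= t1);  /* prints "1 1" */
        return 0;
}
```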
block/blk-iocost.c:

@@ -1347,7 +1347,7 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
 {
 	struct ioc *ioc = iocg->ioc;
 	struct blkcg_gq *blkg = iocg_to_blkg(iocg);
-	u64 tdelta, delay, new_delay;
+	u64 tdelta, delay, new_delay, shift;
 	s64 vover, vover_pct;
 	u32 hwa;
@@ -1362,8 +1362,9 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
 	/* calculate the current delay in effect - 1/2 every second */
 	tdelta = now->now - iocg->delay_at;
-	if (iocg->delay)
-		delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
+	shift = div64_u64(tdelta, USEC_PER_SEC);
+	if (iocg->delay && shift < BITS_PER_LONG)
+		delay = iocg->delay >> shift;
 	else
 		delay = 0;
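Shifting a 64-bit value by 64 or more is undefined behavior in C, and on x86 the hardware masks the shift count, so an iocg idle for more than 63 seconds could see `iocg->delay >> shift` return the undecayed delay instead of 0. A minimal sketch of the bug and the clamp:

```c
/*
 * Minimal sketch of the out-of-bounds-shift bug. Shifting a u64 by 64 or
 * more is undefined behavior in C; x86 masks the count to 6 bits, so
 * "delay >> 64" can evaluate to delay instead of the intended 0.
 */
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG 64        /* 64-bit build assumed for the sketch */

static uint64_t decayed_delay(uint64_t delay, uint64_t elapsed_sec)
{
        /* the fixed form: treat very long idle periods as fully decayed */
        if (delay && elapsed_sec < BITS_PER_LONG)
                return delay >> elapsed_sec;    /* halve once per second */
        return 0;
}

int main(void)
{
        uint64_t delay = 1 << 20;

        printf("%llu\n", (unsigned long long)decayed_delay(delay, 3));  /* 131072 */
        printf("%llu\n", (unsigned long long)decayed_delay(delay, 64)); /* 0, no UB */
        return 0;
}
```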
block/blk-settings.c:

@@ -182,17 +182,13 @@ static int blk_validate_limits(struct queue_limits *lim)
 		return -EINVAL;
 
 	/*
-	 * Devices that require a virtual boundary do not support scatter/gather
-	 * I/O natively, but instead require a descriptor list entry for each
-	 * page (which might not be identical to the Linux PAGE_SIZE). Because
-	 * of that they are not limited by our notion of "segment size".
+	 * Stacking device may have both virtual boundary and max segment
+	 * size limit, so allow this setting now, and long-term the two
+	 * might need to move out of stacking limits since we have immutable
+	 * bvec and lower layer bio splitting is supposed to handle the two
+	 * correctly.
 	 */
-	if (lim->virt_boundary_mask) {
-		if (WARN_ON_ONCE(lim->max_segment_size &&
-				 lim->max_segment_size != UINT_MAX))
-			return -EINVAL;
-		lim->max_segment_size = UINT_MAX;
-	} else {
+	if (!lim->virt_boundary_mask) {
 		/*
 		 * The maximum segment size has an odd historic 64k default that
 		 * drivers probably should override. Just like the I/O size we
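With the WARN_ON_ONCE and the forced UINT_MAX gone, a stacking device (say, device-mapper over NVMe) may now carry both a virt_boundary_mask and a driver-set max_segment_size, and only boundary-less devices get the historic 64k default. A simplified sketch of the resulting validation shape, with the struct and constants reduced for the example:

```c
/*
 * Simplified sketch of the validation shape after the change: a device
 * may carry both virt_boundary_mask and max_segment_size, and the
 * historic 64k segment default only applies when no virtual boundary is
 * set. The struct and constants are cut down for the sketch.
 */
#include <stdio.h>

#define BLK_MAX_SEGMENT_SIZE    65536   /* historic 64k default */

struct queue_limits {
        unsigned long virt_boundary_mask;
        unsigned int max_segment_size;
};

static int validate_segment_limits(struct queue_limits *lim)
{
        if (!lim->virt_boundary_mask) {
                /* only default the segment size for boundary-less devices */
                if (!lim->max_segment_size)
                        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        }
        /* boundary devices keep whatever segment size the driver set */
        return 0;
}

int main(void)
{
        struct queue_limits nvme  = { .virt_boundary_mask = 4095,
                                      .max_segment_size = 128 * 1024 };
        struct queue_limits plain = { 0 };

        validate_segment_limits(&nvme);         /* both limits coexist now */
        validate_segment_limits(&plain);        /* gets the 64k default */
        printf("%u %u\n", nvme.max_segment_size, plain.max_segment_size);
        return 0;
}
```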
drivers/md/raid1.c:

@@ -1558,7 +1558,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			for (j = 0; j < i; j++)
 				if (r1_bio->bios[j])
 					rdev_dec_pending(conf->mirrors[j].rdev, mddev);
-			free_r1bio(r1_bio);
+			mempool_free(r1_bio, &conf->r1bio_pool);
 			allow_barrier(conf, bio->bi_iter.bi_sector);
 
 			if (bio->bi_opf & REQ_NOWAIT) {
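In this error path r1_bio->bios[] still holds bare pointers to the caller's original bio, so free_r1bio(), which puts those bios via put_all_bios(), dropped a reference the caller still needed for the bio->bi_iter.bi_sector and bio->bi_opf accesses just below; returning only the wrapper to the mempool leaves the original bio alone. A standalone sketch (plain C, hypothetical names) of that borrowed-reference bug:

```c
/*
 * Standalone sketch of the ownership bug (not kernel code): a wrapper
 * destructor that also drops references it merely borrowed frees an
 * object its caller still uses. free_r1bio() -> put_all_bios() played
 * the role of wrapper_free_deep() here.
 */
#include <stdio.h>
#include <stdlib.h>

struct bio {
        int refcount;
        long sector;
};

struct wrapper {
        struct bio *bios[2];    /* borrowed pointers to the caller's bio */
};

static void bio_put(struct bio *b)
{
        if (--b->refcount == 0)
                free(b);
}

/* buggy path: also puts the borrowed bios, like free_r1bio() did */
static void wrapper_free_deep(struct wrapper *w)
{
        for (int i = 0; i < 2; i++)
                if (w->bios[i])
                        bio_put(w->bios[i]);
        free(w);
}

/* fixed path: release only the wrapper, like mempool_free(r1_bio, ...) */
static void wrapper_free_shallow(struct wrapper *w)
{
        free(w);
}

int main(void)
{
        struct bio *bio = calloc(1, sizeof(*bio));
        struct wrapper *w = calloc(1, sizeof(*w));

        bio->refcount = 1;      /* single reference, owned by the caller */
        w->bios[0] = bio;       /* wrapper borrows it */

        wrapper_free_shallow(w);        /* the deep variant would free bio here */
        printf("%ld\n", bio->sector);   /* caller's later access stays valid */
        bio_put(bio);
        (void)wrapper_free_deep;        /* shown for contrast, deliberately unused */
        return 0;
}
```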