Commit a92c9ab6 authored by Linus Torvalds

Merge tag 'block-6.4-2023-05-26' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:
 "A few fixes for the storage side of things:

   - Fix bio caching condition for passthrough IO (Anuj)

   - Fix end-of-device check for zero-sized devices (Christoph)

   - Update Paolo's email address

   - NVMe pull request via Keith with a single quirk addition

   - Fix regression in how wbt enablement is done (Yu)

   - Fix race in active queue accounting (Tian)"

* tag 'block-6.4-2023-05-26' of git://git.kernel.dk/linux:
  NVMe: Add MAXIO 1602 to bogus nid list.
  block: make bio_check_eod work for zero sized devices
  block: fix bio-cache for passthru IO
  block, bfq: update Paolo's address in maintainer list
  blk-mq: fix race condition in active queue accounting
  blk-wbt: fix that wbt can't be disabled by default
parents 6fae9129 9491d01f

--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3536,7 +3536,7 @@ F: Documentation/filesystems/befs.rst
 F: fs/befs/
 
 BFQ I/O SCHEDULER
-M: Paolo Valente <paolo.valente@linaro.org>
+M: Paolo Valente <paolo.valente@unimore.it>
 M: Jens Axboe <axboe@kernel.dk>
 L: linux-block@vger.kernel.org
 S: Maintained

--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -520,7 +520,7 @@ static inline int bio_check_eod(struct bio *bio)
         sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
         unsigned int nr_sectors = bio_sectors(bio);
 
-        if (nr_sectors && maxsector &&
+        if (nr_sectors &&
             (nr_sectors > maxsector ||
              bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
                 pr_info_ratelimited("%s: attempt to access beyond end of device\n"
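
The change drops the old "maxsector &&" guard: on a zero-sized device maxsector is 0, so the guard used to skip the bounds check entirely and any access sailed through. Checking nr_sectors > maxsector before the subtraction also keeps the unsigned arithmetic from underflowing. A standalone sketch of the corrected check follows; the types and bio_in_bounds() are stand-ins for illustration, not the kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Returns true if [start, start + nr_sectors) lies within the device. */
static bool bio_in_bounds(sector_t start, uint32_t nr_sectors,
                          sector_t maxsector)
{
        if (!nr_sectors)                /* empty bios are always fine */
                return true;
        /*
         * Testing nr_sectors > maxsector first means the subtraction
         * below cannot underflow; with the old "maxsector &&" guard a
         * zero-sized device skipped the check and accepted any offset.
         */
        if (nr_sectors > maxsector ||
            start > maxsector - nr_sectors)
                return false;
        return true;
}

int main(void)
{
        printf("%d\n", bio_in_bounds(0, 8, 0));     /* 0: zero-sized device rejects IO */
        printf("%d\n", bio_in_bounds(120, 8, 128)); /* 1: exactly fits */
        printf("%d\n", bio_in_bounds(121, 8, 128)); /* 0: one sector past the end */
        return 0;
}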

--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -248,7 +248,7 @@ static struct bio *blk_rq_map_bio_alloc(struct request *rq,
 {
         struct bio *bio;
 
-        if (rq->cmd_flags & REQ_ALLOC_CACHE) {
+        if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
                 bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
                                        &fs_bio_set);
                 if (!bio)
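
Passthrough IO could previously take a cached bio for any nr_vecs; the fix only uses the cached fast path when the vectors fit in the bio's inline array (BIO_INLINE_VECS), since larger requests need a separately allocated vector array that the recycling path is not set up to handle. A userspace sketch of that general pattern, with illustrative names (struct buf, INLINE_VECS) rather than the kernel's, and a single-threaded free list standing in for the per-cpu bio cache:

#include <stdlib.h>

#define INLINE_VECS 4                   /* stand-in for BIO_INLINE_VECS */

struct vec { void *base; size_t len; };

struct buf {
        struct buf *next;               /* free-list linkage when cached */
        int nr_vecs;
        struct vec *vecs;               /* inline_vecs or a heap array */
        struct vec inline_vecs[INLINE_VECS];
};

static struct buf *cache;               /* single-threaded free list */

static struct buf *buf_alloc(int nr_vecs)
{
        struct buf *b;

        /* Fast path: recycled objects only ever carry inline vectors. */
        if (nr_vecs <= INLINE_VECS && cache) {
                b = cache;
                cache = b->next;
                b->nr_vecs = nr_vecs;
                b->vecs = b->inline_vecs;
                return b;
        }

        b = malloc(sizeof(*b));
        if (!b)
                return NULL;
        b->nr_vecs = nr_vecs;
        if (nr_vecs <= INLINE_VECS) {
                b->vecs = b->inline_vecs;
        } else {
                b->vecs = malloc(sizeof(*b->vecs) * (size_t)nr_vecs);
                if (!b->vecs) {
                        free(b);
                        return NULL;
                }
        }
        return b;
}

static void buf_free(struct buf *b)
{
        if (b->vecs != b->inline_vecs) {
                /* External vector array: recycling here would leak it. */
                free(b->vecs);
                free(b);
                return;
        }
        b->next = cache;                /* safe to recycle: all state inline */
        cache = b;
}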

--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -39,16 +39,20 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
         unsigned int users;
 
+        /*
+         * calling test_bit() prior to test_and_set_bit() is intentional,
+         * it avoids dirtying the cacheline if the queue is already active.
+         */
         if (blk_mq_is_shared_tags(hctx->flags)) {
                 struct request_queue *q = hctx->queue;
 
-                if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+                if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
+                    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                         return;
-                set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags);
         } else {
-                if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+                if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
+                    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                         return;
-                set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);
         }
 
         users = atomic_inc_return(&hctx->tags->active_queues);
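
The race: the old test_bit()/set_bit() pair was not atomic, so two submitters could both observe the bit clear, both fall through, and both increment active_queues for the same queue. The fix lets test_and_set_bit() pick a single winner, while keeping the plain test_bit() in front so already-active submitters never write the cacheline. A minimal userspace rendering of the same pattern using C11 atomics in place of the kernel bitops (mark_active_once and the names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

#define ACTIVE_BIT (1u << 0)

static _Atomic unsigned int flags;
static _Atomic int active_queues;

/* Returns true for exactly one caller, no matter how many race. */
static bool mark_active_once(void)
{
        /* Cheap read first: already-active callers don't dirty the line. */
        if (atomic_load(&flags) & ACTIVE_BIT)
                return false;
        /* The atomic read-modify-write decides the single winner. */
        if (atomic_fetch_or(&flags, ACTIVE_BIT) & ACTIVE_BIT)
                return false;
        atomic_fetch_add(&active_queues, 1);    /* counted exactly once */
        return true;
}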

--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -730,14 +730,16 @@ void wbt_enable_default(struct gendisk *disk)
 {
         struct request_queue *q = disk->queue;
         struct rq_qos *rqos;
-        bool disable_flag = q->elevator &&
-                    test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags);
+        bool enable = IS_ENABLED(CONFIG_BLK_WBT_MQ);
+
+        if (q->elevator &&
+            test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags))
+                enable = false;
 
         /* Throttling already enabled? */
         rqos = wbt_rq_qos(q);
         if (rqos) {
-                if (!disable_flag &&
-                    RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
+                if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
                         RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
                 return;
         }
@@ -746,7 +748,7 @@ void wbt_enable_default(struct gendisk *disk)
         if (!blk_queue_registered(q))
                 return;
 
-        if (queue_is_mq(q) && !disable_flag)
+        if (queue_is_mq(q) && enable)
                 wbt_init(disk);
 }
 EXPORT_SYMBOL_GPL(wbt_enable_default);
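
The regression: wbt_enable_default() derived its decision solely from the elevator's ELEVATOR_FLAG_DISABLE_WBT, so a kernel built with CONFIG_BLK_WBT_MQ disabled could still end up (re)enabling writeback throttling. The fix seeds the decision from the compile-time default and lets the elevator flag act purely as a veto. A minimal sketch of that decision order, where CONFIG_WBT_DEFAULT is a hypothetical 0/1 macro standing in for the kernel's IS_ENABLED(CONFIG_BLK_WBT_MQ):

#include <stdbool.h>

#ifndef CONFIG_WBT_DEFAULT
#define CONFIG_WBT_DEFAULT 1            /* build-time default: on */
#endif

struct elevator { bool disable_wbt; };

static bool wbt_should_enable(const struct elevator *e)
{
        bool enable = CONFIG_WBT_DEFAULT;       /* start from the config */

        if (e && e->disable_wbt)                /* runtime flag can only veto */
                enable = false;
        return enable;
}

With the old logic the config default never entered the picture: absent the elevator flag, the function behaved as if throttling were always wanted.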

--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3424,6 +3424,8 @@ static const struct pci_device_id nvme_id_table[] = {
                 .driver_data = NVME_QUIRK_BOGUS_NID, },
         { PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
                 .driver_data = NVME_QUIRK_BOGUS_NID, },
+        { PCI_DEVICE(0x1e4B, 0x1602),   /* MAXIO MAP1602 */
+                .driver_data = NVME_QUIRK_BOGUS_NID, },
         { PCI_DEVICE(0x1cc1, 0x5350),   /* ADATA XPG GAMMIX S50 */
                 .driver_data = NVME_QUIRK_BOGUS_NID, },
         { PCI_DEVICE(0x1dbe, 0x5236),   /* ADATA XPG GAMMIX S70 */
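
NVME_QUIRK_BOGUS_NID tells the driver to ignore the namespace identifiers this controller reports, since they are not reliably unique. The quirk table itself is ordinary data-driven matching on PCI vendor/device IDs; a self-contained sketch of that lookup follows, with illustrative types (in the kernel the matching is done by the PCI core, not the driver):

#include <stdint.h>
#include <stdio.h>

struct quirk_entry {
        uint16_t vendor, device;
        unsigned long driver_data;
};

#define QUIRK_BOGUS_NID (1ul << 0)      /* ignore reported namespace IDs */

static const struct quirk_entry quirk_table[] = {
        { 0x1e4B, 0x1202, QUIRK_BOGUS_NID },    /* MAXIO MAP1202 */
        { 0x1e4B, 0x1602, QUIRK_BOGUS_NID },    /* MAXIO MAP1602 (added here) */
        { 0x1cc1, 0x5350, QUIRK_BOGUS_NID },    /* ADATA XPG GAMMIX S50 */
        { 0, 0, 0 },                            /* sentinel */
};

static unsigned long lookup_quirks(uint16_t vendor, uint16_t device)
{
        for (const struct quirk_entry *e = quirk_table; e->vendor; e++)
                if (e->vendor == vendor && e->device == device)
                        return e->driver_data;
        return 0;
}

int main(void)
{
        printf("quirks: %#lx\n", lookup_quirks(0x1e4B, 0x1602));
        return 0;
}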