Commit 0facb892 authored by Linus Torvalds

Merge tag 'for-linus-20190118' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - block size setting fixes for loop/nbd (Jan Kara)

 - md bio_alloc_mddev() cleanup (Marcos)

 - Ensure we don't lose the REQ_INTEGRITY flag (Ming)

 - Two NVMe fixes by way of Christoph:
    - Fix NVMe IRQ calculation (Ming)
    - Uninitialized variable in nvmet-tcp (Sagi)

 - BFQ comment fix (Paolo)

 - License cleanup for recently added blk-mq-debugfs-zoned (Thomas)

* tag 'for-linus-20190118' of git://git.kernel.dk/linux-block:
  block: Cleanup license notice
  nvme-pci: fix nvme_setup_irqs()
  nvmet-tcp: fix uninitialized variable access
  block: don't lose track of REQ_INTEGRITY flag
  blockdev: Fix livelocks on loop device
  nbd: Use set_blocksize() to set device blocksize
  md: Make bio_alloc_mddev use bio_alloc_bioset
  block, bfq: fix comments on __bfq_deactivate_entity
parents 2339e91d 38197ca1
@@ -1154,15 +1154,14 @@ static void bfq_activate_requeue_entity(struct bfq_entity *entity,
 }
 
 /**
- * __bfq_deactivate_entity - deactivate an entity from its service tree.
- * @entity: the entity to deactivate.
+ * __bfq_deactivate_entity - update sched_data and service trees for
+ * entity, so as to represent entity as inactive
+ * @entity: the entity being deactivated.
  * @ins_into_idle_tree: if false, the entity will not be put into the
  *                      idle tree.
  *
- * Deactivates an entity, independently of its previous state. Must
- * be invoked only if entity is on a service tree. Extracts the entity
- * from that tree, and if necessary and allowed, puts it into the idle
- * tree.
+ * If necessary and allowed, puts entity into the idle tree. NOTE:
+ * entity may be on no tree if in service.
  */
 bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
 {
...
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2017 Western Digital Corporation or its affiliates.
- *
- * This file is released under the GPL.
  */
 
 #include <linux/blkdev.h>
...
@@ -1906,7 +1906,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
        const int is_sync = op_is_sync(bio->bi_opf);
        const int is_flush_fua = op_is_flush(bio->bi_opf);
-       struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
+       struct blk_mq_alloc_data data = { .flags = 0};
        struct request *rq;
        struct blk_plug *plug;
        struct request *same_queue_rq = NULL;
@@ -1928,6 +1928,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        rq_qos_throttle(q, bio);
 
+       data.cmd_flags = bio->bi_opf;
        rq = blk_mq_get_request(q, bio, &data);
        if (unlikely(!rq)) {
                rq_qos_cleanup(q, bio);
...
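Note: a minimal standalone sketch of the ordering problem the hunk above addresses (illustrative names and flag value only, not the kernel code). If the request's cmd_flags are copied from bio->bi_opf in the initializer, any flag that bio integrity preparation ORs in afterwards, such as REQ_INTEGRITY, never reaches the allocated request; re-reading the flags just before the request is allocated preserves it.

/* Hypothetical illustration; the helper and flag value are made up. */
#include <assert.h>

#define REQ_INTEGRITY (1u << 16)

struct alloc_data { unsigned int cmd_flags; };

static unsigned int integrity_prep(unsigned int opf)
{
        return opf | REQ_INTEGRITY;     /* prep step may add a flag */
}

int main(void)
{
        unsigned int bi_opf = 0x1;                              /* e.g. a write */
        struct alloc_data data = { .cmd_flags = bi_opf };       /* early snapshot (old code) */

        bi_opf = integrity_prep(bi_opf);
        data.cmd_flags = bi_opf;                                /* refresh after prep (the fix) */

        assert(data.cmd_flags & REQ_INTEGRITY);                 /* flag is preserved */
        return 0;
}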
@@ -288,9 +288,10 @@ static void nbd_size_update(struct nbd_device *nbd)
        blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
        set_capacity(nbd->disk, config->bytesize >> 9);
        if (bdev) {
-               if (bdev->bd_disk)
+               if (bdev->bd_disk) {
                        bd_set_size(bdev, config->bytesize);
-               else
+                       set_blocksize(bdev, config->blksize);
+               } else
                        bdev->bd_invalidated = 1;
                bdput(bdev);
        }
...
@@ -207,15 +207,10 @@ static bool create_on_open = true;
 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                            struct mddev *mddev)
 {
-       struct bio *b;
-
        if (!mddev || !bioset_initialized(&mddev->bio_set))
                return bio_alloc(gfp_mask, nr_iovecs);
 
-       b = bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
-       if (!b)
-               return NULL;
-       return b;
+       return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set);
 }
 EXPORT_SYMBOL_GPL(bio_alloc_mddev);
...
@@ -2041,14 +2041,18 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
        return ret;
 }
 
+/* irq_queues covers admin queue */
 static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
 {
        unsigned int this_w_queues = write_queues;
 
+       WARN_ON(!irq_queues);
+
        /*
-        * Setup read/write queue split
+        * Setup read/write queue split, assign admin queue one independent
+        * irq vector if irq_queues is > 1.
         */
-       if (irq_queues == 1) {
+       if (irq_queues <= 2) {
                dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
                dev->io_queues[HCTX_TYPE_READ] = 0;
                return;
@@ -2056,21 +2060,21 @@ static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
 
        /*
         * If 'write_queues' is set, ensure it leaves room for at least
-        * one read queue
+        * one read queue and one admin queue
         */
        if (this_w_queues >= irq_queues)
-               this_w_queues = irq_queues - 1;
+               this_w_queues = irq_queues - 2;
 
        /*
         * If 'write_queues' is set to zero, reads and writes will share
         * a queue set.
         */
        if (!this_w_queues) {
-               dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues;
+               dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1;
                dev->io_queues[HCTX_TYPE_READ] = 0;
        } else {
                dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
-               dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues;
+               dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1;
        }
 }
 
@@ -2095,7 +2099,7 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
                this_p_queues = nr_io_queues - 1;
                irq_queues = 1;
        } else {
-               irq_queues = nr_io_queues - this_p_queues;
+               irq_queues = nr_io_queues - this_p_queues + 1;
        }
        dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
 
@@ -2115,8 +2119,9 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
                 * If we got a failure and we're down to asking for just
                 * 1 + 1 queues, just ask for a single vector. We'll share
                 * that between the single IO queue and the admin queue.
+                * Otherwise, we assign one independent vector to admin queue.
                 */
-               if (result >= 0 && irq_queues > 1)
+               if (irq_queues > 1)
                        irq_queues = irq_sets[0] + irq_sets[1] + 1;
 
                result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
...
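Note: to make the updated arithmetic concrete, here is a standalone userspace sketch (not the driver code) that mirrors the split above. One vector is always reserved for the admin queue; the rest are divided between the default (write) and read queue sets.

/* Illustrative sketch of the IRQ vector split; compiles as plain C. */
#include <stdio.h>

static void calc_split(unsigned irq_queues, unsigned write_queues)
{
        unsigned def, rd, w = write_queues;

        if (irq_queues <= 2) {                  /* 1 admin vector + at most 1 I/O vector */
                def = 1;
                rd = 0;
        } else {
                if (w >= irq_queues)
                        w = irq_queues - 2;     /* leave room for admin + one read queue */
                if (!w) {
                        def = irq_queues - 1;   /* reads and writes share one set */
                        rd = 0;
                } else {
                        def = w;
                        rd = irq_queues - w - 1;
                }
        }
        printf("irq_queues=%u write_queues=%u -> DEFAULT=%u READ=%u (+1 admin)\n",
               irq_queues, write_queues, def, rd);
}

int main(void)
{
        calc_split(8, 3);       /* -> DEFAULT=3 READ=4 */
        calc_split(8, 0);       /* -> DEFAULT=7 READ=0 */
        calc_split(2, 4);       /* -> DEFAULT=1 READ=0, vector shared with admin */
        return 0;
}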
@@ -1089,7 +1089,7 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
 
 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
 {
-       int result;
+       int result = 0;
 
        if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
                return 0;
...
@@ -104,6 +104,20 @@ void invalidate_bdev(struct block_device *bdev)
 }
 EXPORT_SYMBOL(invalidate_bdev);
 
+static void set_init_blocksize(struct block_device *bdev)
+{
+       unsigned bsize = bdev_logical_block_size(bdev);
+       loff_t size = i_size_read(bdev->bd_inode);
+
+       while (bsize < PAGE_SIZE) {
+               if (size & bsize)
+                       break;
+               bsize <<= 1;
+       }
+       bdev->bd_block_size = bsize;
+       bdev->bd_inode->i_blkbits = blksize_bits(bsize);
+}
+
 int set_blocksize(struct block_device *bdev, int size)
 {
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
@@ -1431,18 +1445,9 @@ EXPORT_SYMBOL(check_disk_change);
 
 void bd_set_size(struct block_device *bdev, loff_t size)
 {
-       unsigned bsize = bdev_logical_block_size(bdev);
-
        inode_lock(bdev->bd_inode);
        i_size_write(bdev->bd_inode, size);
        inode_unlock(bdev->bd_inode);
-
-       while (bsize < PAGE_SIZE) {
-               if (size & bsize)
-                       break;
-               bsize <<= 1;
-       }
-       bdev->bd_block_size = bsize;
-       bdev->bd_inode->i_blkbits = blksize_bits(bsize);
 }
 EXPORT_SYMBOL(bd_set_size);
 
@@ -1519,8 +1524,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                        }
                }
 
-               if (!ret)
+               if (!ret) {
                        bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
+                       set_init_blocksize(bdev);
+               }
 
                /*
                 * If the device is invalidated, rescan partition
@@ -1555,6 +1562,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                goto out_clear;
                        }
                        bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
+                       set_init_blocksize(bdev);
                }
 
                if (bdev->bd_bdi == &noop_backing_dev_info)
...
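Note: the set_init_blocksize() loop above picks the largest power-of-two block size, capped at the page size, that still evenly divides the device size. A standalone userspace sketch of the same heuristic (PAGE_SIZE assumed to be 4096 here purely for illustration):

/* Illustrative sketch of the block-size heuristic; not the kernel helper. */
#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned init_blocksize(unsigned logical_bsize, unsigned long long size)
{
        unsigned bsize = logical_bsize;

        while (bsize < PAGE_SIZE) {
                if (size & bsize)       /* size is an odd multiple of bsize: stop growing */
                        break;
                bsize <<= 1;
        }
        return bsize;
}

int main(void)
{
        printf("%u\n", init_blocksize(512, 7168));              /* 1024: 7168 = 7 * 1024 */
        printf("%u\n", init_blocksize(512, 3ULL << 30));        /* 4096: 3 GiB is page aligned */
        return 0;
}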