Commit 3831761e authored by Jens Axboe, committed by Keith Busch

nvme: only reconfigure discard if necessary

Currently nvme reconfigures discard for every disk revalidation. This
is problematic because any O_WRONLY or O_RDWR open will trigger a
partition scan through udev/systemd, and we will reconfigure discard.
This blows away any user settings, like discard_max_bytes.

Only re-configure the user-settable settings if we need to.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
[removed redundant queue flag setting]
Signed-off-by: Keith Busch <keith.busch@intel.com>
parent 18119775
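
The heart of the change is the blk_queue_flag_test_and_set() guard in nvme_config_discard(): the discard defaults are applied only the first time discard is configured for a queue, so values a user has tuned via sysfs (such as discard_max_bytes) survive later revalidations. The short userspace sketch below only illustrates that test-and-set idiom; it is not kernel code, and the names fake_queue and configure_discard are hypothetical.

/* Minimal userspace sketch of the "apply defaults only once" idiom this
 * commit adopts.  atomic_flag_test_and_set() stands in for
 * blk_queue_flag_test_and_set(): it sets the flag and returns whether it
 * was already set, so later calls leave user-tuned limits alone.
 * fake_queue and configure_discard are illustration-only names. */
#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_queue {
	atomic_flag discard_enabled;      /* stands in for QUEUE_FLAG_DISCARD */
	unsigned int max_discard_sectors; /* stands in for the queue limit */
};

static void configure_discard(struct fake_queue *q)
{
	/* Flag already set: a prior configuration ran, keep the user's tuning. */
	if (atomic_flag_test_and_set(&q->discard_enabled))
		return;

	/* First configuration only: apply the driver defaults. */
	q->max_discard_sectors = UINT_MAX;
}

int main(void)
{
	struct fake_queue q = {
		.discard_enabled = ATOMIC_FLAG_INIT,
		.max_discard_sectors = 0,
	};

	configure_discard(&q);       /* first revalidation: defaults applied */
	q.max_discard_sectors = 512; /* user tunes the limit, e.g. via sysfs */
	configure_discard(&q);       /* later revalidation: tuning preserved */

	printf("max_discard_sectors = %u\n", q.max_discard_sectors); /* prints 512 */
	return 0;
}

In the kernel patch the same pattern is expressed with blk_queue_flag_test_and_set() on the queue flag bit, as shown in the diff below.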
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1347,13 +1347,19 @@ static void nvme_set_chunk_size(struct nvme_ns *ns)
 	blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
 }
 
-static void nvme_config_discard(struct nvme_ctrl *ctrl,
-		unsigned stream_alignment, struct request_queue *queue)
+static void nvme_config_discard(struct nvme_ns *ns)
 {
+	struct nvme_ctrl *ctrl = ns->ctrl;
+	struct request_queue *queue = ns->queue;
 	u32 size = queue_logical_block_size(queue);
 
-	if (stream_alignment)
-		size *= stream_alignment;
+	if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, queue);
+		return;
+	}
+
+	if (ctrl->nr_streams && ns->sws && ns->sgs)
+		size *= ns->sws * ns->sgs;
 
 	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
 			NVME_DSM_MAX_RANGES);
@@ -1361,9 +1367,12 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl,
 	queue->limits.discard_alignment = 0;
 	queue->limits.discard_granularity = size;
 
+	/* If discard is already enabled, don't reset queue limits */
+	if (blk_queue_flag_test_and_set(QUEUE_FLAG_DISCARD, queue))
+		return;
+
 	blk_queue_max_discard_sectors(queue, UINT_MAX);
 	blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES);
-	blk_queue_flag_set(QUEUE_FLAG_DISCARD, queue);
 
 	if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
 		blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
@@ -1407,10 +1416,6 @@ static void nvme_update_disk_info(struct gendisk *disk,
 {
 	sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
 	unsigned short bs = 1 << ns->lba_shift;
-	unsigned stream_alignment = 0;
-
-	if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
-		stream_alignment = ns->sws * ns->sgs;
 
 	blk_mq_freeze_queue(disk->queue);
 	blk_integrity_unregister(disk);
@@ -1424,10 +1429,9 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	nvme_init_integrity(disk, ns->ms, ns->pi_type);
 	if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
 		capacity = 0;
-	set_capacity(disk, capacity);
 
-	if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)
-		nvme_config_discard(ns->ctrl, stream_alignment, disk->queue);
+	set_capacity(disk, capacity);
+	nvme_config_discard(ns);
 	blk_mq_unfreeze_queue(disk->queue);
 }