Commit b277da0a authored by Mike Snitzer, committed by Jens Axboe

block: disable entropy contributions for nonrot devices

Clear QUEUE_FLAG_ADD_RANDOM in all block drivers that set
QUEUE_FLAG_NONROT.

Historically, all block devices have automatically made entropy
contributions.  But as previously stated in commit e2e1a148 ("block: add
sysfs knob for turning off disk entropy contributions"):
    - On SSD disks, the completion times aren't as random as they
      are for rotational drives. So it's questionable whether they
      should contribute to the random pool in the first place.
    - Calling add_disk_randomness() has a lot of overhead.

There are more reliable sources for randomness than non-rotational block
devices.  From a security perspective it is better to err on the side of
caution than to allow entropy contributions from unreliable "random"
sources.
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 7b7b7f7e
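
Every hunk below makes the same two-line change at queue-setup time: wherever a driver marks its queue non-rotational, it now also opts out of entropy contribution. A minimal sketch of the pattern, assuming a hypothetical helper (example_setup_queue is for illustration and is not code from this commit):

    #include <linux/blkdev.h>

    /*
     * Sketch of the pattern each hunk applies; example_setup_queue()
     * is a hypothetical name, not code from this commit.
     */
    static void example_setup_queue(struct request_queue *q)
    {
    	/* Non-rotational media complete I/O with fairly regular
    	 * timing, so don't feed completion times into the
    	 * entropy pool. */
    	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
    	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
    }

Drivers that manipulate queue_flags directly (mtip32xx, bcache) use set_bit()/clear_bit() on &q->queue_flags instead; the effect is the same. Because the block core gates add_disk_randomness() on blk_queue_add_random(q), clearing the flag also skips that overhead on the completion path, and the per-device sysfs knob from commit e2e1a148 (/sys/block/<dev>/queue/add_random) still lets administrators re-enable contributions.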
@@ -3952,6 +3952,7 @@ static int mtip_block_initialize(struct driver_data *dd)
 	/* Set device limits. */
 	set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
+	clear_bit(QUEUE_FLAG_ADD_RANDOM, &dd->queue->queue_flags);
 	blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
 	blk_queue_physical_block_size(dd->queue, 4096);
 	blk_queue_max_hw_sectors(dd->queue, 0xffff);
@@ -847,6 +847,7 @@ static int __init nbd_init(void)
 	 * Tell the block layer that we are not a rotational device
 	 */
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 	disk->queue->limits.discard_granularity = 512;
 	disk->queue->limits.max_discard_sectors = UINT_MAX;
 	disk->queue->limits.discard_zeroes_data = 0;
@@ -507,6 +507,7 @@ static int null_add_dev(void)
 	nullb->q->queuedata = nullb;
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

 	disk = nullb->disk = alloc_disk_node(1, home_node);
 	if (!disk)
@@ -1916,6 +1916,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 	ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
 	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, ns->queue);
 	blk_queue_make_request(ns->queue, nvme_make_request);
 	ns->dev = dev;
 	ns->queue->queuedata = ns;
@@ -307,6 +307,7 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue);
 	if (rsxx_discard_supported(card)) {
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
 		blk_queue_max_discard_sectors(card->queue,
@@ -4426,6 +4426,7 @@ static int skd_cons_disk(struct skd_device *skdev)
 	q->limits.discard_zeroes_data = 1;
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
 	spin_lock_irqsave(&skdev->lock, flags);
 	pr_debug("%s:%s:%d stopping %s queue\n",
@@ -925,6 +925,7 @@ static int create_device(struct zram *zram, int device_id)
 	set_capacity(zram->disk, 0);
 	/* zram devices sort of resembles non-rotational disks */
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
 	/*
 	 * To ensure that we always get PAGE_SIZE aligned
 	 * and n*PAGE_SIZED sized I/O requests.
@@ -685,8 +685,10 @@ static void ide_disk_setup(ide_drive_t *drive)
 	printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
 	       queue_max_sectors(q) / 2);

-	if (ata_id_is_ssd(id))
+	if (ata_id_is_ssd(id)) {
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+	}

 	/* calculate drive capacity, and select LBA if possible */
 	ide_disk_get_capacity(drive);
@@ -842,6 +842,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 	q->limits.logical_block_size = block_size;
 	q->limits.physical_block_size = block_size;
 	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
+	clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
 	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

 	blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
@@ -210,6 +210,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
@@ -417,6 +417,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
 	blk_queue_logical_block_size(new->rq, tr->blksize);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);
 	if (tr->discard) {
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
@@ -386,6 +386,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
 	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
 	blk_queue_max_segments(rq, nr_max_blk);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
+	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
 	scm_blk_dev_cluster_setup(bdev);

 	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
@@ -346,6 +346,7 @@ static int __init xpram_setup_blkdev(void)
 			goto out;
 		}
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]);
 		blk_queue_make_request(xpram_queues[i], xpram_make_request);
 		blk_queue_logical_block_size(xpram_queues[i], 4096);
 	}
@@ -2660,8 +2660,10 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
 	rot = get_unaligned_be16(&buffer[4]);

-	if (rot == 1)
+	if (rot == 1) {
 		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, sdkp->disk->queue);
+		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, sdkp->disk->queue);
+	}

 out:
 	kfree(buffer);