Commit 75f64f68 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A selection of fixes/changes that should make it into this series.
  This contains:

   - NVMe, two merges, containing:
        - pci-e, rdma, and fc fixes
        - Device quirks

   - Fix for a badblocks leak in null_blk

   - bcache fix from Rui Hua for a race condition regression where
     -EINTR was returned to upper layers that didn't expect it.

   - Regression fix for blktrace for a bug introduced in this series.

   - blktrace cleanup for cgroup id.

   - bdi registration error handling.

   - Small series with cleanups for blk-wbt.

   - Various little fixes for typos and the like.

  Nothing earth shattering, most important are the NVMe and bcache fixes"

* 'for-linus' of git://git.kernel.dk/linux-block: (34 commits)
  nvme-pci: fix NULL pointer dereference in nvme_free_host_mem()
  nvme-rdma: fix memory leak during queue allocation
  blktrace: fix trace mutex deadlock
  nvme-rdma: Use mr pool
  nvme-rdma: Check remotely invalidated rkey matches our expected rkey
  nvme-rdma: wait for local invalidation before completing a request
  nvme-rdma: don't complete requests before a send work request has completed
  nvme-rdma: don't suppress send completions
  bcache: check return value of register_shrinker
  bcache: recover data from backing when data is clean
  bcache: Fix building error on MIPS
  bcache: add a comment in journal bucket reading
  nvme-fc: don't use bit masks for set/test_bit() numbers
  blk-wbt: fix comments typo
  blk-wbt: move wbt_clear_stat to common place in wbt_done
  blk-sysfs: remove NULL pointer checking in queue_wb_lat_store
  blk-wbt: remove duplicated setting in wbt_init
  nvme-pci: add quirk for delay before CHK RDY for WDC SN200
  block: remove useless assignment in bio_split
  null_blk: fix dev->badblocks leak
  ...
parents df8ba95c ed565371
@@ -1819,7 +1819,7 @@ EXPORT_SYMBOL(bio_endio);
 struct bio *bio_split(struct bio *bio, int sectors,
 		      gfp_t gfp, struct bio_set *bs)
 {
-	struct bio *split = NULL;
+	struct bio *split;
 
 	BUG_ON(sectors <= 0);
 	BUG_ON(sectors >= bio_sectors(bio));
...
@@ -450,12 +450,9 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
 		ret = wbt_init(q);
 		if (ret)
 			return ret;
-
-		rwb = q->rq_wb;
-		if (!rwb)
-			return -EINVAL;
 	}
 
+	rwb = q->rq_wb;
 	if (val == -1)
 		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
 	else if (val >= 0)
...
@@ -178,12 +178,11 @@ void wbt_done(struct rq_wb *rwb, struct blk_issue_stat *stat)
 
 		if (wbt_is_read(stat))
 			wb_timestamp(rwb, &rwb->last_comp);
-		wbt_clear_state(stat);
 	} else {
 		WARN_ON_ONCE(stat == rwb->sync_cookie);
 		__wbt_done(rwb, wbt_stat_to_mask(stat));
-		wbt_clear_state(stat);
 	}
+	wbt_clear_state(stat);
 }
 
 /*
@@ -482,7 +481,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 	/*
 	 * At this point we know it's a buffered write. If this is
-	 * kswapd trying to free memory, or REQ_SYNC is set, set, then
+	 * kswapd trying to free memory, or REQ_SYNC is set, then
 	 * it's WB_SYNC_ALL writeback, and we'll use the max limit for
 	 * that. If the write is marked as a background write, then use
 	 * the idle limit, or go to normal if we haven't had competing
@@ -723,8 +722,6 @@ int wbt_init(struct request_queue *q)
 		init_waitqueue_head(&rwb->rq_wait[i].wait);
 	}
 
-	rwb->wc = 1;
-	rwb->queue_depth = RWB_DEF_DEPTH;
 	rwb->last_comp = rwb->last_issue = jiffies;
 	rwb->queue = q;
 	rwb->win_nsec = RWB_WINDOW_NSEC;
...
@@ -671,10 +671,13 @@ void device_add_disk(struct device *parent, struct gendisk *disk)
 		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
 		disk->flags |= GENHD_FL_NO_PART_SCAN;
 	} else {
+		int ret;
+
 		/* Register BDI before referencing it from bdev */
 		disk_to_dev(disk)->devt = devt;
-		bdi_register_owner(disk->queue->backing_dev_info,
-				disk_to_dev(disk));
+		ret = bdi_register_owner(disk->queue->backing_dev_info,
+					 disk_to_dev(disk));
+		WARN_ON(ret);
 		blk_register_region(disk_devt(disk), disk->minors, NULL,
 				    exact_match, exact_lock, disk);
 	}
@@ -1389,7 +1392,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
 	if (minors > DISK_MAX_PARTS) {
 		printk(KERN_ERR
-			"block: can't allocated more than %d partitions\n",
+			"block: can't allocate more than %d partitions\n",
 			DISK_MAX_PARTS);
 		minors = DISK_MAX_PARTS;
 	}
...
@@ -471,7 +471,6 @@ static void nullb_device_release(struct config_item *item)
 {
 	struct nullb_device *dev = to_nullb_device(item);
 
-	badblocks_exit(&dev->badblocks);
 	null_free_device_storage(dev, false);
 	null_free_dev(dev);
 }
@@ -582,6 +581,10 @@ static struct nullb_device *null_alloc_dev(void)
 static void null_free_dev(struct nullb_device *dev)
 {
+	if (!dev)
+		return;
+
+	badblocks_exit(&dev->badblocks);
 	kfree(dev);
 }
...
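The two hunks above move the badblocks teardown from the configfs release path into the allocator's NULL-safe free function, so every path that frees a device, including early error paths, funnels through the same teardown and the badblocks pages no longer leak. A minimal user-space sketch of that pattern, with hypothetical names rather than the null_blk code:

#include <stdlib.h>

struct dev {
        void *badblocks_pages;          /* resource acquired during alloc */
};

/* NULL-safe free that owns all teardown, so callers may invoke it
 * unconditionally without leaking badblocks_pages. */
static void dev_free(struct dev *d)
{
        if (!d)
                return;
        free(d->badblocks_pages);
        free(d);
}

static struct dev *dev_alloc(void)
{
        struct dev *d = calloc(1, sizeof(*d));

        if (!d)
                return NULL;
        d->badblocks_pages = malloc(4096);
        if (!d->badblocks_pages) {
                dev_free(d);            /* single funnel, nothing leaks */
                return NULL;
        }
        return d;
}

int main(void)
{
        dev_free(dev_alloc());
        return 0;
}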
@@ -490,7 +490,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
 		if (b == -1)
 			goto err;
 
-		k->ptr[i] = PTR(ca->buckets[b].gen,
+		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
 				bucket_to_sector(c, b),
 				ca->sb.nr_this_dev);
...
@@ -807,7 +807,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
 	c->shrink.scan_objects = bch_mca_scan;
 	c->shrink.seeks = 4;
 	c->shrink.batch = c->btree_pages * 2;
-	register_shrinker(&c->shrink);
+
+	if (register_shrinker(&c->shrink))
+		pr_warn("bcache: %s: could not register shrinker",
+				__func__);
 
 	return 0;
 }
...
@@ -585,7 +585,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
 		return false;
 
 	for (i = 0; i < KEY_PTRS(l); i++)
-		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
 		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
 			return false;
...
@@ -170,6 +170,11 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
 		 * find a sequence of buckets with valid journal entries
 		 */
 		for (i = 0; i < ca->sb.njournal_buckets; i++) {
+			/*
+			 * We must try the index l with ZERO first for
+			 * correctness due to the scenario that the journal
+			 * bucket is circular buffer which might have wrapped
+			 */
 			l = (i * 2654435769U) % ca->sb.njournal_buckets;
 
 			if (test_bit(l, bitmap))
@@ -507,7 +512,7 @@ static void journal_reclaim(struct cache_set *c)
 			continue;
 
 		ja->cur_idx = next;
-		k->ptr[n++] = PTR(0,
+		k->ptr[n++] = MAKE_PTR(0,
 				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
 				  ca->sb.nr_this_dev);
 	}
...
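For reference on the new comment above: the loop probes journal buckets in multiplicative-hash order (2654435769 is 0x9E3779B9, the 32-bit golden-ratio hashing constant), and because i == 0 always maps to l == 0, bucket zero is always probed first, which is what the comment relies on when the circular journal has wrapped. A stand-alone sketch of the probe order; the bucket count 256 is only an illustrative value:

#include <stdio.h>

int main(void)
{
        unsigned int njournal_buckets = 256;    /* illustrative value */

        /* Same index calculation as in bch_journal_read() above;
         * i == 0 always yields l == 0, so bucket 0 is tried first. */
        for (unsigned int i = 0; i < 8; i++) {
                unsigned int l = (i * 2654435769U) % njournal_buckets;
                printf("probe %u -> bucket %u\n", i, l);
        }
        return 0;
}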
@@ -708,16 +708,15 @@ static void cached_dev_read_error(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 	struct bio *bio = &s->bio.bio;
-	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 
 	/*
-	 * If cache device is dirty (dc->has_dirty is non-zero), then
-	 * recovery a failed read request from cached device may get a
-	 * stale data back. So read failure recovery is only permitted
-	 * when cache device is clean.
+	 * If read request hit dirty data (s->read_dirty_data is true),
+	 * then recovery a failed read request from cached device may
+	 * get a stale data back. So read failure recovery is only
+	 * permitted when read request hit clean data in cache device,
+	 * or when cache read race happened.
 	 */
-	if (s->recoverable &&
-	    (dc && !atomic_read(&dc->has_dirty))) {
+	if (s->recoverable && !s->read_dirty_data) {
 		/* Retry from the backing device: */
 		trace_bcache_read_retry(s->orig_bio);
...
@@ -1449,19 +1449,19 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
 	int srcu_idx, ret;
 	u8 data[16] = { 0, };
 
+	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+	if (unlikely(!ns))
+		return -EWOULDBLOCK;
+
 	put_unaligned_le64(key, &data[0]);
 	put_unaligned_le64(sa_key, &data[8]);
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = op;
-	c.common.nsid = cpu_to_le32(head->ns_id);
+	c.common.nsid = cpu_to_le32(ns->head->ns_id);
 	c.common.cdw10[0] = cpu_to_le32(cdw10);
 
-	ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
-	if (unlikely(!ns))
-		ret = -EWOULDBLOCK;
-	else
-		ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+	ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
 	nvme_put_ns_from_disk(head, srcu_idx);
 	return ret;
 }
@@ -2961,8 +2961,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-	struct nvme_ns_head *head = ns->head;
-
 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
 		return;
@@ -2980,15 +2978,14 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 
 	mutex_lock(&ns->ctrl->subsys->lock);
 	nvme_mpath_clear_current_path(ns);
-	if (head)
-		list_del_rcu(&ns->siblings);
+	list_del_rcu(&ns->siblings);
 	mutex_unlock(&ns->ctrl->subsys->lock);
 
 	mutex_lock(&ns->ctrl->namespaces_mutex);
 	list_del_init(&ns->list);
 	mutex_unlock(&ns->ctrl->namespaces_mutex);
 
-	synchronize_srcu(&head->srcu);
+	synchronize_srcu(&ns->head->srcu);
 	nvme_put_ns(ns);
 }
...
@@ -156,4 +156,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
 
+static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
+		struct request *rq)
+{
+	struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+	/*
+	 * We cannot accept any other command until the connect command has
+	 * completed, so only allow connect to pass.
+	 */
+	if (!blk_rq_is_passthrough(rq) ||
+	    cmd->common.opcode != nvme_fabrics_command ||
+	    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+		/*
+		 * Reconnecting state means transport disruption, which can take
+		 * a long time and even might fail permanently, fail fast to
+		 * give upper layers a chance to failover.
+		 * Deleting state means that the ctrl will never accept commands
+		 * again, fail it permanently.
+		 */
+		if (ctrl->state == NVME_CTRL_RECONNECTING ||
+		    ctrl->state == NVME_CTRL_DELETING) {
+			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+			return BLK_STS_IOERR;
+		}
+		return BLK_STS_RESOURCE; /* try again later */
+	}
+
+	return BLK_STS_OK;
+}
+
 #endif /* _NVME_FABRICS_H */
...
@@ -31,7 +31,8 @@
 
 enum nvme_fc_queue_flags {
-	NVME_FC_Q_CONNECTED = (1 << 0),
+	NVME_FC_Q_CONNECTED = 0,
+	NVME_FC_Q_LIVE,
 };
 
 #define NVMEFC_QUEUE_DELAY	3	/* ms units */
@@ -1927,6 +1928,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
 	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
 		return;
 
+	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
 	/*
 	 * Current implementation never disconnects a single queue.
 	 * It always terminates a whole association. So there is never
@@ -1934,7 +1936,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
 	 */
 
 	queue->connection_id = 0;
-	clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
 }
 
 static void
@@ -2013,6 +2014,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			break;
+
+		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
 	}
 
 	return ret;
@@ -2320,6 +2323,14 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
 		return BLK_STS_RESOURCE;
 }
 
+static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
+		struct request *rq)
+{
+	if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
+		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+	return BLK_STS_OK;
+}
+
 static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 		 const struct blk_mq_queue_data *bd)
@@ -2335,6 +2346,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 	u32 data_len;
 	blk_status_t ret;
 
+	ret = nvme_fc_is_ready(queue, rq);
+	if (unlikely(ret))
+		return ret;
+
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
 		return ret;
@@ -2727,6 +2742,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 	if (ret)
 		goto out_disconnect_admin_queue;
 
+	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
 	/*
 	 * Check controller capabilities
 	 *
...
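The enum change at the top of this diff is the "don't use bit masks for set/test_bit() numbers" fix: the kernel bitops take a bit number, not a mask, so a flag defined as (1 << 0) actually addressed bit 1. The old code was internally consistent and so happened to work, but the new NVME_FC_Q_LIVE flag would have compounded the confusion. A stand-alone illustration of the difference, using plain C bit arithmetic rather than the kernel bitops:

#include <stdio.h>

#define Q_CONNECTED_MASK        (1 << 0)        /* old style: a mask, value 1 */
#define Q_CONNECTED_BIT         0               /* new style: a bit number */

int main(void)
{
        unsigned long flags = 0;

        /* Passing the mask where a bit number is expected touches bit 1: */
        flags |= 1UL << Q_CONNECTED_MASK;
        printf("mask used as bit number: flags = %#lx\n", flags);  /* 0x2 */

        flags = 0;
        flags |= 1UL << Q_CONNECTED_BIT;
        printf("proper bit number:       flags = %#lx\n", flags);  /* 0x1 */
        return 0;
}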
@@ -131,7 +131,7 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
 		bio->bi_opf |= REQ_NVME_MPATH;
 		ret = direct_make_request(bio);
 	} else if (!list_empty_careful(&head->list)) {
-		dev_warn_ratelimited(dev, "no path available - requeing I/O\n");
+		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
 
 		spin_lock_irq(&head->requeue_lock);
 		bio_list_add(&head->requeue_list, bio);
...
@@ -114,7 +114,7 @@ static inline struct nvme_request *nvme_req(struct request *req)
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
-#define NVME_QUIRK_DELAY_AMOUNT		2000
+#define NVME_QUIRK_DELAY_AMOUNT		2300
 
 enum nvme_ctrl_state {
 	NVME_CTRL_NEW,
...
@@ -1759,6 +1759,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
 			dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
 			dev->host_mem_descs, dev->host_mem_descs_dma);
 	dev->host_mem_descs = NULL;
+	dev->nr_host_mem_descs = 0;
 }
 
 static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
@@ -1787,7 +1788,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
 	if (!bufs)
 		goto out_free_descs;
 
-	for (size = 0; size < preferred; size += len) {
+	for (size = 0; size < preferred && i < max_entries; size += len) {
 		dma_addr_t dma_addr;
 
 		len = min_t(u64, chunk_size, preferred - size);
@@ -2428,7 +2429,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
 	return -ENODEV;
 }
 
-static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
 {
 	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
 		/*
@@ -2443,6 +2444,14 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
 		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
 		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
 			return NVME_QUIRK_NO_DEEPEST_PS;
+	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
+		/*
+		 * Samsung SSD 960 EVO drops off the PCIe bus after system
+		 * suspend on a Ryzen board, ASUS PRIME B350M-A.
+		 */
+		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
+		    dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
+			return NVME_QUIRK_NO_APST;
 	}
 
 	return 0;
@@ -2482,7 +2491,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto unmap;
 
-	quirks |= check_dell_samsung_bug(pdev);
+	quirks |= check_vendor_combination_bug(pdev);
 
 	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
 			quirks);
@@ -2665,6 +2674,8 @@ static const struct pci_device_id nvme_id_table[] = {
 		.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
+		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 	{ PCI_DEVICE(0x144d, 0xa821),	/* Samsung PM1725 */
...
...
@@ -533,15 +533,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
 	tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
 
+	/* release the queue lookup reference on the completed IO */
+	nvmet_fc_tgt_q_put(queue);
+
 	spin_lock_irqsave(&queue->qlock, flags);
 	deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
 				struct nvmet_fc_defer_fcp_req, req_list);
 	if (!deferfcp) {
 		list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
 		spin_unlock_irqrestore(&queue->qlock, flags);
-
-		/* Release reference taken at queue lookup and fod allocation */
-		nvmet_fc_tgt_q_put(queue);
-
 		return;
 	}
@@ -760,6 +760,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
 		tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
 				deferfcp->fcp_req);
 
+		/* release the queue lookup reference */
+		nvmet_fc_tgt_q_put(queue);
+
 		kfree(deferfcp);
 
 		spin_lock_irqsave(&queue->qlock, flags);
...
@@ -52,10 +52,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
 	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
 }
 
+enum nvme_loop_queue_flags {
+	NVME_LOOP_Q_LIVE = 0,
+};
+
 struct nvme_loop_queue {
 	struct nvmet_cq		nvme_cq;
 	struct nvmet_sq		nvme_sq;
 	struct nvme_loop_ctrl	*ctrl;
+	unsigned long		flags;
 };
 
 static struct nvmet_port *nvmet_loop_port;
@@ -144,6 +149,14 @@ nvme_loop_timeout(struct request *rq, bool reserved)
 	return BLK_EH_HANDLED;
 }
 
+static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
+		struct request *rq)
+{
+	if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
+		return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+	return BLK_STS_OK;
+}
+
 static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
@@ -153,6 +166,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
 	blk_status_t ret;
 
+	ret = nvme_loop_is_ready(queue, req);
+	if (unlikely(ret))
+		return ret;
+
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
 	if (ret)
 		return ret;
@@ -267,6 +284,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
 
 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
 {
+	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
 	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
 	blk_cleanup_queue(ctrl->ctrl.admin_q);
 	blk_mq_free_tag_set(&ctrl->admin_tag_set);
@@ -297,8 +315,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
 {
 	int i;
 
-	for (i = 1; i < ctrl->ctrl.queue_count; i++)
+	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
 		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+	}
 }
 
 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
@@ -338,6 +358,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
 		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
 		if (ret)
 			return ret;
+		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
 	}
 
 	return 0;
@@ -380,6 +401,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	if (error)
 		goto out_cleanup_queue;
 
+	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+
 	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
 	if (error) {
 		dev_err(ctrl->ctrl.device,
...
@@ -91,7 +91,7 @@ PTR_FIELD(PTR_GEN, 0, 8)
 
 #define PTR_CHECK_DEV			((1 << PTR_DEV_BITS) - 1)
 
-#define PTR(gen, offset, dev)						\
+#define MAKE_PTR(gen, offset, dev)					\
 	((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
 
 /* Bkey utility code */
...
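The rename itself is the whole "Fix building error on MIPS" change: bcache's PTR() macro collided with a macro of the same name in the MIPS architecture headers, so it becomes MAKE_PTR() with unchanged semantics. For reference, a user-space sketch of the encoding it builds; the field widths are read off the shifts above and the PTR_FIELD(PTR_GEN, 0, 8) declaration, so treat this as illustration rather than the kernel header:

#include <stdio.h>
#include <stdint.h>

/* Same layout as the macro above: dev in bits 51 and up,
 * offset in bits 8..50, generation in bits 0..7. */
#define MAKE_PTR(gen, offset, dev) \
        ((((uint64_t)(dev)) << 51) | ((uint64_t)(offset)) << 8 | (gen))

int main(void)
{
        uint64_t p = MAKE_PTR(3, 0x12345, 2);

        printf("gen=%llu offset=%#llx dev=%llu\n",
               (unsigned long long)(p & 0xff),                      /* bits 0..7 */
               (unsigned long long)((p >> 8) & ((1ULL << 43) - 1)), /* bits 8..50 */
               (unsigned long long)(p >> 51));                      /* bits 51+ */
        return 0;
}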
@@ -591,7 +591,7 @@ static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 		return ret;
 
 	if (copy_to_user(arg, &buts, sizeof(buts))) {
-		blk_trace_remove(q);
+		__blk_trace_remove(q);
 		return -EFAULT;
 	}
 
 	return 0;
@@ -637,7 +637,7 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
 		return ret;
 
 	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
-		blk_trace_remove(q);
+		__blk_trace_remove(q);
 		return -EFAULT;
 	}
 
@@ -872,7 +872,7 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
 *
 **/
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-			      u32 what, int error, union kernfs_node_id *cgid)
+			      u32 what, int error)
 {
 	struct blk_trace *bt = q->blk_trace;
@@ -880,22 +880,21 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 		return;
 
 	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
-			bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid);
+			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
+			blk_trace_bio_get_cgid(q, bio));
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
 				     struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
 				       struct request_queue *q, struct bio *bio,
 				       int error)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
@@ -903,8 +902,7 @@ static void blk_add_trace_bio_backmerge(void *ignore,
 					struct request *rq,
 					struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 }
 
 static void blk_add_trace_bio_frontmerge(void *ignore,
@@ -912,15 +910,13 @@ static void blk_add_trace_bio_frontmerge(void *ignore,
 					 struct request *rq,
 					 struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 }
 
 static void blk_add_trace_bio_queue(void *ignore,
 				    struct request_queue *q, struct bio *bio)
 {
-	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0,
-			  blk_trace_bio_get_cgid(q, bio));
+	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 }
 
 static void blk_add_trace_getrq(void *ignore,
@@ -928,8 +924,7 @@ static void blk_add_trace_getrq(void *ignore,
 				struct bio *bio, int rw)
 {
 	if (bio)
-		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0,
-				  blk_trace_bio_get_cgid(q, bio));
+		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
 	else {
 		struct blk_trace *bt = q->blk_trace;
@@ -945,8 +940,7 @@ static void blk_add_trace_sleeprq(void *ignore,
 				  struct bio *bio, int rw)
 {
 	if (bio)
-		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0,
-				  blk_trace_bio_get_cgid(q, bio));
+		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
 	else {
 		struct blk_trace *bt = q->blk_trace;
...
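The first two hunks above are the trace-mutex deadlock fix: __blk_trace_setup() already runs with the queue's trace mutex held, so calling blk_trace_remove(), which takes the same mutex, on the copy_to_user() failure path would self-deadlock; the fix switches those callers to the pre-locked __blk_trace_remove() variant. A minimal sketch of the locked/unlocked-variant idiom, with hypothetical names and a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t trace_mutex = PTHREAD_MUTEX_INITIALIZER;
static int trace_active;

/* Caller must already hold trace_mutex. */
static int __trace_remove(void)
{
        trace_active = 0;
        return 0;
}

/* Public variant: takes the mutex itself. */
static int trace_remove(void)
{
        int ret;

        pthread_mutex_lock(&trace_mutex);
        ret = __trace_remove();
        pthread_mutex_unlock(&trace_mutex);
        return ret;
}

/* Runs with trace_mutex held, so its error path must use the
 * double-underscore variant; calling trace_remove() here would
 * deadlock on the non-recursive mutex. */
static int __trace_setup(int fail)
{
        trace_active = 1;
        if (fail)
                return __trace_remove();
        return 0;
}

int main(void)
{
        pthread_mutex_lock(&trace_mutex);
        __trace_setup(1);
        pthread_mutex_unlock(&trace_mutex);
        printf("trace_active = %d\n", trace_active);
        return 0;
}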
@@ -113,11 +113,23 @@ static const struct file_operations bdi_debug_stats_fops = {
 	.release	= single_release,
 };
 
-static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
+static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 {
+	if (!bdi_debug_root)
+		return -ENOMEM;
+
 	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
+	if (!bdi->debug_dir)
+		return -ENOMEM;
+
 	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
 					       bdi, &bdi_debug_stats_fops);
+	if (!bdi->debug_stats) {
+		debugfs_remove(bdi->debug_dir);
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 static void bdi_debug_unregister(struct backing_dev_info *bdi)
@@ -129,9 +141,10 @@ static void bdi_debug_unregister(struct backing_dev_info *bdi)
 static inline void bdi_debug_init(void)
 {
 }
-static inline void bdi_debug_register(struct backing_dev_info *bdi,
+static inline int bdi_debug_register(struct backing_dev_info *bdi,
 				      const char *name)
 {
+	return 0;
 }
 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 {
@@ -869,10 +882,13 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
 	if (IS_ERR(dev))
 		return PTR_ERR(dev);
 
+	if (bdi_debug_register(bdi, dev_name(dev))) {
+		device_destroy(bdi_class, dev->devt);
+		return -ENOMEM;
+	}
 	cgwb_bdi_register(bdi);
 	bdi->dev = dev;
 
-	bdi_debug_register(bdi, dev_name(dev));
 	set_bit(WB_registered, &bdi->wb.state);
 
 	spin_lock_bh(&bdi_lock);
...