Commit 5fc6b075 authored by Linus Torvalds

Merge tag 'block-5.10-2020-10-30' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - null_blk zone fixes (Damien, Kanchan)

 - NVMe pull request from Christoph:
       - improve zone revalidation (Keith Busch)
       - gracefully handle zero length messages in nvme-rdma (zhenwei pi)
       - nvme-fc error handling fixes (James Smart)
       - nvmet tracing NULL pointer dereference fix (Chaitanya Kulkarni)

 - xsysace platform fixes (Andy)

 - scatterlist type cleanup (David)

 - blk-cgroup memory fixes (Gabriel)

 - nbd block size update fix (Ming)

 - Flush completion state fix (Ming)

 - bio_add_hw_page() iteration fix (Naohiro)

* tag 'block-5.10-2020-10-30' of git://git.kernel.dk/linux-block:
  blk-mq: mark flush request as IDLE in flush_end_io()
  lib/scatterlist: use consistent sg_copy_buffer() return type
  xsysace: use platform_get_resource() and platform_get_irq_optional()
  null_blk: Fix locking in zoned mode
  null_blk: Fix zone reset all tracing
  nbd: don't update block size after device is started
  block: advance iov_iter on bio_add_hw_page failure
  null_blk: synchronization fix for zoned device
  nvmet: fix a NULL pointer dereference when tracing the flush command
  nvme-fc: remove nvme_fc_terminate_io()
  nvme-fc: eliminate terminate_io use by nvme_fc_error_recovery
  nvme-fc: remove err_work work item
  nvme-fc: track error_recovery while connecting
  nvme-rdma: handle unexpected nvme completion data length
  nvme: ignore zone validate errors on subsequent scans
  blk-cgroup: Pre-allocate tree node on blkg_conf_prep
  blk-cgroup: Fix memleak on error path
parents cf9446cc 65ff5cd0
@@ -1044,6 +1044,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
         ssize_t size, left;
         unsigned len, i;
         size_t offset;
+        int ret = 0;
 
         if (WARN_ON_ONCE(!max_append_sectors))
                 return 0;
@@ -1066,15 +1067,17 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
                 len = min_t(size_t, PAGE_SIZE - offset, left);
                 if (bio_add_hw_page(q, bio, page, len, offset,
-                                max_append_sectors, &same_page) != len)
-                        return -EINVAL;
+                                max_append_sectors, &same_page) != len) {
+                        ret = -EINVAL;
+                        break;
+                }
                 if (same_page)
                         put_page(page);
                 offset = 0;
         }
 
-        iov_iter_advance(iter, size);
-        return 0;
+        iov_iter_advance(iter, size - left);
+        return ret;
 }
 
 /**
......
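The change above makes __bio_iov_append_get_pages() advance the iterator by the bytes it actually appended (size - left) rather than the full size, and report the failure through ret instead of returning early. A minimal standalone sketch of that bookkeeping, with a hypothetical add_chunk() callback standing in for bio_add_hw_page() (illustrative only, not kernel code):

    #include <stddef.h>
    #include <errno.h>

    /*
     * Feed src to add_chunk() in 4 KiB pieces, stop at the first piece the
     * callback rejects, and still report how many bytes were consumed so the
     * caller can advance its iterator by exactly that amount (the
     * iov_iter_advance(iter, size - left) idea).
     */
    static int consume_buffer(const char *src, size_t size, size_t *consumed,
                              int (*add_chunk)(const char *chunk, size_t len))
    {
            size_t left = size;
            int ret = 0;

            while (left) {
                    size_t len = left < 4096 ? left : 4096;

                    if (add_chunk(src + (size - left), len)) {
                            ret = -EINVAL;
                            break;  /* 'left' still excludes this failed piece */
                    }
                    left -= len;
            }

            *consumed = size - left;
            return ret;
    }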
@@ -657,13 +657,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                         goto fail;
                 }
 
+                if (radix_tree_preload(GFP_KERNEL)) {
+                        blkg_free(new_blkg);
+                        ret = -ENOMEM;
+                        goto fail;
+                }
+
                 rcu_read_lock();
                 spin_lock_irq(&q->queue_lock);
 
                 blkg = blkg_lookup_check(pos, pol, q);
                 if (IS_ERR(blkg)) {
                         ret = PTR_ERR(blkg);
-                        goto fail_unlock;
+                        blkg_free(new_blkg);
+                        goto fail_preloaded;
                 }
 
                 if (blkg) {
@@ -672,10 +679,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                         blkg = blkg_create(pos, q, new_blkg);
                         if (IS_ERR(blkg)) {
                                 ret = PTR_ERR(blkg);
-                                goto fail_unlock;
+                                goto fail_preloaded;
                         }
                 }
 
+                radix_tree_preload_end();
+
                 if (pos == blkcg)
                         goto success;
         }
@@ -685,6 +694,8 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
         ctx->body = input;
         return 0;
 
+fail_preloaded:
+        radix_tree_preload_end();
 fail_unlock:
         spin_unlock_irq(&q->queue_lock);
         rcu_read_unlock();
......
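The blkg_conf_prep() change above preallocates the radix tree node with GFP_KERNEL before taking queue_lock, so the insertion under the spinlock can no longer fail on memory allocation. For reference, a minimal sketch of that generic preload pattern (the tree, lock, and function names here are illustrative, not the blk-cgroup code):

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    static RADIX_TREE(example_tree, GFP_ATOMIC);
    static DEFINE_SPINLOCK(example_lock);

    static int example_insert(unsigned long index, void *item)
    {
            int err;

            /* May sleep; on success, preemption stays disabled until _end(). */
            if (radix_tree_preload(GFP_KERNEL))
                    return -ENOMEM;

            spin_lock_irq(&example_lock);
            /* Uses the per-CPU preloaded node, so it cannot fail with -ENOMEM. */
            err = radix_tree_insert(&example_tree, index, item);
            spin_unlock_irq(&example_lock);

            radix_tree_preload_end();       /* re-enables preemption */
            return err;
    }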
@@ -225,6 +225,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
         /* release the tag's ownership to the req cloned from */
         spin_lock_irqsave(&fq->mq_flush_lock, flags);
 
+        WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
         if (!refcount_dec_and_test(&flush_rq->ref)) {
                 fq->rq_status = error;
                 spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
......
@@ -296,7 +296,7 @@ static void nbd_size_clear(struct nbd_device *nbd)
         }
 }
 
-static void nbd_size_update(struct nbd_device *nbd)
+static void nbd_size_update(struct nbd_device *nbd, bool start)
 {
         struct nbd_config *config = nbd->config;
         struct block_device *bdev = bdget_disk(nbd->disk, 0);
@@ -313,7 +313,8 @@ static void nbd_size_update(struct nbd_device *nbd)
         if (bdev) {
                 if (bdev->bd_disk) {
                         bd_set_nr_sectors(bdev, nr_sectors);
-                        set_blocksize(bdev, config->blksize);
+                        if (start)
+                                set_blocksize(bdev, config->blksize);
                 } else
                         set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
                 bdput(bdev);
@@ -328,7 +329,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
         config->blksize = blocksize;
         config->bytesize = blocksize * nr_blocks;
         if (nbd->task_recv != NULL)
-                nbd_size_update(nbd);
+                nbd_size_update(nbd, false);
 }
 
 static void nbd_complete_rq(struct request *req)
@@ -1308,7 +1309,7 @@ static int nbd_start_device(struct nbd_device *nbd)
                 args->index = i;
                 queue_work(nbd->recv_workq, &args->work);
         }
-        nbd_size_update(nbd);
+        nbd_size_update(nbd, true);
         return error;
 }
......
@@ -47,6 +47,8 @@ struct nullb_device {
         unsigned int nr_zones_closed;
         struct blk_zone *zones;
         sector_t zone_size_sects;
+        spinlock_t zone_dev_lock;
+        unsigned long *zone_locks;
 
         unsigned long size; /* device size in MB */
         unsigned long completion_nsec; /* time in ns to complete a request */
......
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/vmalloc.h>
+#include <linux/bitmap.h>
 #include "null_blk.h"
 
 #define CREATE_TRACE_POINTS
@@ -45,6 +46,13 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
         if (!dev->zones)
                 return -ENOMEM;
 
+        spin_lock_init(&dev->zone_dev_lock);
+        dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL);
+        if (!dev->zone_locks) {
+                kvfree(dev->zones);
+                return -ENOMEM;
+        }
+
         if (dev->zone_nr_conv >= dev->nr_zones) {
                 dev->zone_nr_conv = dev->nr_zones - 1;
                 pr_info("changed the number of conventional zones to %u",
@@ -123,15 +131,26 @@ int null_register_zoned_dev(struct nullb *nullb)
 
 void null_free_zoned_dev(struct nullb_device *dev)
 {
+        bitmap_free(dev->zone_locks);
         kvfree(dev->zones);
 }
 
+static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
+{
+        wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE);
+}
+
+static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno)
+{
+        clear_and_wake_up_bit(zno, dev->zone_locks);
+}
+
 int null_report_zones(struct gendisk *disk, sector_t sector,
                 unsigned int nr_zones, report_zones_cb cb, void *data)
 {
         struct nullb *nullb = disk->private_data;
         struct nullb_device *dev = nullb->dev;
-        unsigned int first_zone, i;
+        unsigned int first_zone, i, zno;
         struct blk_zone zone;
         int error;
@@ -142,15 +161,18 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
         nr_zones = min(nr_zones, dev->nr_zones - first_zone);
         trace_nullb_report_zones(nullb, nr_zones);
 
-        for (i = 0; i < nr_zones; i++) {
+        zno = first_zone;
+        for (i = 0; i < nr_zones; i++, zno++) {
                 /*
                  * Stacked DM target drivers will remap the zone information by
                  * modifying the zone information passed to the report callback.
                  * So use a local copy to avoid corruption of the device zone
                  * array.
                  */
-                memcpy(&zone, &dev->zones[first_zone + i],
-                       sizeof(struct blk_zone));
+                null_lock_zone(dev, zno);
+                memcpy(&zone, &dev->zones[zno], sizeof(struct blk_zone));
+                null_unlock_zone(dev, zno);
+
                 error = cb(&zone, i, data);
                 if (error)
                         return error;
@@ -159,6 +181,10 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
         return nr_zones;
 }
 
+/*
+ * This is called in the case of memory backing from null_process_cmd()
+ * with the target zone already locked.
+ */
 size_t null_zone_valid_read_len(struct nullb *nullb,
                                 sector_t sector, unsigned int len)
 {
@@ -295,22 +321,27 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
         if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                 return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
 
+        null_lock_zone(dev, zno);
+        spin_lock(&dev->zone_dev_lock);
+
         switch (zone->cond) {
         case BLK_ZONE_COND_FULL:
                 /* Cannot write to a full zone */
-                return BLK_STS_IOERR;
+                ret = BLK_STS_IOERR;
+                goto unlock;
         case BLK_ZONE_COND_EMPTY:
         case BLK_ZONE_COND_CLOSED:
                 ret = null_check_zone_resources(dev, zone);
                 if (ret != BLK_STS_OK)
-                        return ret;
+                        goto unlock;
                 break;
         case BLK_ZONE_COND_IMP_OPEN:
         case BLK_ZONE_COND_EXP_OPEN:
                 break;
         default:
                 /* Invalid zone condition */
-                return BLK_STS_IOERR;
+                ret = BLK_STS_IOERR;
+                goto unlock;
         }
 
         /*
@@ -326,11 +357,14 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
                 else
                         cmd->rq->__sector = sector;
         } else if (sector != zone->wp) {
-                return BLK_STS_IOERR;
+                ret = BLK_STS_IOERR;
+                goto unlock;
         }
 
-        if (zone->wp + nr_sectors > zone->start + zone->capacity)
-                return BLK_STS_IOERR;
+        if (zone->wp + nr_sectors > zone->start + zone->capacity) {
+                ret = BLK_STS_IOERR;
+                goto unlock;
+        }
 
         if (zone->cond == BLK_ZONE_COND_CLOSED) {
                 dev->nr_zones_closed--;
@@ -341,9 +375,11 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
         if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
                 zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
+        spin_unlock(&dev->zone_dev_lock);
         ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+        spin_lock(&dev->zone_dev_lock);
         if (ret != BLK_STS_OK)
-                return ret;
+                goto unlock;
 
         zone->wp += nr_sectors;
         if (zone->wp == zone->start + zone->capacity) {
@@ -353,7 +389,13 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
                         dev->nr_zones_imp_open--;
                 zone->cond = BLK_ZONE_COND_FULL;
         }
-        return BLK_STS_OK;
+        ret = BLK_STS_OK;
+
+unlock:
+        spin_unlock(&dev->zone_dev_lock);
+        null_unlock_zone(dev, zno);
+
+        return ret;
 }
 
 static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
@@ -464,16 +506,33 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                                    sector_t sector)
 {
         struct nullb_device *dev = cmd->nq->dev;
-        unsigned int zone_no = null_zone_no(dev, sector);
-        struct blk_zone *zone = &dev->zones[zone_no];
-        blk_status_t ret = BLK_STS_OK;
+        unsigned int zone_no;
+        struct blk_zone *zone;
+        blk_status_t ret;
         size_t i;
 
+        if (op == REQ_OP_ZONE_RESET_ALL) {
+                for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
+                        null_lock_zone(dev, i);
+                        zone = &dev->zones[i];
+                        if (zone->cond != BLK_ZONE_COND_EMPTY) {
+                                spin_lock(&dev->zone_dev_lock);
+                                null_reset_zone(dev, zone);
+                                spin_unlock(&dev->zone_dev_lock);
+                                trace_nullb_zone_op(cmd, i, zone->cond);
+                        }
+                        null_unlock_zone(dev, i);
+                }
+                return BLK_STS_OK;
+        }
+
+        zone_no = null_zone_no(dev, sector);
+        zone = &dev->zones[zone_no];
+
+        null_lock_zone(dev, zone_no);
+        spin_lock(&dev->zone_dev_lock);
+
         switch (op) {
-        case REQ_OP_ZONE_RESET_ALL:
-                for (i = dev->zone_nr_conv; i < dev->nr_zones; i++)
-                        null_reset_zone(dev, &dev->zones[i]);
-                break;
         case REQ_OP_ZONE_RESET:
                 ret = null_reset_zone(dev, zone);
                 break;
@@ -487,30 +546,46 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                 ret = null_finish_zone(dev, zone);
                 break;
         default:
-                return BLK_STS_NOTSUPP;
+                ret = BLK_STS_NOTSUPP;
+                break;
         }
 
+        spin_unlock(&dev->zone_dev_lock);
+
         if (ret == BLK_STS_OK)
                 trace_nullb_zone_op(cmd, zone_no, zone->cond);
+
+        null_unlock_zone(dev, zone_no);
+
         return ret;
 }
 
 blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
                                     sector_t sector, sector_t nr_sectors)
 {
+        struct nullb_device *dev = cmd->nq->dev;
+        unsigned int zno = null_zone_no(dev, sector);
+        blk_status_t sts;
+
         switch (op) {
         case REQ_OP_WRITE:
-                return null_zone_write(cmd, sector, nr_sectors, false);
+                sts = null_zone_write(cmd, sector, nr_sectors, false);
+                break;
         case REQ_OP_ZONE_APPEND:
-                return null_zone_write(cmd, sector, nr_sectors, true);
+                sts = null_zone_write(cmd, sector, nr_sectors, true);
+                break;
         case REQ_OP_ZONE_RESET:
         case REQ_OP_ZONE_RESET_ALL:
         case REQ_OP_ZONE_OPEN:
         case REQ_OP_ZONE_CLOSE:
         case REQ_OP_ZONE_FINISH:
-                return null_zone_mgmt(cmd, op, sector);
+                sts = null_zone_mgmt(cmd, op, sector);
+                break;
         default:
-                return null_process_cmd(cmd, op, sector, nr_sectors);
+                null_lock_zone(dev, zno);
+                sts = null_process_cmd(cmd, op, sector, nr_sectors);
+                null_unlock_zone(dev, zno);
         }
+
+        return sts;
 }
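The null_blk changes above serialize zoned commands with one lock bit per zone, taken with wait_on_bit_lock_io() and released with clear_and_wake_up_bit(), plus zone_dev_lock for the open/closed zone accounting. A minimal sketch of just the bitmap-lock half, with illustrative names rather than the driver's:

    #include <linux/bitmap.h>
    #include <linux/wait_bit.h>
    #include <linux/sched.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static unsigned long *zone_locks;       /* one bit per zone */

    static int zone_locks_init(unsigned int nr_zones)
    {
            zone_locks = bitmap_zalloc(nr_zones, GFP_KERNEL);
            return zone_locks ? 0 : -ENOMEM;
    }

    static void zone_lock(unsigned int zno)
    {
            /* Sleeps (uninterruptibly, counted as I/O wait) until the bit is free. */
            wait_on_bit_lock_io(zone_locks, zno, TASK_UNINTERRUPTIBLE);
    }

    static void zone_unlock(unsigned int zno)
    {
            /* Clears the bit and wakes anyone sleeping in zone_lock(). */
            clear_and_wake_up_bit(zno, zone_locks);
    }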
@@ -443,22 +443,27 @@ static void ace_fix_driveid(u16 *id)
 #define ACE_FSM_NUM_STATES 11
 
 /* Set flag to exit FSM loop and reschedule tasklet */
-static inline void ace_fsm_yield(struct ace_device *ace)
+static inline void ace_fsm_yieldpoll(struct ace_device *ace)
 {
-        dev_dbg(ace->dev, "ace_fsm_yield()\n");
         tasklet_schedule(&ace->fsm_tasklet);
         ace->fsm_continue_flag = 0;
 }
 
+static inline void ace_fsm_yield(struct ace_device *ace)
+{
+        dev_dbg(ace->dev, "%s()\n", __func__);
+        ace_fsm_yieldpoll(ace);
+}
+
 /* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */
 static inline void ace_fsm_yieldirq(struct ace_device *ace)
 {
         dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
 
-        if (!ace->irq)
-                /* No IRQ assigned, so need to poll */
-                tasklet_schedule(&ace->fsm_tasklet);
-        ace->fsm_continue_flag = 0;
+        if (ace->irq > 0)
+                ace->fsm_continue_flag = 0;
+        else
+                ace_fsm_yieldpoll(ace);
 }
 
 static bool ace_has_next_request(struct request_queue *q)
@@ -1053,12 +1058,12 @@ static int ace_setup(struct ace_device *ace)
                   ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
 
         /* Now we can hook up the irq handler */
-        if (ace->irq) {
+        if (ace->irq > 0) {
                 rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
                 if (rc) {
                         /* Failure - fall back to polled mode */
                         dev_err(ace->dev, "request_irq failed\n");
-                        ace->irq = 0;
+                        ace->irq = rc;
                 }
         }
 
@@ -1110,7 +1115,7 @@ static void ace_teardown(struct ace_device *ace)
         tasklet_kill(&ace->fsm_tasklet);
 
-        if (ace->irq)
+        if (ace->irq > 0)
                 free_irq(ace->irq, ace);
 
         iounmap(ace->baseaddr);
@@ -1123,11 +1128,6 @@ static int ace_alloc(struct device *dev, int id, resource_size_t physaddr,
         int rc;
         dev_dbg(dev, "ace_alloc(%p)\n", dev);
 
-        if (!physaddr) {
-                rc = -ENODEV;
-                goto err_noreg;
-        }
-
         /* Allocate and initialize the ace device structure */
         ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL);
         if (!ace) {
@@ -1153,7 +1153,6 @@ static int ace_alloc(struct device *dev, int id, resource_size_t physaddr,
         dev_set_drvdata(dev, NULL);
         kfree(ace);
 err_alloc:
-err_noreg:
         dev_err(dev, "could not initialize device, err=%i\n", rc);
         return rc;
 }
@@ -1176,10 +1175,11 @@ static void ace_free(struct device *dev)
 static int ace_probe(struct platform_device *dev)
 {
-        resource_size_t physaddr = 0;
         int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
+        resource_size_t physaddr;
+        struct resource *res;
         u32 id = dev->id;
-        int irq = 0;
+        int irq;
         int i;
 
         dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
@@ -1190,12 +1190,15 @@ static int ace_probe(struct platform_device *dev)
         if (of_find_property(dev->dev.of_node, "8-bit", NULL))
                 bus_width = ACE_BUS_WIDTH_8;
 
-        for (i = 0; i < dev->num_resources; i++) {
-                if (dev->resource[i].flags & IORESOURCE_MEM)
-                        physaddr = dev->resource[i].start;
-                if (dev->resource[i].flags & IORESOURCE_IRQ)
-                        irq = dev->resource[i].start;
-        }
+        res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+        if (!res)
+                return -EINVAL;
+
+        physaddr = res->start;
+        if (!physaddr)
+                return -ENODEV;
+
+        irq = platform_get_irq_optional(dev, 0);
 
         /* Call the bus-independent setup code */
         return ace_alloc(&dev->dev, id, physaddr, irq, bus_width);
......
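The xsysace probe() rework above drops the hand-rolled scan of dev->resource[] in favor of the standard helpers: platform_get_resource() returns the MMIO resource (or NULL), and platform_get_irq_optional() returns the IRQ number on success or a negative errno, which is why the driver's irq tests become "ace->irq > 0". A condensed sketch of that lookup, with example_setup() as a hypothetical stand-in for ace_alloc():

    #include <linux/platform_device.h>
    #include <linux/ioport.h>
    #include <linux/errno.h>

    static int example_setup(struct device *dev, resource_size_t physaddr, int irq)
    {
            return 0;       /* stand-in for the bus-independent setup */
    }

    static int example_probe(struct platform_device *pdev)
    {
            struct resource *res;
            resource_size_t physaddr;
            int irq;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);   /* MMIO is mandatory */
            if (!res)
                    return -EINVAL;
            physaddr = res->start;

            irq = platform_get_irq_optional(pdev, 0);       /* < 0 means poll instead */

            return example_setup(&pdev->dev, physaddr, irq);
    }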
@@ -2125,7 +2125,7 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
         if (blk_queue_is_zoned(ns->queue)) {
                 ret = nvme_revalidate_zones(ns);
-                if (ret)
+                if (ret && !nvme_first_scan(ns->disk))
                         return ret;
         }
......
This diff is collapsed.
@@ -1768,6 +1768,14 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
                 return;
         }
 
+        /* sanity checking for received data length */
+        if (unlikely(wc->byte_len < len)) {
+                dev_err(queue->ctrl->ctrl.device,
+                        "Unexpected nvme completion length(%d)\n", wc->byte_len);
+                nvme_rdma_error_recovery(queue->ctrl);
+                return;
+        }
+
         ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
         /*
          * AEN requests are special as they don't time out and can
......
@@ -907,8 +907,6 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
         req->error_loc = NVMET_NO_ERROR_LOC;
         req->error_slba = 0;
 
-        trace_nvmet_req_init(req, req->cmd);
-
         /* no support for fused commands yet */
         if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
                 req->error_loc = offsetof(struct nvme_common_command, flags);
@@ -938,6 +936,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
         if (status)
                 goto fail;
 
+        trace_nvmet_req_init(req, req->cmd);
+
         if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
                 status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                 goto fail;
......
@@ -46,19 +46,12 @@ static inline struct nvmet_ctrl *nvmet_req_to_ctrl(struct nvmet_req *req)
         return req->sq->ctrl;
 }
 
-static inline void __assign_disk_name(char *name, struct nvmet_req *req,
-                bool init)
+static inline void __assign_req_name(char *name, struct nvmet_req *req)
 {
-        struct nvmet_ctrl *ctrl = nvmet_req_to_ctrl(req);
-        struct nvmet_ns *ns;
-
-        if ((init && req->sq->qid) || (!init && req->cq->qid)) {
-                ns = nvmet_find_namespace(ctrl, req->cmd->rw.nsid);
-                strncpy(name, ns->device_path, DISK_NAME_LEN);
-                return;
-        }
-
-        memset(name, 0, DISK_NAME_LEN);
+        if (req->ns)
+                strncpy(name, req->ns->device_path, DISK_NAME_LEN);
+        else
+                memset(name, 0, DISK_NAME_LEN);
 }
 #endif
 
@@ -81,7 +74,7 @@ TRACE_EVENT(nvmet_req_init,
         TP_fast_assign(
                 __entry->cmd = cmd;
                 __entry->ctrl = nvmet_req_to_ctrl(req);
-                __assign_disk_name(__entry->disk, req, true);
+                __assign_req_name(__entry->disk, req);
                 __entry->qid = req->sq->qid;
                 __entry->cid = cmd->common.command_id;
                 __entry->opcode = cmd->common.opcode;
@@ -121,7 +114,7 @@ TRACE_EVENT(nvmet_req_complete,
                 __entry->cid = req->cqe->command_id;
                 __entry->result = le64_to_cpu(req->cqe->result.u64);
                 __entry->status = le16_to_cpu(req->cqe->status) >> 1;
-                __assign_disk_name(__entry->disk, req, false);
+                __assign_req_name(__entry->disk, req);
         ),
         TP_printk("nvmet%s: %sqid=%d, cmdid=%u, res=%#llx, status=%#x",
                 __print_ctrl_name(__entry->ctrl),
......
@@ -933,7 +933,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
         sg_miter_start(&miter, sgl, nents, sg_flags);
 
         if (!sg_miter_skip(&miter, skip))
-                return false;
+                return 0;
 
         while ((offset < buflen) && sg_miter_next(&miter)) {
                 unsigned int len;
......