Commit 4d8d9f54 authored by Linus Torvalds

Merge tag 'for-linus-20180913' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Three fixes that should go into this series. This contains:

   - Increase number of policies supported by blk-cgroup.

     With blk-iolatency, we now have four in kernel, but we had a hard
     limit of three...

   - Fix regression in null_blk, where the zoned support broke
     queue_mode=0 (bio based).

   - NVMe pull request, with a single fix for an issue in the rdma code"

* tag 'for-linus-20180913' of git://git.kernel.dk/linux-block:
  null_blk: fix zoned support for non-rq based operation
  blk-cgroup: increase number of supported policies
  nvmet-rdma: fix possible bogus dereference under heavy load
parents a0efc03b b228ba1c
@@ -1510,8 +1510,10 @@ int blkcg_policy_register(struct blkcg_policy *pol)
 	for (i = 0; i < BLKCG_MAX_POLS; i++)
 		if (!blkcg_policy[i])
 			break;
-	if (i >= BLKCG_MAX_POLS)
+	if (i >= BLKCG_MAX_POLS) {
+		pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
 		goto err_unlock;
+	}
 
 	/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
 	if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
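The registration path above scans blkcg_policy[] for a free slot, so the BLKCG_MAX_POLS bound is all that stands between a new policy and a silent registration failure; the added pr_warn at least makes an overflow visible. For readers outside the kernel tree, a minimal userspace sketch of the same bounded slot-scan pattern (all names hypothetical, not the kernel API):

#include <stdio.h>

#define MAX_POLS 5                      /* mirrors the bumped BLKCG_MAX_POLS */

static const void *policies[MAX_POLS]; /* NULL marks a free slot */

/* Scan for a free slot; warn and fail when the table is full. */
static int policy_register(const void *pol)
{
	int i;

	for (i = 0; i < MAX_POLS; i++)
		if (!policies[i])
			break;
	if (i >= MAX_POLS) {
		fprintf(stderr, "policy_register: MAX_POLS too small\n");
		return -1;
	}
	policies[i] = pol;
	return i;
}

int main(void)
{
	int dummy[6];

	/* The sixth registration fails: the table only has five slots,
	 * so anything beyond that hits the bound. */
	for (int i = 0; i < 6; i++)
		printf("slot %d\n", policy_register(&dummy[i]));
	return 0;
}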
@@ -87,10 +87,10 @@ struct nullb {
 #ifdef CONFIG_BLK_DEV_ZONED
 int null_zone_init(struct nullb_device *dev);
 void null_zone_exit(struct nullb_device *dev);
-blk_status_t null_zone_report(struct nullb *nullb,
-			      struct nullb_cmd *cmd);
-void null_zone_write(struct nullb_cmd *cmd);
-void null_zone_reset(struct nullb_cmd *cmd);
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+		     unsigned int nr_sectors);
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
 #else
 static inline int null_zone_init(struct nullb_device *dev)
 {
@@ -98,11 +98,14 @@ static inline int null_zone_init(struct nullb_device *dev)
 }
 static inline void null_zone_exit(struct nullb_device *dev) {}
 static inline blk_status_t null_zone_report(struct nullb *nullb,
-					    struct nullb_cmd *cmd)
+					    struct bio *bio)
 {
 	return BLK_STS_NOTSUPP;
 }
-static inline void null_zone_write(struct nullb_cmd *cmd) {}
-static inline void null_zone_reset(struct nullb_cmd *cmd) {}
+static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+				   unsigned int nr_sectors)
+{
+}
+static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
 #endif /* CONFIG_BLK_DEV_ZONED */
 #endif /* __NULL_BLK_H */
@@ -1157,16 +1157,33 @@ static void null_restart_queue_async(struct nullb *nullb)
 	}
 }
 
+static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
+{
+	struct nullb_device *dev = cmd->nq->dev;
+
+	if (dev->queue_mode == NULL_Q_BIO) {
+		if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
+			cmd->error = null_zone_report(nullb, cmd->bio);
+			return true;
+		}
+	} else {
+		if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
+			cmd->error = null_zone_report(nullb, cmd->rq->bio);
+			return true;
+		}
+	}
+
+	return false;
+}
+
 static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
 {
 	struct nullb_device *dev = cmd->nq->dev;
 	struct nullb *nullb = dev->nullb;
 	int err = 0;
 
-	if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
-		cmd->error = null_zone_report(nullb, cmd);
+	if (cmd_report_zone(nullb, cmd))
 		goto out;
-	}
 
 	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
 		struct request *rq = cmd->rq;
@@ -1234,10 +1251,24 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
 	cmd->error = errno_to_blk_status(err);
 
 	if (!cmd->error && dev->zoned) {
-		if (req_op(cmd->rq) == REQ_OP_WRITE)
-			null_zone_write(cmd);
-		else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
-			null_zone_reset(cmd);
+		sector_t sector;
+		unsigned int nr_sectors;
+		int op;
+
+		if (dev->queue_mode == NULL_Q_BIO) {
+			op = bio_op(cmd->bio);
+			sector = cmd->bio->bi_iter.bi_sector;
+			nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
+		} else {
+			op = req_op(cmd->rq);
+			sector = blk_rq_pos(cmd->rq);
+			nr_sectors = blk_rq_sectors(cmd->rq);
+		}
+
+		if (op == REQ_OP_WRITE)
+			null_zone_write(cmd, sector, nr_sectors);
+		else if (op == REQ_OP_ZONE_RESET)
+			null_zone_reset(cmd, sector);
 	}
 out:
 	/* Complete IO by inline, softirq or timer */
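The regression fixed here came from null_handle_cmd reaching unconditionally into cmd->rq, which is NULL when null_blk runs bio based (queue_mode=0); the rewrite pulls op, sector, and length from whichever of bio or request is actually live. A compilable userspace sketch of that dispatch, with invented struct names standing in for the kernel types:

#include <stdint.h>

/* Hypothetical stand-ins for the two null_blk queue modes. */
enum queue_mode { Q_BIO, Q_RQ };

struct bio_like { int op; uint64_t sector; uint32_t bytes; };
struct rq_like  { int op; uint64_t pos;    uint32_t sectors; };

struct cmd_like {
	enum queue_mode mode;
	struct bio_like *bio;   /* valid only in Q_BIO mode */
	struct rq_like  *rq;    /* valid only in Q_RQ mode, NULL otherwise */
};

/* Read op/sector/length from whichever member is valid, as the fix does. */
static void cmd_geometry(const struct cmd_like *cmd, int *op,
			 uint64_t *sector, uint32_t *nr_sectors)
{
	if (cmd->mode == Q_BIO) {
		*op = cmd->bio->op;
		*sector = cmd->bio->sector;
		*nr_sectors = cmd->bio->bytes >> 9; /* bytes to 512B sectors */
	} else {
		*op = cmd->rq->op;
		*sector = cmd->rq->pos;
		*nr_sectors = cmd->rq->sectors;
	}
}

The design choice in the actual fix is the same: test dev->queue_mode once and never dereference the member that the mode says is dead.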
@@ -48,8 +48,8 @@ void null_zone_exit(struct nullb_device *dev)
 	kvfree(dev->zones);
 }
 
-static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
-			      unsigned int zno, unsigned int nr_zones)
+static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
+			       unsigned int zno, unsigned int nr_zones)
 {
 	struct blk_zone_report_hdr *hdr = NULL;
 	struct bio_vec bvec;
@@ -57,7 +57,7 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
 	void *addr;
 	unsigned int zones_to_cpy;
 
-	bio_for_each_segment(bvec, rq->bio, iter) {
+	bio_for_each_segment(bvec, bio, iter) {
 		addr = kmap_atomic(bvec.bv_page);
 
 		zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
@@ -84,29 +84,24 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
 	}
 }
 
-blk_status_t null_zone_report(struct nullb *nullb,
-			      struct nullb_cmd *cmd)
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
 {
 	struct nullb_device *dev = nullb->dev;
-	struct request *rq = cmd->rq;
-	unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+	unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
 	unsigned int nr_zones = dev->nr_zones - zno;
-	unsigned int max_zones = (blk_rq_bytes(rq) /
-					sizeof(struct blk_zone)) - 1;
+	unsigned int max_zones;
 
+	max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
 	nr_zones = min_t(unsigned int, nr_zones, max_zones);
-
-	null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
+	null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
 
 	return BLK_STS_OK;
 }
 
-void null_zone_write(struct nullb_cmd *cmd)
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+		     unsigned int nr_sectors)
 {
 	struct nullb_device *dev = cmd->nq->dev;
-	struct request *rq = cmd->rq;
-	sector_t sector = blk_rq_pos(rq);
-	unsigned int rq_sectors = blk_rq_sectors(rq);
 	unsigned int zno = null_zone_no(dev, sector);
 	struct blk_zone *zone = &dev->zones[zno];
 
@@ -118,7 +113,7 @@ void null_zone_write(struct nullb_cmd *cmd)
 	case BLK_ZONE_COND_EMPTY:
 	case BLK_ZONE_COND_IMP_OPEN:
 		/* Writes must be at the write pointer position */
-		if (blk_rq_pos(rq) != zone->wp) {
+		if (sector != zone->wp) {
 			cmd->error = BLK_STS_IOERR;
 			break;
 		}
@@ -126,7 +121,7 @@ void null_zone_write(struct nullb_cmd *cmd)
 		if (zone->cond == BLK_ZONE_COND_EMPTY)
 			zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
-		zone->wp += rq_sectors;
+		zone->wp += nr_sectors;
 		if (zone->wp == zone->start + zone->len)
 			zone->cond = BLK_ZONE_COND_FULL;
 		break;
@@ -137,11 +132,10 @@ void null_zone_write(struct nullb_cmd *cmd)
 	}
 }
 
-void null_zone_reset(struct nullb_cmd *cmd)
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
 {
 	struct nullb_device *dev = cmd->nq->dev;
-	struct request *rq = cmd->rq;
-	unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+	unsigned int zno = null_zone_no(dev, sector);
 	struct blk_zone *zone = &dev->zones[zno];
 
 	zone->cond = BLK_ZONE_COND_EMPTY;
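null_zone_write above is a small state machine over the zone's write pointer: a write must land exactly at wp, a first write implicitly opens an empty zone, and filling the zone to start + len marks it FULL; null_zone_reset rewinds the zone. A standalone sketch of those rules (the wp rewind on reset is assumed from the standard zone model, since the hunk is truncated before that line):

#include <stdint.h>

/* Minimal model of a sequential-write-required zone. */
enum zcond { Z_EMPTY, Z_IMP_OPEN, Z_FULL };

struct zone { uint64_t start, len, wp; enum zcond cond; };

/* Returns 0 on success, -1 for a write that is not at the write pointer. */
static int zone_write(struct zone *z, uint64_t sector, uint32_t nr_sectors)
{
	if (z->cond == Z_FULL || sector != z->wp)
		return -1;              /* sequential zones only append at wp */
	if (z->cond == Z_EMPTY)
		z->cond = Z_IMP_OPEN;   /* first write implicitly opens */
	z->wp += nr_sectors;
	if (z->wp == z->start + z->len)
		z->cond = Z_FULL;       /* zone filled to capacity */
	return 0;
}

static void zone_reset(struct zone *z)
{
	z->cond = Z_EMPTY;
	z->wp = z->start;               /* assumed: reset rewinds the wp */
}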
@@ -66,6 +66,7 @@ struct nvmet_rdma_rsp {
 
 	struct nvmet_req	req;
 
+	bool			allocated;
 	u8			n_rdma;
 	u32			flags;
 	u32			invalidate_rkey;
@@ -174,11 +175,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 	unsigned long flags;
 
 	spin_lock_irqsave(&queue->rsps_lock, flags);
-	rsp = list_first_entry(&queue->free_rsps,
+	rsp = list_first_entry_or_null(&queue->free_rsps,
 				struct nvmet_rdma_rsp, free_list);
-	list_del(&rsp->free_list);
+	if (likely(rsp))
+		list_del(&rsp->free_list);
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
+	if (unlikely(!rsp)) {
+		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		if (unlikely(!rsp))
+			return NULL;
+		rsp->allocated = true;
+	}
+
 	return rsp;
 }
 
@@ -187,6 +196,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 {
 	unsigned long flags;
 
+	if (rsp->allocated) {
+		kfree(rsp);
+		return;
+	}
+
 	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
 	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
 	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -776,6 +790,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	if (unlikely(!rsp)) {
+		/*
+		 * we get here only under memory pressure,
+		 * silently drop and have the host retry
+		 * as we can't even fail it.
+		 */
+		nvmet_rdma_post_recv(queue->dev, cmd);
+		return;
+	}
 	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;
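The nvmet-rdma change turns a hard assumption (free_rsps can never run dry) into a graceful fallback: pop from the pool when possible, kmalloc under heavy load, and tag the fallback rsp so the put path frees it rather than donating foreign memory to the pool. A single-threaded userspace sketch of this pool-with-heap-fallback pattern (the kernel version holds rsps_lock around the list operations):

#include <stdbool.h>
#include <stdlib.h>

struct rsp {
	struct rsp *next;   /* free-list link */
	bool allocated;     /* true when heap-allocated, not pool-owned */
};

static struct rsp *free_list;       /* head of the pool's free list */

static struct rsp *get_rsp(void)
{
	struct rsp *rsp = free_list;

	if (rsp) {                      /* fast path: pop from the pool */
		free_list = rsp->next;
		return rsp;
	}
	rsp = calloc(1, sizeof(*rsp));  /* slow path: heap fallback */
	if (rsp)
		rsp->allocated = true;  /* remember who owns this one */
	return rsp;                     /* may be NULL; caller must cope */
}

static void put_rsp(struct rsp *rsp)
{
	if (rsp->allocated) {           /* heap-born: free, never pooled */
		free(rsp);
		return;
	}
	rsp->next = free_list;          /* pool-born: push back on the list */
	free_list = rsp;
}

The last hunk shows the caller-must-cope half: when even kmalloc fails, the receive is silently reposted and the host's retry logic covers the gap.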
@@ -54,7 +54,7 @@ struct blk_stat_callback;
  * Maximum number of blkcg policies allowed to be registered concurrently.
  * Defined here to simplify include dependency.
  */
-#define BLKCG_MAX_POLS		3
+#define BLKCG_MAX_POLS		5
 
 typedef void (rq_end_io_fn)(struct request *, blk_status_t);