Commit f0aad9ba authored by Jack Wang, committed by Jason Gunthorpe

block/rnbd: server: functionality for IO submitting to block dev

This provides helper functions for submitting IO to a block device.

Link: https://lore.kernel.org/r/20200511135131.27580-22-danil.kipnis@cloud.ionos.com
Signed-off-by: Danil Kipnis <danil.kipnis@cloud.ionos.com>
Signed-off-by: Jack Wang <jinpu.wang@cloud.ionos.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Acked-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 2de6c8de
drivers/block/rnbd/rnbd-srv-dev.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt

#include "rnbd-srv-dev.h"
#include "rnbd-log.h"

struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
			       struct bio_set *bs)
{
	struct rnbd_dev *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->blk_open_flags = flags;
	dev->bdev = blkdev_get_by_path(path, flags, THIS_MODULE);
	ret = PTR_ERR_OR_ZERO(dev->bdev);
	if (ret)
		goto err;

	bdevname(dev->bdev, dev->name);
	dev->ibd_bio_set = bs;

	return dev;

err:
	kfree(dev);
	return ERR_PTR(ret);
}

void rnbd_dev_close(struct rnbd_dev *dev)
{
	blkdev_put(dev->bdev, dev->blk_open_flags);
	kfree(dev);
}
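
/*
 * Usage sketch (editor's illustration, not part of this patch): open a
 * backing device read-write, then close it again.  The device path and
 * the caller-provided bio_set are placeholders.
 */
static int __maybe_unused example_open_close(struct bio_set *bs)
{
	struct rnbd_dev *dev;

	dev = rnbd_dev_open("/dev/nullb0", FMODE_READ | FMODE_WRITE, bs);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	pr_info("opened %s\n", dev->name);
	rnbd_dev_close(dev);

	return 0;
}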

static void rnbd_dev_bi_end_io(struct bio *bio)
{
	struct rnbd_dev_blk_io *io = bio->bi_private;

	rnbd_endio(io->priv, blk_status_to_errno(bio->bi_status));
	bio_put(bio);
}

/**
* rnbd_bio_map_kern - map kernel address into bio
* @data: pointer to buffer to map
* @bs: bio_set to use.
* @len: length in bytes
* @gfp_mask: allocation flags for bio allocation
*
* Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
static struct bio *rnbd_bio_map_kern(void *data, struct bio_set *bs,
				     unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_alloc_bioset(gfp_mask, nr_pages, bs);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_page(bio, virt_to_page(data), bytes,
				 offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_put;
	return bio;
}
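
/*
 * Worked example (editor's note): with 4 KiB pages, a 5000-byte buffer
 * starting 100 bytes into a page occupies bytes 100..5099 relative to the
 * page start, so end - start above yields DIV_ROUND_UP(100 + 5000, 4096)
 * == 2 pages; the same length starting at in-page offset 4000 spans
 * 3 pages (96 + 4096 + 808 bytes).  bio_alloc_bioset() is thus sized for
 * the worst case, and the loop terminates early once len is consumed.
 */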

int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
		       size_t len, u32 bi_size, enum rnbd_io_flags flags,
		       short prio, void *priv)
{
	struct rnbd_dev_blk_io *io;
	struct bio *bio;

	/* Generate bio with pages pointing to the rdma buffer */
	bio = rnbd_bio_map_kern(data, dev->ibd_bio_set, len, GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	io = container_of(bio, struct rnbd_dev_blk_io, bio);

	io->dev = dev;
	io->priv = priv;

	bio->bi_end_io = rnbd_dev_bi_end_io;
	bio->bi_private = io;
	bio->bi_opf = rnbd_to_bio_flags(flags);
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = bi_size;
	bio_set_prio(bio, prio);
	bio_set_dev(bio, dev->bdev);

	submit_bio(bio);

	return 0;
}
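
/*
 * Caller sketch (editor's illustration, not part of this patch): submit a
 * 4 KiB write at sector 0.  RNBD_OP_WRITE is assumed to be one of the
 * enum rnbd_io_flags opcodes from rnbd-proto.h; "srv_op" stands in for
 * whatever per-request context the real server hands back to rnbd_endio().
 */
static int __maybe_unused example_submit_write(struct rnbd_dev *dev,
					       void *buf, void *srv_op)
{
	return rnbd_dev_submit_io(dev, 0 /* sector */, buf, 4096 /* len */,
				  4096 /* bi_size */, RNBD_OP_WRITE,
				  0 /* prio */, srv_op);
}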

drivers/block/rnbd/rnbd-srv-dev.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * RDMA Network Block Driver
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
#ifndef RNBD_SRV_DEV_H
#define RNBD_SRV_DEV_H

#include <linux/fs.h>
#include "rnbd-proto.h"

struct rnbd_dev {
	struct block_device	*bdev;
	struct bio_set		*ibd_bio_set;
	fmode_t			blk_open_flags;
	char			name[BDEVNAME_SIZE];
};

struct rnbd_dev_blk_io {
	struct rnbd_dev	*dev;
	void		*priv;
	/* have to be last member for front_pad usage of bioset_init */
	struct bio	bio;
};
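
/*
 * Editor's note on front_pad: the bio_set handed to rnbd_dev_open() must
 * reserve space in front of each bio so that the container_of() in
 * rnbd-srv-dev.c lands on a valid struct rnbd_dev_blk_io.  A sketch of
 * such an initialization (the pool size of 128 is an arbitrary
 * illustration):
 *
 *	struct bio_set bs;
 *
 *	ret = bioset_init(&bs, 128,
 *			  offsetof(struct rnbd_dev_blk_io, bio),
 *			  BIOSET_NEED_BVECS);
 */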

/**
 * rnbd_dev_open() - Open a device
 * @path: path to the block device to open
 * @flags: open flags
 * @bs: bio_set to use during block io
 */
struct rnbd_dev *rnbd_dev_open(const char *path, fmode_t flags,
			       struct bio_set *bs);

/**
 * rnbd_dev_close() - Close a device
 * @dev: device to close
 */
void rnbd_dev_close(struct rnbd_dev *dev);

void rnbd_endio(void *priv, int error);

static inline int rnbd_dev_get_max_segs(const struct rnbd_dev *dev)
{
	return queue_max_segments(bdev_get_queue(dev->bdev));
}

static inline int rnbd_dev_get_max_hw_sects(const struct rnbd_dev *dev)
{
	return queue_max_hw_sectors(bdev_get_queue(dev->bdev));
}

static inline int rnbd_dev_get_secure_discard(const struct rnbd_dev *dev)
{
	return blk_queue_secure_erase(bdev_get_queue(dev->bdev));
}

static inline int rnbd_dev_get_max_discard_sects(const struct rnbd_dev *dev)
{
	if (!blk_queue_discard(bdev_get_queue(dev->bdev)))
		return 0;

	return blk_queue_get_max_sectors(bdev_get_queue(dev->bdev),
					 REQ_OP_DISCARD);
}

static inline int rnbd_dev_get_discard_granularity(const struct rnbd_dev *dev)
{
	return bdev_get_queue(dev->bdev)->limits.discard_granularity;
}

static inline int rnbd_dev_get_discard_alignment(const struct rnbd_dev *dev)
{
	return bdev_get_queue(dev->bdev)->limits.discard_alignment;
}
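
/*
 * Usage sketch (editor's illustration, not part of this patch): the
 * helpers above let the server report the backing device's limits to a
 * client when it opens the device.  "struct example_limits" is
 * hypothetical; the real on-wire message is defined in rnbd-proto.h.
 */
struct example_limits {
	int max_segments;
	int max_hw_sectors;
	int max_discard_sectors;
	int discard_granularity;
	int discard_alignment;
	int secure_discard;
};

static inline void example_fill_limits(const struct rnbd_dev *dev,
				       struct example_limits *l)
{
	l->max_segments		= rnbd_dev_get_max_segs(dev);
	l->max_hw_sectors	= rnbd_dev_get_max_hw_sects(dev);
	l->max_discard_sectors	= rnbd_dev_get_max_discard_sects(dev);
	l->discard_granularity	= rnbd_dev_get_discard_granularity(dev);
	l->discard_alignment	= rnbd_dev_get_discard_alignment(dev);
	l->secure_discard	= rnbd_dev_get_secure_discard(dev);
}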

/**
 * rnbd_dev_submit_io() - Submit an I/O to the disk
 * @dev: device to which the I/O is submitted
 * @sector: address to read/write data to
 * @data: I/O data to write or buffer to read I/O data into
 * @len: length of @data
 * @bi_size: amount of data that will be read/written
 * @flags: IO flags as defined by the rnbd protocol
 * @prio: IO priority
 * @priv: private data handed back to rnbd_endio() on completion
 */
int rnbd_dev_submit_io(struct rnbd_dev *dev, sector_t sector, void *data,
		       size_t len, u32 bi_size, enum rnbd_io_flags flags,
		       short prio, void *priv);

#endif /* RNBD_SRV_DEV_H */