Commit 631b001f authored by Jan Kara, committed by Christian Brauner

bcache: Convert to bdev_open_by_path()

Convert bcache to use bdev_open_by_path() and pass the handle around.

CC: linux-bcache@vger.kernel.org
CC: Coly Li <colyli@suse.de>
CC: Kent Overstreet <kent.overstreet@gmail.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Christian Brauner <brauner@kernel.org>
Acked-by: Coly Li <colyli@suse.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20230927093442.25915-9-jack@suse.cz
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent eed993a0
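
Before the diff itself, here is a condensed, hypothetical sketch (not part of the patch) of the before/after pattern the commit applies throughout bcache: blkdev_get_by_path()/blkdev_put() on a bare struct block_device is replaced by bdev_open_by_path()/bdev_release() on a struct bdev_handle, with the block_device itself reached through handle->bdev. The "my_dev" structure and helper names below are invented for illustration only.

/* Illustrative sketch only -- not bcache code. */
#include <linux/blkdev.h>
#include <linux/err.h>

struct my_dev {
	struct bdev_handle	*bdev_handle;	/* new: the open handle to release later */
	struct block_device	*bdev;		/* still cached for convenience (handle->bdev) */
};

/* Before the conversion: open returned the block_device directly. */
static int my_dev_open_old(struct my_dev *d, const char *path)
{
	d->bdev = blkdev_get_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE, d, NULL);
	if (IS_ERR(d->bdev))
		return PTR_ERR(d->bdev);
	return 0;
}

/* After the conversion: open returns a handle; the device is handle->bdev. */
static int my_dev_open_new(struct my_dev *d, const char *path)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_path(path, BLK_OPEN_READ | BLK_OPEN_WRITE, d, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	d->bdev_handle = handle;
	d->bdev = handle->bdev;
	return 0;
}

/* Teardown pairs bdev_release() with the handle instead of blkdev_put(). */
static void my_dev_close(struct my_dev *d)
{
	if (d->bdev_handle)
		bdev_release(d->bdev_handle);
}
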
@@ -299,6 +299,7 @@ struct cached_dev {
     struct list_head list;
     struct bcache_device disk;
     struct block_device *bdev;
+    struct bdev_handle *bdev_handle;
     struct cache_sb sb;
     struct cache_sb_disk *sb_disk;
@@ -421,6 +422,7 @@ struct cache {
     struct kobject kobj;
     struct block_device *bdev;
+    struct bdev_handle *bdev_handle;
     struct task_struct *alloc_thread;
@@ -1368,8 +1368,8 @@ static void cached_dev_free(struct closure *cl)
     if (dc->sb_disk)
         put_page(virt_to_page(dc->sb_disk));
-    if (!IS_ERR_OR_NULL(dc->bdev))
-        blkdev_put(dc->bdev, dc);
+    if (dc->bdev_handle)
+        bdev_release(dc->bdev_handle);
     wake_up(&unregister_wait);
@@ -1444,7 +1444,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
 /* Cached device - bcache superblock */
 static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
-                         struct block_device *bdev,
+                         struct bdev_handle *bdev_handle,
                          struct cached_dev *dc)
 {
     const char *err = "cannot allocate memory";
@@ -1452,14 +1452,15 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
     int ret = -ENOMEM;
     memcpy(&dc->sb, sb, sizeof(struct cache_sb));
-    dc->bdev = bdev;
+    dc->bdev_handle = bdev_handle;
+    dc->bdev = bdev_handle->bdev;
     dc->sb_disk = sb_disk;
     if (cached_dev_init(dc, sb->block_size << 9))
         goto err;
     err = "error creating kobject";
-    if (kobject_add(&dc->disk.kobj, bdev_kobj(bdev), "bcache"))
+    if (kobject_add(&dc->disk.kobj, bdev_kobj(dc->bdev), "bcache"))
         goto err;
     if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
         goto err;
@@ -2216,8 +2217,8 @@ void bch_cache_release(struct kobject *kobj)
     if (ca->sb_disk)
         put_page(virt_to_page(ca->sb_disk));
-    if (!IS_ERR_OR_NULL(ca->bdev))
-        blkdev_put(ca->bdev, ca);
+    if (ca->bdev_handle)
+        bdev_release(ca->bdev_handle);
     kfree(ca);
     module_put(THIS_MODULE);
@@ -2337,16 +2338,18 @@ static int cache_alloc(struct cache *ca)
 }
 static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
-                          struct block_device *bdev, struct cache *ca)
+                          struct bdev_handle *bdev_handle,
+                          struct cache *ca)
 {
     const char *err = NULL; /* must be set for any error case */
     int ret = 0;
     memcpy(&ca->sb, sb, sizeof(struct cache_sb));
-    ca->bdev = bdev;
+    ca->bdev_handle = bdev_handle;
+    ca->bdev = bdev_handle->bdev;
     ca->sb_disk = sb_disk;
-    if (bdev_max_discard_sectors((bdev)))
+    if (bdev_max_discard_sectors((bdev_handle->bdev)))
         ca->discard = CACHE_DISCARD(&ca->sb);
     ret = cache_alloc(ca);
@@ -2354,10 +2357,10 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
     /*
      * If we failed here, it means ca->kobj is not initialized yet,
      * kobject_put() won't be called and there is no chance to
-     * call blkdev_put() to bdev in bch_cache_release(). So we
-     * explicitly call blkdev_put() here.
+     * call bdev_release() to bdev in bch_cache_release(). So
+     * we explicitly call bdev_release() here.
      */
-    blkdev_put(bdev, ca);
+    bdev_release(bdev_handle);
     if (ret == -ENOMEM)
         err = "cache_alloc(): -ENOMEM";
     else if (ret == -EPERM)
@@ -2367,7 +2370,7 @@
         goto err;
     }
-    if (kobject_add(&ca->kobj, bdev_kobj(bdev), "bcache")) {
+    if (kobject_add(&ca->kobj, bdev_kobj(bdev_handle->bdev), "bcache")) {
         err = "error calling kobject_add";
         ret = -ENOMEM;
         goto out;
@@ -2382,14 +2385,14 @@
         goto out;
     }
-    pr_info("registered cache device %pg\n", ca->bdev);
+    pr_info("registered cache device %pg\n", ca->bdev_handle->bdev);
 out:
     kobject_put(&ca->kobj);
 err:
     if (err)
-        pr_notice("error %pg: %s\n", ca->bdev, err);
+        pr_notice("error %pg: %s\n", ca->bdev_handle->bdev, err);
     return ret;
 }
@@ -2445,7 +2448,7 @@ struct async_reg_args {
     char *path;
     struct cache_sb *sb;
    struct cache_sb_disk *sb_disk;
-    struct block_device *bdev;
+    struct bdev_handle *bdev_handle;
     void *holder;
 };
@@ -2456,8 +2459,8 @@ static void register_bdev_worker(struct work_struct *work)
         container_of(work, struct async_reg_args, reg_work.work);
     mutex_lock(&bch_register_lock);
-    if (register_bdev(args->sb, args->sb_disk, args->bdev, args->holder)
-            < 0)
+    if (register_bdev(args->sb, args->sb_disk, args->bdev_handle,
+                      args->holder) < 0)
         fail = true;
     mutex_unlock(&bch_register_lock);
@@ -2477,7 +2480,8 @@ static void register_cache_worker(struct work_struct *work)
         container_of(work, struct async_reg_args, reg_work.work);
     /* blkdev_put() will be called in bch_cache_release() */
-    if (register_cache(args->sb, args->sb_disk, args->bdev, args->holder))
+    if (register_cache(args->sb, args->sb_disk, args->bdev_handle,
+                       args->holder))
         fail = true;
     if (fail)
@@ -2514,7 +2518,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
     char *path = NULL;
     struct cache_sb *sb;
     struct cache_sb_disk *sb_disk;
-    struct block_device *bdev, *bdev2;
+    struct bdev_handle *bdev_handle, *bdev_handle2;
     void *holder = NULL;
     ssize_t ret;
     bool async_registration = false;
@@ -2547,15 +2551,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
     ret = -EINVAL;
     err = "failed to open device";
-    bdev = blkdev_get_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
-    if (IS_ERR(bdev))
+    bdev_handle = bdev_open_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
+    if (IS_ERR(bdev_handle))
         goto out_free_sb;
     err = "failed to set blocksize";
-    if (set_blocksize(bdev, 4096))
+    if (set_blocksize(bdev_handle->bdev, 4096))
         goto out_blkdev_put;
-    err = read_super(sb, bdev, &sb_disk);
+    err = read_super(sb, bdev_handle->bdev, &sb_disk);
     if (err)
         goto out_blkdev_put;
@@ -2567,13 +2571,13 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
     }
     /* Now reopen in exclusive mode with proper holder */
-    bdev2 = blkdev_get_by_dev(bdev->bd_dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
-                              holder, NULL);
-    blkdev_put(bdev, NULL);
-    bdev = bdev2;
-    if (IS_ERR(bdev)) {
-        ret = PTR_ERR(bdev);
-        bdev = NULL;
+    bdev_handle2 = bdev_open_by_dev(bdev_handle->bdev->bd_dev,
+                                    BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
+    bdev_release(bdev_handle);
+    bdev_handle = bdev_handle2;
+    if (IS_ERR(bdev_handle)) {
+        ret = PTR_ERR(bdev_handle);
+        bdev_handle = NULL;
         if (ret == -EBUSY) {
             dev_t dev;
@@ -2608,7 +2612,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
         args->path = path;
         args->sb = sb;
         args->sb_disk = sb_disk;
-        args->bdev = bdev;
+        args->bdev_handle = bdev_handle;
         args->holder = holder;
         register_device_async(args);
         /* No wait and returns to user space */
@@ -2617,14 +2621,14 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
     if (SB_IS_BDEV(sb)) {
         mutex_lock(&bch_register_lock);
-        ret = register_bdev(sb, sb_disk, bdev, holder);
+        ret = register_bdev(sb, sb_disk, bdev_handle, holder);
         mutex_unlock(&bch_register_lock);
         /* blkdev_put() will be called in cached_dev_free() */
         if (ret < 0)
             goto out_free_sb;
     } else {
         /* blkdev_put() will be called in bch_cache_release() */
-        ret = register_cache(sb, sb_disk, bdev, holder);
+        ret = register_cache(sb, sb_disk, bdev_handle, holder);
         if (ret)
             goto out_free_sb;
     }
@@ -2640,8 +2644,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 out_put_sb_page:
     put_page(virt_to_page(sb_disk));
 out_blkdev_put:
-    if (bdev)
-        blkdev_put(bdev, holder);
+    if (bdev_handle)
+        bdev_release(bdev_handle);
 out_free_sb:
     kfree(sb);
 out_free_path:
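
For readers following the register_bcache() hunks above: bcache opens the device twice, first read-only without an exclusive holder so the on-disk superblock can be probed, then again by dev_t with the real holder once the holder is known, releasing the probe handle in between. The following condensed sketch of that two-step flow under the new API is hypothetical; the function name and error handling are illustrative, not bcache code.

/* Hypothetical condensed sketch of the reopen-with-holder flow shown above. */
#include <linux/blkdev.h>
#include <linux/err.h>

static int probe_and_claim(const char *path, void *holder,
                           struct bdev_handle **ret_handle)
{
	struct bdev_handle *handle, *handle2;

	/* Step 1: read-only, no exclusive holder, just to inspect the device. */
	handle = bdev_open_by_path(path, BLK_OPEN_READ, NULL, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... read and validate the superblock through handle->bdev ... */

	/* Step 2: reopen the same device exclusively with the proper holder. */
	handle2 = bdev_open_by_dev(handle->bdev->bd_dev,
				   BLK_OPEN_READ | BLK_OPEN_WRITE, holder, NULL);
	bdev_release(handle);	/* drop the probe handle either way */
	if (IS_ERR(handle2))
		return PTR_ERR(handle2);

	*ret_handle = handle2;
	return 0;
}
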