Commit 370d2662 authored by Linus Torvalds

Merge tag 'upstream-3.15-rc1' of git://git.infradead.org/linux-ubifs

Pull ubifs updates from Artem Bityutskiy:
 "This pull request includes the 'ubiblock' driver which provides R/O
  block access to UBI volumes.  It is useful for those who want to use
  squashfs on top of raw flash devices.  UBI will provide bit-flip
  handling and wear-levelling in this case (e.g., if there are other UBI
  volumes with R/W UBIFS too).

  The driver is actually pretty small and it is part of the UBI kernel
  subsystem.  Delivered by Ezequiel Garcia, along with a piece of
  documentation on the MTD web site and the user-space tool for creating
  and removing block devices"

* tag 'upstream-3.15-rc1' of git://git.infradead.org/linux-ubifs:
  UBI: block: Remove __initdata from ubiblock_param_ops
  UBI: make UBI_IOCVOLCRBLK take a parameter for future usage
  UBI: rename block device ioctls
  UBI: block: Use ENOSYS as return value when CONFIG_UBIBLOCK=n
  UBI: block: Add CONFIG_BLOCK dependency
  UBI: block: Use 'u64' for the 64-bit dividend
  UBI: block: Mark init-only symbol as __initdata
  UBI: block: do not use term "attach"
  UBI: R/O block driver on top of UBI volumes
parents d15fee81 d56030ac
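
For illustration only, a minimal userspace sketch of driving the two new ioctls (UBI_IOCVOLCRBLK / UBI_IOCVOLRMBLK, defined further down in this diff) on a volume character device might look as follows. It is not the user-space tool mentioned above; the volume node path, the <mtd/ubi-user.h> install location and the error handling are assumptions.

/*
 * Hedged sketch: create a R/O block device for a UBI volume via the new
 * ioctls. Not the official user-space tool; path and header location are
 * assumptions for illustration.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

int main(int argc, char **argv)
{
	const char *node = argc > 1 ? argv[1] : "/dev/ubi0_0";
	struct ubi_blkcreate_req req;
	int fd;

	fd = open(node, O_RDONLY);
	if (fd < 0) {
		perror(node);
		return 1;
	}

	/* The request structure is reserved for future use and must be zeroed */
	memset(&req, 0, sizeof(req));

	if (ioctl(fd, UBI_IOCVOLCRBLK, &req) < 0)
		perror("UBI_IOCVOLCRBLK");
	else
		printf("created ubiblock device for %s\n", node);

	/*
	 * UBI_IOCVOLRMBLK takes no argument and would remove the block
	 * device again: ioctl(fd, UBI_IOCVOLRMBLK);
	 */
	close(fd);
	return 0;
}
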
@@ -87,4 +87,20 @@ config MTD_UBI_GLUEBI
work on top of UBI. Do not enable this unless you use legacy
software.
config MTD_UBI_BLOCK
bool "Read-only block devices on top of UBI volumes"
default n
depends on BLOCK
help
This option enables support for read-only UBI block devices. UBI block
devices will be layered on top of UBI volumes, which means that the
UBI driver will transparently handle things like bad eraseblocks and
bit-flips. You can put any block-oriented file system on top of UBI
volumes in read-only mode (e.g., ext4), but this is probably most
practical for read-only file systems, such as squashfs.
When selected, this feature will be built into the UBI driver.
If in doubt, say "N".
endif # MTD_UBI
@@ -3,5 +3,6 @@ obj-$(CONFIG_MTD_UBI) += ubi.o
ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
ubi-y += misc.o debug.o
ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
/*
* Copyright (c) 2014 Ezequiel Garcia
* Copyright (c) 2011 Free Electrons
*
* Driver parameter handling strongly based on drivers/mtd/ubi/build.c
* Copyright (c) International Business Machines Corp., 2006
* Copyright (c) Nokia Corporation, 2007
* Authors: Artem Bityutskiy, Frank Haverkamp
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*/
/*
* Read-only block devices on top of UBI volumes
*
* A simple implementation to allow a block device to be layered on top of a
* UBI volume. The implementation is provided by creating a static 1-to-1
* mapping between the block device and the UBI volume.
*
* The addressed byte is obtained from the addressed block sector, which is
* mapped linearly into the corresponding LEB:
*
* LEB number = addressed byte / LEB size
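* LEB offset = addressed byte % LEB size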
*
* This feature is compiled into the UBI core, and adds a 'block' parameter
* to allow early creation of block devices on top of UBI volumes. Runtime
* block creation/removal for UBI volumes is provided through two UBI ioctls:
* UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <asm/div64.h>
#include "ubi-media.h"
#include "ubi.h"
/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32
/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63
/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2
struct ubiblock_param {
int ubi_num;
int vol_id;
char name[UBIBLOCK_PARAM_LEN+1];
};
/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;
/* UBI block device specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
struct ubiblock {
struct ubi_volume_desc *desc;
int ubi_num;
int vol_id;
int refcnt;
int leb_size;
struct gendisk *gd;
struct request_queue *rq;
struct workqueue_struct *wq;
struct work_struct work;
struct mutex dev_mutex;
spinlock_t queue_lock;
struct list_head list;
};
/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;
static int __init ubiblock_set_param(const char *val,
const struct kernel_param *kp)
{
int i, ret;
size_t len;
struct ubiblock_param *param;
char buf[UBIBLOCK_PARAM_LEN];
char *pbuf = &buf[0];
char *tokens[UBIBLOCK_PARAM_COUNT];
if (!val)
return -EINVAL;
len = strnlen(val, UBIBLOCK_PARAM_LEN);
if (len == 0) {
ubi_warn("block: empty 'block=' parameter - ignored\n");
return 0;
}
if (len == UBIBLOCK_PARAM_LEN) {
ubi_err("block: parameter \"%s\" is too long, max. is %d\n",
val, UBIBLOCK_PARAM_LEN);
return -EINVAL;
}
strcpy(buf, val);
/* Get rid of the final newline */
if (buf[len - 1] == '\n')
buf[len - 1] = '\0';
for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
tokens[i] = strsep(&pbuf, ",");
param = &ubiblock_param[ubiblock_devs];
if (tokens[1]) {
/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
ret = kstrtoint(tokens[0], 10, &param->ubi_num);
if (ret < 0)
return -EINVAL;
/* Second param can be a number or a name */
ret = kstrtoint(tokens[1], 10, &param->vol_id);
if (ret < 0) {
param->vol_id = -1;
strcpy(param->name, tokens[1]);
}
} else {
/* One parameter: must be device path */
strcpy(param->name, tokens[0]);
param->ubi_num = -1;
param->vol_id = -1;
}
ubiblock_devs++;
return 0;
}
static struct kernel_param_ops ubiblock_param_ops = {
.set = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
"Multiple \"block\" parameters may be specified.\n"
"UBI volumes may be specified by their number, name, or path to the device node.\n"
"Examples\n"
"Using the UBI volume path:\n"
"ubi.block=/dev/ubi0_0\n"
"Using the UBI device, and the volume name:\n"
"ubi.block=0,rootfs\n"
"Using both UBI device number and UBI volume number:\n"
"ubi.block=0,0\n");
static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
struct ubiblock *dev;
list_for_each_entry(dev, &ubiblock_devices, list)
if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
return dev;
return NULL;
}
static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
int leb, int offset, int len)
{
int ret;
ret = ubi_read(dev->desc, leb, buffer, offset, len);
if (ret) {
ubi_err("%s ubi_read error %d",
dev->gd->disk_name, ret);
return ret;
}
return 0;
}
static int ubiblock_read(struct ubiblock *dev, char *buffer,
sector_t sec, int len)
{
int ret, leb, offset;
int bytes_left = len;
int to_read = len;
u64 pos = sec << 9;
/* Get LEB:offset address to read from */
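/* do_div() updates 'pos' in place to the quotient and returns the remainder */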
offset = do_div(pos, dev->leb_size);
leb = pos;
while (bytes_left) {
/*
* We can only read one LEB at a time. Therefore if the read
* length is larger than one LEB size, we split the operation.
*/
if (offset + to_read > dev->leb_size)
to_read = dev->leb_size - offset;
ret = ubiblock_read_to_buf(dev, buffer, leb, offset, to_read);
if (ret)
return ret;
buffer += to_read;
bytes_left -= to_read;
to_read = bytes_left;
leb += 1;
offset = 0;
}
return 0;
}
static int do_ubiblock_request(struct ubiblock *dev, struct request *req)
{
int len, ret;
sector_t sec;
if (req->cmd_type != REQ_TYPE_FS)
return -EIO;
if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
get_capacity(req->rq_disk))
return -EIO;
if (rq_data_dir(req) != READ)
return -ENOSYS; /* Write not implemented */
sec = blk_rq_pos(req);
len = blk_rq_cur_bytes(req);
/*
* Let's prevent the device from being removed while we're doing I/O
* work. Notice that this means we serialize all the I/O operations,
* but it's probably of no impact given the NAND core serializes
* flash access anyway.
*/
mutex_lock(&dev->dev_mutex);
ret = ubiblock_read(dev, req->buffer, sec, len);
mutex_unlock(&dev->dev_mutex);
return ret;
}
static void ubiblock_do_work(struct work_struct *work)
{
struct ubiblock *dev =
container_of(work, struct ubiblock, work);
struct request_queue *rq = dev->rq;
struct request *req;
int res;
spin_lock_irq(rq->queue_lock);
req = blk_fetch_request(rq);
while (req) {
spin_unlock_irq(rq->queue_lock);
res = do_ubiblock_request(dev, req);
spin_lock_irq(rq->queue_lock);
/*
* If we're done with this request,
* we need to fetch a new one
*/
if (!__blk_end_request_cur(req, res))
req = blk_fetch_request(rq);
}
spin_unlock_irq(rq->queue_lock);
}
static void ubiblock_request(struct request_queue *rq)
{
struct ubiblock *dev;
struct request *req;
dev = rq->queuedata;
if (!dev)
while ((req = blk_fetch_request(rq)) != NULL)
__blk_end_request_all(req, -ENODEV);
else
queue_work(dev->wq, &dev->work);
}
static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
struct ubiblock *dev = bdev->bd_disk->private_data;
int ret;
mutex_lock(&dev->dev_mutex);
if (dev->refcnt > 0) {
/*
* The volume is already open, just increase the reference
* counter.
*/
goto out_done;
}
/*
* We want users to be aware they should only mount us as read-only.
* It's just a paranoid check, as write requests will get rejected
* in any case.
*/
if (mode & FMODE_WRITE) {
ret = -EPERM;
goto out_unlock;
}
dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
if (IS_ERR(dev->desc)) {
ubi_err("%s failed to open ubi volume %d_%d",
dev->gd->disk_name, dev->ubi_num, dev->vol_id);
ret = PTR_ERR(dev->desc);
dev->desc = NULL;
goto out_unlock;
}
out_done:
dev->refcnt++;
mutex_unlock(&dev->dev_mutex);
return 0;
out_unlock:
mutex_unlock(&dev->dev_mutex);
return ret;
}
static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
struct ubiblock *dev = gd->private_data;
mutex_lock(&dev->dev_mutex);
dev->refcnt--;
if (dev->refcnt == 0) {
ubi_close_volume(dev->desc);
dev->desc = NULL;
}
mutex_unlock(&dev->dev_mutex);
}
static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
/* Some tools might require this information */
geo->heads = 1;
geo->cylinders = 1;
geo->sectors = get_capacity(bdev->bd_disk);
geo->start = 0;
return 0;
}
static const struct block_device_operations ubiblock_ops = {
.owner = THIS_MODULE,
.open = ubiblock_open,
.release = ubiblock_release,
.getgeo = ubiblock_getgeo,
};
int ubiblock_create(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
struct gendisk *gd;
int disk_capacity;
int ret;
/* Check that the volume isn't already handled */
mutex_lock(&devices_mutex);
if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
mutex_unlock(&devices_mutex);
return -EEXIST;
}
mutex_unlock(&devices_mutex);
dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
if (!dev)
return -ENOMEM;
mutex_init(&dev->dev_mutex);
dev->ubi_num = vi->ubi_num;
dev->vol_id = vi->vol_id;
dev->leb_size = vi->usable_leb_size;
/* Initialize the gendisk of this ubiblock device */
gd = alloc_disk(1);
if (!gd) {
ubi_err("block: alloc_disk failed");
ret = -ENODEV;
goto out_free_dev;
}
gd->fops = &ubiblock_ops;
gd->major = ubiblock_major;
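/* One minor per possible (UBI device, volume) pair gives each volume a stable minor number */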
gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id;
gd->private_data = dev;
sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
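/* Capacity in 512-byte sectors: number of LEBs times the usable LEB size */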
disk_capacity = (vi->size * vi->usable_leb_size) >> 9;
set_capacity(gd, disk_capacity);
dev->gd = gd;
spin_lock_init(&dev->queue_lock);
dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock);
if (!dev->rq) {
ubi_err("block: blk_init_queue failed");
ret = -ENODEV;
goto out_put_disk;
}
dev->rq->queuedata = dev;
dev->gd->queue = dev->rq;
/*
* Create one workqueue per volume (per registered block device).
* Remember that workqueues are cheap, they're not threads.
*/
dev->wq = alloc_workqueue(gd->disk_name, 0, 0);
if (!dev->wq) {
ret = -ENOMEM;
goto out_free_queue;
}
INIT_WORK(&dev->work, ubiblock_do_work);
mutex_lock(&devices_mutex);
list_add_tail(&dev->list, &ubiblock_devices);
mutex_unlock(&devices_mutex);
/* Must be the last step: anyone can call file ops from now on */
add_disk(dev->gd);
ubi_msg("%s created from ubi%d:%d(%s)",
dev->gd->disk_name, dev->ubi_num, dev->vol_id, vi->name);
return 0;
out_free_queue:
blk_cleanup_queue(dev->rq);
out_put_disk:
put_disk(dev->gd);
out_free_dev:
kfree(dev);
return ret;
}
static void ubiblock_cleanup(struct ubiblock *dev)
{
del_gendisk(dev->gd);
blk_cleanup_queue(dev->rq);
ubi_msg("%s released", dev->gd->disk_name);
put_disk(dev->gd);
}
int ubiblock_remove(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
mutex_lock(&devices_mutex);
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
mutex_unlock(&devices_mutex);
return -ENODEV;
}
/* Found a device, let's lock it so we can check if it's busy */
mutex_lock(&dev->dev_mutex);
if (dev->refcnt > 0) {
mutex_unlock(&dev->dev_mutex);
mutex_unlock(&devices_mutex);
return -EBUSY;
}
/* Remove from device list */
list_del(&dev->list);
mutex_unlock(&devices_mutex);
/* Flush pending work and stop this workqueue */
destroy_workqueue(dev->wq);
ubiblock_cleanup(dev);
mutex_unlock(&dev->dev_mutex);
kfree(dev);
return 0;
}
static void ubiblock_resize(struct ubi_volume_info *vi)
{
struct ubiblock *dev;
int disk_capacity;
/*
* Need to lock the device list until we stop using the device,
* otherwise the device struct might get released in
* 'ubiblock_remove()'.
*/
mutex_lock(&devices_mutex);
dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
if (!dev) {
mutex_unlock(&devices_mutex);
return;
}
mutex_lock(&dev->dev_mutex);
disk_capacity = (vi->size * vi->usable_leb_size) >> 9;
set_capacity(dev->gd, disk_capacity);
ubi_msg("%s resized to %d LEBs", dev->gd->disk_name, vi->size);
mutex_unlock(&dev->dev_mutex);
mutex_unlock(&devices_mutex);
}
static int ubiblock_notify(struct notifier_block *nb,
unsigned long notification_type, void *ns_ptr)
{
struct ubi_notification *nt = ns_ptr;
switch (notification_type) {
case UBI_VOLUME_ADDED:
/*
* We want to enforce explicit block device creation for
* volumes, so when a volume is added we do nothing.
*/
break;
case UBI_VOLUME_REMOVED:
ubiblock_remove(&nt->vi);
break;
case UBI_VOLUME_RESIZED:
ubiblock_resize(&nt->vi);
break;
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block ubiblock_notifier = {
.notifier_call = ubiblock_notify,
};
static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
if (ubi_num == -1)
/* No ubi num, name must be a vol device path */
return ubi_open_volume_path(name, UBI_READONLY);
else if (vol_id == -1)
/* No vol_id, must be vol_name */
return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
else
return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}
static int __init ubiblock_create_from_param(void)
{
int i, ret = 0;
struct ubiblock_param *p;
struct ubi_volume_desc *desc;
struct ubi_volume_info vi;
for (i = 0; i < ubiblock_devs; i++) {
p = &ubiblock_param[i];
desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
if (IS_ERR(desc)) {
ubi_err("block: can't open volume, err=%ld\n",
PTR_ERR(desc));
ret = PTR_ERR(desc);
break;
}
ubi_get_volume_info(desc, &vi);
ubi_close_volume(desc);
ret = ubiblock_create(&vi);
if (ret) {
ubi_err("block: can't add '%s' volume, err=%d\n",
vi.name, ret);
break;
}
}
return ret;
}
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
struct ubiblock *dev;
list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
/* Flush pending work and stop workqueue */
destroy_workqueue(dev->wq);
/* The module is being forcefully removed */
WARN_ON(dev->desc);
/* Remove from device list */
list_del(&dev->list);
ubiblock_cleanup(dev);
kfree(dev);
}
}
int __init ubiblock_init(void)
{
int ret;
ubiblock_major = register_blkdev(0, "ubiblock");
if (ubiblock_major < 0)
return ubiblock_major;
/* Attach block devices from 'block=' module param */
ret = ubiblock_create_from_param();
if (ret)
goto err_remove;
/*
* Block devices are only created upon user requests, so we ignore
* existing volumes.
*/
ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
if (ret)
goto err_unreg;
return 0;
err_unreg:
unregister_blkdev(ubiblock_major, "ubiblock");
err_remove:
ubiblock_remove_all();
return ret;
}
void __exit ubiblock_exit(void)
{
ubi_unregister_volume_notifier(&ubiblock_notifier);
ubiblock_remove_all();
unregister_blkdev(ubiblock_major, "ubiblock");
}
@@ -1298,6 +1298,15 @@ static int __init ubi_init(void)
}
}
err = ubiblock_init();
if (err) {
ubi_err("block: cannot initialize, error %d", err);
/* See the comment above regarding ubi_is_module(). */
if (ubi_is_module())
goto out_detach;
}
return 0;
out_detach:
@@ -1326,6 +1335,8 @@ static void __exit ubi_exit(void)
{
int i;
ubiblock_exit();
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {
mutex_lock(&ubi_devices_mutex);
......
@@ -561,6 +561,26 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
break;
}
/* Create a R/O block device on top of the UBI volume */
case UBI_IOCVOLCRBLK:
{
struct ubi_volume_info vi;
ubi_get_volume_info(desc, &vi);
err = ubiblock_create(&vi);
break;
}
/* Remove the R/O block device */
case UBI_IOCVOLRMBLK:
{
struct ubi_volume_info vi;
ubi_get_volume_info(desc, &vi);
err = ubiblock_remove(&vi);
break;
}
default:
err = -ENOTTY;
break;
......
@@ -864,6 +864,26 @@ int ubi_update_fastmap(struct ubi_device *ubi);
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
int fm_anchor);
/* block.c */
#ifdef CONFIG_MTD_UBI_BLOCK
int ubiblock_init(void);
void ubiblock_exit(void);
int ubiblock_create(struct ubi_volume_info *vi);
int ubiblock_remove(struct ubi_volume_info *vi);
#else
static inline int ubiblock_init(void) { return 0; }
static inline void ubiblock_exit(void) {}
static inline int ubiblock_create(struct ubi_volume_info *vi)
{
return -ENOSYS;
}
static inline int ubiblock_remove(struct ubi_volume_info *vi)
{
return -ENOSYS;
}
#endif
/*
* ubi_rb_for_each_entry - walk an RB-tree.
* @rb: a pointer to type 'struct rb_node' to use as a loop counter
......
@@ -134,6 +134,16 @@
* used. A pointer to a &struct ubi_set_vol_prop_req object is expected to be
* passed. The object describes which property should be set, and to which value
* it should be set.
*
* Block devices on UBI volumes
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* To create a R/O block device on top of an UBI volume, the %UBI_IOCVOLCRBLK
* ioctl should be used. A pointer to a &struct ubi_blkcreate_req object is
* expected to be passed; the object is currently unused and reserved for
* future use.
*
* Conversely, to remove a block device, the %UBI_IOCVOLRMBLK ioctl should be
* used; it takes no arguments.
*/
/*
@@ -191,6 +201,10 @@
/* Set an UBI volume property */
#define UBI_IOCSETVOLPROP _IOW(UBI_VOL_IOC_MAGIC, 6, \
struct ubi_set_vol_prop_req)
/* Create a R/O block device on top of an UBI volume */
#define UBI_IOCVOLCRBLK _IOW(UBI_VOL_IOC_MAGIC, 7, struct ubi_blkcreate_req)
/* Remove the R/O block device */
#define UBI_IOCVOLRMBLK _IO(UBI_VOL_IOC_MAGIC, 8)
/* Maximum MTD device name length supported by UBI */
#define MAX_UBI_MTD_NAME_LEN 127
@@ -420,4 +434,12 @@ struct ubi_set_vol_prop_req {
__u64 value;
} __packed;
/**
* struct ubi_blkcreate_req - a data structure used in block creation requests.
* @padding: reserved for future, not used, has to be zeroed
*/
struct ubi_blkcreate_req {
__s8 padding[128];
} __packed;
#endif /* __UBI_USER_H__ */