Commit 232dd599 authored by Linus Torvalds

Merge tag 'zonefs-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs

Pull zonefs updates from Damien Le Moal:

 - Reorganize zonefs code to split file related operations to a new
   fs/zonefs/file.c file (me)

 - Modify zonefs to use dynamically allocated inodes and dentries (using
   the inode and dentry caches) instead of statically allocating
   everything on mount. This saves a significant amount of memory for
   very large zoned block devices with 10s of thousands of zones (me)

 - Make zonefs_sb_ktype a const struct kobj_type (Thomas)

* tag 'zonefs-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs:
  zonefs: make kobj_type structure constant
  zonefs: Cache zone group directory inodes
  zonefs: Dynamically create file inodes when needed
  zonefs: Separate zone information from inode information
  zonefs: Reduce struct zonefs_inode_info size
  zonefs: Simplify IO error handling
  zonefs: Reorganize code
parents b7ee8812 2b188a2c
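The memory saving described in the second bullet comes from creating file inodes on demand: a lookup in a zone group directory derives an inode number from the zone's start sector and only initializes an inode when the inode cache does not already hold it. A minimal sketch of the pattern, condensed from zonefs_get_file_inode() in the fs/zonefs/super.c diff below (the helper name and the trimmed error handling are this sketch's, not the commit's):

/* Sketch only: on-demand inode creation keyed by zone start sector. */
static struct inode *zone_file_inode(struct super_block *sb,
struct zonefs_zone *z, ino_t ino)
{
struct inode *inode = iget_locked(sb, ino); /* cached, or new and locked */

if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode; /* cache hit: nothing to initialize */

/* First lookup of this zone: populate from the zone descriptor. */
inode->i_private = z;
inode->i_size = z->z_wpoffset;
unlock_new_inode(inode);
return inode;
}

Unused zones thus cost no inode or dentry memory until first looked up, and the VFS can reclaim these inodes under memory pressure like any others.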
fs/zonefs/Makefile:

@@ -3,4 +3,4 @@ ccflags-y += -I$(src)

 obj-$(CONFIG_ZONEFS_FS) += zonefs.o

-zonefs-y := super.o sysfs.o
+zonefs-y := super.o file.o sysfs.o

fs/zonefs/file.c (new file):
// SPDX-License-Identifier: GPL-2.0
/*
* Simple file system for zoned block devices exposing zones as files.
*
* Copyright (C) 2022 Western Digital Corporation or its affiliates.
*/
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/statfs.h>
#include <linux/writeback.h>
#include <linux/quotaops.h>
#include <linux/seq_file.h>
#include <linux/parser.h>
#include <linux/uio.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/task_io_accounting_ops.h>
#include "zonefs.h"
#include "trace.h"
static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
loff_t length, unsigned int flags,
struct iomap *iomap, struct iomap *srcmap)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
loff_t isize;
/*
* All blocks are always mapped below EOF. If reading past EOF,
* act as if there is a hole up to the file maximum size.
*/
mutex_lock(&zi->i_truncate_mutex);
iomap->bdev = inode->i_sb->s_bdev;
iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
isize = i_size_read(inode);
if (iomap->offset >= isize) {
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
iomap->length = length;
} else {
iomap->type = IOMAP_MAPPED;
iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
iomap->length = isize - iomap->offset;
}
mutex_unlock(&zi->i_truncate_mutex);
trace_zonefs_iomap_begin(inode, iomap);
return 0;
}
static const struct iomap_ops zonefs_read_iomap_ops = {
.iomap_begin = zonefs_read_iomap_begin,
};
static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
loff_t length, unsigned int flags,
struct iomap *iomap, struct iomap *srcmap)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
loff_t isize;
/* All write I/Os should always be within the file maximum size */
if (WARN_ON_ONCE(offset + length > z->z_capacity))
return -EIO;
/*
* Sequential zones can only accept direct writes. This is already
* checked when writes are issued, so warn if we see a page writeback
* operation.
*/
if (WARN_ON_ONCE(zonefs_zone_is_seq(z) && !(flags & IOMAP_DIRECT)))
return -EIO;
/*
* For conventional zones, all blocks are always mapped. For sequential
* zones, all blocks are always mapped below the inode size (zone
* write pointer) and unwritten beyond.
*/
mutex_lock(&zi->i_truncate_mutex);
iomap->bdev = inode->i_sb->s_bdev;
iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
isize = i_size_read(inode);
if (iomap->offset >= isize) {
iomap->type = IOMAP_UNWRITTEN;
iomap->length = z->z_capacity - iomap->offset;
} else {
iomap->type = IOMAP_MAPPED;
iomap->length = isize - iomap->offset;
}
mutex_unlock(&zi->i_truncate_mutex);
trace_zonefs_iomap_begin(inode, iomap);
return 0;
}
static const struct iomap_ops zonefs_write_iomap_ops = {
.iomap_begin = zonefs_write_iomap_begin,
};
static int zonefs_read_folio(struct file *unused, struct folio *folio)
{
return iomap_read_folio(folio, &zonefs_read_iomap_ops);
}
static void zonefs_readahead(struct readahead_control *rac)
{
iomap_readahead(rac, &zonefs_read_iomap_ops);
}
/*
* Map blocks for page writeback. This is used only on conventional zone files,
* which implies that the page range can only be within the fixed inode size.
*/
static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
struct inode *inode, loff_t offset)
{
struct zonefs_zone *z = zonefs_inode_zone(inode);
if (WARN_ON_ONCE(zonefs_zone_is_seq(z)))
return -EIO;
if (WARN_ON_ONCE(offset >= i_size_read(inode)))
return -EIO;
/* If the mapping is already OK, nothing needs to be done */
if (offset >= wpc->iomap.offset &&
offset < wpc->iomap.offset + wpc->iomap.length)
return 0;
return zonefs_write_iomap_begin(inode, offset,
z->z_capacity - offset,
IOMAP_WRITE, &wpc->iomap, NULL);
}
static const struct iomap_writeback_ops zonefs_writeback_ops = {
.map_blocks = zonefs_write_map_blocks,
};
static int zonefs_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct iomap_writepage_ctx wpc = { };
return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
}
static int zonefs_swap_activate(struct swap_info_struct *sis,
struct file *swap_file, sector_t *span)
{
struct inode *inode = file_inode(swap_file);
if (zonefs_inode_is_seq(inode)) {
zonefs_err(inode->i_sb,
"swap file: not a conventional zone file\n");
return -EINVAL;
}
return iomap_swapfile_activate(sis, swap_file, span,
&zonefs_read_iomap_ops);
}
const struct address_space_operations zonefs_file_aops = {
.read_folio = zonefs_read_folio,
.readahead = zonefs_readahead,
.writepages = zonefs_writepages,
.dirty_folio = filemap_dirty_folio,
.release_folio = iomap_release_folio,
.invalidate_folio = iomap_invalidate_folio,
.migrate_folio = filemap_migrate_folio,
.is_partially_uptodate = iomap_is_partially_uptodate,
.error_remove_page = generic_error_remove_page,
.direct_IO = noop_direct_IO,
.swap_activate = zonefs_swap_activate,
};
int zonefs_file_truncate(struct inode *inode, loff_t isize)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
loff_t old_isize;
enum req_op op;
int ret = 0;
/*
* Only sequential zone files can be truncated and truncation is allowed
* only down to a 0 size, which is equivalent to a zone reset, and to
* the maximum file size, which is equivalent to a zone finish.
*/
if (!zonefs_zone_is_seq(z))
return -EPERM;
if (!isize)
op = REQ_OP_ZONE_RESET;
else if (isize == z->z_capacity)
op = REQ_OP_ZONE_FINISH;
else
return -EPERM;
inode_dio_wait(inode);
/* Serialize against page faults */
filemap_invalidate_lock(inode->i_mapping);
/* Serialize against zonefs_iomap_begin() */
mutex_lock(&zi->i_truncate_mutex);
old_isize = i_size_read(inode);
if (isize == old_isize)
goto unlock;
ret = zonefs_inode_zone_mgmt(inode, op);
if (ret)
goto unlock;
/*
* If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
* take care of open zones.
*/
if (z->z_flags & ZONEFS_ZONE_OPEN) {
/*
* Truncating a zone to EMPTY or FULL is the equivalent of
* closing the zone. For a truncation to 0, we need to
* re-open the zone to ensure new writes can be processed.
* For a truncation to the maximum file size, the zone is
* closed and writes cannot be accepted anymore, so clear
* the open flag.
*/
if (!isize)
ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
else
z->z_flags &= ~ZONEFS_ZONE_OPEN;
}
zonefs_update_stats(inode, isize);
truncate_setsize(inode, isize);
z->z_wpoffset = isize;
zonefs_inode_account_active(inode);
unlock:
mutex_unlock(&zi->i_truncate_mutex);
filemap_invalidate_unlock(inode->i_mapping);
return ret;
}
static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
struct inode *inode = file_inode(file);
int ret = 0;
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
/*
* Since only direct writes are allowed in sequential files, page cache
* flush is needed only for conventional zone files.
*/
if (zonefs_inode_is_cnv(inode))
ret = file_write_and_wait_range(file, start, end);
if (!ret)
ret = blkdev_issue_flush(inode->i_sb->s_bdev);
if (ret)
zonefs_io_error(inode, true);
return ret;
}
static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
vm_fault_t ret;
if (unlikely(IS_IMMUTABLE(inode)))
return VM_FAULT_SIGBUS;
/*
* Sanity check: only conventional zone files can have shared
* writeable mappings.
*/
if (zonefs_inode_is_seq(inode))
return VM_FAULT_NOPAGE;
sb_start_pagefault(inode->i_sb);
file_update_time(vmf->vma->vm_file);
/* Serialize against truncates */
filemap_invalidate_lock_shared(inode->i_mapping);
ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
filemap_invalidate_unlock_shared(inode->i_mapping);
sb_end_pagefault(inode->i_sb);
return ret;
}
static const struct vm_operations_struct zonefs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = zonefs_filemap_page_mkwrite,
};
static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
/*
* Conventional zones accept random writes, so their files can support
* shared writable mappings. For sequential zone files, only read
* mappings are possible since there are no guarantees for write
* ordering between msync() and page cache writeback.
*/
if (zonefs_inode_is_seq(file_inode(file)) &&
(vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
return -EINVAL;
file_accessed(file);
vma->vm_ops = &zonefs_file_vm_ops;
return 0;
}
static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
{
loff_t isize = i_size_read(file_inode(file));
/*
* Seeks are limited to below the zone size for conventional zones
* and below the zone write pointer for sequential zones. In both
* cases, this limit is the inode size.
*/
return generic_file_llseek_size(file, offset, whence, isize, isize);
}
static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
int error, unsigned int flags)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
if (error) {
zonefs_io_error(inode, true);
return error;
}
if (size && zonefs_inode_is_seq(inode)) {
/*
* Note that we may be seeing completions out of order,
* but that is not a problem since a write completed
* successfully necessarily means that all preceding writes
* were also successful. So we can safely increase the inode
* size to the write end location.
*/
mutex_lock(&zi->i_truncate_mutex);
if (i_size_read(inode) < iocb->ki_pos + size) {
zonefs_update_stats(inode, iocb->ki_pos + size);
zonefs_i_size_write(inode, iocb->ki_pos + size);
}
mutex_unlock(&zi->i_truncate_mutex);
}
return 0;
}
static const struct iomap_dio_ops zonefs_write_dio_ops = {
.end_io = zonefs_file_write_dio_end_io,
};
static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct block_device *bdev = inode->i_sb->s_bdev;
unsigned int max = bdev_max_zone_append_sectors(bdev);
struct bio *bio;
ssize_t size;
int nr_pages;
ssize_t ret;
max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
iov_iter_truncate(from, max);
nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
if (!nr_pages)
return 0;
bio = bio_alloc(bdev, nr_pages,
REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
bio->bi_iter.bi_sector = z->z_sector;
bio->bi_ioprio = iocb->ki_ioprio;
if (iocb_is_dsync(iocb))
bio->bi_opf |= REQ_FUA;
ret = bio_iov_iter_get_pages(bio, from);
if (unlikely(ret))
goto out_release;
size = bio->bi_iter.bi_size;
task_io_account_write(size);
if (iocb->ki_flags & IOCB_HIPRI)
bio_set_polled(bio, iocb);
ret = submit_bio_wait(bio);
/*
* If the file zone was written underneath the file system, the zone
* write pointer may not be where we expect it to be, but the zone
* append write can still succeed. So check manually that we wrote where
* we intended to, that is, at z->z_wpoffset.
*/
if (!ret) {
sector_t wpsector =
z->z_sector + (z->z_wpoffset >> SECTOR_SHIFT);
if (bio->bi_iter.bi_sector != wpsector) {
zonefs_warn(inode->i_sb,
"Corrupted write pointer %llu for zone at %llu\n",
wpsector, z->z_sector);
ret = -EIO;
}
}
zonefs_file_write_dio_end_io(iocb, size, ret, 0);
trace_zonefs_file_dio_append(inode, size, ret);
out_release:
bio_release_pages(bio, false);
bio_put(bio);
if (ret >= 0) {
iocb->ki_pos += size;
return size;
}
return ret;
}
/*
* Do not exceed the LFS limits nor the file zone size. If pos is under the
* limit it becomes a short access. If it exceeds the limit, return -EFBIG.
*/
static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
loff_t count)
{
struct inode *inode = file_inode(file);
struct zonefs_zone *z = zonefs_inode_zone(inode);
loff_t limit = rlimit(RLIMIT_FSIZE);
loff_t max_size = z->z_capacity;
if (limit != RLIM_INFINITY) {
if (pos >= limit) {
send_sig(SIGXFSZ, current, 0);
return -EFBIG;
}
count = min(count, limit - pos);
}
if (!(file->f_flags & O_LARGEFILE))
max_size = min_t(loff_t, MAX_NON_LFS, max_size);
if (unlikely(pos >= max_size))
return -EFBIG;
return min(count, max_size - pos);
}
static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
loff_t count;
if (IS_SWAPFILE(inode))
return -ETXTBSY;
if (!iov_iter_count(from))
return 0;
if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
return -EINVAL;
if (iocb->ki_flags & IOCB_APPEND) {
if (zonefs_zone_is_cnv(z))
return -EINVAL;
mutex_lock(&zi->i_truncate_mutex);
iocb->ki_pos = z->z_wpoffset;
mutex_unlock(&zi->i_truncate_mutex);
}
count = zonefs_write_check_limits(file, iocb->ki_pos,
iov_iter_count(from));
if (count < 0)
return count;
iov_iter_truncate(from, count);
return iov_iter_count(from);
}
/*
* Handle direct writes. For sequential zone files, this is the only possible
* write path. For these files, check that the user is issuing writes
* sequentially from the end of the file. This code assumes that the block layer
* delivers write requests to the device in sequential order. This is always the
* case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
* elevator feature is being used (e.g. mq-deadline). The block layer
* automatically selects such an elevator for zoned block devices during
* device initialization.
*/
static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
bool sync = is_sync_kiocb(iocb);
bool append = false;
ssize_t ret, count;
/*
* For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
* as this can cause write reordering (e.g. the first aio gets EAGAIN
* on the inode lock but the second goes through but is now unaligned).
*/
if (zonefs_zone_is_seq(z) && !sync && (iocb->ki_flags & IOCB_NOWAIT))
return -EOPNOTSUPP;
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock(inode))
return -EAGAIN;
} else {
inode_lock(inode);
}
count = zonefs_write_checks(iocb, from);
if (count <= 0) {
ret = count;
goto inode_unlock;
}
if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
ret = -EINVAL;
goto inode_unlock;
}
/* Enforce sequential writes (append only) in sequential zones */
if (zonefs_zone_is_seq(z)) {
mutex_lock(&zi->i_truncate_mutex);
if (iocb->ki_pos != z->z_wpoffset) {
mutex_unlock(&zi->i_truncate_mutex);
ret = -EINVAL;
goto inode_unlock;
}
mutex_unlock(&zi->i_truncate_mutex);
append = sync;
}
if (append)
ret = zonefs_file_dio_append(iocb, from);
else
ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
&zonefs_write_dio_ops, 0, NULL, 0);
if (zonefs_zone_is_seq(z) &&
(ret > 0 || ret == -EIOCBQUEUED)) {
if (ret > 0)
count = ret;
/*
* Update the zone write pointer offset assuming the write
* operation succeeded. If it did not, the error recovery path
* will correct it. Also do active seq file accounting.
*/
mutex_lock(&zi->i_truncate_mutex);
z->z_wpoffset += count;
zonefs_inode_account_active(inode);
mutex_unlock(&zi->i_truncate_mutex);
}
inode_unlock:
inode_unlock(inode);
return ret;
}
static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
ssize_t ret;
/*
* Direct IO writes are mandatory for sequential zone files so that the
* write IO issuing order is preserved.
*/
if (zonefs_inode_is_seq(inode))
return -EIO;
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock(inode))
return -EAGAIN;
} else {
inode_lock(inode);
}
ret = zonefs_write_checks(iocb, from);
if (ret <= 0)
goto inode_unlock;
ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
if (ret > 0)
iocb->ki_pos += ret;
else if (ret == -EIO)
zonefs_io_error(inode, true);
inode_unlock:
inode_unlock(inode);
if (ret > 0)
ret = generic_write_sync(iocb, ret);
return ret;
}
static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct zonefs_zone *z = zonefs_inode_zone(inode);
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
if (sb_rdonly(inode->i_sb))
return -EROFS;
/* Write operations beyond the zone capacity are not allowed */
if (iocb->ki_pos >= z->z_capacity)
return -EFBIG;
if (iocb->ki_flags & IOCB_DIRECT) {
ssize_t ret = zonefs_file_dio_write(iocb, from);
if (ret != -ENOTBLK)
return ret;
}
return zonefs_file_buffered_write(iocb, from);
}
static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
int error, unsigned int flags)
{
if (error) {
zonefs_io_error(file_inode(iocb->ki_filp), false);
return error;
}
return 0;
}
static const struct iomap_dio_ops zonefs_read_dio_ops = {
.end_io = zonefs_file_read_dio_end_io,
};
static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
loff_t isize;
ssize_t ret;
/* Offline zones cannot be read */
if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
return -EPERM;
if (iocb->ki_pos >= z->z_capacity)
return 0;
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock_shared(inode))
return -EAGAIN;
} else {
inode_lock_shared(inode);
}
/* Limit read operations to written data */
mutex_lock(&zi->i_truncate_mutex);
isize = i_size_read(inode);
if (iocb->ki_pos >= isize) {
mutex_unlock(&zi->i_truncate_mutex);
ret = 0;
goto inode_unlock;
}
iov_iter_truncate(to, isize - iocb->ki_pos);
mutex_unlock(&zi->i_truncate_mutex);
if (iocb->ki_flags & IOCB_DIRECT) {
size_t count = iov_iter_count(to);
if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
ret = -EINVAL;
goto inode_unlock;
}
file_accessed(iocb->ki_filp);
ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
&zonefs_read_dio_ops, 0, NULL, 0);
} else {
ret = generic_file_read_iter(iocb, to);
if (ret == -EIO)
zonefs_io_error(inode, false);
}
inode_unlock:
inode_unlock_shared(inode);
return ret;
}
/*
* Write open accounting is done only for sequential files.
*/
static inline bool zonefs_seq_file_need_wro(struct inode *inode,
struct file *file)
{
if (zonefs_inode_is_cnv(inode))
return false;
if (!(file->f_mode & FMODE_WRITE))
return false;
return true;
}
static int zonefs_seq_file_write_open(struct inode *inode)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
int ret = 0;
mutex_lock(&zi->i_truncate_mutex);
if (!zi->i_wr_refcnt) {
struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files);
if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
if (sbi->s_max_wro_seq_files
&& wro > sbi->s_max_wro_seq_files) {
atomic_dec(&sbi->s_wro_seq_files);
ret = -EBUSY;
goto unlock;
}
if (i_size_read(inode) < z->z_capacity) {
ret = zonefs_inode_zone_mgmt(inode,
REQ_OP_ZONE_OPEN);
if (ret) {
atomic_dec(&sbi->s_wro_seq_files);
goto unlock;
}
z->z_flags |= ZONEFS_ZONE_OPEN;
zonefs_inode_account_active(inode);
}
}
}
zi->i_wr_refcnt++;
unlock:
mutex_unlock(&zi->i_truncate_mutex);
return ret;
}
static int zonefs_file_open(struct inode *inode, struct file *file)
{
int ret;
ret = generic_file_open(inode, file);
if (ret)
return ret;
if (zonefs_seq_file_need_wro(inode, file))
return zonefs_seq_file_write_open(inode);
return 0;
}
static void zonefs_seq_file_write_close(struct inode *inode)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
int ret = 0;
mutex_lock(&zi->i_truncate_mutex);
zi->i_wr_refcnt--;
if (zi->i_wr_refcnt)
goto unlock;
/*
* The file zone may not be open anymore (e.g. the file was truncated to
* its maximum size or it was fully written). For this case, we only
* need to decrement the write open count.
*/
if (z->z_flags & ZONEFS_ZONE_OPEN) {
ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
if (ret) {
__zonefs_io_error(inode, false);
/*
* Leaving zones explicitly open may lead to a state
* where most zones cannot be written (zone resources
* exhausted). So take preventive action by remounting
* read-only.
*/
if (z->z_flags & ZONEFS_ZONE_OPEN &&
!(sb->s_flags & SB_RDONLY)) {
zonefs_warn(sb,
"closing zone at %llu failed %d\n",
z->z_sector, ret);
zonefs_warn(sb,
"remounting filesystem read-only\n");
sb->s_flags |= SB_RDONLY;
}
goto unlock;
}
z->z_flags &= ~ZONEFS_ZONE_OPEN;
zonefs_inode_account_active(inode);
}
atomic_dec(&sbi->s_wro_seq_files);
unlock:
mutex_unlock(&zi->i_truncate_mutex);
}
static int zonefs_file_release(struct inode *inode, struct file *file)
{
/*
* If we explicitly open a zone we must close it again as well, but the
* zone management operation can fail (either due to an IO error or because
* the zone has gone offline or read-only). Make sure we don't fail the
* close(2) for user-space.
*/
if (zonefs_seq_file_need_wro(inode, file))
zonefs_seq_file_write_close(inode);
return 0;
}
const struct file_operations zonefs_file_operations = {
.open = zonefs_file_open,
.release = zonefs_file_release,
.fsync = zonefs_file_fsync,
.mmap = zonefs_file_mmap,
.llseek = zonefs_file_llseek,
.read_iter = zonefs_file_read_iter,
.write_iter = zonefs_file_write_iter,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.iopoll = iocb_bio_iopoll,
};
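The checks in zonefs_file_write_iter() and zonefs_file_dio_write() above translate into three rules for an application writing a sequential zone file: open it with O_DIRECT, keep the position and count block-aligned, and write exactly at the current write pointer, which zonefs exposes as the file size. A minimal conforming writer is sketched below; the mount point /mnt/zonefs and the 4096-byte logical block size are assumptions for illustration, not part of the commit:

#define _GNU_SOURCE /* for O_DIRECT */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
void *buf;
ssize_t ret = -1;

/* Sequential zone files only accept direct I/O writes. */
int fd = open("/mnt/zonefs/seq/0", O_WRONLY | O_DIRECT);

if (fd < 0 || posix_memalign(&buf, 4096, 4096))
return 1;

/* Writes must start at the write pointer, i.e. at the file size. */
off_t wp = lseek(fd, 0, SEEK_END);
if (wp >= 0)
ret = pwrite(fd, buf, 4096, wp);

free(buf);
close(fd);
return ret == 4096 ? 0 : 1;
}

An unaligned or non-append write fails with EINVAL instead of being silently reordered, which is exactly what the write pointer check in zonefs_file_dio_write() enforces.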
fs/zonefs/super.c:

@@ -28,33 +28,47 @@
 #include "trace.h"

 /*
- * Manage the active zone count. Called with zi->i_truncate_mutex held.
+ * Get the name of a zone group directory.
  */
-static void zonefs_account_active(struct inode *inode)
+static const char *zonefs_zgroup_name(enum zonefs_ztype ztype)
 {
-struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
-lockdep_assert_held(&zi->i_truncate_mutex);
-
-if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+switch (ztype) {
+case ZONEFS_ZTYPE_CNV:
+return "cnv";
+case ZONEFS_ZTYPE_SEQ:
+return "seq";
+default:
+WARN_ON_ONCE(1);
+return "???";
+}
+}
+
+/*
+ * Manage the active zone count.
+ */
+static void zonefs_account_active(struct super_block *sb,
+struct zonefs_zone *z)
+{
+struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+
+if (zonefs_zone_is_cnv(z))
 return;

 /*
  * For zones that transitioned to the offline or readonly condition,
  * we only need to clear the active state.
  */
-if (zi->i_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
+if (z->z_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
 goto out;

 /*
  * If the zone is active, that is, if it is explicitly open or
  * partially written, check if it was already accounted as active.
  */
-if ((zi->i_flags & ZONEFS_ZONE_OPEN) ||
-    (zi->i_wpoffset > 0 && zi->i_wpoffset < zi->i_max_size)) {
-if (!(zi->i_flags & ZONEFS_ZONE_ACTIVE)) {
-zi->i_flags |= ZONEFS_ZONE_ACTIVE;
+if ((z->z_flags & ZONEFS_ZONE_OPEN) ||
+    (z->z_wpoffset > 0 && z->z_wpoffset < z->z_capacity)) {
+if (!(z->z_flags & ZONEFS_ZONE_ACTIVE)) {
+z->z_flags |= ZONEFS_ZONE_ACTIVE;
 atomic_inc(&sbi->s_active_seq_files);
 }
 return;
@@ -62,18 +76,29 @@ static void zonefs_account_active(struct inode *inode)
 out:
 /* The zone is not active. If it was, update the active count */
-if (zi->i_flags & ZONEFS_ZONE_ACTIVE) {
-zi->i_flags &= ~ZONEFS_ZONE_ACTIVE;
+if (z->z_flags & ZONEFS_ZONE_ACTIVE) {
+z->z_flags &= ~ZONEFS_ZONE_ACTIVE;
 atomic_dec(&sbi->s_active_seq_files);
 }
 }

-static inline int zonefs_zone_mgmt(struct inode *inode, enum req_op op)
+/*
+ * Manage the active zone count. Called with zi->i_truncate_mutex held.
+ */
+void zonefs_inode_account_active(struct inode *inode)
 {
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
-int ret;
-
-lockdep_assert_held(&zi->i_truncate_mutex);
+lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
+
+return zonefs_account_active(inode->i_sb, zonefs_inode_zone(inode));
+}
+
+/*
+ * Execute a zone management operation.
+ */
+static int zonefs_zone_mgmt(struct super_block *sb,
+struct zonefs_zone *z, enum req_op op)
+{
+int ret;

 /*
  * With ZNS drives, closing an explicitly open zone that has not been
@@ -83,201 +108,49 @@ static inline int zonefs_zone_mgmt(struct inode *inode, enum req_op op)
  * are exceeded, make sure that the zone does not remain active by
  * resetting it.
  */
-if (op == REQ_OP_ZONE_CLOSE && !zi->i_wpoffset)
+if (op == REQ_OP_ZONE_CLOSE && !z->z_wpoffset)
 op = REQ_OP_ZONE_RESET;

-trace_zonefs_zone_mgmt(inode, op);
-ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
-zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
+trace_zonefs_zone_mgmt(sb, z, op);
+ret = blkdev_zone_mgmt(sb->s_bdev, op, z->z_sector,
+z->z_size >> SECTOR_SHIFT, GFP_NOFS);
 if (ret) {
-zonefs_err(inode->i_sb,
+zonefs_err(sb,
 "Zone management operation %s at %llu failed %d\n",
-blk_op_str(op), zi->i_zsector, ret);
+blk_op_str(op), z->z_sector, ret);
 return ret;
 }
 return 0;
 }

-static inline void zonefs_i_size_write(struct inode *inode, loff_t isize)
-{
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
-i_size_write(inode, isize);
-
-/*
- * A full zone is no longer open/active and does not need
- * explicit closing.
- */
-if (isize >= zi->i_max_size) {
-struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
-
-if (zi->i_flags & ZONEFS_ZONE_ACTIVE)
-atomic_dec(&sbi->s_active_seq_files);
-zi->i_flags &= ~(ZONEFS_ZONE_OPEN | ZONEFS_ZONE_ACTIVE);
-}
-}
-
-static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
-loff_t length, unsigned int flags,
-struct iomap *iomap, struct iomap *srcmap)
+int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op)
 {
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
-struct super_block *sb = inode->i_sb;
-loff_t isize;
-
-/*
- * All blocks are always mapped below EOF. If reading past EOF,
- * act as if there is a hole up to the file maximum size.
- */
-mutex_lock(&zi->i_truncate_mutex);
-iomap->bdev = inode->i_sb->s_bdev;
-iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
-isize = i_size_read(inode);
-if (iomap->offset >= isize) {
-iomap->type = IOMAP_HOLE;
-iomap->addr = IOMAP_NULL_ADDR;
-iomap->length = length;
-} else {
-iomap->type = IOMAP_MAPPED;
-iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
-iomap->length = isize - iomap->offset;
-}
-mutex_unlock(&zi->i_truncate_mutex);
-
-trace_zonefs_iomap_begin(inode, iomap);
-return 0;
+lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
+
+return zonefs_zone_mgmt(inode->i_sb, zonefs_inode_zone(inode), op);
 }

-static const struct iomap_ops zonefs_read_iomap_ops = {
-.iomap_begin = zonefs_read_iomap_begin,
-};
-
-static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
-loff_t length, unsigned int flags,
-struct iomap *iomap, struct iomap *srcmap)
+void zonefs_i_size_write(struct inode *inode, loff_t isize)
 {
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
-struct super_block *sb = inode->i_sb;
-loff_t isize;
-
-/* All write I/Os should always be within the file maximum size */
-if (WARN_ON_ONCE(offset + length > zi->i_max_size))
-return -EIO;
-
-/*
- * Sequential zones can only accept direct writes. This is already
- * checked when writes are issued, so warn if we see a page writeback
- * operation.
- */
-if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
-!(flags & IOMAP_DIRECT)))
-return -EIO;
+struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+i_size_write(inode, isize);

 /*
- * For conventional zones, all blocks are always mapped. For sequential
- * zones, all blocks are always mapped below the inode size (zone
- * write pointer) and unwritten beyond.
+ * A full zone is no longer open/active and does not need
+ * explicit closing.
  */
-mutex_lock(&zi->i_truncate_mutex);
-iomap->bdev = inode->i_sb->s_bdev;
-iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
-iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
-isize = i_size_read(inode);
-if (iomap->offset >= isize) {
-iomap->type = IOMAP_UNWRITTEN;
-iomap->length = zi->i_max_size - iomap->offset;
-} else {
-iomap->type = IOMAP_MAPPED;
-iomap->length = isize - iomap->offset;
-}
-mutex_unlock(&zi->i_truncate_mutex);
-
-trace_zonefs_iomap_begin(inode, iomap);
-return 0;
-}
-
-static const struct iomap_ops zonefs_write_iomap_ops = {
-.iomap_begin = zonefs_write_iomap_begin,
-};
-
-static int zonefs_read_folio(struct file *unused, struct folio *folio)
-{
-return iomap_read_folio(folio, &zonefs_read_iomap_ops);
-}
-
-static void zonefs_readahead(struct readahead_control *rac)
-{
-iomap_readahead(rac, &zonefs_read_iomap_ops);
-}
-
-/*
- * Map blocks for page writeback. This is used only on conventional zone files,
- * which implies that the page range can only be within the fixed inode size.
- */
-static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
-struct inode *inode, loff_t offset)
-{
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
-if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
-return -EIO;
-if (WARN_ON_ONCE(offset >= i_size_read(inode)))
-return -EIO;
-
-/* If the mapping is already OK, nothing needs to be done */
-if (offset >= wpc->iomap.offset &&
-offset < wpc->iomap.offset + wpc->iomap.length)
-return 0;
-
-return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
-IOMAP_WRITE, &wpc->iomap, NULL);
-}
-
-static const struct iomap_writeback_ops zonefs_writeback_ops = {
-.map_blocks = zonefs_write_map_blocks,
-};
-
-static int zonefs_writepages(struct address_space *mapping,
-struct writeback_control *wbc)
-{
-struct iomap_writepage_ctx wpc = { };
-
-return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
-}
-
-static int zonefs_swap_activate(struct swap_info_struct *sis,
-struct file *swap_file, sector_t *span)
-{
-struct inode *inode = file_inode(swap_file);
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
-if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
-zonefs_err(inode->i_sb,
-"swap file: not a conventional zone file\n");
-return -EINVAL;
-}
-
-return iomap_swapfile_activate(sis, swap_file, span,
-&zonefs_read_iomap_ops);
-}
+if (isize >= z->z_capacity) {
+struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);

-static const struct address_space_operations zonefs_file_aops = {
-.read_folio = zonefs_read_folio,
-.readahead = zonefs_readahead,
-.writepages = zonefs_writepages,
-.dirty_folio = filemap_dirty_folio,
-.release_folio = iomap_release_folio,
-.invalidate_folio = iomap_invalidate_folio,
-.migrate_folio = filemap_migrate_folio,
-.is_partially_uptodate = iomap_is_partially_uptodate,
-.error_remove_page = generic_error_remove_page,
-.direct_IO = noop_direct_IO,
-.swap_activate = zonefs_swap_activate,
-};
+if (z->z_flags & ZONEFS_ZONE_ACTIVE)
+atomic_dec(&sbi->s_active_seq_files);
+z->z_flags &= ~(ZONEFS_ZONE_OPEN | ZONEFS_ZONE_ACTIVE);
+}
+}

-static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
+void zonefs_update_stats(struct inode *inode, loff_t new_isize)
 {
 struct super_block *sb = inode->i_sb;
 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
@@ -310,63 +183,69 @@ static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
 }

 /*
- * Check a zone condition and adjust its file inode access permissions for
- * offline and readonly zones. Return the inode size corresponding to the
- * amount of readable data in the zone.
+ * Check a zone condition. Return the amount of written (and still readable)
+ * data in the zone.
  */
-static loff_t zonefs_check_zone_condition(struct inode *inode,
-struct blk_zone *zone, bool warn,
-bool mount)
+static loff_t zonefs_check_zone_condition(struct super_block *sb,
+struct zonefs_zone *z,
+struct blk_zone *zone)
 {
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
 switch (zone->cond) {
 case BLK_ZONE_COND_OFFLINE:
-/*
- * Dead zone: make the inode immutable, disable all accesses
- * and set the file size to 0 (zone wp set to zone start).
- */
-if (warn)
-zonefs_warn(inode->i_sb, "inode %lu: offline zone\n",
-inode->i_ino);
-inode->i_flags |= S_IMMUTABLE;
-inode->i_mode &= ~0777;
-zone->wp = zone->start;
-zi->i_flags |= ZONEFS_ZONE_OFFLINE;
+zonefs_warn(sb, "Zone %llu: offline zone\n",
+z->z_sector);
+z->z_flags |= ZONEFS_ZONE_OFFLINE;
 return 0;
 case BLK_ZONE_COND_READONLY:
 /*
- * The write pointer of read-only zones is invalid. If such a
- * zone is found during mount, the file size cannot be retrieved
- * so we treat the zone as offline (mount == true case).
- * Otherwise, keep the file size as it was when last updated
- * so that the user can recover data. In both cases, writes are
- * always disabled for the zone.
+ * The write pointer of read-only zones is invalid, so we cannot
+ * determine the zone wpoffset (inode size). We thus keep the
+ * zone wpoffset as is, which leads to an empty file
+ * (wpoffset == 0) on mount. For a runtime error, this keeps
+ * the inode size as it was when last updated so that the user
+ * can recover data.
  */
-if (warn)
-zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
-inode->i_ino);
-inode->i_flags |= S_IMMUTABLE;
-if (mount) {
-zone->cond = BLK_ZONE_COND_OFFLINE;
-inode->i_mode &= ~0777;
-zone->wp = zone->start;
-zi->i_flags |= ZONEFS_ZONE_OFFLINE;
-return 0;
-}
-zi->i_flags |= ZONEFS_ZONE_READONLY;
-inode->i_mode &= ~0222;
-return i_size_read(inode);
+zonefs_warn(sb, "Zone %llu: read-only zone\n",
+z->z_sector);
+z->z_flags |= ZONEFS_ZONE_READONLY;
+if (zonefs_zone_is_cnv(z))
+return z->z_capacity;
+return z->z_wpoffset;
 case BLK_ZONE_COND_FULL:
 /* The write pointer of full zones is invalid. */
-return zi->i_max_size;
+return z->z_capacity;
 default:
-if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
-return zi->i_max_size;
+if (zonefs_zone_is_cnv(z))
+return z->z_capacity;
 return (zone->wp - zone->start) << SECTOR_SHIFT;
 }
 }

+/*
+ * Check a zone condition and adjust its inode access permissions for
+ * offline and readonly zones.
+ */
+static void zonefs_inode_update_mode(struct inode *inode)
+{
+struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+if (z->z_flags & ZONEFS_ZONE_OFFLINE) {
+/* Offline zones cannot be read nor written */
+inode->i_flags |= S_IMMUTABLE;
+inode->i_mode &= ~0777;
+} else if (z->z_flags & ZONEFS_ZONE_READONLY) {
+/* Readonly zones cannot be written */
+inode->i_flags |= S_IMMUTABLE;
+if (z->z_flags & ZONEFS_ZONE_INIT_MODE)
+inode->i_mode &= ~0777;
+else
+inode->i_mode &= ~0222;
+}
+
+z->z_flags &= ~ZONEFS_ZONE_INIT_MODE;
+z->z_mode = inode->i_mode;
+}
+
 struct zonefs_ioerr_data {
 struct inode *inode;
 bool write;
@@ -377,7 +256,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
 {
 struct zonefs_ioerr_data *err = data;
 struct inode *inode = err->inode;
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
+struct zonefs_zone *z = zonefs_inode_zone(inode);
 struct super_block *sb = inode->i_sb;
 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
 loff_t isize, data_size;
@@ -388,10 +267,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
  * as there is no inconsistency between the inode size and the amount of
  * data written in the zone (data_size).
  */
-data_size = zonefs_check_zone_condition(inode, zone, true, false);
+data_size = zonefs_check_zone_condition(sb, z, zone);
 isize = i_size_read(inode);
-if (zone->cond != BLK_ZONE_COND_OFFLINE &&
-zone->cond != BLK_ZONE_COND_READONLY &&
+if (!(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)) &&
 !err->write && isize == data_size)
 return 0;
@@ -414,8 +292,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
  * In all cases, warn about inode size inconsistency and handle the
  * IO error according to the zone condition and to the mount options.
  */
-if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size)
-zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n",
+if (zonefs_zone_is_seq(z) && isize != data_size)
+zonefs_warn(sb,
+"inode %lu: invalid size %lld (should be %lld)\n",
 inode->i_ino, isize, data_size);

 /*
@@ -424,24 +303,22 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
  * zone condition to read-only and offline respectively, as if the
  * condition was signaled by the hardware.
  */
-if (zone->cond == BLK_ZONE_COND_OFFLINE ||
-sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL) {
+if ((z->z_flags & ZONEFS_ZONE_OFFLINE) ||
+(sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)) {
 zonefs_warn(sb, "inode %lu: read/write access disabled\n",
 inode->i_ino);
-if (zone->cond != BLK_ZONE_COND_OFFLINE) {
-zone->cond = BLK_ZONE_COND_OFFLINE;
-data_size = zonefs_check_zone_condition(inode, zone,
-false, false);
-}
-} else if (zone->cond == BLK_ZONE_COND_READONLY ||
-sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) {
+if (!(z->z_flags & ZONEFS_ZONE_OFFLINE))
+z->z_flags |= ZONEFS_ZONE_OFFLINE;
+zonefs_inode_update_mode(inode);
+data_size = 0;
+} else if ((z->z_flags & ZONEFS_ZONE_READONLY) ||
+(sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)) {
 zonefs_warn(sb, "inode %lu: write access disabled\n",
 inode->i_ino);
-if (zone->cond != BLK_ZONE_COND_READONLY) {
-zone->cond = BLK_ZONE_COND_READONLY;
-data_size = zonefs_check_zone_condition(inode, zone,
-false, false);
-}
+if (!(z->z_flags & ZONEFS_ZONE_READONLY))
+z->z_flags |= ZONEFS_ZONE_READONLY;
+zonefs_inode_update_mode(inode);
+data_size = isize;
 } else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO &&
 data_size > isize) {
 /* Do not expose garbage data */
@@ -455,9 +332,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
  * close of the zone when the inode file is closed.
  */
 if ((sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) &&
-(zone->cond == BLK_ZONE_COND_OFFLINE ||
-zone->cond == BLK_ZONE_COND_READONLY))
-zi->i_flags &= ~ZONEFS_ZONE_OPEN;
+(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)))
+z->z_flags &= ~ZONEFS_ZONE_OPEN;

 /*
  * If error=remount-ro was specified, any error results in remounting
@@ -474,8 +350,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
  */
 zonefs_update_stats(inode, data_size);
 zonefs_i_size_write(inode, data_size);
-zi->i_wpoffset = data_size;
-zonefs_account_active(inode);
+z->z_wpoffset = data_size;
+zonefs_inode_account_active(inode);

 return 0;
 }
@@ -487,9 +363,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
  * eventually correct the file size and zonefs inode write pointer offset
  * (which can be out of sync with the drive due to partial write failures).
  */
-static void __zonefs_io_error(struct inode *inode, bool write)
+void __zonefs_io_error(struct inode *inode, bool write)
 {
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
+struct zonefs_zone *z = zonefs_inode_zone(inode);
 struct super_block *sb = inode->i_sb;
 struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
 unsigned int noio_flag;
@@ -505,8 +381,8 @@ static void __zonefs_io_error(struct inode *inode, bool write)
  * files with aggregated conventional zones, for which the inode zone
  * size is always larger than the device zone size.
  */
-if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev))
-nr_zones = zi->i_zone_size >>
+if (z->z_size > bdev_zone_sectors(sb->s_bdev))
+nr_zones = z->z_size >>
 (sbi->s_zone_sectors_shift + SECTOR_SHIFT);

 /*
@@ -518,7 +394,7 @@ static void __zonefs_io_error(struct inode *inode, bool write)
  * the GFP_NOIO context avoids both problems.
  */
 noio_flag = memalloc_noio_save();
-ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, nr_zones,
+ret = blkdev_report_zones(sb->s_bdev, z->z_sector, nr_zones,
 zonefs_io_error_cb, &err);
 if (ret != nr_zones)
 zonefs_err(sb, "Get inode %lu zone information failed %d\n",
@@ -526,82 +402,142 @@ static void __zonefs_io_error(struct inode *inode, bool write)
 memalloc_noio_restore(noio_flag);
 }

-static void zonefs_io_error(struct inode *inode, bool write)
-{
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
-mutex_lock(&zi->i_truncate_mutex);
-__zonefs_io_error(inode, write);
-mutex_unlock(&zi->i_truncate_mutex);
-}
-
-static int zonefs_file_truncate(struct inode *inode, loff_t isize)
-{
-struct zonefs_inode_info *zi = ZONEFS_I(inode);
-loff_t old_isize;
-enum req_op op;
-int ret = 0;
-
-/*
- * Only sequential zone files can be truncated and truncation is allowed
- * only down to a 0 size, which is equivalent to a zone reset, and to
- * the maximum file size, which is equivalent to a zone finish.
- */
-if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
-return -EPERM;
-
-if (!isize)
-op = REQ_OP_ZONE_RESET;
-else if (isize == zi->i_max_size)
-op = REQ_OP_ZONE_FINISH;
-else
-return -EPERM;
-
-inode_dio_wait(inode);
-
-/* Serialize against page faults */
-filemap_invalidate_lock(inode->i_mapping);
-
-/* Serialize against zonefs_iomap_begin() */
-mutex_lock(&zi->i_truncate_mutex);
-
-old_isize = i_size_read(inode);
-if (isize == old_isize)
-goto unlock;
-
-ret = zonefs_zone_mgmt(inode, op);
-if (ret)
-goto unlock;
-
-/*
- * If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
- * take care of open zones.
- */
-if (zi->i_flags & ZONEFS_ZONE_OPEN) {
-/*
- * Truncating a zone to EMPTY or FULL is the equivalent of
- * closing the zone. For a truncation to 0, we need to
- * re-open the zone to ensure new writes can be processed.
- * For a truncation to the maximum file size, the zone is
- * closed and writes cannot be accepted anymore, so clear
- * the open flag.
- */
-if (!isize)
-ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
-else
-zi->i_flags &= ~ZONEFS_ZONE_OPEN;
-}
-
-zonefs_update_stats(inode, isize);
-truncate_setsize(inode, isize);
-zi->i_wpoffset = isize;
-zonefs_account_active(inode);
-
-unlock:
-mutex_unlock(&zi->i_truncate_mutex);
-filemap_invalidate_unlock(inode->i_mapping);
-
-return ret;
-}
+static struct kmem_cache *zonefs_inode_cachep;
+
+static struct inode *zonefs_alloc_inode(struct super_block *sb)
+{
+struct zonefs_inode_info *zi;
+
+zi = alloc_inode_sb(sb, zonefs_inode_cachep, GFP_KERNEL);
+if (!zi)
+return NULL;
+
+inode_init_once(&zi->i_vnode);
+mutex_init(&zi->i_truncate_mutex);
+zi->i_wr_refcnt = 0;
+
+return &zi->i_vnode;
+}
+
+static void zonefs_free_inode(struct inode *inode)
+{
+kmem_cache_free(zonefs_inode_cachep, ZONEFS_I(inode));
+}
+
+/*
+ * File system stat.
+ */
+static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+struct super_block *sb = dentry->d_sb;
+struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+enum zonefs_ztype t;
+
+buf->f_type = ZONEFS_MAGIC;
+buf->f_bsize = sb->s_blocksize;
+buf->f_namelen = ZONEFS_NAME_MAX;
+
+spin_lock(&sbi->s_lock);
+
+buf->f_blocks = sbi->s_blocks;
+if (WARN_ON(sbi->s_used_blocks > sbi->s_blocks))
+buf->f_bfree = 0;
+else
+buf->f_bfree = buf->f_blocks - sbi->s_used_blocks;
+buf->f_bavail = buf->f_bfree;
+
+for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
+if (sbi->s_zgroup[t].g_nr_zones)
+buf->f_files += sbi->s_zgroup[t].g_nr_zones + 1;
+}
+buf->f_ffree = 0;
+
+spin_unlock(&sbi->s_lock);
+
+buf->f_fsid = uuid_to_fsid(sbi->s_uuid.b);
+
+return 0;
+}
+
+enum {
+Opt_errors_ro, Opt_errors_zro, Opt_errors_zol, Opt_errors_repair,
+Opt_explicit_open, Opt_err,
+};
+
+static const match_table_t tokens = {
+{ Opt_errors_ro, "errors=remount-ro"},
+{ Opt_errors_zro, "errors=zone-ro"},
+{ Opt_errors_zol, "errors=zone-offline"},
+{ Opt_errors_repair, "errors=repair"},
+{ Opt_explicit_open, "explicit-open" },
+{ Opt_err, NULL}
+};
+
+static int zonefs_parse_options(struct super_block *sb, char *options)
+{
+struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+substring_t args[MAX_OPT_ARGS];
+char *p;
+
+if (!options)
+return 0;
+
+while ((p = strsep(&options, ",")) != NULL) {
+int token;
+
+if (!*p)
+continue;
+
+token = match_token(p, tokens, args);
+switch (token) {
+case Opt_errors_ro:
+sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_RO;
+break;
+case Opt_errors_zro:
+sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZRO;
+break;
+case Opt_errors_zol:
+sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZOL;
+break;
+case Opt_errors_repair:
+sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
+sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_REPAIR;
+break;
+case Opt_explicit_open:
+sbi->s_mount_opts |= ZONEFS_MNTOPT_EXPLICIT_OPEN;
+break;
+default:
+return -EINVAL;
+}
+}
+
+return 0;
+}
+
+static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
+{
+struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
+
+if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
+seq_puts(seq, ",errors=remount-ro");
+if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
+seq_puts(seq, ",errors=zone-ro");
+if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
+seq_puts(seq, ",errors=zone-offline");
+if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
+seq_puts(seq, ",errors=repair");
+
+return 0;
+}
+
+static int zonefs_remount(struct super_block *sb, int *flags, char *data)
+{
+sync_filesystem(sb);
+
+return zonefs_parse_options(sb, data);
+}

 static int zonefs_inode_setattr(struct mnt_idmap *idmap,
@@ -643,6 +579,14 @@ static int zonefs_inode_setattr(struct mnt_idmap *idmap,

 setattr_copy(&nop_mnt_idmap, inode, iattr);

+if (S_ISREG(inode->i_mode)) {
+struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+z->z_mode = inode->i_mode;
+z->z_uid = inode->i_uid;
+z->z_gid = inode->i_gid;
+}
+
 return 0;
 }
...@@ -650,943 +594,386 @@ static const struct inode_operations zonefs_file_inode_operations = { ...@@ -650,943 +594,386 @@ static const struct inode_operations zonefs_file_inode_operations = {
.setattr = zonefs_inode_setattr, .setattr = zonefs_inode_setattr,
}; };
static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end, static long zonefs_fname_to_fno(const struct qstr *fname)
int datasync)
{ {
struct inode *inode = file_inode(file); const char *name = fname->name;
int ret = 0; unsigned int len = fname->len;
long fno = 0, shift = 1;
const char *rname;
char c = *name;
unsigned int i;
if (unlikely(IS_IMMUTABLE(inode)))
return -EPERM;
/*
* Since only direct writes are allowed in sequential files, page cache
* flush is needed only for conventional zone files.
*/
if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
ret = file_write_and_wait_range(file, start, end);
if (!ret)
ret = blkdev_issue_flush(inode->i_sb->s_bdev);
if (ret)
zonefs_io_error(inode, true);
return ret;
}
static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
vm_fault_t ret;
if (unlikely(IS_IMMUTABLE(inode)))
return VM_FAULT_SIGBUS;
/*
* Sanity check: only conventional zone files can have shared
* writeable mappings.
*/
if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
return VM_FAULT_NOPAGE;
sb_start_pagefault(inode->i_sb);
file_update_time(vmf->vma->vm_file);
/* Serialize against truncates */
filemap_invalidate_lock_shared(inode->i_mapping);
ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
filemap_invalidate_unlock_shared(inode->i_mapping);
sb_end_pagefault(inode->i_sb);
return ret;
}
static const struct vm_operations_struct zonefs_file_vm_ops = {
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = zonefs_filemap_page_mkwrite,
};
static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
/* /*
* Conventional zones accept random writes, so their files can support * File names are always a base-10 number string without any
* shared writable mappings. For sequential zone files, only read * leading 0s.
* mappings are possible since there are no guarantees for write
* ordering between msync() and page cache writeback.
*/ */
if (ZONEFS_I(file_inode(file))->i_ztype == ZONEFS_ZTYPE_SEQ && if (!isdigit(c))
(vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) return -ENOENT;
return -EINVAL;
file_accessed(file); if (len > 1 && c == '0')
vma->vm_ops = &zonefs_file_vm_ops; return -ENOENT;
return 0; if (len == 1)
} return c - '0';
static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence) for (i = 0, rname = name + len - 1; i < len; i++, rname--) {
{ c = *rname;
loff_t isize = i_size_read(file_inode(file)); if (!isdigit(c))
return -ENOENT;
/* fno += (c - '0') * shift;
* Seeks are limited to below the zone size for conventional zones shift *= 10;
* and below the zone write pointer for sequential zones. In both
* cases, this limit is the inode size.
*/
return generic_file_llseek_size(file, offset, whence, isize, isize);
}
static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
int error, unsigned int flags)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
if (error) {
zonefs_io_error(inode, true);
return error;
}
if (size && zi->i_ztype != ZONEFS_ZTYPE_CNV) {
/*
* Note that we may be seeing completions out of order,
* but that is not a problem since a write completed
* successfully necessarily means that all preceding writes
* were also successful. So we can safely increase the inode
* size to the write end location.
*/
mutex_lock(&zi->i_truncate_mutex);
if (i_size_read(inode) < iocb->ki_pos + size) {
zonefs_update_stats(inode, iocb->ki_pos + size);
zonefs_i_size_write(inode, iocb->ki_pos + size);
}
mutex_unlock(&zi->i_truncate_mutex);
} }
return 0; return fno;
} }
static const struct iomap_dio_ops zonefs_write_dio_ops = { static struct inode *zonefs_get_file_inode(struct inode *dir,
.end_io = zonefs_file_write_dio_end_io, struct dentry *dentry)
};
static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
{ {
struct inode *inode = file_inode(iocb->ki_filp); struct zonefs_zone_group *zgroup = dir->i_private;
struct zonefs_inode_info *zi = ZONEFS_I(inode); struct super_block *sb = dir->i_sb;
struct block_device *bdev = inode->i_sb->s_bdev; struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
unsigned int max = bdev_max_zone_append_sectors(bdev); struct zonefs_zone *z;
struct bio *bio; struct inode *inode;
ssize_t size; ino_t ino;
int nr_pages; long fno;
ssize_t ret;
max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
iov_iter_truncate(from, max);
nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
if (!nr_pages)
return 0;
bio = bio_alloc(bdev, nr_pages,
REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
bio->bi_iter.bi_sector = zi->i_zsector;
bio->bi_ioprio = iocb->ki_ioprio;
if (iocb_is_dsync(iocb))
bio->bi_opf |= REQ_FUA;
ret = bio_iov_iter_get_pages(bio, from);
if (unlikely(ret))
goto out_release;
size = bio->bi_iter.bi_size;
task_io_account_write(size);
if (iocb->ki_flags & IOCB_HIPRI)
bio_set_polled(bio, iocb);
ret = submit_bio_wait(bio);
/*
* If the file zone was written underneath the file system, the zone
* write pointer may not be where we expect it to be, but the zone
* append write can still succeed. So check manually that we wrote where
* we intended to, that is, at zi->i_wpoffset.
*/
if (!ret) {
sector_t wpsector =
zi->i_zsector + (zi->i_wpoffset >> SECTOR_SHIFT);
if (bio->bi_iter.bi_sector != wpsector) {
zonefs_warn(inode->i_sb,
"Corrupted write pointer %llu for zone at %llu\n",
wpsector, zi->i_zsector);
ret = -EIO;
}
}
	zonefs_file_write_dio_end_io(iocb, size, ret, 0);
	trace_zonefs_file_dio_append(inode, size, ret);

out_release:
	bio_release_pages(bio, false);
	bio_put(bio);

	if (ret >= 0) {
		iocb->ki_pos += size;
		return size;
	}

	return ret;
}

/*
 * Do not exceed the LFS limits nor the file zone size. If pos is under the
 * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
 */
static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
					loff_t count)
{
	struct inode *inode = file_inode(file);
	struct zonefs_inode_info *zi = ZONEFS_I(inode);
	loff_t limit = rlimit(RLIMIT_FSIZE);
	loff_t max_size = zi->i_max_size;

	if (limit != RLIM_INFINITY) {
		if (pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		count = min(count, limit - pos);
	}

	if (!(file->f_flags & O_LARGEFILE))
		max_size = min_t(loff_t, MAX_NON_LFS, max_size);

	if (unlikely(pos >= max_size))
		return -EFBIG;

	return min(count, max_size - pos);
}

	return fno;
}

static struct inode *zonefs_get_file_inode(struct inode *dir,
					   struct dentry *dentry)
{
	struct zonefs_zone_group *zgroup = dir->i_private;
	struct super_block *sb = dir->i_sb;
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
	struct zonefs_zone *z;
	struct inode *inode;
	ino_t ino;
	long fno;

	/* Get the file number from the file name */
	fno = zonefs_fname_to_fno(&dentry->d_name);
	if (fno < 0)
		return ERR_PTR(fno);

	if (!zgroup->g_nr_zones || fno >= zgroup->g_nr_zones)
		return ERR_PTR(-ENOENT);

	z = &zgroup->g_zones[fno];
	ino = z->z_sector >> sbi->s_zone_sectors_shift;
	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW)) {
		WARN_ON_ONCE(inode->i_private != z);
		return inode;
	}

	inode->i_ino = ino;
	inode->i_mode = z->z_mode;
	inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
	inode->i_uid = z->z_uid;
	inode->i_gid = z->z_gid;
	inode->i_size = z->z_wpoffset;
	inode->i_blocks = z->z_capacity >> SECTOR_SHIFT;
	inode->i_private = z;

	inode->i_op = &zonefs_file_inode_operations;
	inode->i_fop = &zonefs_file_operations;
	inode->i_mapping->a_ops = &zonefs_file_aops;

	/* Update the inode access rights depending on the zone condition */
	zonefs_inode_update_mode(inode);

	unlock_new_inode(inode);

	return inode;
}
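Note how the on-demand lookup above derives a stable inode number from the zone start sector, and exposes the zone write pointer as the apparent file size. A minimal userspace sketch of how this surfaces; the /mnt/zonefs mount point path is an assumption:

#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc != 2) {
		fprintf(stderr, "Usage: %s <zone file, e.g. /mnt/zonefs/seq/0>\n",
			argv[0]);
		return 1;
	}

	if (stat(argv[1], &st) < 0) {
		perror("stat");
		return 1;
	}

	/*
	 * For a sequential zone file, st_size is the zone write pointer
	 * offset and st_blocks reflects the zone capacity; st_ino is
	 * derived from the zone start sector, so it is stable across
	 * mounts and repeated lookups.
	 */
	printf("%s: ino=%llu size=%lld blocks=%lld\n", argv[1],
	       (unsigned long long)st.st_ino, (long long)st.st_size,
	       (long long)st.st_blocks);
	return 0;
}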
static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct zonefs_inode_info *zi = ZONEFS_I(inode);
	loff_t count;

	if (IS_SWAPFILE(inode))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (iocb->ki_flags & IOCB_APPEND) {
		if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
			return -EINVAL;
		mutex_lock(&zi->i_truncate_mutex);
		iocb->ki_pos = zi->i_wpoffset;
		mutex_unlock(&zi->i_truncate_mutex);
	}

	count = zonefs_write_check_limits(file, iocb->ki_pos,
					  iov_iter_count(from));
	if (count < 0)
		return count;

	iov_iter_truncate(from, count);
	return iov_iter_count(from);
}

static struct inode *zonefs_get_zgroup_inode(struct super_block *sb,
					     enum zonefs_ztype ztype)
{
	struct inode *root = d_inode(sb->s_root);
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
	struct inode *inode;
	ino_t ino = bdev_nr_zones(sb->s_bdev) + ztype + 1;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	inode->i_ino = ino;
	inode_init_owner(&nop_mnt_idmap, inode, root, S_IFDIR | 0555);
	inode->i_size = sbi->s_zgroup[ztype].g_nr_zones;
	inode->i_ctime = inode->i_mtime = inode->i_atime = root->i_ctime;
	inode->i_private = &sbi->s_zgroup[ztype];
	set_nlink(inode, 2);

	inode->i_op = &zonefs_dir_inode_operations;
	inode->i_fop = &zonefs_dir_operations;

	unlock_new_inode(inode);

	return inode;
}
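Since zonefs_write_checks() repositions O_APPEND writes to the current zone write pointer, appending to a sequential zone file from userspace only requires direct IO with a suitably aligned buffer. A hedged sketch; the mount point and the 4 KiB block size are assumptions:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define BUF_SIZE 4096	/* must be a multiple of the fs block size */

int main(void)
{
	const char *path = "/mnt/zonefs/seq/0";	/* assumed path */
	void *buf;
	int fd;

	/* O_APPEND on a sequential zone file writes at the zone wp */
	fd = open(path, O_WRONLY | O_DIRECT | O_APPEND);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* O_DIRECT requires an aligned buffer */
	if (posix_memalign(&buf, BUF_SIZE, BUF_SIZE)) {
		close(fd);
		return 1;
	}
	memset(buf, 0, BUF_SIZE);

	if (write(fd, buf, BUF_SIZE) < 0)
		perror("write");

	free(buf);
	close(fd);
	return 0;
}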
/*
* Handle direct writes. For sequential zone files, this is the only possible
* write path. For these files, check that the user is issuing writes
* sequentially from the end of the file. This code assumes that the block layer
* delivers write requests to the device in sequential order. This is always the
* case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
* elevator feature is being used (e.g. mq-deadline). The block layer always
 * automatically selects such an elevator for zoned block devices during the
* device initialization.
*/
static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
struct zonefs_inode_info *zi = ZONEFS_I(inode);
struct super_block *sb = inode->i_sb;
bool sync = is_sync_kiocb(iocb);
bool append = false;
ssize_t ret, count;
/*
* For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
* as this can cause write reordering (e.g. the first aio gets EAGAIN
* on the inode lock but the second goes through but is now unaligned).
*/
if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
(iocb->ki_flags & IOCB_NOWAIT))
return -EOPNOTSUPP;
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock(inode))
return -EAGAIN;
} else {
inode_lock(inode);
}
count = zonefs_write_checks(iocb, from);
if (count <= 0) {
ret = count;
goto inode_unlock;
}
if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
ret = -EINVAL;
goto inode_unlock;
}
/* Enforce sequential writes (append only) in sequential zones */
if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
mutex_lock(&zi->i_truncate_mutex);
if (iocb->ki_pos != zi->i_wpoffset) {
mutex_unlock(&zi->i_truncate_mutex);
ret = -EINVAL;
goto inode_unlock;
}
mutex_unlock(&zi->i_truncate_mutex);
append = sync;
}
if (append)
ret = zonefs_file_dio_append(iocb, from);
else
ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
&zonefs_write_dio_ops, 0, NULL, 0);
if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
(ret > 0 || ret == -EIOCBQUEUED)) {
if (ret > 0)
count = ret;
/*
* Update the zone write pointer offset assuming the write
* operation succeeded. If it did not, the error recovery path
* will correct it. Also do active seq file accounting.
*/
mutex_lock(&zi->i_truncate_mutex);
zi->i_wpoffset += count;
zonefs_account_active(inode);
mutex_unlock(&zi->i_truncate_mutex);
}
inode_unlock:
	inode_unlock(inode);

	return ret;
}

static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
					  struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct zonefs_inode_info *zi = ZONEFS_I(inode);
	ssize_t ret;

	/*
	 * Direct IO writes are mandatory for sequential zone files so that the
	 * write IO issuing order is preserved.
	 */
	if (zi->i_ztype != ZONEFS_ZTYPE_CNV)
		return -EIO;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else {
		inode_lock(inode);
	}

	ret = zonefs_write_checks(iocb, from);
	if (ret <= 0)
		goto inode_unlock;

	ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
	if (ret > 0)
		iocb->ki_pos += ret;
	else if (ret == -EIO)
		zonefs_io_error(inode, true);

inode_unlock:
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}

static struct inode *zonefs_get_dir_inode(struct inode *dir,
					  struct dentry *dentry)
{
	struct super_block *sb = dir->i_sb;
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
	const char *name = dentry->d_name.name;
	enum zonefs_ztype ztype;

	/*
	 * We only need to check for the "seq" directory and
	 * the "cnv" directory if we have conventional zones.
	 */
	if (dentry->d_name.len != 3)
		return ERR_PTR(-ENOENT);

	for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
		if (sbi->s_zgroup[ztype].g_nr_zones &&
		    memcmp(name, zonefs_zgroup_name(ztype), 3) == 0)
			break;
	}
	if (ztype == ZONEFS_ZTYPE_MAX)
		return ERR_PTR(-ENOENT);

	return zonefs_get_zgroup_inode(sb, ztype);
}
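The direct write path above only accepts block-aligned writes issued exactly at the zone write pointer, which userspace can obtain as the apparent file size. A minimal sketch under those assumptions; the device mount point and block size are hypothetical:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

#define BLK 4096	/* assumed fs block size */

int main(void)
{
	const char *path = "/mnt/zonefs/seq/0";	/* assumed path */
	struct stat st;
	void *buf;
	int fd;

	fd = open(path, O_WRONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The file size of a sequential file is its zone write pointer */
	if (fstat(fd, &st) < 0) {
		perror("fstat");
		close(fd);
		return 1;
	}

	if (posix_memalign(&buf, BLK, BLK)) {
		close(fd);
		return 1;
	}
	memset(buf, 0xab, BLK);

	/* Writing at any offset other than st.st_size fails with EINVAL */
	if (pwrite(fd, buf, BLK, st.st_size) < 0)
		perror("pwrite");

	free(buf);
	close(fd);
	return 0;
}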
static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (sb_rdonly(inode->i_sb))
		return -EROFS;

	/* Write operations beyond the zone size are not allowed */
	if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
		return -EFBIG;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ssize_t ret = zonefs_file_dio_write(iocb, from);

		if (ret != -ENOTBLK)
			return ret;
	}

	return zonefs_file_buffered_write(iocb, from);
}

static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
				       int error, unsigned int flags)
{
	if (error) {
		zonefs_io_error(file_inode(iocb->ki_filp), false);
		return error;
	}

	return 0;
}

static struct dentry *zonefs_lookup(struct inode *dir, struct dentry *dentry,
				    unsigned int flags)
{
	struct inode *inode;

	if (dentry->d_name.len > ZONEFS_NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	if (dir == d_inode(dir->i_sb->s_root))
		inode = zonefs_get_dir_inode(dir, dentry);
	else
		inode = zonefs_get_file_inode(dir, dentry);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	return d_splice_alias(inode, dentry);
}
static const struct iomap_dio_ops zonefs_read_dio_ops = {
	.end_io		= zonefs_file_read_dio_end_io,
};

static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct zonefs_inode_info *zi = ZONEFS_I(inode);
	struct super_block *sb = inode->i_sb;
	loff_t isize;
	ssize_t ret;

	/* Offline zones cannot be read */
	if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
		return -EPERM;

	if (iocb->ki_pos >= zi->i_max_size)
		return 0;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock_shared(inode))
			return -EAGAIN;
	} else {
		inode_lock_shared(inode);
	}

	/* Limit read operations to written data */
	mutex_lock(&zi->i_truncate_mutex);
	isize = i_size_read(inode);
	if (iocb->ki_pos >= isize) {
		mutex_unlock(&zi->i_truncate_mutex);
		ret = 0;
		goto inode_unlock;
	}
	iov_iter_truncate(to, isize - iocb->ki_pos);
	mutex_unlock(&zi->i_truncate_mutex);

	if (iocb->ki_flags & IOCB_DIRECT) {
		size_t count = iov_iter_count(to);

		if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
			ret = -EINVAL;
			goto inode_unlock;
		}
		file_accessed(iocb->ki_filp);
		ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
				   &zonefs_read_dio_ops, 0, NULL, 0);
	} else {
		ret = generic_file_read_iter(iocb, to);
		if (ret == -EIO)
			zonefs_io_error(inode, false);
	}

inode_unlock:
	inode_unlock_shared(inode);

	return ret;
}

/*
 * Write open accounting is done only for sequential files.
 */
static inline bool zonefs_seq_file_need_wro(struct inode *inode,
					    struct file *file)
{
	struct zonefs_inode_info *zi = ZONEFS_I(inode);

	if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
		return false;

	if (!(file->f_mode & FMODE_WRITE))
		return false;

	return true;
}

static int zonefs_seq_file_write_open(struct inode *inode)
{
	struct zonefs_inode_info *zi = ZONEFS_I(inode);
	int ret = 0;

	mutex_lock(&zi->i_truncate_mutex);

	if (!zi->i_wr_refcnt) {
		struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
		unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files);

		if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {

			if (sbi->s_max_wro_seq_files
			    && wro > sbi->s_max_wro_seq_files) {
				atomic_dec(&sbi->s_wro_seq_files);
				ret = -EBUSY;
				goto unlock;
			}

			if (i_size_read(inode) < zi->i_max_size) {
				ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
				if (ret) {
					atomic_dec(&sbi->s_wro_seq_files);
					goto unlock;
				}
				zi->i_flags |= ZONEFS_ZONE_OPEN;
				zonefs_account_active(inode);
			}
		}
	}

	zi->i_wr_refcnt++;

unlock:
	mutex_unlock(&zi->i_truncate_mutex);

	return ret;
}

static int zonefs_file_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = generic_file_open(inode, file);
	if (ret)
		return ret;

	if (zonefs_seq_file_need_wro(inode, file))
		return zonefs_seq_file_write_open(inode);

	return 0;
}

static int zonefs_readdir_root(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct super_block *sb = inode->i_sb;
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
	enum zonefs_ztype ztype = ZONEFS_ZTYPE_CNV;
	ino_t base_ino = bdev_nr_zones(sb->s_bdev) + 1;

	if (ctx->pos >= inode->i_size)
		return 0;

	if (!dir_emit_dots(file, ctx))
		return 0;

	if (ctx->pos == 2) {
		if (!sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones)
			ztype = ZONEFS_ZTYPE_SEQ;

		if (!dir_emit(ctx, zonefs_zgroup_name(ztype), 3,
			      base_ino + ztype, DT_DIR))
			return 0;
		ctx->pos++;
	}

	if (ctx->pos == 3 && ztype != ZONEFS_ZTYPE_SEQ) {
		ztype = ZONEFS_ZTYPE_SEQ;
		if (!dir_emit(ctx, zonefs_zgroup_name(ztype), 3,
			      base_ino + ztype, DT_DIR))
			return 0;
		ctx->pos++;
	}

	return 0;
}
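With the "explicit-open" mount option, the accounting in zonefs_seq_file_write_open() caps the number of sequential files concurrently open for writing, and open(2) fails with EBUSY once the device limit is reached. A small probe of this behavior; the mount point path and mount option are assumptions:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	int i, fd;

	/*
	 * Each first write open of a sequential file issues a zone open
	 * command; keep the descriptors open so the zones stay open until
	 * open(2) reports EBUSY (zone resources exhausted).
	 */
	for (i = 0; ; i++) {
		snprintf(path, sizeof(path), "/mnt/zonefs/seq/%d", i);
		fd = open(path, O_WRONLY | O_DIRECT);
		if (fd < 0) {
			if (errno == EBUSY)
				printf("write-open limit reached at file %d\n", i);
			else
				perror("open");
			break;
		}
		/* fd intentionally left open to hold the zone open */
	}
	return 0;
}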
static void zonefs_seq_file_write_close(struct inode *inode)
{
	struct zonefs_inode_info *zi = ZONEFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
	int ret = 0;

	mutex_lock(&zi->i_truncate_mutex);

	zi->i_wr_refcnt--;
	if (zi->i_wr_refcnt)
		goto unlock;

	/*
	 * The file zone may not be open anymore (e.g. the file was truncated to
	 * its maximum size or it was fully written). For this case, we only
	 * need to decrement the write open count.
	 */
	if (zi->i_flags & ZONEFS_ZONE_OPEN) {
		ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
		if (ret) {
			__zonefs_io_error(inode, false);
			/*
			 * Leaving zones explicitly open may lead to a state
			 * where most zones cannot be written (zone resources
			 * exhausted). So take preventive action by remounting
			 * read-only.
			 */
			if (zi->i_flags & ZONEFS_ZONE_OPEN &&
			    !(sb->s_flags & SB_RDONLY)) {
				zonefs_warn(sb,
					"closing zone at %llu failed %d\n",
					zi->i_zsector, ret);
				zonefs_warn(sb,
					"remounting filesystem read-only\n");
				sb->s_flags |= SB_RDONLY;
			}
			goto unlock;
		}

		zi->i_flags &= ~ZONEFS_ZONE_OPEN;
		zonefs_account_active(inode);
	}

	atomic_dec(&sbi->s_wro_seq_files);

unlock:
	mutex_unlock(&zi->i_truncate_mutex);
}

static int zonefs_file_release(struct inode *inode, struct file *file)
{
	/*
	 * If we explicitly open a zone we must close it again as well, but the
	 * zone management operation can fail (either due to an IO error or as
	 * the zone has gone offline or read-only). Make sure we don't fail the
	 * close(2) for user-space.
	 */
	if (zonefs_seq_file_need_wro(inode, file))
		zonefs_seq_file_write_close(inode);

	return 0;
}
static const struct file_operations zonefs_file_operations = {
.open = zonefs_file_open,
.release = zonefs_file_release,
.fsync = zonefs_file_fsync,
.mmap = zonefs_file_mmap,
.llseek = zonefs_file_llseek,
.read_iter = zonefs_file_read_iter,
.write_iter = zonefs_file_write_iter,
.splice_read = generic_file_splice_read,
.splice_write = iter_file_splice_write,
.iopoll = iocb_bio_iopoll,
};
static struct kmem_cache *zonefs_inode_cachep;
static struct inode *zonefs_alloc_inode(struct super_block *sb)
{
struct zonefs_inode_info *zi;
zi = alloc_inode_sb(sb, zonefs_inode_cachep, GFP_KERNEL);
if (!zi)
return NULL;
inode_init_once(&zi->i_vnode);
mutex_init(&zi->i_truncate_mutex);
zi->i_wr_refcnt = 0;
zi->i_flags = 0;
return &zi->i_vnode;
}
static void zonefs_free_inode(struct inode *inode)
{
kmem_cache_free(zonefs_inode_cachep, ZONEFS_I(inode));
}
/*
* File system stat.
*/
static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
enum zonefs_ztype t;
buf->f_type = ZONEFS_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_namelen = ZONEFS_NAME_MAX;
spin_lock(&sbi->s_lock);
buf->f_blocks = sbi->s_blocks;
if (WARN_ON(sbi->s_used_blocks > sbi->s_blocks))
buf->f_bfree = 0;
else
buf->f_bfree = buf->f_blocks - sbi->s_used_blocks;
buf->f_bavail = buf->f_bfree;
for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
if (sbi->s_nr_files[t])
buf->f_files += sbi->s_nr_files[t] + 1;
}
buf->f_ffree = 0;
spin_unlock(&sbi->s_lock);
buf->f_fsid = uuid_to_fsid(sbi->s_uuid.b);
return 0;
}
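From userspace, these statistics are visible through statvfs(3): f_blocks is the total capacity of all zone files and f_bfree the capacity not yet consumed by the zone write pointers. A minimal sketch, assuming a mount at the hypothetical path /mnt/zonefs:

#include <stdio.h>
#include <sys/statvfs.h>

int main(void)
{
	struct statvfs st;

	if (statvfs("/mnt/zonefs", &st) < 0) {	/* assumed mount point */
		perror("statvfs");
		return 1;
	}

	/* Free blocks shrink as sequential zone files are filled */
	printf("block size %lu, blocks %llu, free %llu\n",
	       st.f_bsize, (unsigned long long)st.f_blocks,
	       (unsigned long long)st.f_bfree);
	return 0;
}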
enum {
Opt_errors_ro, Opt_errors_zro, Opt_errors_zol, Opt_errors_repair,
Opt_explicit_open, Opt_err,
};
static const match_table_t tokens = {
{ Opt_errors_ro, "errors=remount-ro"},
{ Opt_errors_zro, "errors=zone-ro"},
{ Opt_errors_zol, "errors=zone-offline"},
{ Opt_errors_repair, "errors=repair"},
{ Opt_explicit_open, "explicit-open" },
{ Opt_err, NULL}
};
static int zonefs_parse_options(struct super_block *sb, char *options)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
substring_t args[MAX_OPT_ARGS];
char *p;
if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_errors_ro:
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_RO;
break;
case Opt_errors_zro:
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZRO;
break;
case Opt_errors_zol:
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_ZOL;
break;
case Opt_errors_repair:
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_ERRORS_MASK;
sbi->s_mount_opts |= ZONEFS_MNTOPT_ERRORS_REPAIR;
break;
case Opt_explicit_open:
sbi->s_mount_opts |= ZONEFS_MNTOPT_EXPLICIT_OPEN;
break;
default:
return -EINVAL;
}
}
return 0;
}
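These options are passed as the mount data string. A hedged example calling mount(2) directly; the device and mount point paths are assumptions for this sketch:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Device and mount point are assumptions for this sketch */
	if (mount("/dev/nullb0", "/mnt/zonefs", "zonefs", 0,
		  "errors=zone-ro,explicit-open") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}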
static int zonefs_show_options(struct seq_file *seq, struct dentry *root)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(root->d_sb);
	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO)
		seq_puts(seq, ",errors=remount-ro");
	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)
		seq_puts(seq, ",errors=zone-ro");
	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)
		seq_puts(seq, ",errors=zone-offline");
	if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_REPAIR)
		seq_puts(seq, ",errors=repair");

	return 0;
}

static int zonefs_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	return zonefs_parse_options(sb, data);
}

static const struct super_operations zonefs_sops = {
	.alloc_inode	= zonefs_alloc_inode,
	.free_inode	= zonefs_free_inode,
	.statfs		= zonefs_statfs,
	.remount_fs	= zonefs_remount,
	.show_options	= zonefs_show_options,
};

static int zonefs_readdir_zgroup(struct file *file,
				 struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct zonefs_zone_group *zgroup = inode->i_private;
	struct super_block *sb = inode->i_sb;
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
	struct zonefs_zone *z;
	int fname_len;
	char *fname;
	ino_t ino;
	int f;

	/*
	 * The size of zone group directories is equal to the number
	 * of zone files in the group and does not include the "." and
	 * ".." entries. Hence the "+ 2" here.
	 */
	if (ctx->pos >= inode->i_size + 2)
		return 0;

	if (!dir_emit_dots(file, ctx))
		return 0;

	fname = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
	if (!fname)
		return -ENOMEM;

	for (f = ctx->pos - 2; f < zgroup->g_nr_zones; f++) {
		z = &zgroup->g_zones[f];
		ino = z->z_sector >> sbi->s_zone_sectors_shift;
		fname_len = snprintf(fname, ZONEFS_NAME_MAX - 1, "%u", f);
		if (!dir_emit(ctx, fname, fname_len, ino, DT_REG))
			break;
		ctx->pos++;
	}

	kfree(fname);

	return 0;
}

static int zonefs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);

	if (inode == d_inode(inode->i_sb->s_root))
		return zonefs_readdir_root(file, ctx);

	return zonefs_readdir_zgroup(file, ctx);
}
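The readdir implementation synthesizes entries named after the file number within each zone group, so an ordinary directory walk enumerates the zone files. A small userspace sketch; the mount point is an assumption:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *dir = opendir("/mnt/zonefs/seq");	/* assumed mount point */
	struct dirent *ent;

	if (!dir) {
		perror("opendir");
		return 1;
	}

	/* Zone files are named 0, 1, 2, ... in zone group directories */
	while ((ent = readdir(dir)))
		printf("%s (ino %llu)\n", ent->d_name,
		       (unsigned long long)ent->d_ino);

	closedir(dir);
	return 0;
}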
static const struct inode_operations zonefs_dir_inode_operations = {
	.lookup		= simple_lookup,
	.setattr	= zonefs_inode_setattr,
};

static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
				  enum zonefs_ztype type)
{
	struct super_block *sb = parent->i_sb;

	inode->i_ino = bdev_nr_zones(sb->s_bdev) + type + 1;
	inode_init_owner(&nop_mnt_idmap, inode, parent, S_IFDIR | 0555);
	inode->i_op = &zonefs_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	set_nlink(inode, 2);
	inc_nlink(parent);
}

const struct inode_operations zonefs_dir_inode_operations = {
	.lookup		= zonefs_lookup,
	.setattr	= zonefs_inode_setattr,
};

const struct file_operations zonefs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= zonefs_readdir,
};

struct zonefs_zone_data {
	struct super_block	*sb;
	unsigned int		nr_zones[ZONEFS_ZTYPE_MAX];
	sector_t		cnv_zone_start;
	struct blk_zone		*zones;
};
static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
				  enum zonefs_ztype type)
{
	struct super_block *sb = inode->i_sb;
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
	struct zonefs_inode_info *zi = ZONEFS_I(inode);
	int ret = 0;

	inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
	inode->i_mode = S_IFREG | sbi->s_perm;

	zi->i_ztype = type;
	zi->i_zsector = zone->start;
	zi->i_zone_size = zone->len << SECTOR_SHIFT;
	if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
	    !(sbi->s_features & ZONEFS_F_AGGRCNV)) {
		zonefs_err(sb,
			   "zone size %llu doesn't match device's zone sectors %llu\n",
			   zi->i_zone_size,
			   bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
		return -EINVAL;
	}

	zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
			       zone->capacity << SECTOR_SHIFT);
	zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);

	inode->i_uid = sbi->s_uid;
	inode->i_gid = sbi->s_gid;
	inode->i_size = zi->i_wpoffset;
	inode->i_blocks = zi->i_max_size >> SECTOR_SHIFT;

	inode->i_op = &zonefs_file_inode_operations;
	inode->i_fop = &zonefs_file_operations;
	inode->i_mapping->a_ops = &zonefs_file_aops;

	sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
	sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
	sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;

	mutex_lock(&zi->i_truncate_mutex);

	/*
	 * For sequential zones, make sure that any open zone is closed first
	 * to ensure that the initial number of open zones is 0, in sync with
	 * the open zone accounting done when the mount option
	 * ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
	 */
	if (type == ZONEFS_ZTYPE_SEQ &&
	    (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
	     zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
		ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
		if (ret)
			goto unlock;
	}

	zonefs_account_active(inode);

unlock:
	mutex_unlock(&zi->i_truncate_mutex);

	return ret;
}

static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
				   void *data)
{
	struct zonefs_zone_data *zd = data;
	struct super_block *sb = zd->sb;
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);

	/*
	 * We do not care about the first zone: it contains the super block
	 * and is not exposed as a file.
	 */
	if (!idx)
		return 0;

	/*
	 * Count the number of zones that will be exposed as files.
	 * For sequential zones, we always have as many files as zones.
	 * For conventional zones, the number of files depends on if we have
	 * conventional zones aggregation enabled.
	 */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (sbi->s_features & ZONEFS_F_AGGRCNV) {
			/* One file per set of contiguous conventional zones */
			if (!(sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones) ||
			    zone->start != zd->cnv_zone_start)
				sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
			zd->cnv_zone_start = zone->start + zone->len;
		} else {
			/* One file per zone */
			sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
		}
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		sbi->s_zgroup[ZONEFS_ZTYPE_SEQ].g_nr_zones++;
		break;
	default:
		zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
			   zone->type);
		return -EIO;
	}

	memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));

	return 0;
}
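The zone counting logic above has a direct userspace analogue: the BLKREPORTZONE ioctl returns the same struct blk_zone records that the kernel callback consumes. A sketch that tallies zone types the way zonefs sizes its zone groups; the device path is an assumption:

#include <fcntl.h>
#include <linux/blkzoned.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define NR_ZONES 16	/* report only the first few zones */

int main(void)
{
	struct blk_zone_report *rep;
	unsigned int i, cnv = 0, seq = 0;
	int fd;

	fd = open("/dev/nullb0", O_RDONLY);	/* assumed zoned device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	rep = calloc(1, sizeof(*rep) + NR_ZONES * sizeof(struct blk_zone));
	if (!rep) {
		close(fd);
		return 1;
	}
	rep->sector = 0;
	rep->nr_zones = NR_ZONES;

	/* The kernel fills rep->zones[] and updates rep->nr_zones */
	if (ioctl(fd, BLKREPORTZONE, rep) < 0) {
		perror("BLKREPORTZONE");
		free(rep);
		close(fd);
		return 1;
	}

	for (i = 0; i < rep->nr_zones; i++) {
		if (rep->zones[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
			cnv++;
		else
			seq++;
	}
	printf("%u conventional, %u sequential zones (first %u zones)\n",
	       cnv, seq, rep->nr_zones);

	free(rep);
	close(fd);
	return 0;
}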
static struct dentry *zonefs_create_inode(struct dentry *parent,
					  const char *name,
					  struct blk_zone *zone,
					  enum zonefs_ztype type)
{
	struct inode *dir = d_inode(parent);
	struct dentry *dentry;
	struct inode *inode;
	int ret = -ENOMEM;

	dentry = d_alloc_name(parent, name);
	if (!dentry)
		return ERR_PTR(ret);

	inode = new_inode(parent->d_sb);
	if (!inode)
		goto dput;

	inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
	if (zone) {
		ret = zonefs_init_file_inode(inode, zone, type);
		if (ret) {
			iput(inode);
			goto dput;
		}
	} else {
		zonefs_init_dir_inode(dir, inode, type);
	}

	d_add(dentry, inode);
	dir->i_size++;

	return dentry;

dput:
	dput(dentry);

	return ERR_PTR(ret);
}

struct zonefs_zone_data {
	struct super_block	*sb;
	unsigned int		nr_zones[ZONEFS_ZTYPE_MAX];
	struct blk_zone		*zones;
};

static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
{
	struct block_device *bdev = zd->sb->s_bdev;
	int ret;

	zd->zones = kvcalloc(bdev_nr_zones(bdev), sizeof(struct blk_zone),
			     GFP_KERNEL);
	if (!zd->zones)
		return -ENOMEM;

	/* Get zones information from the device */
	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				  zonefs_get_zone_info_cb, zd);
	if (ret < 0) {
		zonefs_err(zd->sb, "Zone report failed %d\n", ret);
		return ret;
	}

	if (ret != bdev_nr_zones(bdev)) {
		zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
			   ret, bdev_nr_zones(bdev));
		return -EIO;
	}

	return 0;
}

static inline void zonefs_free_zone_info(struct zonefs_zone_data *zd)
{
	kvfree(zd->zones);
}
/*
 * Create a zone group and populate it with zone files.
 */
static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
				enum zonefs_ztype type)
{
	struct super_block *sb = zd->sb;
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
	struct blk_zone *zone, *next, *end;
	const char *zgroup_name;
	char *file_name;
	struct dentry *dir, *dent;
	unsigned int n = 0;
	int ret;

	/* If the group is empty, there is nothing to do */
	if (!zd->nr_zones[type])
		return 0;

	file_name = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
	if (!file_name)
		return -ENOMEM;

	if (type == ZONEFS_ZTYPE_CNV)
		zgroup_name = "cnv";
	else
		zgroup_name = "seq";

	dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
	if (IS_ERR(dir)) {
		ret = PTR_ERR(dir);
		goto free;
	}

	/*
	 * The first zone contains the super block: skip it.
	 */
	end = zd->zones + bdev_nr_zones(sb->s_bdev);
	for (zone = &zd->zones[1]; zone < end; zone = next) {
		next = zone + 1;
		if (zonefs_zone_type(zone) != type)
			continue;

		/*
		 * For conventional zones, contiguous zones can be aggregated
		 * together to form larger files. Note that this overwrites the

/*
 * Create a zone group and populate it with zone files.
 */
static int zonefs_init_zgroup(struct super_block *sb,
			      struct zonefs_zone_data *zd,
			      enum zonefs_ztype ztype)
{
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
	struct zonefs_zone_group *zgroup = &sbi->s_zgroup[ztype];
	struct blk_zone *zone, *next, *end;
	struct zonefs_zone *z;
	unsigned int n = 0;
	int ret;

	/* Allocate the zone group. If it is empty, we have nothing to do. */
	if (!zgroup->g_nr_zones)
		return 0;

	zgroup->g_zones = kvcalloc(zgroup->g_nr_zones,
				   sizeof(struct zonefs_zone), GFP_KERNEL);
	if (!zgroup->g_zones)
		return -ENOMEM;

	/*
	 * Initialize the zone groups using the device zone information.
	 * We always skip the first zone as it contains the super block
	 * and is not used to back a file.
	 */
	end = zd->zones + bdev_nr_zones(sb->s_bdev);
	for (zone = &zd->zones[1]; zone < end; zone = next) {
		next = zone + 1;
		if (zonefs_zone_type(zone) != ztype)
			continue;

		if (WARN_ON_ONCE(n >= zgroup->g_nr_zones))
			return -EINVAL;

		/*
		 * For conventional zones, contiguous zones can be aggregated
		 * together to form larger files. Note that this overwrites the
@@ -1595,10 +982,10 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
		 * found, assume that all zones aggregated have the same
		 * condition.
		 */
		if (type == ZONEFS_ZTYPE_CNV &&
		    (sbi->s_features & ZONEFS_F_AGGRCNV)) {
			for (; next < end; next++) {
				if (zonefs_zone_type(next) != type)
					break;
				zone->len += next->len;
				zone->capacity += next->capacity;

		 * found, assume that all zones aggregated have the same
		 * condition.
		 */
		if (ztype == ZONEFS_ZTYPE_CNV &&
		    (sbi->s_features & ZONEFS_F_AGGRCNV)) {
			for (; next < end; next++) {
				if (zonefs_zone_type(next) != ztype)
					break;
				zone->len += next->len;
				zone->capacity += next->capacity;
@@ -1608,99 +995,118 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
				else if (next->cond == BLK_ZONE_COND_OFFLINE)
					zone->cond = BLK_ZONE_COND_OFFLINE;
			}
			if (zone->capacity != zone->len) {
				zonefs_err(sb, "Invalid conventional zone capacity\n");
				ret = -EINVAL;
				goto free;
			}
		}

		/*
		 * Use the file number within its group as file name.
		 */
		snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
		dent = zonefs_create_inode(dir, file_name, zone, type);
		if (IS_ERR(dent)) {
			ret = PTR_ERR(dent);
			goto free;
		}

		n++;
	}

	zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
		    zgroup_name, n, n > 1 ? "s" : "");

	sbi->s_nr_files[type] = n;
	ret = 0;

free:
	kfree(file_name);

	return ret;
}

				else if (next->cond == BLK_ZONE_COND_OFFLINE)
					zone->cond = BLK_ZONE_COND_OFFLINE;
			}
		}

		z = &zgroup->g_zones[n];
		if (ztype == ZONEFS_ZTYPE_CNV)
			z->z_flags |= ZONEFS_ZONE_CNV;
		z->z_sector = zone->start;
		z->z_size = zone->len << SECTOR_SHIFT;
		if (z->z_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
		    !(sbi->s_features & ZONEFS_F_AGGRCNV)) {
			zonefs_err(sb,
				   "Invalid zone size %llu (device zone sectors %llu)\n",
				   z->z_size,
				   bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
			return -EINVAL;
		}

		z->z_capacity = min_t(loff_t, MAX_LFS_FILESIZE,
				      zone->capacity << SECTOR_SHIFT);
		z->z_wpoffset = zonefs_check_zone_condition(sb, z, zone);

		z->z_mode = S_IFREG | sbi->s_perm;
		z->z_uid = sbi->s_uid;
		z->z_gid = sbi->s_gid;

		/*
		 * Let zonefs_inode_update_mode() know that we will need
		 * special initialization of the inode mode the first time
		 * it is accessed.
		 */
		z->z_flags |= ZONEFS_ZONE_INIT_MODE;

		sb->s_maxbytes = max(z->z_capacity, sb->s_maxbytes);
		sbi->s_blocks += z->z_capacity >> sb->s_blocksize_bits;
		sbi->s_used_blocks += z->z_wpoffset >> sb->s_blocksize_bits;

		/*
		 * For sequential zones, make sure that any open zone is closed
		 * first to ensure that the initial number of open zones is 0,
		 * in sync with the open zone accounting done when the mount
		 * option ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
		 */
		if (ztype == ZONEFS_ZTYPE_SEQ &&
		    (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
		     zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
			ret = zonefs_zone_mgmt(sb, z, REQ_OP_ZONE_CLOSE);
			if (ret)
				return ret;
		}

		zonefs_account_active(sb, z);

		n++;
	}

	if (WARN_ON_ONCE(n != zgroup->g_nr_zones))
		return -EINVAL;

	zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
		    zonefs_zgroup_name(ztype),
		    zgroup->g_nr_zones,
		    zgroup->g_nr_zones > 1 ? "s" : "");

	return 0;
}
static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
				   void *data)
{
	struct zonefs_zone_data *zd = data;

	/*
	 * Count the number of usable zones: the first zone at index 0 contains
	 * the super block and is ignored.
	 */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		zone->wp = zone->start + zone->len;
		if (idx)
			zd->nr_zones[ZONEFS_ZTYPE_CNV]++;
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (idx)
			zd->nr_zones[ZONEFS_ZTYPE_SEQ]++;
		break;
	default:
		zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
			   zone->type);
		return -EIO;
	}

	memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));

	return 0;
}

static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
{
	struct block_device *bdev = zd->sb->s_bdev;
	int ret;

	zd->zones = kvcalloc(bdev_nr_zones(bdev), sizeof(struct blk_zone),
			     GFP_KERNEL);
	if (!zd->zones)
		return -ENOMEM;

	/* Get zones information from the device */
	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				  zonefs_get_zone_info_cb, zd);
	if (ret < 0) {
		zonefs_err(zd->sb, "Zone report failed %d\n", ret);
		return ret;
	}

	if (ret != bdev_nr_zones(bdev)) {
		zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
			   ret, bdev_nr_zones(bdev));
		return -EIO;
	}

	return 0;
}

static inline void zonefs_cleanup_zone_info(struct zonefs_zone_data *zd)
{
	kvfree(zd->zones);
}

static void zonefs_free_zgroups(struct super_block *sb)
{
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
	enum zonefs_ztype ztype;

	if (!sbi)
		return;

	for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
		kvfree(sbi->s_zgroup[ztype].g_zones);
		sbi->s_zgroup[ztype].g_zones = NULL;
	}
}

/*
 * Create a zone group and populate it with zone files.
 */
static int zonefs_init_zgroups(struct super_block *sb)
{
	struct zonefs_zone_data zd;
	enum zonefs_ztype ztype;
	int ret;

	/* First get the device zone information */
	memset(&zd, 0, sizeof(struct zonefs_zone_data));
	zd.sb = sb;
	ret = zonefs_get_zone_info(&zd);
	if (ret)
		goto cleanup;

	/* Allocate and initialize the zone groups */
	for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
		ret = zonefs_init_zgroup(sb, &zd, ztype);
		if (ret) {
			zonefs_info(sb,
				    "Zone group \"%s\" initialization failed\n",
				    zonefs_zgroup_name(ztype));
			break;
		}
	}

cleanup:
	zonefs_free_zone_info(&zd);
	if (ret)
		zonefs_free_zgroups(sb);

	return ret;
}

/*
@@ -1785,6 +1191,50 @@ static int zonefs_read_super(struct super_block *sb)
	return ret;
}
static const struct super_operations zonefs_sops = {
.alloc_inode = zonefs_alloc_inode,
.free_inode = zonefs_free_inode,
.statfs = zonefs_statfs,
.remount_fs = zonefs_remount,
.show_options = zonefs_show_options,
};
static int zonefs_get_zgroup_inodes(struct super_block *sb)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
struct inode *dir_inode;
enum zonefs_ztype ztype;
for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
if (!sbi->s_zgroup[ztype].g_nr_zones)
continue;
dir_inode = zonefs_get_zgroup_inode(sb, ztype);
if (IS_ERR(dir_inode))
return PTR_ERR(dir_inode);
sbi->s_zgroup[ztype].g_inode = dir_inode;
}
return 0;
}
static void zonefs_release_zgroup_inodes(struct super_block *sb)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
enum zonefs_ztype ztype;
if (!sbi)
return;
for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
if (sbi->s_zgroup[ztype].g_inode) {
iput(sbi->s_zgroup[ztype].g_inode);
sbi->s_zgroup[ztype].g_inode = NULL;
}
}
}
/*
 * Check that the device is zoned. If it is, get the list of zones and create
 * sub-directories and files according to the device zone configuration and
 * format options.
 */

@@ -1792,10 +1242,9 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct zonefs_zone_data zd;
	struct zonefs_sb_info *sbi;
	struct inode *inode;
	enum zonefs_ztype t;
	int ret;

static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct zonefs_sb_info *sbi;
	struct inode *inode;
	enum zonefs_ztype ztype;
	int ret;

	if (!bdev_is_zoned(sb->s_bdev)) {
@@ -1845,16 +1294,6 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
	if (ret)
		return ret;

	memset(&zd, 0, sizeof(struct zonefs_zone_data));
	zd.sb = sb;
	ret = zonefs_get_zone_info(&zd);
	if (ret)
		goto cleanup;

	ret = zonefs_sysfs_register(sb);
	if (ret)
		goto cleanup;
zonefs_info(sb, "Mounting %u zones", bdev_nr_zones(sb->s_bdev)); zonefs_info(sb, "Mounting %u zones", bdev_nr_zones(sb->s_bdev));
if (!sbi->s_max_wro_seq_files && if (!sbi->s_max_wro_seq_files &&
@@ -1865,7 +1304,12 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
		sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
	}
	/* Create root directory inode */

	/* Initialize the zone groups */
	ret = zonefs_init_zgroups(sb);
	if (ret)
		goto cleanup;

	/* Create the root directory inode */
	ret = -ENOMEM;
	inode = new_inode(sb);
	if (!inode)
@@ -1875,22 +1319,37 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
	inode->i_mode = S_IFDIR | 0555;
	inode->i_ctime = inode->i_mtime = inode->i_atime = current_time(inode);
	inode->i_op = &zonefs_dir_inode_operations;
	inode->i_fop = &simple_dir_operations;
	set_nlink(inode, 2);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto cleanup;

	/* Create and populate files in zone groups directories */
	for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
		ret = zonefs_create_zgroup(&zd, t);
		if (ret)
			break;
	}

cleanup:
	zonefs_cleanup_zone_info(&zd);

	return ret;
}

	inode->i_mode = S_IFDIR | 0555;
	inode->i_ctime = inode->i_mtime = inode->i_atime = current_time(inode);
	inode->i_op = &zonefs_dir_inode_operations;
	inode->i_fop = &zonefs_dir_operations;
	inode->i_size = 2;
	set_nlink(inode, 2);
	for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
		if (sbi->s_zgroup[ztype].g_nr_zones) {
			inc_nlink(inode);
			inode->i_size++;
		}
	}

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto cleanup;

	/*
	 * Take a reference on the zone groups directory inodes
	 * to keep them in the inode cache.
	 */
	ret = zonefs_get_zgroup_inodes(sb);
	if (ret)
		goto cleanup;

	ret = zonefs_sysfs_register(sb);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	zonefs_release_zgroup_inodes(sb);
	zonefs_free_zgroups(sb);

	return ret;
}
@@ -1905,11 +1364,13 @@ static void zonefs_kill_super(struct super_block *sb)
static void zonefs_kill_super(struct super_block *sb)
{
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);

	if (sb->s_root)
		d_genocide(sb->s_root);

	zonefs_sysfs_unregister(sb);
	kill_block_super(sb);
	kfree(sbi);
}

static void zonefs_kill_super(struct super_block *sb)
{
	struct zonefs_sb_info *sbi = ZONEFS_SB(sb);

	/* Release the reference on the zone group directory inodes */
	zonefs_release_zgroup_inodes(sb);

	kill_block_super(sb);
	zonefs_sysfs_unregister(sb);
	zonefs_free_zgroups(sb);
	kfree(sbi);
}
@@ -79,7 +79,7 @@ static const struct sysfs_ops zonefs_sysfs_attr_ops = {
	.show	= zonefs_sysfs_attr_show,
};

static struct kobj_type zonefs_sb_ktype = {
static const struct kobj_type zonefs_sb_ktype = {
	.default_groups = zonefs_sysfs_groups,
	.sysfs_ops	= &zonefs_sysfs_attr_ops,
	.release	= zonefs_sysfs_sb_release,
@@ -20,8 +20,9 @@
#define show_dev(dev)	MAJOR(dev), MINOR(dev)

TRACE_EVENT(zonefs_zone_mgmt,
	    TP_PROTO(struct inode *inode, enum req_op op),
	    TP_ARGS(inode, op),
	    TP_PROTO(struct super_block *sb, struct zonefs_zone *z,
		     enum req_op op),
	    TP_ARGS(sb, z, op),
	    TP_STRUCT__entry(
		    __field(dev_t, dev)
		    __field(ino_t, ino)
@@ -30,12 +31,12 @@ TRACE_EVENT(zonefs_zone_mgmt,
		    __field(sector_t, nr_sectors)
	    ),
	    TP_fast_assign(
		    __entry->dev = inode->i_sb->s_dev;
		    __entry->ino = inode->i_ino;
		    __entry->dev = sb->s_dev;
		    __entry->ino =
			    z->z_sector >> ZONEFS_SB(sb)->s_zone_sectors_shift;
		    __entry->op = op;
		    __entry->sector = ZONEFS_I(inode)->i_zsector;
		    __entry->nr_sectors =
			    ZONEFS_I(inode)->i_zone_size >> SECTOR_SHIFT;
		    __entry->sector = z->z_sector;
		    __entry->nr_sectors = z->z_size >> SECTOR_SHIFT;
	    ),
	    TP_printk("bdev=(%d,%d), ino=%lu op=%s, sector=%llu, nr_sectors=%llu",
		      show_dev(__entry->dev), (unsigned long)__entry->ino,
@@ -58,9 +59,10 @@ TRACE_EVENT(zonefs_file_dio_append,
	    TP_fast_assign(
		    __entry->dev = inode->i_sb->s_dev;
		    __entry->ino = inode->i_ino;
		    __entry->sector = ZONEFS_I(inode)->i_zsector;
		    __entry->sector = zonefs_inode_zone(inode)->z_sector;
		    __entry->size = size;
		    __entry->wpoffset = ZONEFS_I(inode)->i_wpoffset;
		    __entry->wpoffset =
			    zonefs_inode_zone(inode)->z_wpoffset;
		    __entry->ret = ret;
	    ),
	    TP_printk("bdev=(%d, %d), ino=%lu, sector=%llu, size=%zu, wpoffset=%llu, ret=%zu",
@@ -39,31 +39,53 @@ static inline enum zonefs_ztype zonefs_zone_type(struct blk_zone *zone)
	return ZONEFS_ZTYPE_SEQ;
}

#define ZONEFS_ZONE_OPEN	(1U << 0)
#define ZONEFS_ZONE_ACTIVE	(1U << 1)
#define ZONEFS_ZONE_OFFLINE	(1U << 2)
#define ZONEFS_ZONE_READONLY	(1U << 3)

#define ZONEFS_ZONE_INIT_MODE	(1U << 0)
#define ZONEFS_ZONE_OPEN	(1U << 1)
#define ZONEFS_ZONE_ACTIVE	(1U << 2)
#define ZONEFS_ZONE_OFFLINE	(1U << 3)
#define ZONEFS_ZONE_READONLY	(1U << 4)
#define ZONEFS_ZONE_CNV		(1U << 31)

/*
 * In-memory inode data.
 */
struct zonefs_inode_info {
	struct inode		i_vnode;

	/* File zone type */
	enum zonefs_ztype	i_ztype;

	/* File zone start sector (512B unit) */
	sector_t		i_zsector;

	/* File zone write pointer position (sequential zones only) */
	loff_t			i_wpoffset;

	/* File maximum size */
	loff_t			i_max_size;

	/* File zone size */
	loff_t			i_zone_size;

/*
 * In-memory per-file inode zone data.
 */
struct zonefs_zone {
	/* Zone state flags */
	unsigned int		z_flags;

	/* Zone start sector (512B unit) */
	sector_t		z_sector;

	/* Zone size (bytes) */
	loff_t			z_size;

	/* Zone capacity (file maximum size, bytes) */
	loff_t			z_capacity;

	/* Write pointer offset in the zone (sequential zones only, bytes) */
	loff_t			z_wpoffset;

	/* Saved inode uid, gid and access rights */
	umode_t			z_mode;
	kuid_t			z_uid;
	kgid_t			z_gid;
};

/*
 * In memory zone group information: all zones of a group are exposed
 * as files, one file per zone.
 */
struct zonefs_zone_group {
	struct inode		*g_inode;
	unsigned int		g_nr_zones;
	struct zonefs_zone	*g_zones;
};

/*
 * In-memory inode data.
 */
struct zonefs_inode_info {
	struct inode		i_vnode;

	/*
	 * To serialise fully against both syscall and mmap based IO and
@@ -82,7 +104,6 @@ struct zonefs_inode_info {
	/* guarded by i_truncate_mutex */
	unsigned int		i_wr_refcnt;
	unsigned int		i_flags;
};

static inline struct zonefs_inode_info *ZONEFS_I(struct inode *inode)
static inline bool zonefs_zone_is_cnv(struct zonefs_zone *z)
{
return z->z_flags & ZONEFS_ZONE_CNV;
}
static inline bool zonefs_zone_is_seq(struct zonefs_zone *z)
{
return !zonefs_zone_is_cnv(z);
}
static inline struct zonefs_zone *zonefs_inode_zone(struct inode *inode)
{
return inode->i_private;
}
static inline bool zonefs_inode_is_cnv(struct inode *inode)
{
return zonefs_zone_is_cnv(zonefs_inode_zone(inode));
}
static inline bool zonefs_inode_is_seq(struct inode *inode)
{
return zonefs_zone_is_seq(zonefs_inode_zone(inode));
}
/*
 * On-disk super block (block 0).
 */
@@ -181,7 +227,7 @@ struct zonefs_sb_info {
	uuid_t			s_uuid;
	unsigned int		s_zone_sectors_shift;

	unsigned int		s_nr_files[ZONEFS_ZTYPE_MAX];
	struct zonefs_zone_group s_zgroup[ZONEFS_ZTYPE_MAX];

	loff_t			s_blocks;
	loff_t			s_used_blocks;
@@ -209,6 +255,32 @@ static inline struct zonefs_sb_info *ZONEFS_SB(struct super_block *sb)
#define zonefs_warn(sb, format, args...)	\
	pr_warn("zonefs (%s) WARNING: " format, sb->s_id, ## args)
/* In super.c */
void zonefs_inode_account_active(struct inode *inode);
int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op);
void zonefs_i_size_write(struct inode *inode, loff_t isize);
void zonefs_update_stats(struct inode *inode, loff_t new_isize);
void __zonefs_io_error(struct inode *inode, bool write);
static inline void zonefs_io_error(struct inode *inode, bool write)
{
struct zonefs_inode_info *zi = ZONEFS_I(inode);
mutex_lock(&zi->i_truncate_mutex);
__zonefs_io_error(inode, write);
mutex_unlock(&zi->i_truncate_mutex);
}
/* In super.c */
extern const struct inode_operations zonefs_dir_inode_operations;
extern const struct file_operations zonefs_dir_operations;
/* In file.c */
extern const struct address_space_operations zonefs_file_aops;
extern const struct file_operations zonefs_file_operations;
int zonefs_file_truncate(struct inode *inode, loff_t isize);
/* In sysfs.c */
int zonefs_sysfs_register(struct super_block *sb);
void zonefs_sysfs_unregister(struct super_block *sb);
int zonefs_sysfs_init(void);