Commit ab78bec6 authored by Linus Torvalds

Merge master.kernel.org:/home/hch/BK/xfs/linux-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux

parents 55ff56e3 9b9c3303
@@ -8,7 +8,6 @@
*/ */
#include <linux/fs.h> #include <linux/fs.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#include <linux/file.h> #include <linux/file.h>
#include <linux/xattr.h> #include <linux/xattr.h>
@@ -19,7 +18,7 @@
/* /*
* Extended attribute memory allocation wrappers, originally * Extended attribute memory allocation wrappers, originally
* based on the Intermezzo PRESTO_ALLOC/PRESTO_FREE macros. * based on the Intermezzo PRESTO_ALLOC/PRESTO_FREE macros.
* The vmalloc use here is very uncommon - extended attributes * Values larger than a page are uncommon - extended attributes
* are supposed to be small chunks of metadata, and it is quite * are supposed to be small chunks of metadata, and it is quite
* unusual to have very many extended attributes, so lists tend * unusual to have very many extended attributes, so lists tend
* to be quite short as well. The 64K upper limit is derived * to be quite short as well. The 64K upper limit is derived
@@ -36,10 +35,8 @@ xattr_alloc(size_t size, size_t limit)
if (!size) /* size request, no buffer is needed */ if (!size) /* size request, no buffer is needed */
return NULL; return NULL;
else if (size <= PAGE_SIZE)
ptr = kmalloc((unsigned long) size, GFP_KERNEL); ptr = kmalloc((unsigned long) size, GFP_KERNEL);
else
ptr = vmalloc((unsigned long) size);
if (!ptr) if (!ptr)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
return ptr; return ptr;
@@ -48,12 +45,8 @@ xattr_alloc(size_t size, size_t limit)
static void static void
xattr_free(void *ptr, size_t size) xattr_free(void *ptr, size_t size)
{ {
if (!size) /* size request, no buffer was needed */ if (size) /* for a size request, no buffer was needed */
return;
else if (size <= PAGE_SIZE)
kfree(ptr); kfree(ptr);
else
vfree(ptr);
} }
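The comments above describe a simple calling convention: a size of zero means the caller is only asking how large the attribute is, so no buffer is allocated and xattr_free() becomes a no-op. A minimal caller sketch, illustrative only and not part of this change (xattr_get_value() is a hypothetical stand-in for the real attribute-fetch path, and 65536 is the 64K limit mentioned above):

STATIC int
example_get_xattr(size_t size)
{
	void	*buf;
	int	error;

	buf = xattr_alloc(size, 65536);		/* returns NULL when size == 0 */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	error = xattr_get_value(buf, size);	/* hypothetical helper */

	xattr_free(buf, size);			/* no-op when size == 0 */
	return error;
}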
/* /*
......
@@ -135,17 +135,14 @@ probe_unmapped_page(
struct page *page; struct page *page;
int ret = 0; int ret = 0;
page = find_get_page(mapping, index); page = find_trylock_page(mapping, index);
if (!page) if (!page)
return 0; return 0;
if (PageWriteback(page) || TestSetPageLocked(page)) { if (PageWriteback(page))
page_cache_release(page); goto out;
return 0;
}
if (page->mapping && PageDirty(page)) { if (page->mapping && PageDirty(page)) {
if (!page_has_buffers(page)) { if (page_has_buffers(page)) {
ret = PAGE_CACHE_SIZE;
} else {
struct buffer_head *bh, *head; struct buffer_head *bh, *head;
bh = head = page_buffers(page); bh = head = page_buffers(page);
do { do {
@@ -156,11 +153,12 @@ probe_unmapped_page(
if (ret >= pg_offset) if (ret >= pg_offset)
break; break;
} while ((bh = bh->b_this_page) != head); } while ((bh = bh->b_this_page) != head);
} } else
ret = PAGE_CACHE_SIZE;
} }
out:
unlock_page(page); unlock_page(page);
page_cache_release(page);
return ret; return ret;
} }
@@ -214,13 +212,12 @@ probe_page(
{ {
struct page *page; struct page *page;
page = find_get_page(inode->i_mapping, index); page = find_trylock_page(inode->i_mapping, index);
if (!page) if (!page)
return NULL; return NULL;
if (PageWriteback(page) || TestSetPageLocked(page)) { if (PageWriteback(page))
page_cache_release(page); goto out;
return NULL;
}
if (page->mapping && page_has_buffers(page)) { if (page->mapping && page_has_buffers(page)) {
struct buffer_head *bh, *head; struct buffer_head *bh, *head;
@@ -230,8 +227,9 @@ probe_page(
return page; return page;
} while ((bh = bh->b_this_page) != head); } while ((bh = bh->b_this_page) != head);
} }
out:
unlock_page(page); unlock_page(page);
page_cache_release(page);
return NULL; return NULL;
} }
@@ -319,8 +317,6 @@ convert_page(
} else { } else {
unlock_page(page); unlock_page(page);
} }
page_cache_release(page);
} }
/* /*
......
@@ -138,6 +138,22 @@ linvfs_aio_write(
return linvfs_writev(iocb->ki_filp, &iov, 1, &iocb->ki_pos); return linvfs_writev(iocb->ki_filp, &iov, 1, &iocb->ki_pos);
} }
STATIC ssize_t
linvfs_sendfile(
struct file *filp,
loff_t *ppos,
size_t count,
read_actor_t actor,
void *target)
{
vnode_t *vp = LINVFS_GET_VP(filp->f_dentry->d_inode);
int error;
VOP_SENDFILE(vp, filp, ppos, count, actor, target, NULL, error);
return error;
}
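Together with the VOP_SENDFILE macro and xfs_sendfile() added later in this change, this entry point gives XFS a sendfile path. A rough sketch of the resulting call chain, simplified, with locking and DMAPI details left to xfs_sendfile():

/*
 * sys_sendfile()
 *   -> file->f_op->sendfile()          i.e. linvfs_sendfile() above
 *     -> VOP_SENDFILE(vp, ...)         behavior-chain dispatch (xfs_vnode.h)
 *       -> xfs_sendfile()              iolock and DMAPI read-event handling
 *         -> generic_file_sendfile()   generic page-cache copy via the actor
 */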
STATIC int STATIC int
linvfs_open( linvfs_open(
@@ -186,7 +202,7 @@ linvfs_fsync(
ASSERT(vp); ASSERT(vp);
VOP_FSYNC(vp, flags, NULL, (off_t)0, (off_t)-1, error); VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error);
return -error; return -error;
} }
@@ -212,7 +228,7 @@ linvfs_readdir(
caddr_t read_buf; caddr_t read_buf;
int namelen, size = 0; int namelen, size = 0;
size_t rlen = PAGE_CACHE_SIZE << 2; size_t rlen = PAGE_CACHE_SIZE << 2;
off_t start_offset; xfs_off_t start_offset;
xfs_dirent_t *dbp = NULL; xfs_dirent_t *dbp = NULL;
vp = LINVFS_GET_VP(filp->f_dentry->d_inode); vp = LINVFS_GET_VP(filp->f_dentry->d_inode);
@@ -280,7 +296,7 @@ linvfs_file_mmap(
{ {
struct inode *ip = filp->f_dentry->d_inode; struct inode *ip = filp->f_dentry->d_inode;
vnode_t *vp = LINVFS_GET_VP(ip); vnode_t *vp = LINVFS_GET_VP(ip);
vattr_t va = { .va_mask = AT_UPDATIME }; vattr_t va = { .va_mask = XFS_AT_UPDATIME };
int error; int error;
if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) { if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) {
@@ -291,7 +307,7 @@ linvfs_file_mmap(
vma->vm_ops = &linvfs_file_vm_ops; vma->vm_ops = &linvfs_file_vm_ops;
VOP_SETATTR(vp, &va, AT_UPDATIME, NULL, error); VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error);
UPDATE_ATIME(ip); UPDATE_ATIME(ip);
return 0; return 0;
} }
@@ -348,6 +364,7 @@ struct file_operations linvfs_file_operations = {
.writev = linvfs_writev, .writev = linvfs_writev,
.aio_read = linvfs_aio_read, .aio_read = linvfs_aio_read,
.aio_write = linvfs_aio_write, .aio_write = linvfs_aio_write,
.sendfile = linvfs_sendfile,
.ioctl = linvfs_ioctl, .ioctl = linvfs_ioctl,
.mmap = linvfs_file_mmap, .mmap = linvfs_file_mmap,
.open = linvfs_open, .open = linvfs_open,
......
@@ -34,8 +34,9 @@
#include <xfs_fsops.h> #include <xfs_fsops.h>
#include <xfs_dfrag.h> #include <xfs_dfrag.h>
#include <linux/dcache.h> #include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/mount.h> #include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
extern int xfs_change_file_space(bhv_desc_t *, int, extern int xfs_change_file_space(bhv_desc_t *, int,
@@ -591,17 +592,16 @@ xfs_ioctl(
case XFS_IOC_DIOINFO: { case XFS_IOC_DIOINFO: {
struct dioattr da; struct dioattr da;
da.d_miniosz = mp->m_sb.sb_blocksize;
da.d_mem = mp->m_sb.sb_blocksize;
/* /*
* this only really needs to be BBSIZE. * this only really needs to be BBSIZE.
* it is set to the file system block size to * it is set to the file system block size to
* avoid having to do block zeroing on short writes. * avoid having to do block zeroing on short writes.
*/ */
#define KIO_MAX_ATOMIC_IO 512 /* FIXME: what do we really want here? */ da.d_miniosz = mp->m_sb.sb_blocksize;
da.d_maxiosz = XFS_FSB_TO_B(mp, da.d_mem = mp->m_sb.sb_blocksize;
XFS_B_TO_FSBT(mp, KIO_MAX_ATOMIC_IO << 10)); /* The size dio will do in one go */
da.d_maxiosz = 64 * PAGE_CACHE_SIZE;
if (copy_to_user((struct dioattr *)arg, &da, sizeof(da))) if (copy_to_user((struct dioattr *)arg, &da, sizeof(da)))
return -XFS_ERROR(EFAULT); return -XFS_ERROR(EFAULT);
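For context, XFS_IOC_DIOINFO is the interface applications use to discover direct-I/O constraints before doing O_DIRECT reads and writes. An illustrative user-space sketch, not part of this change (the header location is an assumption and error handling is minimal):

#include <malloc.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>		/* assumed location of struct dioattr */

static void *
alloc_dio_buffer(int fd, struct dioattr *da)
{
	if (ioctl(fd, XFS_IOC_DIOINFO, da) < 0)
		return NULL;
	/* transfers must be multiples of d_miniosz; buffers aligned to d_mem */
	return memalign(da->d_mem, da->d_maxiosz);
}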
@@ -945,7 +945,7 @@ int xfs_ioc_xattr(
switch (cmd) { switch (cmd) {
case XFS_IOC_FSGETXATTR: { case XFS_IOC_FSGETXATTR: {
va.va_mask = AT_XFLAGS|AT_EXTSIZE|AT_NEXTENTS; va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS;
VOP_GETATTR(vp, &va, 0, NULL, error); VOP_GETATTR(vp, &va, 0, NULL, error);
if (error) if (error)
return -error; return -error;
@@ -965,7 +965,7 @@ int xfs_ioc_xattr(
if (copy_from_user(&fa, (struct fsxattr *)arg, sizeof(fa))) if (copy_from_user(&fa, (struct fsxattr *)arg, sizeof(fa)))
return -XFS_ERROR(EFAULT); return -XFS_ERROR(EFAULT);
va.va_mask = AT_XFLAGS | AT_EXTSIZE; va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE;
va.va_xflags = fa.fsx_xflags; va.va_xflags = fa.fsx_xflags;
va.va_extsize = fa.fsx_extsize; va.va_extsize = fa.fsx_extsize;
@@ -978,7 +978,7 @@ int xfs_ioc_xattr(
case XFS_IOC_FSGETXATTRA: { case XFS_IOC_FSGETXATTRA: {
va.va_mask = AT_XFLAGS|AT_EXTSIZE|AT_ANEXTENTS; va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_ANEXTENTS;
VOP_GETATTR(vp, &va, 0, NULL, error); VOP_GETATTR(vp, &va, 0, NULL, error);
if (error) if (error)
return -error; return -error;
......
@@ -45,7 +45,7 @@ validate_fields(
vattr_t va; vattr_t va;
int error; int error;
va.va_mask = AT_NLINK|AT_SIZE; va.va_mask = XFS_AT_NLINK|XFS_AT_SIZE;
VOP_GETATTR(vp, &va, ATTR_LAZY, NULL, error); VOP_GETATTR(vp, &va, ATTR_LAZY, NULL, error);
ip->i_nlink = va.va_nlink; ip->i_nlink = va.va_nlink;
ip->i_size = va.va_size; ip->i_size = va.va_size;
@@ -85,14 +85,14 @@ linvfs_mknod(
mode &= ~current->fs->umask; mode &= ~current->fs->umask;
memset(&va, 0, sizeof(va)); memset(&va, 0, sizeof(va));
va.va_mask = AT_TYPE|AT_MODE; va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
va.va_type = IFTOVT(mode); va.va_type = IFTOVT(mode);
va.va_mode = mode; va.va_mode = mode;
switch (mode & S_IFMT) { switch (mode & S_IFMT) {
case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK: case S_IFCHR: case S_IFBLK: case S_IFIFO: case S_IFSOCK:
va.va_rdev = XFS_MKDEV(MAJOR(rdev), MINOR(rdev)); va.va_rdev = XFS_MKDEV(MAJOR(rdev), MINOR(rdev));
va.va_mask |= AT_RDEV; va.va_mask |= XFS_AT_RDEV;
/*FALLTHROUGH*/ /*FALLTHROUGH*/
case S_IFREG: case S_IFREG:
VOP_CREATE(dvp, dentry, &va, &vp, NULL, error); VOP_CREATE(dvp, dentry, &va, &vp, NULL, error);
@@ -255,7 +255,7 @@ linvfs_symlink(
memset(&va, 0, sizeof(va)); memset(&va, 0, sizeof(va));
va.va_type = VLNK; va.va_type = VLNK;
va.va_mode = irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO; va.va_mode = irix_symlink_mode ? 0777 & ~current->fs->umask : S_IRWXUGO;
va.va_mask = AT_TYPE|AT_MODE; va.va_mask = XFS_AT_TYPE|XFS_AT_MODE;
error = 0; error = 0;
VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error); VOP_SYMLINK(dvp, dentry, &va, (char *)symname, &cvp, NULL, error);
@@ -462,31 +462,31 @@ linvfs_setattr(
memset(&vattr, 0, sizeof(vattr_t)); memset(&vattr, 0, sizeof(vattr_t));
if (ia_valid & ATTR_UID) { if (ia_valid & ATTR_UID) {
vattr.va_mask |= AT_UID; vattr.va_mask |= XFS_AT_UID;
vattr.va_uid = attr->ia_uid; vattr.va_uid = attr->ia_uid;
} }
if (ia_valid & ATTR_GID) { if (ia_valid & ATTR_GID) {
vattr.va_mask |= AT_GID; vattr.va_mask |= XFS_AT_GID;
vattr.va_gid = attr->ia_gid; vattr.va_gid = attr->ia_gid;
} }
if (ia_valid & ATTR_SIZE) { if (ia_valid & ATTR_SIZE) {
vattr.va_mask |= AT_SIZE; vattr.va_mask |= XFS_AT_SIZE;
vattr.va_size = attr->ia_size; vattr.va_size = attr->ia_size;
} }
if (ia_valid & ATTR_ATIME) { if (ia_valid & ATTR_ATIME) {
vattr.va_mask |= AT_ATIME; vattr.va_mask |= XFS_AT_ATIME;
vattr.va_atime = attr->ia_atime; vattr.va_atime = attr->ia_atime;
} }
if (ia_valid & ATTR_MTIME) { if (ia_valid & ATTR_MTIME) {
vattr.va_mask |= AT_MTIME; vattr.va_mask |= XFS_AT_MTIME;
vattr.va_mtime = attr->ia_mtime; vattr.va_mtime = attr->ia_mtime;
} }
if (ia_valid & ATTR_CTIME) { if (ia_valid & ATTR_CTIME) {
vattr.va_mask |= AT_CTIME; vattr.va_mask |= XFS_AT_CTIME;
vattr.va_ctime = attr->ia_ctime; vattr.va_ctime = attr->ia_ctime;
} }
if (ia_valid & ATTR_MODE) { if (ia_valid & ATTR_MODE) {
vattr.va_mask |= AT_MODE; vattr.va_mask |= XFS_AT_MODE;
vattr.va_mode = attr->ia_mode; vattr.va_mode = attr->ia_mode;
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID)) if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
inode->i_mode &= ~S_ISGID; inode->i_mode &= ~S_ISGID;
......
@@ -153,7 +153,6 @@ typedef struct xfs_dirent {	/* data from readdir() */
#define EFSCORRUPTED 990 /* Filesystem is corrupted */ #define EFSCORRUPTED 990 /* Filesystem is corrupted */
#define SYNCHRONIZE() barrier() #define SYNCHRONIZE() barrier()
#define rootdev ROOT_DEV
#define __return_address __builtin_return_address(0) #define __return_address __builtin_return_address(0)
/* IRIX uses a dynamic sizing algorithm (ndquot = 200 + numprocs*2) */ /* IRIX uses a dynamic sizing algorithm (ndquot = 200 + numprocs*2) */
......
@@ -205,6 +205,67 @@ xfs_read(
return ret; return ret;
} }
ssize_t
xfs_sendfile(
bhv_desc_t *bdp,
struct file *filp,
loff_t *offp,
size_t count,
read_actor_t actor,
void *target,
cred_t *credp)
{
size_t size = 0;
ssize_t ret;
xfs_fsize_t n;
xfs_inode_t *ip;
xfs_mount_t *mp;
vnode_t *vp;
ip = XFS_BHVTOI(bdp);
vp = BHV_TO_VNODE(bdp);
mp = ip->i_mount;
vn_trace_entry(vp, "xfs_sendfile", (inst_t *)__return_address);
XFS_STATS_INC(xfsstats.xs_read_calls);
n = XFS_MAX_FILE_OFFSET - *offp;
if ((n <= 0) || (size == 0))
return 0;
if (n < size)
size = n;
if (XFS_FORCED_SHUTDOWN(mp)) {
return -EIO;
}
xfs_ilock(ip, XFS_IOLOCK_SHARED);
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
!(filp->f_mode & FINVIS)) {
int error;
vrwlock_t locktype = VRWLOCK_READ;
error = xfs_dm_send_data_event(DM_EVENT_READ, bdp, *offp,
size, FILP_DELAY_FLAG(filp), &locktype);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return -error;
}
}
ret = generic_file_sendfile(filp, offp, count, actor, target);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
XFS_STATS_ADD(xfsstats.xs_read_bytes, ret);
if (!(filp->f_mode & FINVIS))
xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
return ret;
}
/* /*
* This routine is called to handle zeroing any space in the last * This routine is called to handle zeroing any space in the last
* block of the file that is beyond the EOF. We do this since the * block of the file that is beyond the EOF. We do this since the
......
@@ -32,15 +32,128 @@
#ifndef __XFS_STATS_H__ #ifndef __XFS_STATS_H__
#define __XFS_STATS_H__ #define __XFS_STATS_H__
#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)
/* /*
* procfs interface * XFS global statistics
*/ */
#ifdef CONFIG_PROC_FS struct xfsstats {
# define XFSSTAT_END_EXTENT_ALLOC 4
__uint32_t xs_allocx;
__uint32_t xs_allocb;
__uint32_t xs_freex;
__uint32_t xs_freeb;
# define XFSSTAT_END_ALLOC_BTREE (XFSSTAT_END_EXTENT_ALLOC+4)
__uint32_t xs_abt_lookup;
__uint32_t xs_abt_compare;
__uint32_t xs_abt_insrec;
__uint32_t xs_abt_delrec;
# define XFSSTAT_END_BLOCK_MAPPING (XFSSTAT_END_ALLOC_BTREE+7)
__uint32_t xs_blk_mapr;
__uint32_t xs_blk_mapw;
__uint32_t xs_blk_unmap;
__uint32_t xs_add_exlist;
__uint32_t xs_del_exlist;
__uint32_t xs_look_exlist;
__uint32_t xs_cmp_exlist;
# define XFSSTAT_END_BLOCK_MAP_BTREE (XFSSTAT_END_BLOCK_MAPPING+4)
__uint32_t xs_bmbt_lookup;
__uint32_t xs_bmbt_compare;
__uint32_t xs_bmbt_insrec;
__uint32_t xs_bmbt_delrec;
# define XFSSTAT_END_DIRECTORY_OPS (XFSSTAT_END_BLOCK_MAP_BTREE+4)
__uint32_t xs_dir_lookup;
__uint32_t xs_dir_create;
__uint32_t xs_dir_remove;
__uint32_t xs_dir_getdents;
# define XFSSTAT_END_TRANSACTIONS (XFSSTAT_END_DIRECTORY_OPS+3)
__uint32_t xs_trans_sync;
__uint32_t xs_trans_async;
__uint32_t xs_trans_empty;
# define XFSSTAT_END_INODE_OPS (XFSSTAT_END_TRANSACTIONS+7)
__uint32_t xs_ig_attempts;
__uint32_t xs_ig_found;
__uint32_t xs_ig_frecycle;
__uint32_t xs_ig_missed;
__uint32_t xs_ig_dup;
__uint32_t xs_ig_reclaims;
__uint32_t xs_ig_attrchg;
# define XFSSTAT_END_LOG_OPS (XFSSTAT_END_INODE_OPS+5)
__uint32_t xs_log_writes;
__uint32_t xs_log_blocks;
__uint32_t xs_log_noiclogs;
__uint32_t xs_log_force;
__uint32_t xs_log_force_sleep;
# define XFSSTAT_END_TAIL_PUSHING (XFSSTAT_END_LOG_OPS+10)
__uint32_t xs_try_logspace;
__uint32_t xs_sleep_logspace;
__uint32_t xs_push_ail;
__uint32_t xs_push_ail_success;
__uint32_t xs_push_ail_pushbuf;
__uint32_t xs_push_ail_pinned;
__uint32_t xs_push_ail_locked;
__uint32_t xs_push_ail_flushing;
__uint32_t xs_push_ail_restarts;
__uint32_t xs_push_ail_flush;
# define XFSSTAT_END_WRITE_CONVERT (XFSSTAT_END_TAIL_PUSHING+2)
__uint32_t xs_xstrat_quick;
__uint32_t xs_xstrat_split;
# define XFSSTAT_END_READ_WRITE_OPS (XFSSTAT_END_WRITE_CONVERT+2)
__uint32_t xs_write_calls;
__uint32_t xs_read_calls;
# define XFSSTAT_END_ATTRIBUTE_OPS (XFSSTAT_END_READ_WRITE_OPS+4)
__uint32_t xs_attr_get;
__uint32_t xs_attr_set;
__uint32_t xs_attr_remove;
__uint32_t xs_attr_list;
# define XFSSTAT_END_QUOTA_OPS (XFSSTAT_END_ATTRIBUTE_OPS+8)
__uint32_t xs_qm_dqreclaims;
__uint32_t xs_qm_dqreclaim_misses;
__uint32_t xs_qm_dquot_dups;
__uint32_t xs_qm_dqcachemisses;
__uint32_t xs_qm_dqcachehits;
__uint32_t xs_qm_dqwants;
__uint32_t xs_qm_dqshake_reclaims;
__uint32_t xs_qm_dqinact_reclaims;
# define XFSSTAT_END_INODE_CLUSTER (XFSSTAT_END_QUOTA_OPS+3)
__uint32_t xs_iflush_count;
__uint32_t xs_icluster_flushcnt;
__uint32_t xs_icluster_flushinode;
# define XFSSTAT_END_VNODE_OPS (XFSSTAT_END_INODE_CLUSTER+8)
__uint32_t vn_active; /* # vnodes not on free lists */
__uint32_t vn_alloc; /* # times vn_alloc called */
__uint32_t vn_get; /* # times vn_get called */
__uint32_t vn_hold; /* # times vn_hold called */
__uint32_t vn_rele; /* # times vn_rele called */
__uint32_t vn_reclaim; /* # times vn_reclaim called */
__uint32_t vn_remove; /* # times vn_remove called */
__uint32_t vn_free; /* # times vn_free called */
/* Extra precision counters */
__uint64_t xs_xstrat_bytes;
__uint64_t xs_write_bytes;
__uint64_t xs_read_bytes;
};
extern struct xfsstats xfsstats;
# define XFS_STATS_INC(count) ( (count)++ )
# define XFS_STATS_DEC(count) ( (count)-- )
# define XFS_STATS_ADD(count, inc) ( (count) += (inc) )
extern void xfs_init_procfs(void); extern void xfs_init_procfs(void);
extern void xfs_cleanup_procfs(void); extern void xfs_cleanup_procfs(void);
#else
#else /* !CONFIG_PROC_FS */
# define XFS_STATS_INC(count)
# define XFS_STATS_DEC(count)
# define XFS_STATS_ADD(count, inc)
static __inline void xfs_init_procfs(void) { }; static __inline void xfs_init_procfs(void) { };
static __inline void xfs_cleanup_procfs(void) { }; static __inline void xfs_cleanup_procfs(void) { };
#endif
#endif /* !CONFIG_PROC_FS */
#endif /* __XFS_STATS_H__ */ #endif /* __XFS_STATS_H__ */
@@ -46,10 +46,6 @@
extern int xfs_init(void); extern int xfs_init(void);
extern void xfs_cleanup(void); extern void xfs_cleanup(void);
#ifndef EVMS_MAJOR
# define EVMS_MAJOR 117
#endif
/* For kernels which have the s_maxbytes field - set it */ /* For kernels which have the s_maxbytes field - set it */
#ifdef MAX_NON_LFS #ifdef MAX_NON_LFS
# define set_max_bytes(sb) ((sb)->s_maxbytes = XFS_MAX_FILE_OFFSET) # define set_max_bytes(sb) ((sb)->s_maxbytes = XFS_MAX_FILE_OFFSET)
@@ -101,7 +97,6 @@ STATIC struct export_operations linvfs_export_ops;
#define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ #define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */
#define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */ #define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
#define MNTOPT_QUOTA "quota" /* disk quotas */ #define MNTOPT_QUOTA "quota" /* disk quotas */
#define MNTOPT_MRQUOTA "mrquota" /* don't turnoff if SB has quotas on */
#define MNTOPT_NOQUOTA "noquota" /* no quotas */ #define MNTOPT_NOQUOTA "noquota" /* no quotas */
#define MNTOPT_UQUOTA "usrquota" /* user quota enabled */ #define MNTOPT_UQUOTA "usrquota" /* user quota enabled */
#define MNTOPT_GQUOTA "grpquota" /* group quota enabled */ #define MNTOPT_GQUOTA "grpquota" /* group quota enabled */
@@ -513,6 +508,23 @@ xfs_free_buftarg(
kfree(btp); kfree(btp);
} }
void
xfs_size_buftarg(
xfs_buftarg_t *btp,
unsigned int blocksize,
unsigned int sectorsize)
{
btp->pbr_bsize = blocksize;
btp->pbr_sshift = ffs(sectorsize) - 1;
btp->pbr_smask = sectorsize - 1;
if (set_blocksize(btp->pbr_bdev, sectorsize)) {
printk(KERN_WARNING
"XFS: Cannot set_blocksize to %u on device 0x%x\n",
sectorsize, btp->pbr_dev);
}
}
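The shift/mask pair stored here makes alignment checks cheap: for a 512-byte sector device, ffs(512) - 1 yields pbr_sshift == 9 and pbr_smask == 0x1ff. An illustrative helper, not part of this change, showing how the two fields are meant to be used together:

static inline int
example_sector_aligned(
	xfs_buftarg_t	*btp,
	loff_t		base,
	size_t		len)
{
	/* the start offset and the length must both sit on sector boundaries */
	return !(base & (loff_t)btp->pbr_smask) &&
	       len >= (1UL << btp->pbr_sshift) &&
	       !(len & btp->pbr_smask);
}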
xfs_buftarg_t * xfs_buftarg_t *
xfs_alloc_buftarg( xfs_alloc_buftarg(
struct block_device *bdev) struct block_device *bdev)
@@ -524,14 +536,7 @@ xfs_alloc_buftarg(
btp->pbr_dev = bdev->bd_dev; btp->pbr_dev = bdev->bd_dev;
btp->pbr_bdev = bdev; btp->pbr_bdev = bdev;
btp->pbr_mapping = bdev->bd_inode->i_mapping; btp->pbr_mapping = bdev->bd_inode->i_mapping;
btp->pbr_blocksize = PAGE_CACHE_SIZE; xfs_size_buftarg(btp, PAGE_CACHE_SIZE, bdev_hardsect_size(bdev));
switch (MAJOR(btp->pbr_dev)) {
case MD_MAJOR:
case EVMS_MAJOR:
btp->pbr_flags = PBR_ALIGNED_ONLY;
break;
}
return btp; return btp;
} }
......
@@ -87,6 +87,7 @@ extern int xfs_blkdev_get (const char *, struct block_device **);
extern void xfs_blkdev_put (struct block_device *); extern void xfs_blkdev_put (struct block_device *);
extern struct pb_target *xfs_alloc_buftarg (struct block_device *); extern struct pb_target *xfs_alloc_buftarg (struct block_device *);
extern void xfs_size_buftarg (struct pb_target *, unsigned int, unsigned int);
extern void xfs_free_buftarg (struct pb_target *); extern void xfs_free_buftarg (struct pb_target *);
#endif /* __XFS_SUPER_H__ */ #endif /* __XFS_SUPER_H__ */
@@ -35,8 +35,7 @@
uint64_t vn_generation; /* vnode generation number */ uint64_t vn_generation; /* vnode generation number */
spinlock_t vnumber_lock = SPIN_LOCK_UNLOCKED;
spinlock_t vnumber_lock = SPIN_LOCK_UNLOCKED;
/* /*
* Dedicated vnode inactive/reclaim sync semaphores. * Dedicated vnode inactive/reclaim sync semaphores.
@@ -59,9 +58,6 @@ u_short vttoif_tab[] = {
0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO, 0, S_IFSOCK 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFIFO, 0, S_IFSOCK
}; };
#define VN_LOCK(vp) spin_lock(&(vp)->v_lock)
#define VN_UNLOCK(vp) spin_unlock(&(vp)->v_lock)
void void
vn_init(void) vn_init(void)
@@ -73,14 +69,13 @@ vn_init(void)
init_sv(svp, SV_DEFAULT, "vsy", i); init_sv(svp, SV_DEFAULT, "vsy", i);
} }
/* /*
* Clean a vnode of filesystem-specific data and prepare it for reuse. * Clean a vnode of filesystem-specific data and prepare it for reuse.
*/ */
STATIC int STATIC int
vn_reclaim(struct vnode *vp) vn_reclaim(struct vnode *vp)
{ {
int error; int error;
XFS_STATS_INC(xfsstats.vn_reclaim); XFS_STATS_INC(xfsstats.vn_reclaim);
@@ -98,7 +93,6 @@ vn_reclaim(struct vnode *vp)
ASSERT(vp->v_fbhv == NULL); ASSERT(vp->v_fbhv == NULL);
VN_LOCK(vp); VN_LOCK(vp);
vp->v_flag &= (VRECLM|VWAIT); vp->v_flag &= (VRECLM|VWAIT);
VN_UNLOCK(vp); VN_UNLOCK(vp);
@@ -189,7 +183,7 @@ vn_get(struct vnode *vp, vmap_t *vmap)
} }
/* /*
* "revalidate" the linux inode. * Revalidate the Linux inode from the vnode.
*/ */
int int
vn_revalidate(struct vnode *vp) vn_revalidate(struct vnode *vp)
@@ -199,17 +193,12 @@ vn_revalidate(struct vnode *vp)
vattr_t va; vattr_t va;
vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address); vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address);
ASSERT(vp->v_fbhv != NULL);
va.va_mask = AT_STAT|AT_GENCOUNT; va.va_mask = XFS_AT_STAT|XFS_AT_GENCOUNT;
ASSERT(vp->v_bh.bh_first != NULL);
VOP_GETATTR(vp, &va, 0, NULL, error); VOP_GETATTR(vp, &va, 0, NULL, error);
if (!error) {
if (! error) {
inode = LINVFS_GET_IP(vp); inode = LINVFS_GET_IP(vp);
ASSERT(inode);
inode->i_mode = VTTOIF(va.va_type) | va.va_mode; inode->i_mode = VTTOIF(va.va_type) | va.va_mode;
inode->i_nlink = va.va_nlink; inode->i_nlink = va.va_nlink;
inode->i_uid = va.va_uid; inode->i_uid = va.va_uid;
@@ -224,11 +213,9 @@ vn_revalidate(struct vnode *vp)
inode->i_atime.tv_nsec = va.va_atime.tv_nsec; inode->i_atime.tv_nsec = va.va_atime.tv_nsec;
VUNMODIFY(vp); VUNMODIFY(vp);
} }
return -error; return -error;
} }
/* /*
* purge a vnode from the cache * purge a vnode from the cache
* At this point the vnode is guaranteed to have no references (vn_count == 0) * At this point the vnode is guaranteed to have no references (vn_count == 0)
@@ -317,12 +304,10 @@ void
vn_rele(struct vnode *vp) vn_rele(struct vnode *vp)
{ {
int vcnt; int vcnt;
/* REFERENCED */ int cache;
int cache;
XFS_STATS_INC(xfsstats.vn_rele); XFS_STATS_INC(xfsstats.vn_rele);
VN_LOCK(vp); VN_LOCK(vp);
vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address); vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
@@ -365,7 +350,6 @@ vn_rele(struct vnode *vp)
vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address); vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
} }
/* /*
* Finish the removal of a vnode. * Finish the removal of a vnode.
*/ */
......
@@ -32,6 +32,12 @@
#ifndef __XFS_VNODE_H__ #ifndef __XFS_VNODE_H__
#define __XFS_VNODE_H__ #define __XFS_VNODE_H__
struct uio;
struct file;
struct vattr;
struct page_buf_bmap_s;
struct attrlist_cursor_kern;
/* /*
* Vnode types (unrelated to on-disk inodes). VNON means no type. * Vnode types (unrelated to on-disk inodes). VNON means no type.
*/ */
@@ -47,12 +53,9 @@ typedef enum vtype {
VSOCK = 8 VSOCK = 8
} vtype_t; } vtype_t;
typedef __u64 vnumber_t; typedef xfs_ino_t vnumber_t;
typedef struct dentry vname_t;
/* typedef bhv_head_t vn_bhv_head_t;
* Define the type of behavior head used by vnodes.
*/
#define vn_bhv_head_t bhv_head_t
/* /*
* MP locking protocols: * MP locking protocols:
@@ -61,18 +64,50 @@ typedef __u64 vnumber_t;
*/ */
typedef struct vnode { typedef struct vnode {
__u32 v_flag; /* vnode flags (see below) */ __u32 v_flag; /* vnode flags (see below) */
enum vtype v_type; /* vnode type */ enum vtype v_type; /* vnode type */
struct vfs *v_vfsp; /* ptr to containing VFS*/ struct vfs *v_vfsp; /* ptr to containing VFS */
vnumber_t v_number; /* in-core vnode number */ vnumber_t v_number; /* in-core vnode number */
vn_bhv_head_t v_bh; /* behavior head */ vn_bhv_head_t v_bh; /* behavior head */
spinlock_t v_lock; /* VN_LOCK/VN_UNLOCK */
spinlock_t v_lock; /* don't use VLOCK on Linux */ struct inode v_inode; /* Linux inode */
struct inode v_inode; /* linux inode */
#ifdef CONFIG_XFS_VNODE_TRACING #ifdef CONFIG_XFS_VNODE_TRACING
struct ktrace *v_trace; /* trace header structure */ struct ktrace *v_trace; /* trace header structure */
#endif #endif
} vnode_t; } vnode_t;
#define v_fbhv v_bh.bh_first /* first behavior */
#define v_fops v_bh.bh_first->bd_ops /* first behavior ops */
#define VNODE_POSITION_BASE BHV_POSITION_BASE /* chain bottom */
#define VNODE_POSITION_TOP BHV_POSITION_TOP /* chain top */
#define VNODE_POSITION_INVALID BHV_POSITION_INVALID /* invalid pos. num */
typedef enum {
VN_BHV_UNKNOWN, /* not specified */
VN_BHV_XFS, /* xfs */
VN_BHV_END /* housekeeping end-of-range */
} vn_bhv_t;
#define VNODE_POSITION_XFS (VNODE_POSITION_BASE)
/*
* Macros for dealing with the behavior descriptor inside of the vnode.
*/
#define BHV_TO_VNODE(bdp) ((vnode_t *)BHV_VOBJ(bdp))
#define BHV_TO_VNODE_NULL(bdp) ((vnode_t *)BHV_VOBJNULL(bdp))
#define VNODE_TO_FIRST_BHV(vp) (BHV_HEAD_FIRST(&(vp)->v_bh))
#define VN_BHV_HEAD(vp) ((bhv_head_t *)(&((vp)->v_bh)))
#define VN_BHV_READ_LOCK(bhp) BHV_READ_LOCK(bhp)
#define VN_BHV_READ_UNLOCK(bhp) BHV_READ_UNLOCK(bhp)
#define VN_BHV_WRITE_LOCK(bhp) BHV_WRITE_LOCK(bhp)
#define VN_BHV_NOT_READ_LOCKED(bhp) BHV_NOT_READ_LOCKED(bhp)
#define VN_BHV_NOT_WRITE_LOCKED(bhp) BHV_NOT_WRITE_LOCKED(bhp)
#define vn_bhv_head_init(bhp,name) bhv_head_init(bhp,name)
#define vn_bhv_remove(bhp,bdp) bhv_remove(bhp,bdp)
#define vn_bhv_lookup(bhp,ops) bhv_lookup(bhp,ops)
#define vn_bhv_lookup_unlocked(bhp,ops) bhv_lookup_unlocked(bhp,ops)
/* /*
* Vnode to Linux inode mapping. * Vnode to Linux inode mapping.
*/ */
@@ -91,23 +126,12 @@ extern ushort vttoif_tab[];
/* /*
* Vnode flags. * Vnode flags.
*
* The vnode flags fall into two categories:
* 1) Local only -
* Flags that are relevant only to a particular cell
* 2) Single system image -
* Flags that must be maintained coherent across all cells
*/ */
/* Local only flags */
#define VINACT 0x1 /* vnode is being inactivated */ #define VINACT 0x1 /* vnode is being inactivated */
#define VRECLM 0x2 /* vnode is being reclaimed */ #define VRECLM 0x2 /* vnode is being reclaimed */
#define VWAIT 0x4 /* waiting for VINACT #define VWAIT 0x4 /* waiting for VINACT/VRECLM to end */
or VRECLM to finish */ #define VMODIFIED 0x8 /* XFS inode state possibly differs */
#define VMODIFIED 0x8 /* xfs inode state possibly different /* to the Linux inode state. */
* from linux inode state.
*/
/* Single system image flags */
#define VROOT 0x100000 /* root of its file system */ #define VROOT 0x100000 /* root of its file system */
#define VNOSWAP 0x200000 /* cannot be used as virt swap device */ #define VNOSWAP 0x200000 /* cannot be used as virt swap device */
#define VISSWAP 0x400000 /* vnode is part of virt swap device */ #define VISSWAP 0x400000 /* vnode is part of virt swap device */
@@ -115,7 +139,6 @@ extern ushort vttoif_tab[];
#define VNONREPLICABLE 0x1000000 /* Vnode has writers. Don't replicate */ #define VNONREPLICABLE 0x1000000 /* Vnode has writers. Don't replicate */
#define VDOCMP 0x2000000 /* Vnode has special VOP_CMP impl. */ #define VDOCMP 0x2000000 /* Vnode has special VOP_CMP impl. */
#define VSHARE 0x4000000 /* vnode part of global cache */ #define VSHARE 0x4000000 /* vnode part of global cache */
/* VSHARE applies to local cell only */
#define VFRLOCKS 0x8000000 /* vnode has FR locks applied */ #define VFRLOCKS 0x8000000 /* vnode has FR locks applied */
#define VENF_LOCKING 0x10000000 /* enf. mode FR locking in effect */ #define VENF_LOCKING 0x10000000 /* enf. mode FR locking in effect */
#define VOPLOCK 0x20000000 /* oplock set on the vnode */ #define VOPLOCK 0x20000000 /* oplock set on the vnode */
@@ -143,35 +166,6 @@ typedef enum vchange {
VCHANGE_FLAGS_IOEXCL_COUNT = 4 VCHANGE_FLAGS_IOEXCL_COUNT = 4
} vchange_t; } vchange_t;
/*
* Macros for dealing with the behavior descriptor inside of the vnode.
*/
#define BHV_TO_VNODE(bdp) ((vnode_t *)BHV_VOBJ(bdp))
#define BHV_TO_VNODE_NULL(bdp) ((vnode_t *)BHV_VOBJNULL(bdp))
#define VNODE_TO_FIRST_BHV(vp) (BHV_HEAD_FIRST(&(vp)->v_bh))
#define VN_BHV_HEAD(vp) ((vn_bhv_head_t *)(&((vp)->v_bh)))
#define VN_BHV_READ_LOCK(bhp) BHV_READ_LOCK(bhp)
#define VN_BHV_READ_UNLOCK(bhp) BHV_READ_UNLOCK(bhp)
#define VN_BHV_WRITE_LOCK(bhp) BHV_WRITE_LOCK(bhp)
#define VN_BHV_NOT_READ_LOCKED(bhp) BHV_NOT_READ_LOCKED(bhp)
#define VN_BHV_NOT_WRITE_LOCKED(bhp) BHV_NOT_WRITE_LOCKED(bhp)
#define vn_bhv_head_init(bhp,name) bhv_head_init(bhp,name)
#define vn_bhv_head_reinit(bhp) bhv_head_reinit(bhp)
#define vn_bhv_insert_initial(bhp,bdp) bhv_insert_initial(bhp,bdp)
#define vn_bhv_remove(bhp,bdp) bhv_remove(bhp,bdp)
#define vn_bhv_lookup(bhp,ops) bhv_lookup(bhp,ops)
#define vn_bhv_lookup_unlocked(bhp,ops) bhv_lookup_unlocked(bhp,ops)
#define v_fbhv v_bh.bh_first /* first behavior */
#define v_fops v_bh.bh_first->bd_ops /* ops for first behavior */
struct uio;
struct file;
struct vattr;
struct page_buf_bmap_s;
struct attrlist_cursor_kern;
typedef int (*vop_open_t)(bhv_desc_t *, struct cred *); typedef int (*vop_open_t)(bhv_desc_t *, struct cred *);
typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct file *, typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct file *,
@@ -180,37 +174,42 @@ typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct file *,
typedef ssize_t (*vop_write_t)(bhv_desc_t *, struct file *, typedef ssize_t (*vop_write_t)(bhv_desc_t *, struct file *,
const struct iovec *, unsigned long, const struct iovec *, unsigned long,
loff_t *, struct cred *); loff_t *, struct cred *);
typedef int (*vop_ioctl_t)(bhv_desc_t *, struct inode *, struct file *, unsigned int, unsigned long); typedef ssize_t (*vop_sendfile_t)(bhv_desc_t *, struct file *,
loff_t *, size_t, read_actor_t,
void *, struct cred *);
typedef int (*vop_ioctl_t)(bhv_desc_t *, struct inode *, struct file *,
unsigned int, unsigned long);
typedef int (*vop_getattr_t)(bhv_desc_t *, struct vattr *, int, typedef int (*vop_getattr_t)(bhv_desc_t *, struct vattr *, int,
struct cred *); struct cred *);
typedef int (*vop_setattr_t)(bhv_desc_t *, struct vattr *, int, typedef int (*vop_setattr_t)(bhv_desc_t *, struct vattr *, int,
struct cred *); struct cred *);
typedef int (*vop_access_t)(bhv_desc_t *, int, struct cred *); typedef int (*vop_access_t)(bhv_desc_t *, int, struct cred *);
typedef int (*vop_lookup_t)(bhv_desc_t *, struct dentry *, vnode_t **, typedef int (*vop_lookup_t)(bhv_desc_t *, vname_t *, vnode_t **,
int, vnode_t *, struct cred *); int, vnode_t *, struct cred *);
typedef int (*vop_create_t)(bhv_desc_t *, struct dentry *, struct vattr *, typedef int (*vop_create_t)(bhv_desc_t *, vname_t *, struct vattr *,
vnode_t **, struct cred *); vnode_t **, struct cred *);
typedef int (*vop_remove_t)(bhv_desc_t *, struct dentry *, struct cred *); typedef int (*vop_remove_t)(bhv_desc_t *, vname_t *, struct cred *);
typedef int (*vop_link_t)(bhv_desc_t *, vnode_t *, struct dentry *, typedef int (*vop_link_t)(bhv_desc_t *, vnode_t *, vname_t *,
struct cred *);
typedef int (*vop_rename_t)(bhv_desc_t *, vname_t *, vnode_t *, vname_t *,
struct cred *); struct cred *);
typedef int (*vop_rename_t)(bhv_desc_t *, struct dentry *, vnode_t *, typedef int (*vop_mkdir_t)(bhv_desc_t *, vname_t *, struct vattr *,
struct dentry *, struct cred *);
typedef int (*vop_mkdir_t)(bhv_desc_t *, struct dentry *, struct vattr *,
vnode_t **, struct cred *); vnode_t **, struct cred *);
typedef int (*vop_rmdir_t)(bhv_desc_t *, struct dentry *, struct cred *); typedef int (*vop_rmdir_t)(bhv_desc_t *, vname_t *, struct cred *);
typedef int (*vop_readdir_t)(bhv_desc_t *, struct uio *, struct cred *, typedef int (*vop_readdir_t)(bhv_desc_t *, struct uio *, struct cred *,
int *); int *);
typedef int (*vop_symlink_t)(bhv_desc_t *, struct dentry *, typedef int (*vop_symlink_t)(bhv_desc_t *, vname_t *, struct vattr *,
struct vattr *, char *, char *, vnode_t **, struct cred *);
vnode_t **, struct cred *);
typedef int (*vop_readlink_t)(bhv_desc_t *, struct uio *, struct cred *); typedef int (*vop_readlink_t)(bhv_desc_t *, struct uio *, struct cred *);
typedef int (*vop_fsync_t)(bhv_desc_t *, int, struct cred *, xfs_off_t, xfs_off_t); typedef int (*vop_fsync_t)(bhv_desc_t *, int, struct cred *,
xfs_off_t, xfs_off_t);
typedef int (*vop_inactive_t)(bhv_desc_t *, struct cred *); typedef int (*vop_inactive_t)(bhv_desc_t *, struct cred *);
typedef int (*vop_fid2_t)(bhv_desc_t *, struct fid *); typedef int (*vop_fid2_t)(bhv_desc_t *, struct fid *);
typedef int (*vop_release_t)(bhv_desc_t *); typedef int (*vop_release_t)(bhv_desc_t *);
typedef int (*vop_rwlock_t)(bhv_desc_t *, vrwlock_t); typedef int (*vop_rwlock_t)(bhv_desc_t *, vrwlock_t);
typedef void (*vop_rwunlock_t)(bhv_desc_t *, vrwlock_t); typedef void (*vop_rwunlock_t)(bhv_desc_t *, vrwlock_t);
typedef int (*vop_bmap_t)(bhv_desc_t *, xfs_off_t, ssize_t, int, struct page_buf_bmap_s *, int *); typedef int (*vop_bmap_t)(bhv_desc_t *, xfs_off_t, ssize_t, int,
struct page_buf_bmap_s *, int *);
typedef int (*vop_reclaim_t)(bhv_desc_t *); typedef int (*vop_reclaim_t)(bhv_desc_t *);
typedef int (*vop_attr_get_t)(bhv_desc_t *, char *, char *, int *, int, typedef int (*vop_attr_get_t)(bhv_desc_t *, char *, char *, int *, int,
struct cred *); struct cred *);
@@ -223,7 +222,8 @@ typedef void (*vop_link_removed_t)(bhv_desc_t *, vnode_t *, int);
typedef void (*vop_vnode_change_t)(bhv_desc_t *, vchange_t, __psint_t); typedef void (*vop_vnode_change_t)(bhv_desc_t *, vchange_t, __psint_t);
typedef void (*vop_ptossvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int); typedef void (*vop_ptossvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
typedef void (*vop_pflushinvalvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int); typedef void (*vop_pflushinvalvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, int);
typedef int (*vop_pflushvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t, uint64_t, int); typedef int (*vop_pflushvp_t)(bhv_desc_t *, xfs_off_t, xfs_off_t,
uint64_t, int);
typedef int (*vop_iflush_t)(bhv_desc_t *, int); typedef int (*vop_iflush_t)(bhv_desc_t *, int);
@@ -232,6 +232,7 @@ typedef struct vnodeops {
vop_open_t vop_open; vop_open_t vop_open;
vop_read_t vop_read; vop_read_t vop_read;
vop_write_t vop_write; vop_write_t vop_write;
vop_sendfile_t vop_sendfile;
vop_ioctl_t vop_ioctl; vop_ioctl_t vop_ioctl;
vop_getattr_t vop_getattr; vop_getattr_t vop_getattr;
vop_setattr_t vop_setattr; vop_setattr_t vop_setattr;
@@ -283,6 +284,12 @@ typedef struct vnodeops {
rv = _VOP_(vop_write, vp)((vp)->v_fbhv,file,iov,segs,offset,cr);\ rv = _VOP_(vop_write, vp)((vp)->v_fbhv,file,iov,segs,offset,cr);\
VN_BHV_READ_UNLOCK(&(vp)->v_bh); \ VN_BHV_READ_UNLOCK(&(vp)->v_bh); \
} }
#define VOP_SENDFILE(vp,f,of,cnt,act,targ,cr,rv) \
{ \
VN_BHV_READ_LOCK(&(vp)->v_bh); \
rv = _VOP_(vop_sendfile, vp)((vp)->v_fbhv,f,of,cnt,act,targ,cr);\
VN_BHV_READ_UNLOCK(&(vp)->v_bh); \
}
#define VOP_BMAP(vp,of,sz,rw,b,n,rv) \ #define VOP_BMAP(vp,of,sz,rw,b,n,rv) \
{ \ { \
VN_BHV_READ_LOCK(&(vp)->v_bh); \ VN_BHV_READ_LOCK(&(vp)->v_bh); \
@@ -541,53 +548,55 @@ typedef struct vattr {
/* /*
* setattr or getattr attributes * setattr or getattr attributes
*/ */
#define AT_TYPE 0x00000001 #define XFS_AT_TYPE 0x00000001
#define AT_MODE 0x00000002 #define XFS_AT_MODE 0x00000002
#define AT_UID 0x00000004 #define XFS_AT_UID 0x00000004
#define AT_GID 0x00000008 #define XFS_AT_GID 0x00000008
#define AT_FSID 0x00000010 #define XFS_AT_FSID 0x00000010
#define AT_NODEID 0x00000020 #define XFS_AT_NODEID 0x00000020
#define AT_NLINK 0x00000040 #define XFS_AT_NLINK 0x00000040
#define AT_SIZE 0x00000080 #define XFS_AT_SIZE 0x00000080
#define AT_ATIME 0x00000100 #define XFS_AT_ATIME 0x00000100
#define AT_MTIME 0x00000200 #define XFS_AT_MTIME 0x00000200
#define AT_CTIME 0x00000400 #define XFS_AT_CTIME 0x00000400
#define AT_RDEV 0x00000800 #define XFS_AT_RDEV 0x00000800
#define AT_BLKSIZE 0x00001000 #define XFS_AT_BLKSIZE 0x00001000
#define AT_NBLOCKS 0x00002000 #define XFS_AT_NBLOCKS 0x00002000
#define AT_VCODE 0x00004000 #define XFS_AT_VCODE 0x00004000
#define AT_MAC 0x00008000 #define XFS_AT_MAC 0x00008000
#define AT_UPDATIME 0x00010000 #define XFS_AT_UPDATIME 0x00010000
#define AT_UPDMTIME 0x00020000 #define XFS_AT_UPDMTIME 0x00020000
#define AT_UPDCTIME 0x00040000 #define XFS_AT_UPDCTIME 0x00040000
#define AT_ACL 0x00080000 #define XFS_AT_ACL 0x00080000
#define AT_CAP 0x00100000 #define XFS_AT_CAP 0x00100000
#define AT_INF 0x00200000 #define XFS_AT_INF 0x00200000
#define AT_XFLAGS 0x00400000 #define XFS_AT_XFLAGS 0x00400000
#define AT_EXTSIZE 0x00800000 #define XFS_AT_EXTSIZE 0x00800000
#define AT_NEXTENTS 0x01000000 #define XFS_AT_NEXTENTS 0x01000000
#define AT_ANEXTENTS 0x02000000 #define XFS_AT_ANEXTENTS 0x02000000
#define AT_PROJID 0x04000000 #define XFS_AT_PROJID 0x04000000
#define AT_SIZE_NOPERM 0x08000000 #define XFS_AT_SIZE_NOPERM 0x08000000
#define AT_GENCOUNT 0x10000000 #define XFS_AT_GENCOUNT 0x10000000
#define AT_ALL (AT_TYPE|AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|\ #define XFS_AT_ALL (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\
AT_NLINK|AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV|\ XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\
AT_BLKSIZE|AT_NBLOCKS|AT_VCODE|AT_MAC|AT_ACL|AT_CAP|\ XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\
AT_INF|AT_XFLAGS|AT_EXTSIZE|AT_NEXTENTS|AT_ANEXTENTS|\ XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|XFS_AT_MAC|\
AT_PROJID|AT_GENCOUNT) XFS_AT_ACL|XFS_AT_CAP|XFS_AT_INF|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|\
XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_PROJID|XFS_AT_GENCOUNT)
#define AT_STAT (AT_TYPE|AT_MODE|AT_UID|AT_GID|AT_FSID|AT_NODEID|AT_NLINK|\
AT_SIZE|AT_ATIME|AT_MTIME|AT_CTIME|AT_RDEV|AT_BLKSIZE|\ #define XFS_AT_STAT (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\
AT_NBLOCKS|AT_PROJID) XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\
XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\
#define AT_TIMES (AT_ATIME|AT_MTIME|AT_CTIME) XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_PROJID)
#define AT_UPDTIMES (AT_UPDATIME|AT_UPDMTIME|AT_UPDCTIME) #define XFS_AT_TIMES (XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME)
#define AT_NOSET (AT_NLINK|AT_RDEV|AT_FSID|AT_NODEID|AT_TYPE|\ #define XFS_AT_UPDTIMES (XFS_AT_UPDATIME|XFS_AT_UPDMTIME|XFS_AT_UPDCTIME)
AT_BLKSIZE|AT_NBLOCKS|AT_VCODE|AT_NEXTENTS|AT_ANEXTENTS|\
AT_GENCOUNT) #define XFS_AT_NOSET (XFS_AT_NLINK|XFS_AT_RDEV|XFS_AT_FSID|XFS_AT_NODEID|\
XFS_AT_TYPE|XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|\
XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_GENCOUNT)
#define VREAD 00400 #define VREAD 00400
#define VWRITE 00200 #define VWRITE 00200
@@ -664,9 +673,17 @@ extern void vn_rele(struct vnode *);
#endif /* ! (defined(CONFIG_XFS_VNODE_TRACING) */ #endif /* ! (defined(CONFIG_XFS_VNODE_TRACING) */
/*
* Vname handling macros.
*/
#define VNAME(dentry) ((char *) (dentry)->d_name.name)
#define VNAMELEN(dentry) ((dentry)->d_name.len)
/* /*
* Vnode spinlock manipulation. * Vnode spinlock manipulation.
*/ */
#define VN_LOCK(vp) spin_lock(&(vp)->v_lock)
#define VN_UNLOCK(vp) spin_unlock(&(vp)->v_lock)
#define VN_FLAGSET(vp,b) vn_flagset(vp,b) #define VN_FLAGSET(vp,b) vn_flagset(vp,b)
#define VN_FLAGCLR(vp,b) vn_flagclr(vp,b) #define VN_FLAGCLR(vp,b) vn_flagclr(vp,b)
......
@@ -63,10 +63,8 @@
#include "page_buf_internal.h" #include "page_buf_internal.h"
#define SECTOR_SHIFT 9 #define BBSHIFT 9
#define SECTOR_SIZE (1<<SECTOR_SHIFT) #define BN_ALIGN_MASK ((1 << (PAGE_CACHE_SHIFT - BBSHIFT)) - 1)
#define SECTOR_MASK (SECTOR_SIZE - 1)
#define BN_ALIGN_MASK ((1 << (PAGE_CACHE_SHIFT - SECTOR_SHIFT)) - 1)
#ifndef GFP_READAHEAD #ifndef GFP_READAHEAD
#define GFP_READAHEAD 0 #define GFP_READAHEAD 0
@@ -245,19 +243,27 @@ typedef struct a_list {
STATIC a_list_t *as_free_head; STATIC a_list_t *as_free_head;
STATIC int as_list_len; STATIC int as_list_len;
/*
* Try to batch vunmaps because they are costly.
*/
STATIC void STATIC void
free_address( free_address(
void *addr) void *addr)
{ {
a_list_t *aentry; a_list_t *aentry;
spin_lock(&as_lock);
aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC); aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC);
aentry->next = as_free_head; if (aentry) {
aentry->vm_addr = addr; spin_lock(&as_lock);
as_free_head = aentry; aentry->next = as_free_head;
as_list_len++; aentry->vm_addr = addr;
spin_unlock(&as_lock); as_free_head = aentry;
as_list_len++;
spin_unlock(&as_lock);
} else {
vunmap(addr);
}
} }
STATIC void STATIC void
@@ -265,7 +271,8 @@ purge_addresses(void)
{ {
a_list_t *aentry, *old; a_list_t *aentry, *old;
if (as_free_head == NULL) return; if (as_free_head == NULL)
return;
spin_lock(&as_lock); spin_lock(&as_lock);
aentry = as_free_head; aentry = as_free_head;
@@ -462,7 +469,8 @@ _pagebuf_lookup_pages(
struct page *page; struct page *page;
int gfp_mask, retry_count = 5, rval = 0; int gfp_mask, retry_count = 5, rval = 0;
int all_mapped, good_pages, nbytes; int all_mapped, good_pages, nbytes;
size_t blocksize, size, offset; unsigned int blocksize, sectorshift;
size_t size, offset;
/* For pagebufs where we want to map an address, do not use /* For pagebufs where we want to map an address, do not use
@@ -508,7 +516,8 @@ _pagebuf_lookup_pages(
return rval; return rval;
rval = pi = 0; rval = pi = 0;
blocksize = pb->pb_target->pbr_blocksize; blocksize = pb->pb_target->pbr_bsize;
sectorshift = pb->pb_target->pbr_sshift;
size = pb->pb_count_desired; size = pb->pb_count_desired;
offset = pb->pb_offset; offset = pb->pb_offset;
@@ -549,15 +558,15 @@ _pagebuf_lookup_pages(
pb->pb_locked = 1; pb->pb_locked = 1;
good_pages--; good_pages--;
} else if (!PagePrivate(page)) { } else if (!PagePrivate(page)) {
unsigned long i, range = (offset + nbytes) >> SECTOR_SHIFT; unsigned long i, range;
ASSERT(blocksize < PAGE_CACHE_SIZE);
ASSERT(!(pb->pb_flags & _PBF_PRIVATE_BH));
/* /*
* In this case page->private holds a bitmap * In this case page->private holds a bitmap
* of uptodate sectors (512) within the page * of uptodate sectors within the page
*/ */
for (i = offset >> SECTOR_SHIFT; i < range; i++) ASSERT(blocksize < PAGE_CACHE_SIZE);
range = (offset + nbytes) >> sectorshift;
for (i = offset >> sectorshift; i < range; i++)
if (!test_bit(i, &page->private)) if (!test_bit(i, &page->private))
break; break;
if (i != range) if (i != range)
@@ -642,8 +651,14 @@ _pagebuf_find( /* find buffer for block */
page_buf_t *pb; page_buf_t *pb;
int not_locked; int not_locked;
range_base = (ioff << SECTOR_SHIFT); range_base = (ioff << BBSHIFT);
range_length = (isize << SECTOR_SHIFT); range_length = (isize << BBSHIFT);
/* Ensure we never do IOs smaller than the sector size */
BUG_ON(range_length < (1 << target->pbr_sshift));
/* Ensure we never do IOs that are not sector aligned */
BUG_ON(range_base & (loff_t)target->pbr_smask);
hval = _bhash(target->pbr_bdev->bd_dev, range_base); hval = _bhash(target->pbr_bdev->bd_dev, range_base);
h = &pbhash[hval]; h = &pbhash[hval];
@@ -851,8 +866,20 @@ pagebuf_readahead(
size_t isize, size_t isize,
page_buf_flags_t flags) page_buf_flags_t flags)
{ {
struct backing_dev_info *bdi;
bdi = target->pbr_mapping->backing_dev_info;
if (bdi_read_congested(bdi))
return;
if (bdi_write_congested(bdi))
return;
flags |= (PBF_TRYLOCK|PBF_READ|PBF_ASYNC|PBF_MAPPABLE|PBF_READ_AHEAD); flags |= (PBF_TRYLOCK|PBF_READ|PBF_ASYNC|PBF_MAPPABLE|PBF_READ_AHEAD);
/* don't complain on allocation failure, it's fine with us */
current->flags |= PF_NOWARN;
pagebuf_get(target, ioff, isize, flags); pagebuf_get(target, ioff, isize, flags);
current->flags &= ~PF_NOWARN;
} }
page_buf_t * page_buf_t *
@@ -956,18 +983,12 @@ pagebuf_get_no_daddr(
} else { } else {
kfree(rmem); /* free the mem from the previous try */ kfree(rmem); /* free the mem from the previous try */
tlen <<= 1; /* double the size and try again */ tlen <<= 1; /* double the size and try again */
/*
printk(
"pb_get_no_daddr NOT block 0x%p mask 0x%p len %d\n",
rmem, ((size_t)rmem & (size_t)~SECTOR_MASK),
len);
*/
} }
if ((rmem = kmalloc(tlen, GFP_KERNEL)) == 0) { if ((rmem = kmalloc(tlen, GFP_KERNEL)) == 0) {
pagebuf_free(pb); pagebuf_free(pb);
return NULL; return NULL;
} }
} while ((size_t)rmem != ((size_t)rmem & (size_t)~SECTOR_MASK)); } while ((size_t)rmem != ((size_t)rmem & ~target->pbr_smask));
if ((rval = pagebuf_associate_memory(pb, rmem, len)) != 0) { if ((rval = pagebuf_associate_memory(pb, rmem, len)) != 0) {
kfree(rmem); kfree(rmem);
@@ -1248,9 +1269,7 @@ pagebuf_iostart( /* start I/O on a buffer */
pb->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI|PBF_READ_AHEAD); pb->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI|PBF_READ_AHEAD);
pb->pb_flags |= flags & (PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_SYNC|PBF_READ_AHEAD); pb->pb_flags |= flags & (PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_SYNC|PBF_READ_AHEAD);
if (pb->pb_bn == PAGE_BUF_DADDR_NULL) { BUG_ON(pb->pb_bn == PAGE_BUF_DADDR_NULL);
BUG();
}
/* For writes call internal function which checks for /* For writes call internal function which checks for
* filesystem specific callout function and execute it. * filesystem specific callout function and execute it.
@@ -1279,7 +1298,8 @@ bio_end_io_pagebuf(
int error) int error)
{ {
page_buf_t *pb = (page_buf_t *)bio->bi_private; page_buf_t *pb = (page_buf_t *)bio->bi_private;
unsigned int i, blocksize = pb->pb_target->pbr_blocksize; unsigned int i, blocksize = pb->pb_target->pbr_bsize;
unsigned int sectorshift = pb->pb_target->pbr_sshift;
struct bio_vec *bvec = bio->bi_io_vec; struct bio_vec *bvec = bio->bi_io_vec;
if (bio->bi_size) if (bio->bi_size)
@@ -1299,10 +1319,8 @@ bio_end_io_pagebuf(
unsigned int j, range; unsigned int j, range;
ASSERT(blocksize < PAGE_CACHE_SIZE); ASSERT(blocksize < PAGE_CACHE_SIZE);
ASSERT(!(pb->pb_flags & _PBF_PRIVATE_BH)); range = (bvec->bv_offset + bvec->bv_len) >> sectorshift;
for (j = bvec->bv_offset >> sectorshift; j < range; j++)
range = (bvec->bv_offset + bvec->bv_len)>>SECTOR_SHIFT;
for (j = bvec->bv_offset>>SECTOR_SHIFT; j < range; j++)
set_bit(j, &page->private); set_bit(j, &page->private);
if (page->private == (unsigned long)(PAGE_CACHE_SIZE-1)) if (page->private == (unsigned long)(PAGE_CACHE_SIZE-1))
SetPageUptodate(page); SetPageUptodate(page);
@@ -1353,7 +1371,7 @@ pagebuf_iorequest( /* start real I/O */
int offset = pb->pb_offset; int offset = pb->pb_offset;
int size = pb->pb_count_desired; int size = pb->pb_count_desired;
sector_t sector = pb->pb_bn; sector_t sector = pb->pb_bn;
size_t blocksize = pb->pb_target->pbr_blocksize; unsigned int blocksize = pb->pb_target->pbr_bsize;
int locking; int locking;
locking = (pb->pb_flags & _PBF_LOCKABLE) == 0 && (pb->pb_locked == 0); locking = (pb->pb_flags & _PBF_LOCKABLE) == 0 && (pb->pb_locked == 0);
@@ -1382,7 +1400,7 @@ pagebuf_iorequest( /* start real I/O */
bio = bio_alloc(GFP_NOIO, 1); bio = bio_alloc(GFP_NOIO, 1);
bio->bi_bdev = pb->pb_target->pbr_bdev; bio->bi_bdev = pb->pb_target->pbr_bdev;
bio->bi_sector = sector - (offset >> SECTOR_SHIFT); bio->bi_sector = sector - (offset >> BBSHIFT);
bio->bi_end_io = bio_end_io_pagebuf; bio->bi_end_io = bio_end_io_pagebuf;
bio->bi_private = pb; bio->bi_private = pb;
bio->bi_vcnt++; bio->bi_vcnt++;
@@ -1427,7 +1445,7 @@ pagebuf_iorequest( /* start real I/O */
next_chunk: next_chunk:
atomic_inc(&PBP(pb)->pb_io_remaining); atomic_inc(&PBP(pb)->pb_io_remaining);
nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - SECTOR_SHIFT); nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
if (nr_pages > total_nr_pages) if (nr_pages > total_nr_pages)
nr_pages = total_nr_pages; nr_pages = total_nr_pages;
@@ -1452,7 +1470,7 @@ pagebuf_iorequest( /* start real I/O */
offset = 0; offset = 0;
sector += nbytes >> SECTOR_SHIFT; sector += nbytes >> BBSHIFT;
size -= nbytes; size -= nbytes;
total_nr_pages--; total_nr_pages--;
} }
......
@@ -132,11 +132,12 @@ typedef enum page_buf_flags_e { /* pb_flags values */
#define PBR_ALIGNED_ONLY 2 /* only use aligned I/O */ #define PBR_ALIGNED_ONLY 2 /* only use aligned I/O */
typedef struct pb_target { typedef struct pb_target {
int pbr_flags;
dev_t pbr_dev; dev_t pbr_dev;
struct block_device *pbr_bdev; struct block_device *pbr_bdev;
struct address_space *pbr_mapping; struct address_space *pbr_mapping;
unsigned int pbr_blocksize; unsigned int pbr_bsize;
unsigned int pbr_sshift;
size_t pbr_smask;
} pb_target_t; } pb_target_t;
/* /*
......
@@ -33,6 +33,7 @@
#define __XFS_H__ #define __XFS_H__
#include <linux/config.h> #include <linux/config.h>
#include <linux/types.h>
#include <xfs_types.h> #include <xfs_types.h>
#include <xfs_arch.h> #include <xfs_arch.h>
......
@@ -235,7 +235,7 @@ xfs_acl_vget(
if (kind == _ACL_TYPE_ACCESS) { if (kind == _ACL_TYPE_ACCESS) {
vattr_t va; vattr_t va;
va.va_mask = AT_MODE; va.va_mask = XFS_AT_MODE;
VOP_GETATTR(vp, &va, 0, sys_cred, error); VOP_GETATTR(vp, &va, 0, sys_cred, error);
if (error) if (error)
goto out; goto out;
@@ -372,7 +372,7 @@ xfs_acl_allow_set(
return EROFS; return EROFS;
if ((error = _MAC_VACCESS(vp, NULL, VWRITE))) if ((error = _MAC_VACCESS(vp, NULL, VWRITE)))
return error; return error;
va.va_mask = AT_UID; va.va_mask = XFS_AT_UID;
VOP_GETATTR(vp, &va, 0, NULL, error); VOP_GETATTR(vp, &va, 0, NULL, error);
if (error) if (error)
return error; return error;
@@ -702,7 +702,7 @@ xfs_acl_vtoacl(
xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error); xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error);
if (!error) { if (!error) {
/* Got the ACL, need the mode... */ /* Got the ACL, need the mode... */
va.va_mask = AT_MODE; va.va_mask = XFS_AT_MODE;
VOP_GETATTR(vp, &va, 0, sys_cred, error); VOP_GETATTR(vp, &va, 0, sys_cred, error);
} }
@@ -800,12 +800,12 @@ xfs_acl_setmode(
* Copy the u::, g::, o::, and m:: bits from the ACL into the * Copy the u::, g::, o::, and m:: bits from the ACL into the
* mode. The m:: bits take precedence over the g:: bits. * mode. The m:: bits take precedence over the g:: bits.
*/ */
va.va_mask = AT_MODE; va.va_mask = XFS_AT_MODE;
VOP_GETATTR(vp, &va, 0, sys_cred, error); VOP_GETATTR(vp, &va, 0, sys_cred, error);
if (error) if (error)
return error; return error;
va.va_mask = AT_MODE; va.va_mask = XFS_AT_MODE;
va.va_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO); va.va_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO);
ap = acl->acl_entry; ap = acl->acl_entry;
for (i = 0; i < acl->acl_cnt; ++i) { for (i = 0; i < acl->acl_cnt; ++i) {
......
@@ -96,16 +96,22 @@ int xfs_alloc_block_minrecs(int lev, struct xfs_btree_cur *cur);
#endif #endif
/* /*
* Minimum and maximum blocksize. * Minimum and maximum blocksize and sectorsize.
* The blocksize upper limit is pretty much arbitrary. * The blocksize upper limit is pretty much arbitrary.
* The sectorsize upper limit is due to sizeof(sb_sectsize).
*/ */
#define XFS_MIN_BLOCKSIZE_LOG 9 /* i.e. 512 bytes */ #define XFS_MIN_BLOCKSIZE_LOG 9 /* i.e. 512 bytes */
#define XFS_MAX_BLOCKSIZE_LOG 16 /* i.e. 65536 bytes */ #define XFS_MAX_BLOCKSIZE_LOG 16 /* i.e. 65536 bytes */
#define XFS_MIN_BLOCKSIZE (1 << XFS_MIN_BLOCKSIZE_LOG) #define XFS_MIN_BLOCKSIZE (1 << XFS_MIN_BLOCKSIZE_LOG)
#define XFS_MAX_BLOCKSIZE (1 << XFS_MAX_BLOCKSIZE_LOG) #define XFS_MAX_BLOCKSIZE (1 << XFS_MAX_BLOCKSIZE_LOG)
#define XFS_MIN_SECTORSIZE_LOG 9 /* i.e. 512 bytes */
#define XFS_MAX_SECTORSIZE_LOG 15 /* i.e. 32768 bytes */
#define XFS_MIN_SECTORSIZE (1 << XFS_MIN_SECTORSIZE_LOG)
#define XFS_MAX_SECTORSIZE (1 << XFS_MAX_SECTORSIZE_LOG)
/* /*
* block numbers in the AG; SB is BB 0, AGF is BB 1, AGI is BB 2, AGFL is BB 3 * Block numbers in the AG:
* SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
*/ */
#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BNO_BLOCK) #if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BNO_BLOCK)
xfs_agblock_t xfs_bno_block(struct xfs_mount *mp); xfs_agblock_t xfs_bno_block(struct xfs_mount *mp);
......
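The 32768-byte sector ceiling introduced above follows from the width of the on-disk field: sb_sectsize, like the sb_logsectsize field added further down in this patch, is a 16-bit quantity, so 1 << 15 is the largest power of two it can hold. A standalone check of that arithmetic, written here only for illustration:

    #include <assert.h>
    #include <stdint.h>

    #define XFS_MAX_SECTORSIZE_LOG  15
    #define XFS_MAX_SECTORSIZE      (1 << XFS_MAX_SECTORSIZE_LOG)

    int main(void)
    {
            uint16_t sb_sectsize = XFS_MAX_SECTORSIZE;      /* 32768 still fits in 16 bits */

            assert(sb_sectsize == 32768);
            assert((uint32_t)(1 << (XFS_MAX_SECTORSIZE_LOG + 1)) > UINT16_MAX);
            return 0;
    }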
...@@ -192,7 +192,7 @@ xfs_cap_allow_set( ...@@ -192,7 +192,7 @@ xfs_cap_allow_set(
return EROFS; return EROFS;
if ((error = _MAC_VACCESS(vp, NULL, VWRITE))) if ((error = _MAC_VACCESS(vp, NULL, VWRITE)))
return error; return error;
va.va_mask = AT_UID; va.va_mask = XFS_AT_UID;
VOP_GETATTR(vp, &va, 0, NULL, error); VOP_GETATTR(vp, &va, 0, NULL, error);
if (error) if (error)
return error; return error;
......
...@@ -32,7 +32,6 @@ ...@@ -32,7 +32,6 @@
#ifndef __XFS_DIR2_H__ #ifndef __XFS_DIR2_H__
#define __XFS_DIR2_H__ #define __XFS_DIR2_H__
struct dirent;
struct uio; struct uio;
struct xfs_dabuf; struct xfs_dabuf;
struct xfs_da_args; struct xfs_da_args;
......
...@@ -37,7 +37,6 @@ ...@@ -37,7 +37,6 @@
* Directory version 2, single block format structures * Directory version 2, single block format structures
*/ */
struct dirent;
struct uio; struct uio;
struct xfs_dabuf; struct xfs_dabuf;
struct xfs_da_args; struct xfs_da_args;
......
...@@ -36,7 +36,6 @@ ...@@ -36,7 +36,6 @@
* Directory version 2, leaf block structures. * Directory version 2, leaf block structures.
*/ */
struct dirent;
struct uio; struct uio;
struct xfs_dabuf; struct xfs_dabuf;
struct xfs_da_args; struct xfs_da_args;
......
...@@ -36,7 +36,6 @@ ...@@ -36,7 +36,6 @@
* Directory version 2, btree node format structures * Directory version 2, btree node format structures
*/ */
struct dirent;
struct uio; struct uio;
struct xfs_dabuf; struct xfs_dabuf;
struct xfs_da_args; struct xfs_da_args;
......
...@@ -39,7 +39,6 @@ ...@@ -39,7 +39,6 @@
* fit into the literal area of the inode. * fit into the literal area of the inode.
*/ */
struct dirent;
struct uio; struct uio;
struct xfs_dabuf; struct xfs_dabuf;
struct xfs_da_args; struct xfs_da_args;
......
...@@ -42,7 +42,6 @@ ...@@ -42,7 +42,6 @@
* internal links in the Btree are logical block offsets into the file. * internal links in the Btree are logical block offsets into the file.
*/ */
struct dirent;
struct uio; struct uio;
struct xfs_bmap_free; struct xfs_bmap_free;
struct xfs_dabuf; struct xfs_dabuf;
......
...@@ -30,12 +30,8 @@ ...@@ -30,12 +30,8 @@
* *
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/ * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/ */
#ifndef _LINUX_XFS_FS_H #ifndef __XFS_FS_H__
#define _LINUX_XFS_FS_H #define __XFS_FS_H__
#include <linux/types.h>
#include <asm/ioctl.h>
/* /*
* SGI's XFS filesystem's major stuff (constants, structures) * SGI's XFS filesystem's major stuff (constants, structures)
...@@ -394,11 +390,13 @@ typedef struct { ...@@ -394,11 +390,13 @@ typedef struct {
* This is typically called by a stateless file server in order to generate * This is typically called by a stateless file server in order to generate
* "file handles". * "file handles".
*/ */
#ifndef MAXFIDSZ
#define MAXFIDSZ 46 #define MAXFIDSZ 46
typedef struct fid { typedef struct fid {
__u16 fid_len; /* length of data in bytes */ __u16 fid_len; /* length of data in bytes */
unsigned char fid_data[MAXFIDSZ]; /* data (variable length) */ unsigned char fid_data[MAXFIDSZ]; /* data (variable length) */
} fid_t; } fid_t;
#endif
typedef struct xfs_fid { typedef struct xfs_fid {
__u16 xfs_fid_len; /* length of remainder */ __u16 xfs_fid_len; /* length of remainder */
...@@ -499,4 +497,4 @@ typedef struct xfs_handle { ...@@ -499,4 +497,4 @@ typedef struct xfs_handle {
#define BTOBBT(bytes) ((__u64)(bytes) >> BBSHIFT) #define BTOBBT(bytes) ((__u64)(bytes) >> BBSHIFT)
#define BBTOB(bbs) ((bbs) << BBSHIFT) #define BBTOB(bbs) ((bbs) << BBSHIFT)
#endif /* _LINUX_XFS_FS_H */ #endif /* __XFS_FS_H__ */
...@@ -433,7 +433,6 @@ xfs_inode_item_format( ...@@ -433,7 +433,6 @@ xfs_inode_item_format(
ASSERT(!(iip->ili_format.ilf_fields & ASSERT(!(iip->ili_format.ilf_fields &
(XFS_ILOG_ADATA | XFS_ILOG_ABROOT))); (XFS_ILOG_ADATA | XFS_ILOG_ABROOT)));
if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) { if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) {
ASSERT(!(iip->ili_format.ilf_fields & XFS_ILOG_DEXT));
ASSERT(ip->i_afp->if_bytes > 0); ASSERT(ip->i_afp->if_bytes > 0);
ASSERT(ip->i_afp->if_u1.if_extents != NULL); ASSERT(ip->i_afp->if_u1.if_extents != NULL);
ASSERT(ip->i_d.di_anextents > 0); ASSERT(ip->i_d.di_anextents > 0);
......
...@@ -1839,10 +1839,9 @@ xlog_recover_do_dquot_buffer( ...@@ -1839,10 +1839,9 @@ xlog_recover_do_dquot_buffer(
uint type; uint type;
/* /*
* Non-root filesystems are required to send in quota flags * Filesystems are required to send in quota flags at mount time.
* at mount time.
*/ */
if (mp->m_qflags == 0 && mp->m_dev != rootdev) { if (mp->m_qflags == 0) {
return; return;
} }
...@@ -2289,30 +2288,28 @@ xlog_recover_do_dquot_trans(xlog_t *log, ...@@ -2289,30 +2288,28 @@ xlog_recover_do_dquot_trans(xlog_t *log,
mp = log->l_mp; mp = log->l_mp;
/* /*
* Non-root filesystems are required to send in quota flags * Filesystems are required to send in quota flags at mount time.
* at mount time.
*/ */
if (mp->m_qflags == 0 && mp->m_dev != rootdev) { if (mp->m_qflags == 0)
return (0); return (0);
}
recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr; recddq = (xfs_disk_dquot_t *)item->ri_buf[1].i_addr;
ASSERT(recddq); ASSERT(recddq);
/* /*
* This type of quotas was turned off, so ignore this record. * This type of quotas was turned off, so ignore this record.
*/ */
type = INT_GET(recddq->d_flags, ARCH_CONVERT)&(XFS_DQ_USER|XFS_DQ_GROUP); type = INT_GET(recddq->d_flags, ARCH_CONVERT) &
(XFS_DQ_USER | XFS_DQ_GROUP);
ASSERT(type); ASSERT(type);
if (log->l_quotaoffs_flag & type) if (log->l_quotaoffs_flag & type)
return (0); return (0);
/* /*
* At this point we know that if we are recovering a root filesystem * At this point we know that quota was _not_ turned off.
* then quota was _not_ turned off. Since there is no other flag * Since the mount flags are not indicating to us otherwise, this
* indicate to us otherwise, this must mean that quota's on, * must mean that quota is on, and the dquot needs to be replayed.
* and the dquot needs to be replayed. Remember that we may not have * Remember that we may not have fully recovered the superblock yet,
* fully recovered the superblock yet, so we can't do the usual trick * so we can't do the usual trick of looking at the SB quota bits.
* of looking at the SB quota bits.
* *
* The other possibility, of course, is that the quota subsystem was * The other possibility, of course, is that the quota subsystem was
* removed since the last mount - ENOSYS. * removed since the last mount - ENOSYS.
...@@ -2323,7 +2320,7 @@ xlog_recover_do_dquot_trans(xlog_t *log, ...@@ -2323,7 +2320,7 @@ xlog_recover_do_dquot_trans(xlog_t *log,
dq_f->qlf_id, dq_f->qlf_id,
0, XFS_QMOPT_DOWARN, 0, XFS_QMOPT_DOWARN,
"xlog_recover_do_dquot_trans (log copy)"))) { "xlog_recover_do_dquot_trans (log copy)"))) {
if (error == ENOSYS) if (error == ENOSYS)
return (0); return (0);
return XFS_ERROR(EIO); return XFS_ERROR(EIO);
} }
......
...@@ -91,7 +91,8 @@ static struct { ...@@ -91,7 +91,8 @@ static struct {
{ offsetof(xfs_sb_t, sb_unit), 0 }, { offsetof(xfs_sb_t, sb_unit), 0 },
{ offsetof(xfs_sb_t, sb_width), 0 }, { offsetof(xfs_sb_t, sb_width), 0 },
{ offsetof(xfs_sb_t, sb_dirblklog), 0 }, { offsetof(xfs_sb_t, sb_dirblklog), 0 },
{ offsetof(xfs_sb_t, sb_dummy), 1 }, { offsetof(xfs_sb_t, sb_logsectlog), 0 },
{ offsetof(xfs_sb_t, sb_logsectsize),0 },
{ offsetof(xfs_sb_t, sb_logsunit), 0 }, { offsetof(xfs_sb_t, sb_logsunit), 0 },
{ sizeof(xfs_sb_t), 0 } { sizeof(xfs_sb_t), 0 }
}; };
...@@ -119,6 +120,7 @@ xfs_mount_init(void) ...@@ -119,6 +120,7 @@ xfs_mount_init(void)
spinlock_init(&mp->m_freeze_lock, "xfs_freeze"); spinlock_init(&mp->m_freeze_lock, "xfs_freeze");
init_sv(&mp->m_wait_unfreeze, SV_DEFAULT, "xfs_freeze", 0); init_sv(&mp->m_wait_unfreeze, SV_DEFAULT, "xfs_freeze", 0);
atomic_set(&mp->m_active_trans, 0); atomic_set(&mp->m_active_trans, 0);
mp->m_cxfstype = XFS_CXFS_NOT;
return mp; return mp;
} /* xfs_mount_init */ } /* xfs_mount_init */
...@@ -213,13 +215,26 @@ xfs_mount_validate_sb( ...@@ -213,13 +215,26 @@ xfs_mount_validate_sb(
return XFS_ERROR(EFSCORRUPTED); return XFS_ERROR(EFSCORRUPTED);
} }
if (!sbp->sb_logsectlog)
sbp->sb_logsectlog = sbp->sb_sectlog;
if (!sbp->sb_logsectsize)
sbp->sb_logsectsize = sbp->sb_sectsize;
/* /*
* More sanity checking. These were stolen directly from * More sanity checking. These were stolen directly from
* xfs_repair. * xfs_repair.
*/ */
if (sbp->sb_blocksize <= 0 || if (sbp->sb_agcount <= 0 ||
sbp->sb_agcount <= 0 || sbp->sb_sectsize < XFS_MIN_SECTORSIZE ||
sbp->sb_sectsize <= 0 || sbp->sb_sectsize > XFS_MAX_SECTORSIZE ||
sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG ||
sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG ||
sbp->sb_logsectsize < XFS_MIN_SECTORSIZE ||
sbp->sb_logsectsize > XFS_MAX_SECTORSIZE ||
sbp->sb_logsectlog < XFS_MIN_SECTORSIZE_LOG ||
sbp->sb_logsectlog > XFS_MAX_SECTORSIZE_LOG ||
sbp->sb_blocksize < XFS_MIN_BLOCKSIZE ||
sbp->sb_blocksize > XFS_MAX_BLOCKSIZE ||
sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG || sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG || sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
sbp->sb_inodesize < XFS_DINODE_MIN_SIZE || sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
...@@ -232,7 +247,7 @@ xfs_mount_validate_sb( ...@@ -232,7 +247,7 @@ xfs_mount_validate_sb(
} }
/* /*
* sanity check ag count, size fields against data size field * Sanity check AG count, size fields against data size field
*/ */
if (sbp->sb_dblocks == 0 || if (sbp->sb_dblocks == 0 ||
sbp->sb_dblocks > sbp->sb_dblocks >
...@@ -268,7 +283,8 @@ xfs_mount_validate_sb( ...@@ -268,7 +283,8 @@ xfs_mount_validate_sb(
PAGE_SIZE); PAGE_SIZE);
return XFS_ERROR(EWRONGFS); return XFS_ERROR(EWRONGFS);
} }
return (0);
return 0;
} }
void void
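Read outside the diff, the new sector-size handling in xfs_mount_validate_sb() above amounts to: default the log sector fields from the data sector fields (older superblocks carry zeroes there), then range-check both against the new limits. A hedged, self-contained sketch of just that part, with a stripped-down stand-in for xfs_sb_t:

    #include <stdint.h>

    #define XFS_MIN_SECTORSIZE_LOG  9
    #define XFS_MAX_SECTORSIZE_LOG  15
    #define XFS_MIN_SECTORSIZE      (1 << XFS_MIN_SECTORSIZE_LOG)
    #define XFS_MAX_SECTORSIZE      (1 << XFS_MAX_SECTORSIZE_LOG)

    struct sb {
            uint16_t        sb_sectsize, sb_logsectsize;
            uint8_t         sb_sectlog, sb_logsectlog;
    };

    static int validate_sector_fields(struct sb *sbp)
    {
            if (!sbp->sb_logsectlog)                        /* pre-existing filesystems:    */
                    sbp->sb_logsectlog = sbp->sb_sectlog;   /* inherit the data sector size */
            if (!sbp->sb_logsectsize)
                    sbp->sb_logsectsize = sbp->sb_sectsize;

            if (sbp->sb_sectsize < XFS_MIN_SECTORSIZE ||
                sbp->sb_sectsize > XFS_MAX_SECTORSIZE ||
                sbp->sb_logsectsize < XFS_MIN_SECTORSIZE ||
                sbp->sb_logsectsize > XFS_MAX_SECTORSIZE ||
                sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG ||
                sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG ||
                sbp->sb_logsectlog < XFS_MIN_SECTORSIZE_LOG ||
                sbp->sb_logsectlog > XFS_MAX_SECTORSIZE_LOG)
                    return -1;                              /* EFSCORRUPTED in the real code */
            return 0;
    }

    int main(void)
    {
            struct sb sb = { .sb_sectsize = 512, .sb_sectlog = 9 };  /* old fs: log fields zero */

            return validate_sector_fields(&sb);     /* inherits 512/9 for the log and passes */
    }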
...@@ -467,6 +483,7 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp) ...@@ -467,6 +483,7 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
mp->m_maxagi = mp->m_sb.sb_agcount; mp->m_maxagi = mp->m_sb.sb_agcount;
mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG; mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT; mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1; mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog; mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
mp->m_litino = sbp->sb_inodesize - mp->m_litino = sbp->sb_inodesize -
...@@ -560,7 +577,7 @@ xfs_mountfs( ...@@ -560,7 +577,7 @@ xfs_mountfs(
xfs_daddr_t d; xfs_daddr_t d;
extern xfs_ioops_t xfs_iocore_xfs; /* from xfs_iocore.c */ extern xfs_ioops_t xfs_iocore_xfs; /* from xfs_iocore.c */
__uint64_t ret64; __uint64_t ret64;
uint quotaflags, quotaondisk, rootqcheck, needquotacheck; uint quotaflags, quotaondisk;
uint uquotaondisk = 0, gquotaondisk = 0; uint uquotaondisk = 0, gquotaondisk = 0;
boolean_t needquotamount; boolean_t needquotamount;
__int64_t update_flags; __int64_t update_flags;
...@@ -755,7 +772,9 @@ xfs_mountfs( ...@@ -755,7 +772,9 @@ xfs_mountfs(
goto error1; goto error1;
} }
if (!noio) { if (!noio) {
error = xfs_read_buf(mp, mp->m_ddev_targp, d - 1, 1, 0, &bp); error = xfs_read_buf(mp, mp->m_ddev_targp,
d - XFS_FSS_TO_BB(mp, 1),
XFS_FSS_TO_BB(mp, 1), 0, &bp);
if (!error) { if (!error) {
xfs_buf_relse(bp); xfs_buf_relse(bp);
} else { } else {
...@@ -775,7 +794,9 @@ xfs_mountfs( ...@@ -775,7 +794,9 @@ xfs_mountfs(
error = XFS_ERROR(E2BIG); error = XFS_ERROR(E2BIG);
goto error1; goto error1;
} }
error = xfs_read_buf(mp, mp->m_logdev_targp, d - 1, 1, 0, &bp); error = xfs_read_buf(mp, mp->m_logdev_targp,
d - XFS_LOGS_TO_BB(mp, 1),
XFS_LOGS_TO_BB(mp, 1), 0, &bp);
if (!error) { if (!error) {
xfs_buf_relse(bp); xfs_buf_relse(bp);
} else { } else {
...@@ -967,21 +988,13 @@ xfs_mountfs( ...@@ -967,21 +988,13 @@ xfs_mountfs(
/* /*
* Figure out if we'll need to do a quotacheck. * Figure out if we'll need to do a quotacheck.
* The requirements are a little different depending on whether
* this fs is root or not.
*/ */
rootqcheck = (mp->m_dev == rootdev && quotaondisk &&
((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT &&
(mp->m_sb.sb_qflags & XFS_UQUOTA_CHKD) == 0) ||
(mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT &&
(mp->m_sb.sb_qflags & XFS_GQUOTA_CHKD) == 0)));
needquotacheck = rootqcheck || XFS_QM_NEED_QUOTACHECK(mp);
if (XFS_IS_QUOTA_ON(mp) || quotaondisk) { if (XFS_IS_QUOTA_ON(mp) || quotaondisk) {
/* /*
* Call mount_quotas at this point only if we won't have to do * Call mount_quotas at this point only if we won't have to do
* a quotacheck. * a quotacheck.
*/ */
if (quotaondisk && !needquotacheck) { if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
/* /*
* If the xfs quota code isn't installed, * If the xfs quota code isn't installed,
* we have to reset the quotachk'd bit. * we have to reset the quotachk'd bit.
...@@ -1020,10 +1033,6 @@ xfs_mountfs( ...@@ -1020,10 +1033,6 @@ xfs_mountfs(
if (needquotamount) { if (needquotamount) {
ASSERT(mp->m_qflags == 0); ASSERT(mp->m_qflags == 0);
mp->m_qflags = quotaflags; mp->m_qflags = quotaflags;
rootqcheck = (mp->m_dev == rootdev && needquotacheck);
if (rootqcheck && (error = xfs_dev_is_read_only(mp,
"quotacheck")))
goto error2;
if (xfs_qm_mount_quotas(mp)) if (xfs_qm_mount_quotas(mp))
xfs_mount_reset_sbqflags(mp); xfs_mount_reset_sbqflags(mp);
} }
......
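With the rootdev special case gone, the quotacheck decision above reduces to the generic XFS_QM_NEED_QUOTACHECK(mp) test. The deleted rootqcheck expression shows what that test boils down to: quota accounting is enabled on disk but the corresponding "checked" flag was never set. A sketch of that predicate, with placeholder flag values rather than the real on-disk ones:

    #define XFS_UQUOTA_ACCT 0x0001          /* placeholder values, not the on-disk ones */
    #define XFS_UQUOTA_CHKD 0x0002
    #define XFS_GQUOTA_ACCT 0x0004
    #define XFS_GQUOTA_CHKD 0x0008

    static int need_quotacheck(unsigned int sb_qflags)
    {
            return ((sb_qflags & XFS_UQUOTA_ACCT) && !(sb_qflags & XFS_UQUOTA_CHKD)) ||
                   ((sb_qflags & XFS_GQUOTA_ACCT) && !(sb_qflags & XFS_GQUOTA_CHKD));
    }

    int main(void)
    {
            /* user accounting on, never checked -> a quotacheck is needed */
            return need_quotacheck(XFS_UQUOTA_ACCT) ? 0 : 1;
    }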
...@@ -232,6 +232,7 @@ typedef struct xfs_mount { ...@@ -232,6 +232,7 @@ typedef struct xfs_mount {
__uint8_t m_mk_sharedro; /* mark shared ro on unmount */ __uint8_t m_mk_sharedro; /* mark shared ro on unmount */
__uint8_t m_inode_quiesce;/* call quiesce on new inodes. __uint8_t m_inode_quiesce;/* call quiesce on new inodes.
field governed by m_ilock */ field governed by m_ilock */
__uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
__uint8_t m_dirversion; /* 1 or 2 */ __uint8_t m_dirversion; /* 1 or 2 */
xfs_dirops_t m_dirops; /* table of dir funcs */ xfs_dirops_t m_dirops; /* table of dir funcs */
int m_dirblksize; /* directory block sz--bytes */ int m_dirblksize; /* directory block sz--bytes */
...@@ -265,7 +266,7 @@ typedef struct xfs_mount { ...@@ -265,7 +266,7 @@ typedef struct xfs_mount {
#if XFS_BIG_FILESYSTEMS #if XFS_BIG_FILESYSTEMS
#define XFS_MOUNT_INO64 0x00000002 #define XFS_MOUNT_INO64 0x00000002
#endif #endif
#define XFS_MOUNT_ROOTQCHECK 0x00000004 /* 0x00000004 -- currently unused */
/* 0x00000008 -- currently unused */ /* 0x00000008 -- currently unused */
#define XFS_MOUNT_FS_SHUTDOWN 0x00000010 /* atomic stop of all filesystem #define XFS_MOUNT_FS_SHUTDOWN 0x00000010 /* atomic stop of all filesystem
operations, typically for operations, typically for
......
...@@ -301,27 +301,24 @@ xfs_qm_unmount_quotadestroy( ...@@ -301,27 +301,24 @@ xfs_qm_unmount_quotadestroy(
/* /*
* This is called from xfs_mountfs to start quotas and initialize all * This is called from xfs_mountfs to start quotas and initialize all
* necessary data structures like quotainfo, and in the rootfs's case * necessary data structures like quotainfo. This is also responsible for
* xfs_Gqm. This is also responsible for running a quotacheck as necessary. * running a quotacheck as necessary. We are guaranteed that the superblock
* We are guaranteed that the superblock is consistently read in at this * is consistently read in at this point.
* point.
*/ */
int int
xfs_qm_mount_quotas( xfs_qm_mount_quotas(
xfs_mount_t *mp) xfs_mount_t *mp)
{ {
unsigned long s; unsigned long s;
int error; int error = 0;
uint sbf; uint sbf;
error = 0;
/* /*
* If a non-root file system had quotas running earlier, but decided * If a file system had quotas running earlier, but decided to
* to mount without -o quota/pquota options, revoke the quotachecked * mount without -o quota/uquota/gquota options, revoke the
* license, and bail out. * quotachecked license, and bail out.
*/ */
if (! XFS_IS_QUOTA_ON(mp) && if (! XFS_IS_QUOTA_ON(mp) &&
(mp->m_dev != rootdev) &&
(mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT))) { (mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT))) {
mp->m_qflags = 0; mp->m_qflags = 0;
goto write_changes; goto write_changes;
...@@ -342,32 +339,6 @@ xfs_qm_mount_quotas( ...@@ -342,32 +339,6 @@ xfs_qm_mount_quotas(
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
cmn_err(CE_NOTE, "Attempting to turn on disk quotas."); cmn_err(CE_NOTE, "Attempting to turn on disk quotas.");
#endif #endif
/*
* If this is the root file system, mark flags in mount struct first.
* We couldn't do this earlier because we didn't have the superblock
* read in.
*/
if (mp->m_dev == rootdev) {
ASSERT(XFS_SB_VERSION_HASQUOTA(&mp->m_sb));
ASSERT(mp->m_sb.sb_qflags &
(XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT));
if (xfs_Gqm == NULL) {
if ((xfs_Gqm = xfs_qm_init()) == NULL) {
mp->m_qflags = 0;
error = EINVAL;
goto write_changes;
}
}
mp->m_qflags = mp->m_sb.sb_qflags;
if (mp->m_qflags & XFS_UQUOTA_ACCT)
mp->m_qflags |= XFS_UQUOTA_ACTIVE;
if (mp->m_qflags & XFS_GQUOTA_ACCT)
mp->m_qflags |= XFS_GQUOTA_ACTIVE;
/*
* The quotainode of the root file system may or may not
* exist at this point.
*/
}
ASSERT(XFS_IS_QUOTA_RUNNING(mp)); ASSERT(XFS_IS_QUOTA_RUNNING(mp));
/* /*
...@@ -545,20 +516,6 @@ xfs_qm_dqflush_all( ...@@ -545,20 +516,6 @@ xfs_qm_dqflush_all(
if (error) if (error)
return (error); return (error);
/*
* If this is the root filesystem doing a quotacheck,
* we should do periodic bflushes. This is because there's
* no bflushd at this point.
*/
if (mp->m_flags & XFS_MOUNT_ROOTQCHECK) {
if (++niters == XFS_QM_MAX_DQCLUSTER_LOGSZ) {
xfs_log_force(mp, (xfs_lsn_t)0,
XFS_LOG_FORCE | XFS_LOG_SYNC);
XFS_bflush(mp->m_ddev_targp);
niters = 0;
}
}
xfs_qm_mplist_lock(mp); xfs_qm_mplist_lock(mp);
if (recl != XFS_QI_MPLRECLAIMS(mp)) { if (recl != XFS_QI_MPLRECLAIMS(mp)) {
xfs_qm_mplist_unlock(mp); xfs_qm_mplist_unlock(mp);
...@@ -1568,22 +1525,6 @@ xfs_qm_dqiter_bufs( ...@@ -1568,22 +1525,6 @@ xfs_qm_dqiter_bufs(
flags & XFS_QMOPT_UQUOTA ? flags & XFS_QMOPT_UQUOTA ?
XFS_DQ_USER : XFS_DQ_GROUP); XFS_DQ_USER : XFS_DQ_GROUP);
xfs_bdwrite(mp, bp); xfs_bdwrite(mp, bp);
/*
* When quotachecking the root filesystem,
* we may not have bdflush, and we may fill
* up all available freebufs.
* The workaround here is to push on the
* log and do a bflush on the rootdev
* periodically.
*/
if (mp->m_flags & XFS_MOUNT_ROOTQCHECK) {
if (++notcommitted == incr) {
xfs_log_force(mp, (xfs_lsn_t)0,
XFS_LOG_FORCE | XFS_LOG_SYNC);
XFS_bflush(mp->m_ddev_targp);
notcommitted = 0;
}
}
/* /*
* goto the next block. * goto the next block.
*/ */
...@@ -1982,7 +1923,6 @@ xfs_qm_quotacheck( ...@@ -1982,7 +1923,6 @@ xfs_qm_quotacheck(
error_return: error_return:
cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname); cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
mp->m_flags &= ~(XFS_MOUNT_ROOTQCHECK);
return (error); return (error);
} }
...@@ -2034,9 +1974,9 @@ xfs_qm_init_quotainos( ...@@ -2034,9 +1974,9 @@ xfs_qm_init_quotainos(
/* /*
* Create the two inodes, if they don't exist already. The changes * Create the two inodes, if they don't exist already. The changes
* made above will get added to a transaction and logged in one of * made above will get added to a transaction and logged in one of
* the qino_alloc calls below. * the qino_alloc calls below. If the device is readonly,
* temporarily switch to read-write to do this.
*/ */
if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) { if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
if ((error = xfs_qm_qino_alloc(mp, &uip, if ((error = xfs_qm_qino_alloc(mp, &uip,
sbflags | XFS_SB_UQUOTINO, sbflags | XFS_SB_UQUOTINO,
...@@ -2145,20 +2085,6 @@ xfs_qm_shake_freelist( ...@@ -2145,20 +2085,6 @@ xfs_qm_shake_freelist(
*/ */
if (XFS_DQ_IS_DIRTY(dqp)) { if (XFS_DQ_IS_DIRTY(dqp)) {
xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY"); xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
/*
* We'll be doing a dqflush, and it is
* possible to fill up the entire buffer cache
* with dirty delayed write buffers when doing
* this on a root filesystem, if bdflush isn't
* running. So, do a flush periodically.
*/
if (dqp->q_mount->m_flags & XFS_MOUNT_ROOTQCHECK) {
if (!(++nflushes % XFS_QM_MAX_DQCLUSTER_LOGSZ)){
xfs_log_force(dqp->q_mount, (xfs_lsn_t)0,
XFS_LOG_FORCE | XFS_LOG_SYNC);
XFS_bflush(dqp->q_mount->m_ddev_targp);
}
}
/* /*
* We flush it delayed write, so don't bother * We flush it delayed write, so don't bother
* releasing the mplock. * releasing the mplock.
...@@ -2329,21 +2255,6 @@ xfs_qm_dqreclaim_one(void) ...@@ -2329,21 +2255,6 @@ xfs_qm_dqreclaim_one(void)
*/ */
if (XFS_DQ_IS_DIRTY(dqp)) { if (XFS_DQ_IS_DIRTY(dqp)) {
xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY"); xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
/*
* We'll be doing a dqflush, and it is
* possible to fill up the entire buffer cache
* with dirty delayed write buffers when doing
* this on a root filesystem, if bdflush isn't
* running. So, do a flush periodically.
*/
if (dqp->q_mount->m_flags & XFS_MOUNT_ROOTQCHECK) {
if (!(++nflushes % XFS_QM_MAX_DQCLUSTER_LOGSZ)) {
xfs_log_force(dqp->q_mount, (xfs_lsn_t)0,
XFS_LOG_FORCE | XFS_LOG_SYNC);
XFS_bflush(dqp->q_mount->m_ddev_targp);
}
}
/* /*
* We flush it delayed write, so don't bother * We flush it delayed write, so don't bother
* releasing the freelist lock. * releasing the freelist lock.
......
...@@ -99,8 +99,6 @@ linvfs_setxstate( ...@@ -99,8 +99,6 @@ linvfs_setxstate(
return -xfs_qm_scall_quotaon(mp, qflags); return -xfs_qm_scall_quotaon(mp, qflags);
case Q_XQUOTAOFF: case Q_XQUOTAOFF:
qflags = xfs_qm_import_flags(flags); qflags = xfs_qm_import_flags(flags);
if (mp->m_dev == rootdev)
return -xfs_qm_scall_quotaoff(mp, qflags, B_FALSE);
if (!XFS_IS_QUOTA_ON(mp)) if (!XFS_IS_QUOTA_ON(mp))
return -ESRCH; return -ESRCH;
return -xfs_qm_scall_quotaoff(mp, qflags, B_FALSE); return -xfs_qm_scall_quotaoff(mp, qflags, B_FALSE);
...@@ -169,17 +167,16 @@ xfs_qm_scall_quotaoff( ...@@ -169,17 +167,16 @@ xfs_qm_scall_quotaoff(
int error; int error;
uint inactivate_flags; uint inactivate_flags;
xfs_qoff_logitem_t *qoffstart; xfs_qoff_logitem_t *qoffstart;
uint sbflags, newflags;
int nculprits; int nculprits;
if (!force && !capable(CAP_SYS_ADMIN)) if (!force && !capable(CAP_SYS_ADMIN))
return XFS_ERROR(EPERM); return XFS_ERROR(EPERM);
/* /*
* Only root file system can have quotas enabled on disk but not * No file system can have quotas enabled on disk but not in core.
* in core. Note that quota utilities (like quotaoff) _expect_ * Note that quota utilities (like quotaoff) _expect_
* errno == EEXIST here. * errno == EEXIST here.
*/ */
if (mp->m_dev != rootdev && (mp->m_qflags & flags) == 0) if ((mp->m_qflags & flags) == 0)
return XFS_ERROR(EEXIST); return XFS_ERROR(EEXIST);
error = 0; error = 0;
...@@ -191,54 +188,14 @@ xfs_qm_scall_quotaoff( ...@@ -191,54 +188,14 @@ xfs_qm_scall_quotaoff(
* critical thing. * critical thing.
* If quotaoff, then we must be dealing with the root filesystem. * If quotaoff, then we must be dealing with the root filesystem.
*/ */
ASSERT(mp->m_quotainfo || mp->m_dev == rootdev); ASSERT(mp->m_quotainfo);
if (mp->m_quotainfo) if (mp->m_quotainfo)
mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD); mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
/*
* Root file system may or may not have quotas on in core.
* We have to perform the quotaoff accordingly.
*/
if (mp->m_dev == rootdev) {
s = XFS_SB_LOCK(mp);
sbflags = mp->m_sb.sb_qflags;
if ((mp->m_qflags & flags) == 0) {
mp->m_sb.sb_qflags &= ~(flags);
newflags = mp->m_sb.sb_qflags;
XFS_SB_UNLOCK(mp, s);
if (mp->m_quotainfo)
mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
if (sbflags != newflags)
error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
return (error);
}
XFS_SB_UNLOCK(mp, s);
if ((sbflags & flags) != (mp->m_qflags & flags)) {
/*
* This can happen only with grp+usr quota
* combination. Note: 1) accounting cannot be turned
* off without enforcement also getting turned off.
* 2) Every flag that exists in mpqflags MUST exist
* in sbqflags (but not vice versa).
* which means at this point sbqflags = UQ+GQ+..,
* and mpqflags = UQ or GQ.
*/
ASSERT(sbflags & XFS_GQUOTA_ACCT);
ASSERT((sbflags & XFS_ALL_QUOTA_ACCT) !=
(mp->m_qflags & XFS_ALL_QUOTA_ACCT));
qdprintk("quotaoff, sbflags=%x flags=%x m_qflags=%x\n",
sbflags, flags, mp->m_qflags);
/* XXX TBD Finish this for group quota support */
/* We need to update the SB and mp separately */
return XFS_ERROR(EINVAL);
}
}
ASSERT(mp->m_quotainfo); ASSERT(mp->m_quotainfo);
/* /*
* if we're just turning off quota enforcement, change mp and go. * If we're just turning off quota enforcement, change mp and go.
*/ */
if ((flags & XFS_ALL_QUOTA_ACCT) == 0) { if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
mp->m_qflags &= ~(flags); mp->m_qflags &= ~(flags);
...@@ -272,8 +229,8 @@ xfs_qm_scall_quotaoff( ...@@ -272,8 +229,8 @@ xfs_qm_scall_quotaoff(
} }
/* /*
* Nothing to do? Don't complain. * Nothing to do? Don't complain. This happens when we're just
* This happens when we're just turning off quota enforcement. * turning off quota enforcement.
*/ */
if ((mp->m_qflags & flags) == 0) { if ((mp->m_qflags & flags) == 0) {
mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); mutex_unlock(&(XFS_QI_QOFFLOCK(mp)));
...@@ -326,9 +283,8 @@ xfs_qm_scall_quotaoff( ...@@ -326,9 +283,8 @@ xfs_qm_scall_quotaoff(
* So, if we couldn't purge all the dquots from the filesystem, * So, if we couldn't purge all the dquots from the filesystem,
* we can't get rid of the incore data structures. * we can't get rid of the incore data structures.
*/ */
while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype|XFS_QMOPT_QUOTAOFF))) { while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype|XFS_QMOPT_QUOTAOFF)))
delay(10 * nculprits); delay(10 * nculprits);
}
/* /*
* Transactions that had started before ACTIVE state bit was cleared * Transactions that had started before ACTIVE state bit was cleared
...@@ -406,11 +362,9 @@ xfs_qm_scall_trunc_qfiles( ...@@ -406,11 +362,9 @@ xfs_qm_scall_trunc_qfiles(
/* /*
* This does two separate functions: * Switch on (a given) quota enforcement for a filesystem. This takes
* Switch on quotas for the root file system. This will take effect only * effect immediately.
* on reboot. * (Switching on quota accounting must be done at mount time.)
* Switch on (a given) quota enforcement for both root and non-root filesystems.
* This takes effect immediately.
*/ */
STATIC int STATIC int
xfs_qm_scall_quotaon( xfs_qm_scall_quotaon(
...@@ -422,37 +376,26 @@ xfs_qm_scall_quotaon( ...@@ -422,37 +376,26 @@ xfs_qm_scall_quotaon(
uint qf; uint qf;
uint accflags; uint accflags;
__int64_t sbflags; __int64_t sbflags;
boolean_t rootfs;
boolean_t delay;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return XFS_ERROR(EPERM); return XFS_ERROR(EPERM);
rootfs = (boolean_t) (mp->m_dev == rootdev);
flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
/* /*
* If caller wants to turn on accounting on /, but accounting * Switching on quota accounting must be done at mount time.
* is already turned on, ignore ACCTing flags.
* Switching on quota accounting for non-root filesystems
* must be done at mount time.
*/ */
accflags = flags & XFS_ALL_QUOTA_ACCT; accflags = flags & XFS_ALL_QUOTA_ACCT;
if (!rootfs || flags &= ~(XFS_ALL_QUOTA_ACCT);
(accflags && rootfs && ((mp->m_qflags & accflags) == accflags))) {
flags &= ~(XFS_ALL_QUOTA_ACCT);
}
sbflags = 0; sbflags = 0;
delay = (boolean_t) ((flags & XFS_ALL_QUOTA_ACCT) != 0);
if (flags == 0) { if (flags == 0) {
qdprintk("quotaon: zero flags, m_qflags=%x\n", mp->m_qflags); qdprintk("quotaon: zero flags, m_qflags=%x\n", mp->m_qflags);
return XFS_ERROR(EINVAL); return XFS_ERROR(EINVAL);
} }
/* Only rootfs can turn on quotas with a delayed effect */ /* No fs can turn on quotas with a delayed effect */
ASSERT(!delay || rootfs); ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
/* /*
* Can't enforce without accounting. We check the superblock * Can't enforce without accounting. We check the superblock
...@@ -476,22 +419,6 @@ xfs_qm_scall_quotaon( ...@@ -476,22 +419,6 @@ xfs_qm_scall_quotaon(
if ((mp->m_qflags & flags) == flags) if ((mp->m_qflags & flags) == flags)
return XFS_ERROR(EEXIST); return XFS_ERROR(EEXIST);
/*
* Change superblock version (if needed) for the root filesystem
*/
if (rootfs && !XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) {
qdprintk("Old superblock version %x\n", mp->m_sb.sb_versionnum);
s = XFS_SB_LOCK(mp);
XFS_SB_VERSION_ADDQUOTA(&mp->m_sb);
mp->m_sb.sb_uquotino = NULLFSINO;
mp->m_sb.sb_gquotino = NULLFSINO;
mp->m_sb.sb_qflags = 0;
XFS_SB_UNLOCK(mp, s);
qdprintk("Converted to version %x\n", mp->m_sb.sb_versionnum);
sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
}
/* /*
* Change sb_qflags on disk but not incore mp->qflags * Change sb_qflags on disk but not incore mp->qflags
* if this is the root filesystem. * if this is the root filesystem.
...@@ -511,11 +438,9 @@ xfs_qm_scall_quotaon( ...@@ -511,11 +438,9 @@ xfs_qm_scall_quotaon(
if ((error = xfs_qm_write_sb_changes(mp, sbflags))) if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
return (error); return (error);
/* /*
* If we had just turned on quotas (ondisk) for rootfs, or if we aren't * If we aren't trying to switch on quota enforcement, we are done.
* trying to switch on quota enforcement, we are done.
*/ */
if (delay || if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
(mp->m_qflags & XFS_UQUOTA_ACCT)) || (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
(flags & XFS_ALL_QUOTA_ENFD) == 0) (flags & XFS_ALL_QUOTA_ENFD) == 0)
return (0); return (0);
...@@ -524,8 +449,7 @@ xfs_qm_scall_quotaon( ...@@ -524,8 +449,7 @@ xfs_qm_scall_quotaon(
return XFS_ERROR(ESRCH); return XFS_ERROR(ESRCH);
/* /*
* Switch on quota enforcement in core. This applies to both root * Switch on quota enforcement in core.
* and non-root file systems.
*/ */
mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD); mutex_lock(&(XFS_QI_QOFFLOCK(mp)), PINOD);
mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
...@@ -546,7 +470,6 @@ xfs_qm_scall_getqstat( ...@@ -546,7 +470,6 @@ xfs_qm_scall_getqstat(
{ {
xfs_inode_t *uip, *gip; xfs_inode_t *uip, *gip;
boolean_t tempuqip, tempgqip; boolean_t tempuqip, tempgqip;
__uint16_t sbflags;
uip = gip = NULL; uip = gip = NULL;
tempuqip = tempgqip = B_FALSE; tempuqip = tempgqip = B_FALSE;
...@@ -561,20 +484,6 @@ xfs_qm_scall_getqstat( ...@@ -561,20 +484,6 @@ xfs_qm_scall_getqstat(
out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
(XFS_ALL_QUOTA_ACCT| (XFS_ALL_QUOTA_ACCT|
XFS_ALL_QUOTA_ENFD)); XFS_ALL_QUOTA_ENFD));
/*
* If the qflags are different on disk, as can be the case when
* root filesystem's quotas are being turned on, return them in the
* HI 8 bits.
*/
if (mp->m_dev == rootdev) {
sbflags = (__uint16_t) xfs_qm_export_flags(mp->m_sb.sb_qflags &
(XFS_ALL_QUOTA_ACCT|
XFS_ALL_QUOTA_ENFD));
ASSERT((out->qs_flags & 0xff00) == 0);
if (sbflags != out->qs_flags)
out->qs_flags |= ((sbflags & 0x00ff) << 8);
}
out->qs_pad = 0; out->qs_pad = 0;
out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
......
...@@ -78,8 +78,8 @@ STATIC int ...@@ -78,8 +78,8 @@ STATIC int
xfs_lock_for_rename( xfs_lock_for_rename(
xfs_inode_t *dp1, /* old (source) directory inode */ xfs_inode_t *dp1, /* old (source) directory inode */
xfs_inode_t *dp2, /* new (target) directory inode */ xfs_inode_t *dp2, /* new (target) directory inode */
struct dentry *dentry1, /* old entry name */ vname_t *dentry1, /* old entry name */
struct dentry *dentry2, /* new entry name */ vname_t *dentry2, /* new entry name */
xfs_inode_t **ipp1, /* inode of old entry */ xfs_inode_t **ipp1, /* inode of old entry */
xfs_inode_t **ipp2, /* inode of new entry, if it xfs_inode_t **ipp2, /* inode of new entry, if it
already exists, NULL otherwise. */ already exists, NULL otherwise. */
...@@ -224,9 +224,9 @@ int xfs_renames; ...@@ -224,9 +224,9 @@ int xfs_renames;
int int
xfs_rename( xfs_rename(
bhv_desc_t *src_dir_bdp, bhv_desc_t *src_dir_bdp,
struct dentry *src_dentry, vname_t *src_dentry,
vnode_t *target_dir_vp, vnode_t *target_dir_vp,
struct dentry *target_dentry, vname_t *target_dentry,
cred_t *credp) cred_t *credp)
{ {
xfs_trans_t *tp; xfs_trans_t *tp;
...@@ -246,8 +246,8 @@ xfs_rename( ...@@ -246,8 +246,8 @@ xfs_rename(
int spaceres; int spaceres;
int target_link_zero = 0; int target_link_zero = 0;
int num_inodes; int num_inodes;
char *src_name = (char *)src_dentry->d_name.name; char *src_name = VNAME(src_dentry);
char *target_name = (char *)target_dentry->d_name.name; char *target_name = VNAME(target_dentry);
int src_namelen; int src_namelen;
int target_namelen; int target_namelen;
#ifdef DEBUG #ifdef DEBUG
...@@ -268,10 +268,10 @@ xfs_rename( ...@@ -268,10 +268,10 @@ xfs_rename(
if (target_dir_bdp == NULL) { if (target_dir_bdp == NULL) {
return XFS_ERROR(EXDEV); return XFS_ERROR(EXDEV);
} }
src_namelen = src_dentry->d_name.len; src_namelen = VNAMELEN(src_dentry);
if (src_namelen >= MAXNAMELEN) if (src_namelen >= MAXNAMELEN)
return XFS_ERROR(ENAMETOOLONG); return XFS_ERROR(ENAMETOOLONG);
target_namelen = target_dentry->d_name.len; target_namelen = VNAMELEN(target_dentry);
if (target_namelen >= MAXNAMELEN) if (target_namelen >= MAXNAMELEN)
return XFS_ERROR(ENAMETOOLONG); return XFS_ERROR(ENAMETOOLONG);
src_dp = XFS_BHVTOI(src_dir_bdp); src_dp = XFS_BHVTOI(src_dir_bdp);
......
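The vname_t type and the VNAME()/VNAMELEN() accessors used throughout this hunk are not defined in the patch context shown here. Judging from the one-for-one replacements above (VNAME(x) for x->d_name.name, VNAMELEN(x) for x->d_name.len), a plausible minimal Linux definition would be the following; treat it as an assumption rather than the actual header contents:

    /* sketch only -- kernel context assumed for struct dentry */
    #include <linux/dcache.h>

    typedef struct dentry   vname_t;

    #define VNAME(dentry)           ((char *)(dentry)->d_name.name)
    #define VNAMELEN(dentry)        ((dentry)->d_name.len)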
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
/* /*
* Super block * Super block
* Fits into a 512-byte buffer at daddr_t 0 of each allocation group. * Fits into a sector-sized buffer at address 0 of each allocation group.
* Only the first of these is ever updated except during growfs. * Only the first of these is ever updated except during growfs.
*/ */
...@@ -140,7 +140,8 @@ typedef struct xfs_sb ...@@ -140,7 +140,8 @@ typedef struct xfs_sb
__uint32_t sb_unit; /* stripe or raid unit */ __uint32_t sb_unit; /* stripe or raid unit */
__uint32_t sb_width; /* stripe or raid width */ __uint32_t sb_width; /* stripe or raid width */
__uint8_t sb_dirblklog; /* log2 of dir block size (fsbs) */ __uint8_t sb_dirblklog; /* log2 of dir block size (fsbs) */
__uint8_t sb_dummy[3]; /* padding */ __uint8_t sb_logsectlog; /* log2 of the log sector size */
__uint16_t sb_logsectsize; /* sector size for the log, bytes */
__uint32_t sb_logsunit; /* stripe unit size for the log */ __uint32_t sb_logsunit; /* stripe unit size for the log */
} xfs_sb_t; } xfs_sb_t;
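The replacement keeps the on-disk layout intact: the three padding bytes of sb_dummy[3] are consumed exactly by the one-byte sb_logsectlog plus the two-byte sb_logsectsize. An illustrative compile-time check of that bookkeeping (stand-in structs, not the real xfs_sb_t):

    #include <stdint.h>

    struct old_tail {
            uint8_t         sb_dirblklog;
            uint8_t         sb_dummy[3];            /* padding */
            uint32_t        sb_logsunit;
    };

    struct new_tail {
            uint8_t         sb_dirblklog;
            uint8_t         sb_logsectlog;
            uint16_t        sb_logsectsize;
            uint32_t        sb_logsunit;
    };

    _Static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
                   "repurposed padding must not change the superblock size");

    int main(void) { return 0; }    /* the interesting check happens at compile time */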
...@@ -159,7 +160,7 @@ typedef enum { ...@@ -159,7 +160,7 @@ typedef enum {
XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO, XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO,
XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN, XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN,
XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG, XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG,
XFS_SBS_DUMMY, XFS_SBS_LOGSUNIT, XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT,
XFS_SBS_FIELDCOUNT XFS_SBS_FIELDCOUNT
} xfs_sb_field_t; } xfs_sb_field_t;
...@@ -444,7 +445,7 @@ int xfs_sb_version_subextflgbit(xfs_sb_t *sbp); ...@@ -444,7 +445,7 @@ int xfs_sb_version_subextflgbit(xfs_sb_t *sbp);
* end of superblock version macros * end of superblock version macros
*/ */
#define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */ #define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */
#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_BLOCK) #if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_SB_BLOCK)
xfs_agblock_t xfs_sb_block(struct xfs_mount *mp); xfs_agblock_t xfs_sb_block(struct xfs_mount *mp);
#define XFS_SB_BLOCK(mp) xfs_sb_block(mp) #define XFS_SB_BLOCK(mp) xfs_sb_block(mp)
...@@ -474,6 +475,28 @@ xfs_daddr_t xfs_fsb_to_daddr(struct xfs_mount *mp, xfs_fsblock_t fsbno); ...@@ -474,6 +475,28 @@ xfs_daddr_t xfs_fsb_to_daddr(struct xfs_mount *mp, xfs_fsblock_t fsbno);
XFS_FSB_TO_AGBNO(mp,fsbno)) XFS_FSB_TO_AGBNO(mp,fsbno))
#endif #endif
#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_SBP)
xfs_sb_t *xfs_buf_to_sbp(struct xfs_buf *bp);
#define XFS_BUF_TO_SBP(bp) xfs_buf_to_sbp(bp)
#else
#define XFS_BUF_TO_SBP(bp) ((xfs_sb_t *)XFS_BUF_PTR(bp))
#endif
/*
* File system sector to basic block conversions.
*/
#define XFS_FSS_TO_BB(mp,sec) ((sec) << (mp)->m_sectbb_log)
#define XFS_LOGS_TO_BB(mp,sec) ((sec) << ((mp)->m_sb.sb_logsectlog - BBSHIFT))
#define XFS_BB_TO_FSS(mp,bb) \
(((bb) + (XFS_FSS_TO_BB(mp,1) - 1)) >> (mp)->m_sectbb_log)
#define XFS_BB_TO_FSST(mp,bb) ((bb) >> (mp)->m_sectbb_log)
/*
* File system sector to byte conversions.
*/
#define XFS_FSS_TO_B(mp,sectno) ((xfs_fsize_t)(sectno) << (mp)->m_sb.sb_sectlog)
#define XFS_B_TO_FSST(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_sectlog)
/* /*
* File system block to basic block conversions. * File system block to basic block conversions.
*/ */
...@@ -493,11 +516,4 @@ xfs_daddr_t xfs_fsb_to_daddr(struct xfs_mount *mp, xfs_fsblock_t fsbno); ...@@ -493,11 +516,4 @@ xfs_daddr_t xfs_fsb_to_daddr(struct xfs_mount *mp, xfs_fsblock_t fsbno);
#define XFS_B_TO_FSBT(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog) #define XFS_B_TO_FSBT(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog)
#define XFS_B_FSB_OFFSET(mp,b) ((b) & (mp)->m_blockmask) #define XFS_B_FSB_OFFSET(mp,b) ((b) & (mp)->m_blockmask)
#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BUF_TO_SBP)
xfs_sb_t *xfs_buf_to_sbp(struct xfs_buf *bp);
#define XFS_BUF_TO_SBP(bp) xfs_buf_to_sbp(bp)
#else
#define XFS_BUF_TO_SBP(bp) ((xfs_sb_t *)XFS_BUF_PTR(bp))
#endif
#endif /* __XFS_SB_H__ */ #endif /* __XFS_SB_H__ */
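The sector conversion macros added above (XFS_FSS_TO_BB, XFS_LOGS_TO_BB and friends) are what let xfs_mountfs() read the last filesystem sector, XFS_FSS_TO_BB(mp, 1) basic blocks starting at d - XFS_FSS_TO_BB(mp, 1), instead of hard-coding a single 512-byte block. Basic blocks are 512 bytes, i.e. BBSHIFT is 9, so with a 4096-byte sector m_sectbb_log (set to sb_sectlog - BBSHIFT in xfs_mount_common()) is 12 - 9 = 3 and one sector spans eight basic blocks. A small worked example of the arithmetic, using a stand-in mount structure:

    #include <assert.h>
    #include <stdint.h>

    #define BBSHIFT 9                       /* basic blocks are 512 bytes */

    struct mount {
            uint8_t m_sectbb_log;           /* sb_sectlog - BBSHIFT */
    };

    #define XFS_FSS_TO_BB(mp, sec)  ((sec) << (mp)->m_sectbb_log)
    #define XFS_BB_TO_FSST(mp, bb)  ((bb) >> (mp)->m_sectbb_log)

    int main(void)
    {
            struct mount m = { .m_sectbb_log = 12 - BBSHIFT };      /* 4096-byte sectors */

            assert(XFS_FSS_TO_BB(&m, 1) == 8);      /* one sector == eight 512-byte blocks */
            assert(XFS_BB_TO_FSST(&m, 17) == 2);    /* truncating conversion back */
            return 0;
    }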
...@@ -151,7 +151,7 @@ typedef __uint64_t xfs_fileoff_t; /* block number in a file */ ...@@ -151,7 +151,7 @@ typedef __uint64_t xfs_fileoff_t; /* block number in a file */
typedef __int64_t xfs_sfiloff_t; /* signed block number in a file */ typedef __int64_t xfs_sfiloff_t; /* signed block number in a file */
typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */ typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */
typedef __uint8_t xfs_arch_t; /* architecutre of an xfs fs */ typedef __uint8_t xfs_arch_t; /* architecture of an xfs fs */
/* /*
* Null values for the types. * Null values for the types.
...@@ -195,119 +195,6 @@ typedef enum { ...@@ -195,119 +195,6 @@ typedef enum {
} xfs_btnum_t; } xfs_btnum_t;
#if defined(CONFIG_PROC_FS) && defined(__KERNEL__) && !defined(XFS_STATS_OFF)
/*
* XFS global statistics
*/
struct xfsstats {
# define XFSSTAT_END_EXTENT_ALLOC 4
__uint32_t xs_allocx;
__uint32_t xs_allocb;
__uint32_t xs_freex;
__uint32_t xs_freeb;
# define XFSSTAT_END_ALLOC_BTREE (XFSSTAT_END_EXTENT_ALLOC+4)
__uint32_t xs_abt_lookup;
__uint32_t xs_abt_compare;
__uint32_t xs_abt_insrec;
__uint32_t xs_abt_delrec;
# define XFSSTAT_END_BLOCK_MAPPING (XFSSTAT_END_ALLOC_BTREE+7)
__uint32_t xs_blk_mapr;
__uint32_t xs_blk_mapw;
__uint32_t xs_blk_unmap;
__uint32_t xs_add_exlist;
__uint32_t xs_del_exlist;
__uint32_t xs_look_exlist;
__uint32_t xs_cmp_exlist;
# define XFSSTAT_END_BLOCK_MAP_BTREE (XFSSTAT_END_BLOCK_MAPPING+4)
__uint32_t xs_bmbt_lookup;
__uint32_t xs_bmbt_compare;
__uint32_t xs_bmbt_insrec;
__uint32_t xs_bmbt_delrec;
# define XFSSTAT_END_DIRECTORY_OPS (XFSSTAT_END_BLOCK_MAP_BTREE+4)
__uint32_t xs_dir_lookup;
__uint32_t xs_dir_create;
__uint32_t xs_dir_remove;
__uint32_t xs_dir_getdents;
# define XFSSTAT_END_TRANSACTIONS (XFSSTAT_END_DIRECTORY_OPS+3)
__uint32_t xs_trans_sync;
__uint32_t xs_trans_async;
__uint32_t xs_trans_empty;
# define XFSSTAT_END_INODE_OPS (XFSSTAT_END_TRANSACTIONS+7)
__uint32_t xs_ig_attempts;
__uint32_t xs_ig_found;
__uint32_t xs_ig_frecycle;
__uint32_t xs_ig_missed;
__uint32_t xs_ig_dup;
__uint32_t xs_ig_reclaims;
__uint32_t xs_ig_attrchg;
# define XFSSTAT_END_LOG_OPS (XFSSTAT_END_INODE_OPS+5)
__uint32_t xs_log_writes;
__uint32_t xs_log_blocks;
__uint32_t xs_log_noiclogs;
__uint32_t xs_log_force;
__uint32_t xs_log_force_sleep;
# define XFSSTAT_END_TAIL_PUSHING (XFSSTAT_END_LOG_OPS+10)
__uint32_t xs_try_logspace;
__uint32_t xs_sleep_logspace;
__uint32_t xs_push_ail;
__uint32_t xs_push_ail_success;
__uint32_t xs_push_ail_pushbuf;
__uint32_t xs_push_ail_pinned;
__uint32_t xs_push_ail_locked;
__uint32_t xs_push_ail_flushing;
__uint32_t xs_push_ail_restarts;
__uint32_t xs_push_ail_flush;
# define XFSSTAT_END_WRITE_CONVERT (XFSSTAT_END_TAIL_PUSHING+2)
__uint32_t xs_xstrat_quick;
__uint32_t xs_xstrat_split;
# define XFSSTAT_END_READ_WRITE_OPS (XFSSTAT_END_WRITE_CONVERT+2)
__uint32_t xs_write_calls;
__uint32_t xs_read_calls;
# define XFSSTAT_END_ATTRIBUTE_OPS (XFSSTAT_END_READ_WRITE_OPS+4)
__uint32_t xs_attr_get;
__uint32_t xs_attr_set;
__uint32_t xs_attr_remove;
__uint32_t xs_attr_list;
# define XFSSTAT_END_QUOTA_OPS (XFSSTAT_END_ATTRIBUTE_OPS+8)
__uint32_t xs_qm_dqreclaims;
__uint32_t xs_qm_dqreclaim_misses;
__uint32_t xs_qm_dquot_dups;
__uint32_t xs_qm_dqcachemisses;
__uint32_t xs_qm_dqcachehits;
__uint32_t xs_qm_dqwants;
__uint32_t xs_qm_dqshake_reclaims;
__uint32_t xs_qm_dqinact_reclaims;
# define XFSSTAT_END_INODE_CLUSTER (XFSSTAT_END_QUOTA_OPS+3)
__uint32_t xs_iflush_count;
__uint32_t xs_icluster_flushcnt;
__uint32_t xs_icluster_flushinode;
# define XFSSTAT_END_VNODE_OPS (XFSSTAT_END_INODE_CLUSTER+8)
__uint32_t vn_active; /* # vnodes not on free lists */
__uint32_t vn_alloc; /* # times vn_alloc called */
__uint32_t vn_get; /* # times vn_get called */
__uint32_t vn_hold; /* # times vn_hold called */
__uint32_t vn_rele; /* # times vn_rele called */
__uint32_t vn_reclaim; /* # times vn_reclaim called */
__uint32_t vn_remove; /* # times vn_remove called */
__uint32_t vn_free; /* # times vn_free called */
/* Extra precision counters */
__uint64_t xs_xstrat_bytes;
__uint64_t xs_write_bytes;
__uint64_t xs_read_bytes;
};
extern struct xfsstats xfsstats;
# define XFS_STATS_INC(count) ( (count)++ )
# define XFS_STATS_DEC(count) ( (count)-- )
# define XFS_STATS_ADD(count, inc) ( (count) += (inc) )
#else /* !CONFIG_PROC_FS */
# define XFS_STATS_INC(count)
# define XFS_STATS_DEC(count)
# define XFS_STATS_ADD(count, inc)
#endif /* !CONFIG_PROC_FS */
/* /*
* Juggle IRIX device numbers - still used in ondisk structures * Juggle IRIX device numbers - still used in ondisk structures
*/ */
......
...@@ -45,11 +45,11 @@ struct xfsstats xfsstats; ...@@ -45,11 +45,11 @@ struct xfsstats xfsstats;
*/ */
int int
xfs_get_dir_entry( xfs_get_dir_entry(
struct dentry *dentry, vname_t *dentry,
xfs_inode_t **ipp) xfs_inode_t **ipp)
{ {
vnode_t *vp; vnode_t *vp;
bhv_desc_t *bdp; bhv_desc_t *bdp;
ASSERT(dentry->d_inode); ASSERT(dentry->d_inode);
...@@ -66,24 +66,23 @@ xfs_get_dir_entry( ...@@ -66,24 +66,23 @@ xfs_get_dir_entry(
int int
xfs_dir_lookup_int( xfs_dir_lookup_int(
bhv_desc_t *dir_bdp, bhv_desc_t *dir_bdp,
uint lock_mode, uint lock_mode,
struct dentry *dentry, vname_t *dentry,
xfs_ino_t *inum, xfs_ino_t *inum,
xfs_inode_t **ipp) xfs_inode_t **ipp)
{ {
vnode_t *dir_vp; vnode_t *dir_vp;
xfs_inode_t *dp; xfs_inode_t *dp;
int error; int error;
dir_vp = BHV_TO_VNODE(dir_bdp); dir_vp = BHV_TO_VNODE(dir_bdp);
vn_trace_entry(dir_vp, "xfs_dir_lookup_int", vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
(inst_t *)__return_address);
dp = XFS_BHVTOI(dir_bdp); dp = XFS_BHVTOI(dir_bdp);
error = XFS_DIR_LOOKUP(dp->i_mount, NULL, dp, error = XFS_DIR_LOOKUP(dp->i_mount, NULL, dp,
(char *)dentry->d_name.name, dentry->d_name.len, inum); VNAME(dentry), VNAMELEN(dentry), inum);
if (!error) { if (!error) {
/* /*
* Unlock the directory. We do this because we can't * Unlock the directory. We do this because we can't
......
...@@ -37,66 +37,16 @@ ...@@ -37,66 +37,16 @@
#define ITRACE(ip) vn_trace_ref(XFS_ITOV(ip), __FILE__, __LINE__, \ #define ITRACE(ip) vn_trace_ref(XFS_ITOV(ip), __FILE__, __LINE__, \
(inst_t *)__return_address) (inst_t *)__return_address)
struct bhv_desc; extern int xfs_rename (bhv_desc_t *, vname_t *, vnode_t *, vname_t *, cred_t *);
struct cred; extern int xfs_get_dir_entry (vname_t *, xfs_inode_t **);
struct vnode; extern int xfs_dir_lookup_int (bhv_desc_t *, uint, vname_t *, xfs_ino_t *,
struct xfs_inode; xfs_inode_t **);
struct xfs_mount; extern int xfs_truncate_file (xfs_mount_t *, xfs_inode_t *);
struct xfs_trans; extern int xfs_dir_ialloc (xfs_trans_t **, xfs_inode_t *, mode_t, nlink_t,
xfs_dev_t, cred_t *, prid_t, int,
extern int xfs_inode_t **, int *);
xfs_rename( extern int xfs_droplink (xfs_trans_t *, xfs_inode_t *);
struct bhv_desc *src_dir_bdp, extern int xfs_bumplink (xfs_trans_t *, xfs_inode_t *);
struct dentry *src_dentry, extern void xfs_bump_ino_vers2 (xfs_trans_t *, xfs_inode_t *);
struct vnode *target_dir_vp,
struct dentry *target_dentry, #endif /* __XFS_UTILS_H__ */
struct cred *credp);
extern int
xfs_get_dir_entry(
struct dentry *dentry,
xfs_inode_t **ipp);
extern int
xfs_dir_lookup_int(
struct bhv_desc *dir_bdp,
uint lock_mode,
struct dentry *dentry,
xfs_ino_t *inum,
struct xfs_inode **ipp);
extern int
xfs_truncate_file(
struct xfs_mount *mp,
struct xfs_inode *ip);
extern int
xfs_dir_ialloc(
struct xfs_trans **tpp,
struct xfs_inode *dp,
mode_t mode,
nlink_t nlink,
xfs_dev_t rdev,
struct cred *credp,
prid_t prid,
int okalloc,
struct xfs_inode **ipp,
int *committed);
extern int
xfs_droplink(
struct xfs_trans *tp,
struct xfs_inode *ip);
extern int
xfs_bumplink(
struct xfs_trans *tp,
struct xfs_inode *ip);
extern void
xfs_bump_ino_vers2(
struct xfs_trans *tp,
struct xfs_inode *ip);
#endif /* XFS_UTILS_H */
...@@ -393,7 +393,7 @@ xfs_mount( ...@@ -393,7 +393,7 @@ xfs_mount(
xfs_mount_t *mp; xfs_mount_t *mp;
struct block_device *ddev, *logdev, *rtdev; struct block_device *ddev, *logdev, *rtdev;
int ronly = (vfsp->vfs_flag & VFS_RDONLY); int ronly = (vfsp->vfs_flag & VFS_RDONLY);
int error = 0; int flags = 0, error;
ddev = vfsp->vfs_super->s_bdev; ddev = vfsp->vfs_super->s_bdev;
logdev = rtdev = NULL; logdev = rtdev = NULL;
...@@ -430,17 +430,11 @@ xfs_mount( ...@@ -430,17 +430,11 @@ xfs_mount(
vfs_insertbhv(vfsp, &mp->m_bhv, &xfs_vfsops, mp); vfs_insertbhv(vfsp, &mp->m_bhv, &xfs_vfsops, mp);
mp->m_ddev_targp = xfs_alloc_buftarg(ddev); mp->m_ddev_targp = xfs_alloc_buftarg(ddev);
if (rtdev != NULL) { if (rtdev)
mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev); mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev);
set_blocksize(rtdev, 512); mp->m_logdev_targp = (logdev && logdev != ddev) ?
} xfs_alloc_buftarg(logdev) : mp->m_ddev_targp;
if (logdev != NULL && logdev != ddev) {
mp->m_logdev_targp = xfs_alloc_buftarg(logdev);
set_blocksize(logdev, 512);
} else {
mp->m_logdev_targp = mp->m_ddev_targp;
}
error = xfs_start_flags(args, mp, ronly); error = xfs_start_flags(args, mp, ronly);
if (error) if (error)
goto error; goto error;
...@@ -455,16 +449,16 @@ xfs_mount( ...@@ -455,16 +449,16 @@ xfs_mount(
goto error; goto error;
} }
mp->m_ddev_targp->pbr_blocksize = mp->m_sb.sb_blocksize; xfs_size_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
if (logdev != 0 && logdev != ddev) { mp->m_sb.sb_sectsize);
mp->m_logdev_targp->pbr_blocksize = mp->m_sb.sb_blocksize; if (logdev && logdev != ddev)
} xfs_size_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize,
if (rtdev != 0) { mp->m_sb.sb_logsectsize);
mp->m_rtdev_targp->pbr_blocksize = mp->m_sb.sb_blocksize; if (rtdev)
} xfs_size_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize,
mp->m_sb.sb_blocksize);
mp->m_cxfstype = XFS_CXFS_NOT; error = xfs_mountfs(vfsp, mp, ddev->bd_dev, flags);
error = xfs_mountfs(vfsp, mp, ddev->bd_dev, 0);
if (error) if (error)
goto error; goto error;
return 0; return 0;
......
...@@ -31,7 +31,6 @@ ...@@ -31,7 +31,6 @@
*/ */
#include <xfs.h> #include <xfs.h>
#include <asm/fcntl.h>
/* /*
...@@ -61,7 +60,6 @@ xfs_ctrunc_trace( ...@@ -61,7 +60,6 @@ xfs_ctrunc_trace(
* fifo vnodes are "wrapped" by specfs and fifofs vnodes, respectively, * fifo vnodes are "wrapped" by specfs and fifofs vnodes, respectively,
* when a new vnode is first looked up or created. * when a new vnode is first looked up or created.
*/ */
/*ARGSUSED*/
STATIC int STATIC int
xfs_open( xfs_open(
bhv_desc_t *bdp, bhv_desc_t *bdp,
...@@ -93,7 +91,6 @@ xfs_open( ...@@ -93,7 +91,6 @@ xfs_open(
/* /*
* xfs_getattr * xfs_getattr
*/ */
/*ARGSUSED*/
int int
xfs_getattr( xfs_getattr(
bhv_desc_t *bdp, bhv_desc_t *bdp,
...@@ -106,8 +103,7 @@ xfs_getattr( ...@@ -106,8 +103,7 @@ xfs_getattr(
vnode_t *vp; vnode_t *vp;
vp = BHV_TO_VNODE(bdp); vp = BHV_TO_VNODE(bdp);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
vn_trace_entry(vp, "xfs_getattr", (inst_t *)__return_address);
ip = XFS_BHVTOI(bdp); ip = XFS_BHVTOI(bdp);
mp = ip->i_mount; mp = ip->i_mount;
...@@ -119,7 +115,7 @@ xfs_getattr( ...@@ -119,7 +115,7 @@ xfs_getattr(
xfs_ilock(ip, XFS_ILOCK_SHARED); xfs_ilock(ip, XFS_ILOCK_SHARED);
vap->va_size = ip->i_d.di_size; vap->va_size = ip->i_d.di_size;
if (vap->va_mask == AT_SIZE) { if (vap->va_mask == XFS_AT_SIZE) {
if (!(flags & ATTR_LAZY)) if (!(flags & ATTR_LAZY))
xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_iunlock(ip, XFS_ILOCK_SHARED);
return 0; return 0;
...@@ -137,7 +133,8 @@ xfs_getattr( ...@@ -137,7 +133,8 @@ xfs_getattr(
/* /*
* Quick exit for non-stat callers * Quick exit for non-stat callers
*/ */
if ((vap->va_mask & ~(AT_SIZE|AT_FSID|AT_NODEID|AT_NLINK)) == 0) { if ((vap->va_mask &
~(XFS_AT_SIZE|XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK)) == 0) {
if (!(flags & ATTR_LAZY)) if (!(flags & ATTR_LAZY))
xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_iunlock(ip, XFS_ILOCK_SHARED);
return 0; return 0;
...@@ -224,8 +221,8 @@ xfs_getattr( ...@@ -224,8 +221,8 @@ xfs_getattr(
* to be filled in are needed. * to be filled in are needed.
*/ */
if ((vap->va_mask & if ((vap->va_mask &
(AT_XFLAGS|AT_EXTSIZE|AT_NEXTENTS|AT_ANEXTENTS| (XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
AT_GENCOUNT|AT_VCODE)) == 0) { XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0) {
if (!(flags & ATTR_LAZY)) if (!(flags & ATTR_LAZY))
xfs_iunlock(ip, XFS_ILOCK_SHARED); xfs_iunlock(ip, XFS_ILOCK_SHARED);
return 0; return 0;
...@@ -284,17 +281,17 @@ xfs_setattr( ...@@ -284,17 +281,17 @@ xfs_setattr(
int privileged; int privileged;
int mandlock_before, mandlock_after; int mandlock_before, mandlock_after;
uint qflags; uint qflags;
struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; xfs_dquot_t *udqp, *gdqp, *olddquot1, *olddquot2;
int file_owner; int file_owner;
vp = BHV_TO_VNODE(bdp); vp = BHV_TO_VNODE(bdp);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
vn_trace_entry(vp, "xfs_setattr", (inst_t *)__return_address);
/* /*
* Cannot set certain attributes. * Cannot set certain attributes.
*/ */
mask = vap->va_mask; mask = vap->va_mask;
if (mask & AT_NOSET) { if (mask & XFS_AT_NOSET) {
return XFS_ERROR(EINVAL); return XFS_ERROR(EINVAL);
} }
...@@ -308,11 +305,11 @@ xfs_setattr( ...@@ -308,11 +305,11 @@ xfs_setattr(
* Timestamps do not need to be logged and hence do not * Timestamps do not need to be logged and hence do not
* need to be done within a transaction. * need to be done within a transaction.
*/ */
if (mask & AT_UPDTIMES) { if (mask & XFS_AT_UPDTIMES) {
ASSERT((mask & ~AT_UPDTIMES) == 0); ASSERT((mask & ~XFS_AT_UPDTIMES) == 0);
timeflags = ((mask & AT_UPDATIME) ? XFS_ICHGTIME_ACC : 0) | timeflags = ((mask & XFS_AT_UPDATIME) ? XFS_ICHGTIME_ACC : 0) |
((mask & AT_UPDCTIME) ? XFS_ICHGTIME_CHG : 0) | ((mask & XFS_AT_UPDCTIME) ? XFS_ICHGTIME_CHG : 0) |
((mask & AT_UPDMTIME) ? XFS_ICHGTIME_MOD : 0); ((mask & XFS_AT_UPDMTIME) ? XFS_ICHGTIME_MOD : 0);
xfs_ichgtime(ip, timeflags); xfs_ichgtime(ip, timeflags);
return 0; return 0;
} }
...@@ -328,15 +325,15 @@ xfs_setattr( ...@@ -328,15 +325,15 @@ xfs_setattr(
* If the IDs do change before we take the ilock, we're covered * If the IDs do change before we take the ilock, we're covered
* because the i_*dquot fields will get updated anyway. * because the i_*dquot fields will get updated anyway.
*/ */
if (XFS_IS_QUOTA_ON(mp) && (mask & (AT_UID|AT_GID))) { if (XFS_IS_QUOTA_ON(mp) && (mask & (XFS_AT_UID|XFS_AT_GID))) {
qflags = 0; qflags = 0;
if (mask & AT_UID) { if (mask & XFS_AT_UID) {
uid = vap->va_uid; uid = vap->va_uid;
qflags |= XFS_QMOPT_UQUOTA; qflags |= XFS_QMOPT_UQUOTA;
} else { } else {
uid = ip->i_d.di_uid; uid = ip->i_d.di_uid;
} }
if (mask & AT_GID) { if (mask & XFS_AT_GID) {
gid = vap->va_gid; gid = vap->va_gid;
qflags |= XFS_QMOPT_GQUOTA; qflags |= XFS_QMOPT_GQUOTA;
} else { } else {
...@@ -360,8 +357,8 @@ xfs_setattr( ...@@ -360,8 +357,8 @@ xfs_setattr(
*/ */
tp = NULL; tp = NULL;
lock_flags = XFS_ILOCK_EXCL; lock_flags = XFS_ILOCK_EXCL;
if (!(mask & AT_SIZE)) { if (!(mask & XFS_AT_SIZE)) {
if ((mask != (AT_CTIME|AT_ATIME|AT_MTIME)) || if ((mask != (XFS_AT_CTIME|XFS_AT_ATIME|XFS_AT_MTIME)) ||
(mp->m_flags & XFS_MOUNT_WSYNC)) { (mp->m_flags & XFS_MOUNT_WSYNC)) {
tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
commit_flags = 0; commit_flags = 0;
...@@ -400,7 +397,9 @@ xfs_setattr( ...@@ -400,7 +397,9 @@ xfs_setattr(
* Only the owner or users with CAP_FOWNER * Only the owner or users with CAP_FOWNER
* capability may do these things. * capability may do these things.
*/ */
if (mask & (AT_MODE|AT_XFLAGS|AT_EXTSIZE|AT_UID|AT_GID|AT_PROJID)) { if (mask &
(XFS_AT_MODE|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_UID|
XFS_AT_GID|XFS_AT_PROJID)) {
/* /*
* CAP_FOWNER overrides the following restrictions: * CAP_FOWNER overrides the following restrictions:
* *
...@@ -424,7 +423,7 @@ xfs_setattr( ...@@ -424,7 +423,7 @@ xfs_setattr(
* IDs of the calling process shall match the group owner of * IDs of the calling process shall match the group owner of
* the file when setting the set-group-ID bit on that file * the file when setting the set-group-ID bit on that file
*/ */
if (mask & AT_MODE) { if (mask & XFS_AT_MODE) {
mode_t m = 0; mode_t m = 0;
if ((vap->va_mode & ISUID) && !file_owner) if ((vap->va_mode & ISUID) && !file_owner)
...@@ -449,7 +448,7 @@ xfs_setattr( ...@@ -449,7 +448,7 @@ xfs_setattr(
* and can change the group id only to a group of which he * and can change the group id only to a group of which he
* or she is a member. * or she is a member.
*/ */
if (mask & (AT_UID|AT_GID|AT_PROJID)) { if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) {
/* /*
* These IDs could have changed since we last looked at them. * These IDs could have changed since we last looked at them.
* But, we're assured that if the ownership did change * But, we're assured that if the ownership did change
...@@ -459,9 +458,9 @@ xfs_setattr( ...@@ -459,9 +458,9 @@ xfs_setattr(
iuid = ip->i_d.di_uid; iuid = ip->i_d.di_uid;
iprojid = ip->i_d.di_projid; iprojid = ip->i_d.di_projid;
igid = ip->i_d.di_gid; igid = ip->i_d.di_gid;
gid = (mask & AT_GID) ? vap->va_gid : igid; gid = (mask & XFS_AT_GID) ? vap->va_gid : igid;
uid = (mask & AT_UID) ? vap->va_uid : iuid; uid = (mask & XFS_AT_UID) ? vap->va_uid : iuid;
projid = (mask & AT_PROJID) ? (xfs_prid_t)vap->va_projid : projid = (mask & XFS_AT_PROJID) ? (xfs_prid_t)vap->va_projid :
iprojid; iprojid;
/* /*
...@@ -506,13 +505,13 @@ xfs_setattr( ...@@ -506,13 +505,13 @@ xfs_setattr(
/* /*
* Truncate file. Must have write permission and not be a directory. * Truncate file. Must have write permission and not be a directory.
*/ */
if (mask & AT_SIZE) { if (mask & XFS_AT_SIZE) {
/* Short circuit the truncate case for zero length files */ /* Short circuit the truncate case for zero length files */
if ((vap->va_size == 0) && if ((vap->va_size == 0) &&
(ip->i_d.di_size == 0) && (ip->i_d.di_nextents == 0)) { (ip->i_d.di_size == 0) && (ip->i_d.di_nextents == 0)) {
xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_iunlock(ip, XFS_ILOCK_EXCL);
lock_flags &= ~XFS_ILOCK_EXCL; lock_flags &= ~XFS_ILOCK_EXCL;
if (mask & AT_CTIME) if (mask & XFS_AT_CTIME)
xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); xfs_ichgtime(ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
code = 0; code = 0;
goto error_return; goto error_return;
...@@ -537,7 +536,7 @@ xfs_setattr( ...@@ -537,7 +536,7 @@ xfs_setattr(
/* /*
* Change file access or modified times. * Change file access or modified times.
*/ */
if (mask & (AT_ATIME|AT_MTIME)) { if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) {
if (!file_owner) { if (!file_owner) {
if ((flags & ATTR_UTIME) && if ((flags & ATTR_UTIME) &&
!capable(CAP_FOWNER)) { !capable(CAP_FOWNER)) {
...@@ -550,11 +549,11 @@ xfs_setattr( ...@@ -550,11 +549,11 @@ xfs_setattr(
/* /*
* Change extent size or realtime flag. * Change extent size or realtime flag.
*/ */
if (mask & (AT_EXTSIZE|AT_XFLAGS)) { if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) {
/* /*
* Can't change extent size if any extents are allocated. * Can't change extent size if any extents are allocated.
*/ */
if (ip->i_d.di_nextents && (mask & AT_EXTSIZE) && if (ip->i_d.di_nextents && (mask & XFS_AT_EXTSIZE) &&
((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
vap->va_extsize) ) { vap->va_extsize) ) {
code = XFS_ERROR(EINVAL); /* EFBIG? */ code = XFS_ERROR(EINVAL); /* EFBIG? */
...@@ -569,11 +568,11 @@ xfs_setattr( ...@@ -569,11 +568,11 @@ xfs_setattr(
* with buffered data writes is implemented. * with buffered data writes is implemented.
* *
*/ */
if ((mask & AT_EXTSIZE) && if ((mask & XFS_AT_EXTSIZE) &&
((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
vap->va_extsize) && vap->va_extsize) &&
(!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) || (!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
((mask & AT_XFLAGS) && ((mask & XFS_AT_XFLAGS) &&
(vap->va_xflags & XFS_XFLAG_REALTIME))))) { (vap->va_xflags & XFS_XFLAG_REALTIME))))) {
code = XFS_ERROR(EINVAL); code = XFS_ERROR(EINVAL);
goto error_return; goto error_return;
...@@ -582,7 +581,7 @@ xfs_setattr( ...@@ -582,7 +581,7 @@ xfs_setattr(
/* /*
* Can't change realtime flag if any extents are allocated. * Can't change realtime flag if any extents are allocated.
*/ */
if (ip->i_d.di_nextents && (mask & AT_XFLAGS) && if (ip->i_d.di_nextents && (mask & XFS_AT_XFLAGS) &&
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) !=
(vap->va_xflags & XFS_XFLAG_REALTIME)) { (vap->va_xflags & XFS_XFLAG_REALTIME)) {
code = XFS_ERROR(EINVAL); /* EFBIG? */ code = XFS_ERROR(EINVAL); /* EFBIG? */
...@@ -592,11 +591,11 @@ xfs_setattr( ...@@ -592,11 +591,11 @@ xfs_setattr(
* Extent size must be a multiple of the appropriate block * Extent size must be a multiple of the appropriate block
* size, if set at all. * size, if set at all.
*/ */
if ((mask & AT_EXTSIZE) && vap->va_extsize != 0) { if ((mask & XFS_AT_EXTSIZE) && vap->va_extsize != 0) {
xfs_extlen_t size; xfs_extlen_t size;
if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) || if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
((mask & AT_XFLAGS) && ((mask & XFS_AT_XFLAGS) &&
(vap->va_xflags & XFS_XFLAG_REALTIME))) { (vap->va_xflags & XFS_XFLAG_REALTIME))) {
size = mp->m_sb.sb_rextsize << size = mp->m_sb.sb_rextsize <<
mp->m_sb.sb_blocklog; mp->m_sb.sb_blocklog;
...@@ -611,7 +610,7 @@ xfs_setattr( ...@@ -611,7 +610,7 @@ xfs_setattr(
/* /*
* If realtime flag is set then must have realtime data. * If realtime flag is set then must have realtime data.
*/ */
if ((mask & AT_XFLAGS) && if ((mask & XFS_AT_XFLAGS) &&
(vap->va_xflags & XFS_XFLAG_REALTIME)) { (vap->va_xflags & XFS_XFLAG_REALTIME)) {
if ((mp->m_sb.sb_rblocks == 0) || if ((mp->m_sb.sb_rblocks == 0) ||
(mp->m_sb.sb_rextsize == 0) || (mp->m_sb.sb_rextsize == 0) ||
...@@ -624,13 +623,13 @@ xfs_setattr( ...@@ -624,13 +623,13 @@ xfs_setattr(
/* /*
* Now we can make the changes. Before we join the inode * Now we can make the changes. Before we join the inode
* to the transaction, if AT_SIZE is set then take care of * to the transaction, if XFS_AT_SIZE is set then take care of
* the part of the truncation that must be done without the * the part of the truncation that must be done without the
* inode lock. This needs to be done before joining the inode * inode lock. This needs to be done before joining the inode
* to the transaction, because the inode cannot be unlocked * to the transaction, because the inode cannot be unlocked
* once it is a part of the transaction. * once it is a part of the transaction.
*/ */
if (mask & AT_SIZE) { if (mask & XFS_AT_SIZE) {
if (vap->va_size > ip->i_d.di_size) { if (vap->va_size > ip->i_d.di_size) {
code = xfs_igrow_start(ip, vap->va_size, credp); code = xfs_igrow_start(ip, vap->va_size, credp);
xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_iunlock(ip, XFS_ILOCK_EXCL);
...@@ -673,7 +672,7 @@ xfs_setattr( ...@@ -673,7 +672,7 @@ xfs_setattr(
/* /*
* Truncate file. Must have write permission and not be a directory. * Truncate file. Must have write permission and not be a directory.
*/ */
if (mask & AT_SIZE) { if (mask & XFS_AT_SIZE) {
if (vap->va_size > ip->i_d.di_size) { if (vap->va_size > ip->i_d.di_size) {
xfs_igrow_finish(tp, ip, vap->va_size, xfs_igrow_finish(tp, ip, vap->va_size,
!(flags & ATTR_DMI)); !(flags & ATTR_DMI));
...@@ -703,7 +702,7 @@ xfs_setattr( ...@@ -703,7 +702,7 @@ xfs_setattr(
/* /*
* Change file access modes. * Change file access modes.
*/ */
if (mask & AT_MODE) { if (mask & XFS_AT_MODE) {
ip->i_d.di_mode &= IFMT; ip->i_d.di_mode &= IFMT;
ip->i_d.di_mode |= vap->va_mode & ~IFMT; ip->i_d.di_mode |= vap->va_mode & ~IFMT;
...@@ -718,7 +717,7 @@ xfs_setattr( ...@@ -718,7 +717,7 @@ xfs_setattr(
* and can change the group id only to a group of which he * and can change the group id only to a group of which he
* or she is a member. * or she is a member.
*/ */
if (mask & (AT_UID|AT_GID|AT_PROJID)) { if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) {
/* /*
* CAP_FSETID overrides the following restrictions: * CAP_FSETID overrides the following restrictions:
* *
...@@ -736,7 +735,7 @@ xfs_setattr( ...@@ -736,7 +735,7 @@ xfs_setattr(
*/ */
if (iuid != uid) { if (iuid != uid) {
if (XFS_IS_UQUOTA_ON(mp)) { if (XFS_IS_UQUOTA_ON(mp)) {
ASSERT(mask & AT_UID); ASSERT(mask & XFS_AT_UID);
ASSERT(udqp); ASSERT(udqp);
ASSERT(xfs_qm_dqid(udqp) == (xfs_dqid_t)uid); ASSERT(xfs_qm_dqid(udqp) == (xfs_dqid_t)uid);
olddquot1 = xfs_qm_vop_chown(tp, ip, olddquot1 = xfs_qm_vop_chown(tp, ip,
...@@ -750,7 +749,7 @@ xfs_setattr( ...@@ -750,7 +749,7 @@ xfs_setattr(
} }
if (igid != gid) { if (igid != gid) {
if (XFS_IS_GQUOTA_ON(mp)) { if (XFS_IS_GQUOTA_ON(mp)) {
ASSERT(mask & AT_GID); ASSERT(mask & XFS_AT_GID);
ASSERT(gdqp); ASSERT(gdqp);
ASSERT(xfs_qm_dqid(gdqp) == gid); ASSERT(xfs_qm_dqid(gdqp) == gid);
olddquot2 = xfs_qm_vop_chown(tp, ip, olddquot2 = xfs_qm_vop_chown(tp, ip,
...@@ -778,14 +777,14 @@ xfs_setattr( ...@@ -778,14 +777,14 @@ xfs_setattr(
/* /*
* Change file access or modified times. * Change file access or modified times.
*/ */
if (mask & (AT_ATIME|AT_MTIME)) { if (mask & (XFS_AT_ATIME|XFS_AT_MTIME)) {
if (mask & AT_ATIME) { if (mask & XFS_AT_ATIME) {
ip->i_d.di_atime.t_sec = vap->va_atime.tv_sec; ip->i_d.di_atime.t_sec = vap->va_atime.tv_sec;
ip->i_d.di_atime.t_nsec = vap->va_atime.tv_nsec; ip->i_d.di_atime.t_nsec = vap->va_atime.tv_nsec;
ip->i_update_core = 1; ip->i_update_core = 1;
timeflags &= ~XFS_ICHGTIME_ACC; timeflags &= ~XFS_ICHGTIME_ACC;
} }
if (mask & AT_MTIME) { if (mask & XFS_AT_MTIME) {
ip->i_d.di_mtime.t_sec = vap->va_mtime.tv_sec; ip->i_d.di_mtime.t_sec = vap->va_mtime.tv_sec;
ip->i_d.di_mtime.t_nsec = vap->va_mtime.tv_nsec; ip->i_d.di_mtime.t_nsec = vap->va_mtime.tv_nsec;
timeflags &= ~XFS_ICHGTIME_MOD; timeflags &= ~XFS_ICHGTIME_MOD;
...@@ -798,15 +797,15 @@ xfs_setattr( ...@@ -798,15 +797,15 @@ xfs_setattr(
/* /*
* Change XFS-added attributes. * Change XFS-added attributes.
*/ */
if (mask & (AT_EXTSIZE|AT_XFLAGS)) { if (mask & (XFS_AT_EXTSIZE|XFS_AT_XFLAGS)) {
if (mask & AT_EXTSIZE) { if (mask & XFS_AT_EXTSIZE) {
/* /*
* Converting bytes to fs blocks. * Converting bytes to fs blocks.
*/ */
ip->i_d.di_extsize = vap->va_extsize >> ip->i_d.di_extsize = vap->va_extsize >>
mp->m_sb.sb_blocklog; mp->m_sb.sb_blocklog;
} }
if (mask & AT_XFLAGS) { if (mask & XFS_AT_XFLAGS) {
ip->i_d.di_flags = 0; ip->i_d.di_flags = 0;
if (vap->va_xflags & XFS_XFLAG_REALTIME) { if (vap->va_xflags & XFS_XFLAG_REALTIME) {
ip->i_d.di_flags |= XFS_DIFLAG_REALTIME; ip->i_d.di_flags |= XFS_DIFLAG_REALTIME;
...@@ -822,11 +821,11 @@ xfs_setattr( ...@@ -822,11 +821,11 @@ xfs_setattr(
} }
/* /*
* Change file inode change time only if AT_CTIME set * Change file inode change time only if XFS_AT_CTIME set
* AND we have been called by a DMI function. * AND we have been called by a DMI function.
*/ */
if ( (flags & ATTR_DMI) && (mask & AT_CTIME) ) { if ( (flags & ATTR_DMI) && (mask & XFS_AT_CTIME) ) {
ip->i_d.di_ctime.t_sec = vap->va_ctime.tv_sec; ip->i_d.di_ctime.t_sec = vap->va_ctime.tv_sec;
ip->i_d.di_ctime.t_nsec = vap->va_ctime.tv_nsec; ip->i_d.di_ctime.t_nsec = vap->va_ctime.tv_nsec;
ip->i_update_core = 1; ip->i_update_core = 1;
...@@ -912,14 +911,13 @@ xfs_setattr( ...@@ -912,14 +911,13 @@ xfs_setattr(
xfs_iunlock(ip, lock_flags); xfs_iunlock(ip, lock_flags);
} }
return code; return code;
} /* xfs_setattr */ }
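The AT_* to XFS_AT_* changes above are an identifier rename; xfs_setattr still applies only the attributes whose bits are set in vap->va_mask. A minimal user-space sketch of that mask-gating idiom follows; the flag names and values in it are invented for illustration and are not the real XFS_AT_* definitions.

/*
 * Sketch of the vattr mask idiom, not XFS code: each XFS_AT_*-style
 * bit says "this field of the vattr is valid, apply it".
 */
#include <stdio.h>

#define DEMO_AT_MODE	0x0001
#define DEMO_AT_UID	0x0002
#define DEMO_AT_SIZE	0x0004

struct demo_vattr {
	unsigned int	va_mask;	/* which fields below are valid */
	unsigned int	va_mode;
	unsigned int	va_uid;
	long long	va_size;
};

static void demo_setattr(const struct demo_vattr *vap)
{
	if (vap->va_mask & DEMO_AT_MODE)	/* only touch mode if asked */
		printf("set mode to %o\n", vap->va_mode);
	if (vap->va_mask & DEMO_AT_UID)
		printf("set uid to %u\n", vap->va_uid);
	if (vap->va_mask & DEMO_AT_SIZE)
		printf("truncate to %lld bytes\n", vap->va_size);
}

int main(void)
{
	struct demo_vattr va = {
		.va_mask = DEMO_AT_MODE | DEMO_AT_SIZE,
		.va_mode = 0644,
		.va_size = 0,
	};

	demo_setattr(&va);	/* uid is left alone: its bit is clear */
	return 0;
}

xfs_change_file_space later in this diff relies on exactly this pattern: it sets only XFS_AT_SIZE in va_mask before calling xfs_setattr to perform the truncate.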
/* /*
* xfs_access * xfs_access
* Null conversion from vnode mode bits to inode mode bits, as in efs. * Null conversion from vnode mode bits to inode mode bits, as in efs.
*/ */
/*ARGSUSED*/
STATIC int STATIC int
xfs_access( xfs_access(
bhv_desc_t *bdp, bhv_desc_t *bdp,
...@@ -929,7 +927,7 @@ xfs_access( ...@@ -929,7 +927,7 @@ xfs_access(
xfs_inode_t *ip; xfs_inode_t *ip;
int error; int error;
vn_trace_entry(BHV_TO_VNODE(bdp), "xfs_access", vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__,
(inst_t *)__return_address); (inst_t *)__return_address);
ip = XFS_BHVTOI(bdp); ip = XFS_BHVTOI(bdp);
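Several hunks in this file replace hard-coded trace strings such as "xfs_access" with __FUNCTION__, the GCC spelling of C99's __func__, which always expands to the enclosing function's name, so a later rename cannot leave a stale trace label behind. A small self-contained sketch:

/*
 * Sketch, not XFS code: __FUNCTION__ always names the enclosing
 * function, so the trace call stays correct across renames.
 */
#include <stdio.h>

static void trace_entry(const char *fn)
{
	printf("enter %s\n", fn);
}

static void xfs_access_demo(void)
{
	trace_entry(__FUNCTION__);	/* prints "enter xfs_access_demo" */
}

int main(void)
{
	xfs_access_demo();
	return 0;
}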
...@@ -944,7 +942,6 @@ xfs_access( ...@@ -944,7 +942,6 @@ xfs_access(
* xfs_readlink * xfs_readlink
* *
*/ */
/*ARGSUSED*/
STATIC int STATIC int
xfs_readlink( xfs_readlink(
bhv_desc_t *bdp, bhv_desc_t *bdp,
...@@ -966,8 +963,7 @@ xfs_readlink( ...@@ -966,8 +963,7 @@ xfs_readlink(
xfs_buf_t *bp; xfs_buf_t *bp;
vp = BHV_TO_VNODE(bdp); vp = BHV_TO_VNODE(bdp);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
vn_trace_entry(vp, "xfs_readlink", (inst_t *)__return_address);
ip = XFS_BHVTOI(bdp); ip = XFS_BHVTOI(bdp);
mp = ip->i_mount; mp = ip->i_mount;
...@@ -1047,6 +1043,7 @@ xfs_readlink( ...@@ -1047,6 +1043,7 @@ xfs_readlink(
return error; return error;
} }
/* /*
* xfs_fsync * xfs_fsync
* *
...@@ -1056,7 +1053,6 @@ xfs_readlink( ...@@ -1056,7 +1053,6 @@ xfs_readlink(
* be held while flushing the data, so acquire after we're done * be held while flushing the data, so acquire after we're done
* with that. * with that.
*/ */
/*ARGSUSED*/
STATIC int STATIC int
xfs_fsync( xfs_fsync(
bhv_desc_t *bdp, bhv_desc_t *bdp,
...@@ -1067,16 +1063,13 @@ xfs_fsync( ...@@ -1067,16 +1063,13 @@ xfs_fsync(
{ {
xfs_inode_t *ip; xfs_inode_t *ip;
int error; int error;
/* REFERENCED */
int error2; int error2;
/* REFERENCED */
int syncall; int syncall;
vnode_t *vp; vnode_t *vp;
xfs_trans_t *tp; xfs_trans_t *tp;
vp = BHV_TO_VNODE(bdp); vp = BHV_TO_VNODE(bdp);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
vn_trace_entry(vp, "xfs_fsync", (inst_t *)__return_address);
ip = XFS_BHVTOI(bdp); ip = XFS_BHVTOI(bdp);
...@@ -1526,6 +1519,7 @@ xfs_inactive_symlink_local( ...@@ -1526,6 +1519,7 @@ xfs_inactive_symlink_local(
xfs_trans_t **tpp) xfs_trans_t **tpp)
{ {
int error; int error;
ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip)); ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip));
/* /*
* We're freeing a symlink which fit into * We're freeing a symlink which fit into
...@@ -1608,7 +1602,6 @@ xfs_inactive_attrs( ...@@ -1608,7 +1602,6 @@ xfs_inactive_attrs(
return (0); return (0);
} }
/*ARGSUSED*/
STATIC int STATIC int
xfs_release( xfs_release(
bhv_desc_t *bdp) bhv_desc_t *bdp)
...@@ -1655,14 +1648,12 @@ xfs_release( ...@@ -1655,14 +1648,12 @@ xfs_release(
* now be truncated. Also, we clear all of the read-ahead state * now be truncated. Also, we clear all of the read-ahead state
* kept for the inode here since the file is now closed. * kept for the inode here since the file is now closed.
*/ */
/*ARGSUSED*/
STATIC int STATIC int
xfs_inactive( xfs_inactive(
bhv_desc_t *bdp, bhv_desc_t *bdp,
cred_t *credp) cred_t *credp)
{ {
xfs_inode_t *ip; xfs_inode_t *ip;
/* REFERENCED */
vnode_t *vp; vnode_t *vp;
xfs_trans_t *tp; xfs_trans_t *tp;
xfs_mount_t *mp; xfs_mount_t *mp;
...@@ -1671,8 +1662,7 @@ xfs_inactive( ...@@ -1671,8 +1662,7 @@ xfs_inactive(
int truncate; int truncate;
vp = BHV_TO_VNODE(bdp); vp = BHV_TO_VNODE(bdp);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
vn_trace_entry(vp, "xfs_inactive", (inst_t *)__return_address);
ip = XFS_BHVTOI(bdp); ip = XFS_BHVTOI(bdp);
...@@ -1880,15 +1870,14 @@ xfs_inactive( ...@@ -1880,15 +1870,14 @@ xfs_inactive(
/* /*
* xfs_lookup * xfs_lookup
*/ */
/*ARGSUSED*/
STATIC int STATIC int
xfs_lookup( xfs_lookup(
bhv_desc_t *dir_bdp, bhv_desc_t *dir_bdp,
struct dentry *dentry, vname_t *dentry,
vnode_t **vpp, vnode_t **vpp,
int flags, int flags,
vnode_t *rdir, vnode_t *rdir,
cred_t *credp) cred_t *credp)
{ {
xfs_inode_t *dp, *ip; xfs_inode_t *dp, *ip;
struct vnode *vp; struct vnode *vp;
...@@ -1898,8 +1887,7 @@ xfs_lookup( ...@@ -1898,8 +1887,7 @@ xfs_lookup(
vnode_t *dir_vp; vnode_t *dir_vp;
dir_vp = BHV_TO_VNODE(dir_bdp); dir_vp = BHV_TO_VNODE(dir_bdp);
vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
vn_trace_entry(dir_vp, "xfs_lookup", (inst_t *)__return_address);
dp = XFS_BHVTOI(dir_bdp); dp = XFS_BHVTOI(dir_bdp);
...@@ -1963,13 +1951,13 @@ xfs_ctrunc_trace( ...@@ -1963,13 +1951,13 @@ xfs_ctrunc_trace(
*/ */
STATIC int STATIC int
xfs_create( xfs_create(
bhv_desc_t *dir_bdp, bhv_desc_t *dir_bdp,
struct dentry *dentry, vname_t *dentry,
vattr_t *vap, vattr_t *vap,
vnode_t **vpp, vnode_t **vpp,
cred_t *credp) cred_t *credp)
{ {
char *name = (char *)dentry->d_name.name; char *name = VNAME(dentry);
vnode_t *dir_vp; vnode_t *dir_vp;
xfs_inode_t *dp, *ip; xfs_inode_t *dp, *ip;
vnode_t *vp=NULL; vnode_t *vp=NULL;
...@@ -1984,19 +1972,19 @@ xfs_create( ...@@ -1984,19 +1972,19 @@ xfs_create(
uint cancel_flags; uint cancel_flags;
int committed; int committed;
xfs_prid_t prid; xfs_prid_t prid;
struct xfs_dquot *udqp, *gdqp; xfs_dquot_t *udqp, *gdqp;
uint resblks; uint resblks;
int dm_di_mode; int dm_di_mode;
int namelen; int namelen;
ASSERT(!*vpp); ASSERT(!*vpp);
dir_vp = BHV_TO_VNODE(dir_bdp); dir_vp = BHV_TO_VNODE(dir_bdp);
dp = XFS_BHVTOI(dir_bdp); vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
vn_trace_entry(dir_vp, "xfs_create", (inst_t *)__return_address); dp = XFS_BHVTOI(dir_bdp);
dm_di_mode = vap->va_mode|VTTOIF(vap->va_type); dm_di_mode = vap->va_mode|VTTOIF(vap->va_type);
namelen = dentry->d_name.len; namelen = VNAMELEN(dentry);
if (namelen >= MAXNAMELEN) if (namelen >= MAXNAMELEN)
return XFS_ERROR(ENAMETOOLONG); return XFS_ERROR(ENAMETOOLONG);
...@@ -2015,7 +2003,7 @@ xfs_create( ...@@ -2015,7 +2003,7 @@ xfs_create(
return XFS_ERROR(EIO); return XFS_ERROR(EIO);
udqp = gdqp = NULL; udqp = gdqp = NULL;
if (vap->va_mask & AT_PROJID) if (vap->va_mask & XFS_AT_PROJID)
prid = (xfs_prid_t)vap->va_projid; prid = (xfs_prid_t)vap->va_projid;
else else
prid = (xfs_prid_t)dfltprid; prid = (xfs_prid_t)dfltprid;
...@@ -2076,7 +2064,7 @@ xfs_create( ...@@ -2076,7 +2064,7 @@ xfs_create(
if (resblks == 0 && if (resblks == 0 &&
(error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen))) (error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen)))
goto error_return; goto error_return;
rdev = (vap->va_mask & AT_RDEV) ? vap->va_rdev : 0; rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0;
error = xfs_dir_ialloc(&tp, dp, error = xfs_dir_ialloc(&tp, dp,
MAKEIMODE(vap->va_type,vap->va_mode), 1, MAKEIMODE(vap->va_type,vap->va_mode), 1,
rdev, credp, prid, resblks > 0, rdev, credp, prid, resblks > 0,
...@@ -2220,7 +2208,6 @@ xfs_create( ...@@ -2220,7 +2208,6 @@ xfs_create(
} }
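In xfs_create and the other directory operations the entry argument changes from struct dentry to vname_t, and the open-coded d_name accesses become VNAME()/VNAMELEN(). The real definitions live in the XFS/vnode headers; the sketch below only guesses at the shape such thin wrappers take, and every identifier in it is hypothetical.

/*
 * Hypothetical sketch of the vname_t/VNAME/VNAMELEN shape; the real
 * definitions are in the XFS headers and may differ.
 */
#include <stdio.h>

struct demo_dentry {
	struct {
		const unsigned char	*name;
		unsigned int		len;
	} d_name;
};

typedef struct demo_dentry demo_vname_t;	/* stand-in for vname_t */

#define DEMO_VNAME(d)		((char *)(d)->d_name.name)
#define DEMO_VNAMELEN(d)	((d)->d_name.len)

int main(void)
{
	demo_vname_t dentry = { { (const unsigned char *)"newfile", 7 } };

	printf("%.*s (len %u)\n", (int)DEMO_VNAMELEN(&dentry),
	       DEMO_VNAME(&dentry), DEMO_VNAMELEN(&dentry));
	return 0;
}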
#ifdef DEBUG #ifdef DEBUG
/* /*
* Some counters to see if (and how often) we are hitting some deadlock * Some counters to see if (and how often) we are hitting some deadlock
* prevention code paths. * prevention code paths.
...@@ -2261,14 +2248,14 @@ int xfs_rm_attempts; ...@@ -2261,14 +2248,14 @@ int xfs_rm_attempts;
STATIC int STATIC int
xfs_lock_dir_and_entry( xfs_lock_dir_and_entry(
xfs_inode_t *dp, xfs_inode_t *dp,
struct dentry *dentry, vname_t *dentry,
xfs_inode_t *ip, /* inode of entry 'name' */ xfs_inode_t *ip, /* inode of entry 'name' */
int *entry_changed) int *entry_changed)
{ {
int attempts; int attempts;
xfs_ino_t e_inum; xfs_ino_t e_inum;
xfs_inode_t *ips[2]; xfs_inode_t *ips[2];
xfs_log_item_t *lp; xfs_log_item_t *lp;
#ifdef DEBUG #ifdef DEBUG
xfs_rm_locks++; xfs_rm_locks++;
...@@ -2357,12 +2344,13 @@ int xfs_lock_delays; ...@@ -2357,12 +2344,13 @@ int xfs_lock_delays;
* in the log. * in the log.
*/ */
void void
xfs_lock_inodes (xfs_inode_t **ips, xfs_lock_inodes(
int inodes, xfs_inode_t **ips,
int first_locked, int inodes,
uint lock_mode) int first_locked,
uint lock_mode)
{ {
int attempts = 0, i, j, try_lock; int attempts = 0, i, j, try_lock;
xfs_log_item_t *lp; xfs_log_item_t *lp;
ASSERT(ips && (inodes >= 2)); /* we need at least two */ ASSERT(ips && (inodes >= 2)); /* we need at least two */
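xfs_lock_inodes, whose prototype is reformatted above, takes the locks for every inode involved in an operation. A common way to keep that deadlock-free is to acquire the locks in one globally agreed order, for example by ascending inode number. The sketch below illustrates that discipline in user-space terms; it is not the XFS implementation.

/*
 * Sketch of lock ordering by inode number, not XFS code: every caller
 * locks in the same (ascending) order, so no two tasks can each hold
 * a lock the other is waiting for.
 */
#include <pthread.h>
#include <stdlib.h>

struct demo_inode {
	unsigned long long	ino;	/* ordering key */
	pthread_mutex_t		lock;
};

static int cmp_by_ino(const void *a, const void *b)
{
	const struct demo_inode *x = *(struct demo_inode *const *)a;
	const struct demo_inode *y = *(struct demo_inode *const *)b;

	return (x->ino > y->ino) - (x->ino < y->ino);
}

/* Lock every inode in the array, lowest inode number first. */
void demo_lock_inodes(struct demo_inode **ips, int n)
{
	int i;

	qsort(ips, n, sizeof(*ips), cmp_by_ino);
	for (i = 0; i < n; i++)
		pthread_mutex_lock(&ips[i]->lock);
}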
...@@ -2468,18 +2456,19 @@ int remove_which_error_return = 0; ...@@ -2468,18 +2456,19 @@ int remove_which_error_return = 0;
#define REMOVE_DEBUG_TRACE(x) #define REMOVE_DEBUG_TRACE(x)
#endif /* ! DEBUG */ #endif /* ! DEBUG */
/* /*
* xfs_remove * xfs_remove
* *
*/ */
STATIC int STATIC int
xfs_remove( xfs_remove(
bhv_desc_t *dir_bdp, bhv_desc_t *dir_bdp,
struct dentry *dentry, vname_t *dentry,
cred_t *credp) cred_t *credp)
{ {
vnode_t *dir_vp; vnode_t *dir_vp;
char *name = (char *) dentry->d_name.name; char *name = VNAME(dentry);
xfs_inode_t *dp, *ip; xfs_inode_t *dp, *ip;
xfs_trans_t *tp = NULL; xfs_trans_t *tp = NULL;
xfs_mount_t *mp; xfs_mount_t *mp;
...@@ -2493,11 +2482,9 @@ xfs_remove( ...@@ -2493,11 +2482,9 @@ xfs_remove(
int link_zero; int link_zero;
uint resblks; uint resblks;
int namelen; int namelen;
/* bhv_desc_t *bdp; */
dir_vp = BHV_TO_VNODE(dir_bdp); dir_vp = BHV_TO_VNODE(dir_bdp);
vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
vn_trace_entry(dir_vp, "xfs_remove", (inst_t *)__return_address);
dp = XFS_BHVTOI(dir_bdp); dp = XFS_BHVTOI(dir_bdp);
mp = dp->i_mount; mp = dp->i_mount;
...@@ -2505,7 +2492,7 @@ xfs_remove( ...@@ -2505,7 +2492,7 @@ xfs_remove(
if (XFS_FORCED_SHUTDOWN(mp)) if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO); return XFS_ERROR(EIO);
namelen = dentry->d_name.len; namelen = VNAMELEN(dentry);
if (namelen >= MAXNAMELEN) if (namelen >= MAXNAMELEN)
return XFS_ERROR(ENAMETOOLONG); return XFS_ERROR(ENAMETOOLONG);
if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) { if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) {
...@@ -2540,7 +2527,7 @@ xfs_remove( ...@@ -2540,7 +2527,7 @@ xfs_remove(
dm_di_mode = ip->i_d.di_mode; dm_di_mode = ip->i_d.di_mode;
vn_trace_entry(XFS_ITOV(ip), "xfs_remove", (inst_t *)__return_address); vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
ITRACE(ip); ITRACE(ip);
...@@ -2699,8 +2686,7 @@ xfs_remove( ...@@ -2699,8 +2686,7 @@ xfs_remove(
goto std_return; goto std_return;
} }
vn_trace_exit(XFS_ITOV(ip), "xfs_remove", vn_trace_exit(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
(inst_t *)__return_address);
/* /*
* Let interposed file systems know about removed links. * Let interposed file systems know about removed links.
...@@ -2752,10 +2738,10 @@ xfs_remove( ...@@ -2752,10 +2738,10 @@ xfs_remove(
*/ */
STATIC int STATIC int
xfs_link( xfs_link(
bhv_desc_t *target_dir_bdp, bhv_desc_t *target_dir_bdp,
vnode_t *src_vp, vnode_t *src_vp,
struct dentry *dentry, vname_t *dentry,
cred_t *credp) cred_t *credp)
{ {
xfs_inode_t *tdp, *sip; xfs_inode_t *tdp, *sip;
xfs_trans_t *tp; xfs_trans_t *tp;
...@@ -2769,18 +2755,18 @@ xfs_link( ...@@ -2769,18 +2755,18 @@ xfs_link(
vnode_t *target_dir_vp; vnode_t *target_dir_vp;
bhv_desc_t *src_bdp; bhv_desc_t *src_bdp;
int resblks; int resblks;
char *target_name = (char *)dentry->d_name.name; char *target_name = VNAME(dentry);
int target_namelen; int target_namelen;
target_dir_vp = BHV_TO_VNODE(target_dir_bdp); target_dir_vp = BHV_TO_VNODE(target_dir_bdp);
vn_trace_entry(target_dir_vp, __FUNCTION__, (inst_t *)__return_address);
vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address);
vn_trace_entry(target_dir_vp, "xfs_link", (inst_t *)__return_address); target_namelen = VNAMELEN(dentry);
target_namelen = dentry->d_name.len;
if (target_namelen >= MAXNAMELEN) if (target_namelen >= MAXNAMELEN)
return XFS_ERROR(ENAMETOOLONG); return XFS_ERROR(ENAMETOOLONG);
if (src_vp->v_type == VDIR)
vn_trace_entry(src_vp, "xfs_link", (inst_t *)__return_address); return XFS_ERROR(EPERM);
/* /*
* For now, manually find the XFS behavior descriptor for * For now, manually find the XFS behavior descriptor for
...@@ -2925,21 +2911,19 @@ xfs_link( ...@@ -2925,21 +2911,19 @@ xfs_link(
} }
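The new check in xfs_link rejects hard links whose source is a directory (v_type == VDIR) with EPERM, which is what link(2) then reports to user space. A tiny demonstration, assuming /tmp exists on the test machine:

/*
 * Demonstration of the user-visible effect: link(2) with a directory
 * as the source fails with EPERM.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	if (link("/tmp", "/tmp/dir-hardlink") == -1)
		perror("link");		/* typically: Operation not permitted */
	return 0;
}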
/* /*
* xfs_mkdir * xfs_mkdir
* *
*/ */
STATIC int STATIC int
xfs_mkdir( xfs_mkdir(
bhv_desc_t *dir_bdp, bhv_desc_t *dir_bdp,
struct dentry *dentry, vname_t *dentry,
vattr_t *vap, vattr_t *vap,
vnode_t **vpp, vnode_t **vpp,
cred_t *credp) cred_t *credp)
{ {
char *dir_name = (char *)dentry->d_name.name; char *dir_name = VNAME(dentry);
xfs_inode_t *dp; xfs_inode_t *dp;
xfs_inode_t *cdp; /* inode of created dir */ xfs_inode_t *cdp; /* inode of created dir */
vnode_t *cvp; /* vnode of created dir */ vnode_t *cvp; /* vnode of created dir */
...@@ -2956,7 +2940,7 @@ xfs_mkdir( ...@@ -2956,7 +2940,7 @@ xfs_mkdir(
boolean_t created = B_FALSE; boolean_t created = B_FALSE;
int dm_event_sent = 0; int dm_event_sent = 0;
xfs_prid_t prid; xfs_prid_t prid;
struct xfs_dquot *udqp, *gdqp; xfs_dquot_t *udqp, *gdqp;
uint resblks; uint resblks;
int dm_di_mode; int dm_di_mode;
int dir_namelen; int dir_namelen;
...@@ -2968,7 +2952,7 @@ xfs_mkdir( ...@@ -2968,7 +2952,7 @@ xfs_mkdir(
if (XFS_FORCED_SHUTDOWN(mp)) if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO); return XFS_ERROR(EIO);
dir_namelen = dentry->d_name.len; dir_namelen = VNAMELEN(dentry);
if (dir_namelen >= MAXNAMELEN) if (dir_namelen >= MAXNAMELEN)
return XFS_ERROR(ENAMETOOLONG); return XFS_ERROR(ENAMETOOLONG);
...@@ -2985,11 +2969,11 @@ xfs_mkdir( ...@@ -2985,11 +2969,11 @@ xfs_mkdir(
/* Return through std_return after this point. */ /* Return through std_return after this point. */
vn_trace_entry(dir_vp, "xfs_mkdir", (inst_t *)__return_address); vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
mp = dp->i_mount; mp = dp->i_mount;
udqp = gdqp = NULL; udqp = gdqp = NULL;
if (vap->va_mask & AT_PROJID) if (vap->va_mask & XFS_AT_PROJID)
prid = (xfs_prid_t)vap->va_projid; prid = (xfs_prid_t)vap->va_projid;
else else
prid = (xfs_prid_t)dfltprid; prid = (xfs_prid_t)dfltprid;
...@@ -3049,7 +3033,7 @@ xfs_mkdir( ...@@ -3049,7 +3033,7 @@ xfs_mkdir(
/* /*
* create the directory inode. * create the directory inode.
*/ */
rdev = (vap->va_mask & AT_RDEV) ? vap->va_rdev : 0; rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0;
error = xfs_dir_ialloc(&tp, dp, error = xfs_dir_ialloc(&tp, dp,
MAKEIMODE(vap->va_type,vap->va_mode), 2, MAKEIMODE(vap->va_type,vap->va_mode), 2,
rdev, credp, prid, resblks > 0, rdev, credp, prid, resblks > 0,
...@@ -3182,16 +3166,15 @@ xfs_mkdir( ...@@ -3182,16 +3166,15 @@ xfs_mkdir(
*/ */
STATIC int STATIC int
xfs_rmdir( xfs_rmdir(
bhv_desc_t *dir_bdp, bhv_desc_t *dir_bdp,
struct dentry *dentry, vname_t *dentry,
cred_t *credp) cred_t *credp)
{ {
char *name = (char *)dentry->d_name.name; char *name = VNAME(dentry);
xfs_inode_t *dp; xfs_inode_t *dp;
xfs_inode_t *cdp; /* child directory */ xfs_inode_t *cdp; /* child directory */
xfs_trans_t *tp; xfs_trans_t *tp;
xfs_mount_t *mp; xfs_mount_t *mp;
/* bhv_desc_t *bdp;*/
int error; int error;
xfs_bmap_free_t free_list; xfs_bmap_free_t free_list;
xfs_fsblock_t first_block; xfs_fsblock_t first_block;
...@@ -3207,11 +3190,11 @@ xfs_rmdir( ...@@ -3207,11 +3190,11 @@ xfs_rmdir(
dir_vp = BHV_TO_VNODE(dir_bdp); dir_vp = BHV_TO_VNODE(dir_bdp);
dp = XFS_BHVTOI(dir_bdp); dp = XFS_BHVTOI(dir_bdp);
vn_trace_entry(dir_vp, "xfs_rmdir", (inst_t *)__return_address); vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
if (XFS_FORCED_SHUTDOWN(XFS_BHVTOI(dir_bdp)->i_mount)) if (XFS_FORCED_SHUTDOWN(XFS_BHVTOI(dir_bdp)->i_mount))
return XFS_ERROR(EIO); return XFS_ERROR(EIO);
namelen = dentry->d_name.len; namelen = VNAMELEN(dentry);
if (namelen >= MAXNAMELEN) if (namelen >= MAXNAMELEN)
return XFS_ERROR(ENAMETOOLONG); return XFS_ERROR(ENAMETOOLONG);
...@@ -3444,14 +3427,12 @@ xfs_rmdir( ...@@ -3444,14 +3427,12 @@ xfs_rmdir(
} }
/* /*
* xfs_readdir * xfs_readdir
* *
* Read dp's entries starting at uiop->uio_offset and translate them into * Read dp's entries starting at uiop->uio_offset and translate them into
* bufsize bytes worth of struct dirents starting at bufbase. * bufsize bytes worth of struct dirents starting at bufbase.
*/ */
/*ARGSUSED*/
STATIC int STATIC int
xfs_readdir( xfs_readdir(
bhv_desc_t *dir_bdp, bhv_desc_t *dir_bdp,
...@@ -3459,13 +3440,13 @@ xfs_readdir( ...@@ -3459,13 +3440,13 @@ xfs_readdir(
cred_t *credp, cred_t *credp,
int *eofp) int *eofp)
{ {
xfs_inode_t *dp; xfs_inode_t *dp;
xfs_trans_t *tp = NULL; xfs_trans_t *tp = NULL;
int error = 0; int error = 0;
uint lock_mode; uint lock_mode;
xfs_off_t start_offset; xfs_off_t start_offset;
vn_trace_entry(BHV_TO_VNODE(dir_bdp), "xfs_readdir", vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__,
(inst_t *)__return_address); (inst_t *)__return_address);
dp = XFS_BHVTOI(dir_bdp); dp = XFS_BHVTOI(dir_bdp);
...@@ -3491,18 +3472,19 @@ xfs_readdir( ...@@ -3491,18 +3472,19 @@ xfs_readdir(
return error; return error;
} }
/* /*
* xfs_symlink * xfs_symlink
* *
*/ */
STATIC int STATIC int
xfs_symlink( xfs_symlink(
bhv_desc_t *dir_bdp, bhv_desc_t *dir_bdp,
struct dentry *dentry, vname_t *dentry,
vattr_t *vap, vattr_t *vap,
char *target_path, char *target_path,
vnode_t **vpp, vnode_t **vpp,
cred_t *credp) cred_t *credp)
{ {
xfs_trans_t *tp; xfs_trans_t *tp;
xfs_mount_t *mp; xfs_mount_t *mp;
...@@ -3527,9 +3509,9 @@ xfs_symlink( ...@@ -3527,9 +3509,9 @@ xfs_symlink(
int n; int n;
xfs_buf_t *bp; xfs_buf_t *bp;
xfs_prid_t prid; xfs_prid_t prid;
struct xfs_dquot *udqp, *gdqp; xfs_dquot_t *udqp, *gdqp;
uint resblks; uint resblks;
char *link_name = (char *)dentry->d_name.name; char *link_name = VNAME(dentry);
int link_namelen; int link_namelen;
*vpp = NULL; *vpp = NULL;
...@@ -3540,14 +3522,14 @@ xfs_symlink( ...@@ -3540,14 +3522,14 @@ xfs_symlink(
ip = NULL; ip = NULL;
tp = NULL; tp = NULL;
vn_trace_entry(dir_vp, "xfs_symlink", (inst_t *)__return_address); vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
mp = dp->i_mount; mp = dp->i_mount;
if (XFS_FORCED_SHUTDOWN(mp)) if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO); return XFS_ERROR(EIO);
link_namelen = dentry->d_name.len; link_namelen = VNAMELEN(dentry);
if (link_namelen >= MAXNAMELEN) if (link_namelen >= MAXNAMELEN)
return XFS_ERROR(ENAMETOOLONG); return XFS_ERROR(ENAMETOOLONG);
/* /*
...@@ -3594,7 +3576,7 @@ xfs_symlink( ...@@ -3594,7 +3576,7 @@ xfs_symlink(
/* Return through std_return after this point. */ /* Return through std_return after this point. */
udqp = gdqp = NULL; udqp = gdqp = NULL;
if (vap->va_mask & AT_PROJID) if (vap->va_mask & XFS_AT_PROJID)
prid = (xfs_prid_t)vap->va_projid; prid = (xfs_prid_t)vap->va_projid;
else else
prid = (xfs_prid_t)dfltprid; prid = (xfs_prid_t)dfltprid;
...@@ -3662,7 +3644,7 @@ xfs_symlink( ...@@ -3662,7 +3644,7 @@ xfs_symlink(
/* /*
* Allocate an inode for the symlink. * Allocate an inode for the symlink.
*/ */
rdev = (vap->va_mask & AT_RDEV) ? vap->va_rdev : 0; rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0;
error = xfs_dir_ialloc(&tp, dp, IFLNK | (vap->va_mode&~IFMT), error = xfs_dir_ialloc(&tp, dp, IFLNK | (vap->va_mode&~IFMT),
1, rdev, credp, prid, resblks > 0, &ip, NULL); 1, rdev, credp, prid, resblks > 0, &ip, NULL);
...@@ -3839,8 +3821,8 @@ xfs_fid2( ...@@ -3839,8 +3821,8 @@ xfs_fid2(
xfs_inode_t *ip; xfs_inode_t *ip;
xfs_fid2_t *xfid; xfs_fid2_t *xfid;
vn_trace_entry(BHV_TO_VNODE(bdp), "xfs_fid2", vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__,
(inst_t *)__return_address); (inst_t *)__return_address);
ASSERT(sizeof(fid_t) >= sizeof(xfs_fid2_t)); ASSERT(sizeof(fid_t) >= sizeof(xfs_fid2_t));
xfid = (xfs_fid2_t *)fidp; xfid = (xfs_fid2_t *)fidp;
...@@ -3915,8 +3897,9 @@ xfs_rwunlock( ...@@ -3915,8 +3897,9 @@ xfs_rwunlock(
} }
STATIC int STATIC int
xfs_inode_flush(bhv_desc_t *bdp, xfs_inode_flush(
int flags) bhv_desc_t *bdp,
int flags)
{ {
xfs_inode_t *ip; xfs_inode_t *ip;
xfs_dinode_t *dip; xfs_dinode_t *dip;
...@@ -4061,7 +4044,7 @@ xfs_reclaim( ...@@ -4061,7 +4044,7 @@ xfs_reclaim(
vp = BHV_TO_VNODE(bdp); vp = BHV_TO_VNODE(bdp);
vn_trace_entry(vp, "xfs_reclaim", (inst_t *)__return_address); vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
ASSERT(!VN_MAPPED(vp)); ASSERT(!VN_MAPPED(vp));
ip = XFS_BHVTOI(bdp); ip = XFS_BHVTOI(bdp);
...@@ -4249,8 +4232,7 @@ xfs_alloc_file_space( ...@@ -4249,8 +4232,7 @@ xfs_alloc_file_space(
xfs_trans_t *tp; xfs_trans_t *tp;
int xfs_bmapi_flags; int xfs_bmapi_flags;
vn_trace_entry(XFS_ITOV(ip), "xfs_alloc_file_space", vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
(inst_t *)__return_address);
mp = ip->i_mount; mp = ip->i_mount;
if (XFS_FORCED_SHUTDOWN(mp)) if (XFS_FORCED_SHUTDOWN(mp))
...@@ -4408,7 +4390,7 @@ xfs_alloc_file_space( ...@@ -4408,7 +4390,7 @@ xfs_alloc_file_space(
NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */ NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */
if (error == 0) if (error == 0)
goto retry; /* Maybe DMAPI app. has made space */ goto retry; /* Maybe DMAPI app. has made space */
/* else fall through with error = xfs_dm_send_data_event result. */ /* else fall through with error from xfs_dm_send_data_event */
} }
return error; return error;
...@@ -4532,8 +4514,7 @@ xfs_free_file_space( ...@@ -4532,8 +4514,7 @@ xfs_free_file_space(
xfs_fileoff_t startoffset_fsb; xfs_fileoff_t startoffset_fsb;
xfs_trans_t *tp; xfs_trans_t *tp;
vn_trace_entry(XFS_ITOV(ip), "xfs_free_file_space", vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
(inst_t *)__return_address);
mp = ip->i_mount; mp = ip->i_mount;
if (XFS_IS_QUOTA_ON(mp)) { if (XFS_IS_QUOTA_ON(mp)) {
...@@ -4736,9 +4717,8 @@ xfs_change_file_space( ...@@ -4736,9 +4717,8 @@ xfs_change_file_space(
vnode_t *vp; vnode_t *vp;
vp = BHV_TO_VNODE(bdp); vp = BHV_TO_VNODE(bdp);
vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
vn_trace_entry(vp, "xfs_change_file_space",
(inst_t *)__return_address);
ip = XFS_BHVTOI(bdp); ip = XFS_BHVTOI(bdp);
mp = ip->i_mount; mp = ip->i_mount;
...@@ -4824,7 +4804,7 @@ xfs_change_file_space( ...@@ -4824,7 +4804,7 @@ xfs_change_file_space(
break; break;
} }
va.va_mask = AT_SIZE; va.va_mask = XFS_AT_SIZE;
va.va_size = startoffset; va.va_size = startoffset;
error = xfs_setattr(bdp, &va, attr_flags, credp); error = xfs_setattr(bdp, &va, attr_flags, credp);
...@@ -4887,6 +4867,7 @@ xfs_change_file_space( ...@@ -4887,6 +4867,7 @@ xfs_change_file_space(
} }
vnodeops_t xfs_vnodeops = { vnodeops_t xfs_vnodeops = {
BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS),
.vop_open = xfs_open, .vop_open = xfs_open,
.vop_read = xfs_read, .vop_read = xfs_read,
.vop_write = xfs_write, .vop_write = xfs_write,
......
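The xfs_vnodeops table at the end of the diff appears to mix one positional initializer, BHV_IDENTITY_INIT(...), with C99 designated .vop_* initializers, so any operation left unlisted stays NULL. A compact, hypothetical sketch of that initializer style; the real vnodeops_t layout is in the XFS headers:

/*
 * Hypothetical sketch of the initializer style only.
 */
struct demo_ops {
	int	id;			/* plays the role of the BHV identity */
	int	(*open)(void);
	int	(*read)(void);
	int	(*write)(void);
};

static int demo_open(void) { return 0; }
static int demo_read(void) { return 0; }

static struct demo_ops demo_vnodeops = {
	42,				/* positional, like BHV_IDENTITY_INIT(...) */
	.open = demo_open,
	.read = demo_read,		/* .write is left NULL */
};

int main(void)
{
	return demo_vnodeops.open() + demo_vnodeops.read();
}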