Commit 1ee983bd authored by Nathan Scott

Merge sgi.com:/source2/linux-2.6 into sgi.com:/source2/xfs-linux-2.6

parents 4547e81c eb88dec6
......@@ -34,13 +34,29 @@ EXTRA_CFLAGS += -Ifs/xfs -funsigned-char
ifeq ($(CONFIG_XFS_DEBUG),y)
EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG -DXFSDEBUG
EXTRA_CFLAGS += -DPAGEBUF_LOCK_TRACKING
endif
ifeq ($(CONFIG_PAGEBUF_DEBUG),y)
ifeq ($(CONFIG_XFS_TRACE),y)
EXTRA_CFLAGS += -DXFS_ALLOC_TRACE
EXTRA_CFLAGS += -DXFS_ATTR_TRACE
EXTRA_CFLAGS += -DXFS_BLI_TRACE
EXTRA_CFLAGS += -DXFS_BMAP_TRACE
EXTRA_CFLAGS += -DXFS_BMBT_TRACE
EXTRA_CFLAGS += -DXFS_DIR_TRACE
EXTRA_CFLAGS += -DXFS_DIR2_TRACE
EXTRA_CFLAGS += -DXFS_DQUOT_TRACE
EXTRA_CFLAGS += -DXFS_ILOCK_TRACE
EXTRA_CFLAGS += -DXFS_LOG_TRACE
EXTRA_CFLAGS += -DXFS_RW_TRACE
EXTRA_CFLAGS += -DPAGEBUF_TRACE
# EXTRA_CFLAGS += -DXFS_VNODE_TRACE
endif
obj-$(CONFIG_XFS_FS) += xfs.o
ifneq ($(CONFIG_XFS_DMAPI),y)
xfs-y += xfs_dmops.o
endif
xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
xfs_dquot.o \
......@@ -49,9 +65,10 @@ xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
xfs_qm_syscalls.o \
xfs_qm_bhv.o \
xfs_qm.o)
ifeq ($(CONFIG_XFS_QUOTA),y)
xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o
else
xfs-y += xfs_qmops.o
endif
xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
......@@ -79,7 +96,6 @@ xfs-y += xfs_alloc.o \
xfs_dir2_leaf.o \
xfs_dir2_node.o \
xfs_dir2_sf.o \
xfs_dir2_trace.o \
xfs_dir_leaf.o \
xfs_error.o \
xfs_extfree_item.o \
......@@ -108,10 +124,10 @@ xfs-y += xfs_alloc.o \
xfs_vnodeops.o \
xfs_rw.o
xfs-$(CONFIG_XFS_TRACE) += xfs_dir2_trace.o
# Objects in pagebuf/
xfs-y += $(addprefix pagebuf/, \
page_buf.o \
page_buf_locking.o)
xfs-y += pagebuf/page_buf.o
# Objects in linux/
xfs-y += $(addprefix linux/, \
......@@ -131,27 +147,10 @@ xfs-y += $(addprefix linux/, \
# Objects in support/
xfs-y += $(addprefix support/, \
debug.o \
ktrace.o \
move.o \
mrlock.o \
qsort.o \
uuid.o)
# Quota and DMAPI stubs
xfs-y += xfs_dmops.o \
xfs_qmops.o
# If both xfs and kdb modules are built in then xfsidbg is built in. If xfs is
# a module and kdb modules are being compiled then xfsidbg must be a module, to
# follow xfs. If xfs is built in then xfsidbg tracks the kdb module state.
# This must come after the main xfs code so xfs initialises before xfsidbg.
# KAO
ifneq ($(CONFIG_KDB_MODULES),)
ifeq ($(CONFIG_XFS_FS),y)
obj-$(CONFIG_KDB_MODULES) += xfsidbg.o
else
obj-$(CONFIG_XFS_FS) += xfsidbg.o
endif
endif
xfs-$(CONFIG_XFS_TRACE) += support/ktrace.o
CFLAGS_xfsidbg.o += -Iarch/$(ARCH)/kdb
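For orientation, the per-subsystem -DXFS_*_TRACE switches consolidated under CONFIG_XFS_TRACE above are consumed the same way the XFS_RW_TRACE code later in this diff consumes them: allocate a ktrace buffer, record fixed-size entries, and compile the hooks away when the flag is absent. A minimal sketch, assuming the ktrace API shown in support/ktrace.c; the buffer size, names, and include path here are illustrative only.

#include "ktrace.h"		/* ktrace_alloc/ktrace_enter/ktrace_free (path assumed) */

#ifdef XFS_RW_TRACE
static ktrace_t *example_trace;			/* hypothetical buffer */

static void example_trace_init(void)
{
	example_trace = ktrace_alloc(64, KM_SLEEP);
}

static void example_trace_point(int tag, void *obj)
{
	if (example_trace == NULL)
		return;
	/* ktrace_enter() always records sixteen opaque values */
	ktrace_enter(example_trace,
		     (void *)(unsigned long)tag, obj,
		     NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		     NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}

static void example_trace_exit(void)
{
	ktrace_free(example_trace);
}
#else
#define example_trace_point(tag, obj)	do { } while (0)
#endif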
......@@ -36,9 +36,57 @@
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_buf.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_imap.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bit.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_da_btree.h"
#include "xfs_dir_leaf.h"
#include "xfs_dir2_data.h"
#include "xfs_dir2_leaf.h"
#include "xfs_dir2_block.h"
#include "xfs_dir2_node.h"
#include "xfs_dir2_trace.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_extfree_item.h"
#include "xfs_log_priv.h"
#include "xfs_trans_priv.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
/*
* System memory size - used to scale certain data structures in XFS.
......@@ -69,10 +117,34 @@ xfs_param_t xfs_params = {
*/
cred_t sys_cred_val, *sys_cred = &sys_cred_val;
/* Export XFS symbols used by xfsidbg */
/*
* Export symbols used for XFS debugging
*/
EXPORT_SYMBOL(xfs_next_bit);
EXPORT_SYMBOL(xfs_contig_bits);
EXPORT_SYMBOL(xfs_bmbt_get_all);
#if ARCH_CONVERT != ARCH_NOCONVERT
EXPORT_SYMBOL(xfs_bmbt_disk_get_all);
#endif
/*
* Export symbols used for XFS tracing
*/
#ifdef XFS_ALLOC_TRACE
EXPORT_SYMBOL(xfs_alloc_trace_buf);
#endif
#ifdef XFS_BMAP_TRACE
EXPORT_SYMBOL(xfs_bmap_trace_buf);
#endif
#ifdef XFS_BMBT_TRACE
EXPORT_SYMBOL(xfs_bmbt_trace_buf);
#endif
#ifdef XFS_ATTR_TRACE
EXPORT_SYMBOL(xfs_attr_trace_buf);
#endif
#ifdef XFS_DIR2_TRACE
EXPORT_SYMBOL(xfs_dir2_trace_buf);
#endif
#ifdef XFS_DIR_TRACE
EXPORT_SYMBOL(xfs_dir_trace_buf);
#endif
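The trace buffers exported here are what a debugger module (xfsidbg, per the replaced comment) walks. A hypothetical consumer sketch, assuming ktrace_next() takes the same (buffer, snapshot) pair as ktrace_first(), whose signature appears near the end of this diff; the printk format is illustrative.

#ifdef XFS_ALLOC_TRACE
extern ktrace_t *xfs_alloc_trace_buf;		/* exported above */

static void example_dump_alloc_trace(void)
{
	ktrace_snap_t	snap;
	ktrace_entry_t	*ktep;

	for (ktep = ktrace_first(xfs_alloc_trace_buf, &snap);
	     ktep != NULL;
	     ktep = ktrace_next(xfs_alloc_trace_buf, &snap))
		printk("alloc trace entry at %p\n", ktep);
}
#endif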
......@@ -943,7 +943,7 @@ xfs_ioc_bulkstat(
bulkreq.ubuffer, &done);
} else {
error = xfs_bulkstat(mp, NULL, &inlast, &count,
(bulkstat_one_pf)xfs_bulkstat_one,
(bulkstat_one_pf)xfs_bulkstat_one, NULL,
sizeof(xfs_bstat_t), bulkreq.ubuffer,
BULKSTAT_FG_QUICK, &done);
}
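The extra NULL slipped into the xfs_bulkstat() call above is a new private_data argument, and the formatter callback grows matching parameters; the quota callbacks converted later in this diff show the shape. A hedged sketch of a no-op formatter follows — any leading mount argument hidden above those hunks' context is omitted, and the parameter comments are interpretive.

STATIC int
example_bulkstat_one(
	xfs_trans_t	*tp,		/* transaction pointer, may be NULL */
	xfs_ino_t	ino,		/* inode number to report */
	void		*buffer,	/* caller's output buffer */
	int		ubsize,		/* size of that buffer */
	void		*private_data,	/* new: per-call cookie (the NULL above) */
	xfs_daddr_t	bno,		/* starting block of inode cluster */
	int		*ubused,	/* bytes of buffer consumed */
	void		*dip,		/* on-disk inode, when available */
	int		*res)		/* per-inode result code */
{
	*res = 0;			/* report nothing for this inode */
	return 0;
}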
......
......@@ -67,6 +67,7 @@
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
<< mp->m_writeio_log)
......@@ -74,14 +75,14 @@
#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
STATIC int
_xfs_imap_to_bmap(
xfs_imap_to_bmap(
xfs_iocore_t *io,
xfs_off_t offset,
int new,
xfs_bmbt_irec_t *imap,
page_buf_bmap_t *pbmapp,
xfs_iomap_t *iomapp,
int imaps, /* Number of imap entries */
int pbmaps) /* Number of pbmap entries */
int iomaps, /* Number of iomap entries */
int flags)
{
xfs_mount_t *mp;
xfs_fsize_t nisize;
......@@ -93,35 +94,32 @@ _xfs_imap_to_bmap(
if (io->io_new_size > nisize)
nisize = io->io_new_size;
for (pbm = 0; imaps && pbm < pbmaps; imaps--, pbmapp++, imap++, pbm++) {
pbmapp->pbm_target = io->io_flags & XFS_IOCORE_RT ?
for (pbm = 0; imaps && pbm < iomaps; imaps--, iomapp++, imap++, pbm++) {
iomapp->iomap_target = io->io_flags & XFS_IOCORE_RT ?
mp->m_rtdev_targp : mp->m_ddev_targp;
pbmapp->pbm_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
pbmapp->pbm_delta = offset - pbmapp->pbm_offset;
pbmapp->pbm_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
pbmapp->pbm_flags = 0;
iomapp->iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
iomapp->iomap_delta = offset - iomapp->iomap_offset;
iomapp->iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount);
iomapp->iomap_flags = flags;
start_block = imap->br_startblock;
if (start_block == HOLESTARTBLOCK) {
pbmapp->pbm_bn = PAGE_BUF_DADDR_NULL;
pbmapp->pbm_flags = PBMF_HOLE;
iomapp->iomap_bn = IOMAP_DADDR_NULL;
iomapp->iomap_flags = IOMAP_HOLE;
} else if (start_block == DELAYSTARTBLOCK) {
pbmapp->pbm_bn = PAGE_BUF_DADDR_NULL;
pbmapp->pbm_flags = PBMF_DELAY;
iomapp->iomap_bn = IOMAP_DADDR_NULL;
iomapp->iomap_flags = IOMAP_DELAY;
} else {
pbmapp->pbm_bn = XFS_FSB_TO_DB_IO(io, start_block);
iomapp->iomap_bn = XFS_FSB_TO_DB_IO(io, start_block);
if (ISUNWRITTEN(imap))
pbmapp->pbm_flags |= PBMF_UNWRITTEN;
iomapp->iomap_flags |= IOMAP_UNWRITTEN;
}
if ((pbmapp->pbm_offset + pbmapp->pbm_bsize) >= nisize) {
pbmapp->pbm_flags |= PBMF_EOF;
if ((iomapp->iomap_offset + iomapp->iomap_bsize) >= nisize) {
iomapp->iomap_flags |= IOMAP_EOF;
}
if (new)
pbmapp->pbm_flags |= PBMF_NEW;
offset += pbmapp->pbm_bsize - pbmapp->pbm_delta;
offset += iomapp->iomap_bsize - iomapp->iomap_delta;
}
return pbm; /* Return the number filled */
}
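The xfs_iomap_t filled in here replaces page_buf_bmap_t (removed from page_buf.h later in this diff). Its actual declaration lives in xfs_iomap.h, which is not among the visible hunks; the sketch below is inferred from the field accesses above, with types carried over from the old structure as an assumption.

typedef struct example_xfs_iomap {
	page_buf_daddr_t iomap_bn;	/* disk block, or IOMAP_DADDR_NULL */
	pb_target_t	*iomap_target;	/* data or realtime device */
	loff_t		iomap_offset;	/* byte offset of mapping in file */
	size_t		iomap_delta;	/* offset of request into mapping */
	size_t		iomap_bsize;	/* size of this mapping in bytes */
	int		iomap_flags;	/* IOMAP_HOLE/DELAY/UNWRITTEN/EOF/NEW */
} example_xfs_iomap_t;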
......@@ -132,54 +130,54 @@ xfs_iomap(
xfs_off_t offset,
ssize_t count,
int flags,
page_buf_bmap_t *pbmapp,
int *npbmaps)
xfs_iomap_t *iomapp,
int *niomaps)
{
xfs_mount_t *mp = io->io_mount;
xfs_fileoff_t offset_fsb, end_fsb;
int error = 0;
int new = 0;
int lockmode = 0;
xfs_bmbt_irec_t imap;
int nimaps = 1;
int bmap_flags = 0;
int bmapi_flags = 0;
int iomap_flags = 0;
if (XFS_FORCED_SHUTDOWN(mp))
return -XFS_ERROR(EIO);
return XFS_ERROR(EIO);
switch (flags &
(BMAP_READ | BMAP_WRITE | BMAP_ALLOCATE |
BMAP_UNWRITTEN | BMAP_DEVICE)) {
case BMAP_READ:
(BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE |
BMAPI_UNWRITTEN | BMAPI_DEVICE)) {
case BMAPI_READ:
lockmode = XFS_LCK_MAP_SHARED(mp, io);
bmap_flags = XFS_BMAPI_ENTIRE;
if (flags & BMAP_IGNSTATE)
bmap_flags |= XFS_BMAPI_IGSTATE;
bmapi_flags = XFS_BMAPI_ENTIRE;
if (flags & BMAPI_IGNSTATE)
bmapi_flags |= XFS_BMAPI_IGSTATE;
break;
case BMAP_WRITE:
case BMAPI_WRITE:
lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
bmap_flags = 0;
bmapi_flags = 0;
XFS_ILOCK(mp, io, lockmode);
break;
case BMAP_ALLOCATE:
case BMAPI_ALLOCATE:
lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD;
bmap_flags = XFS_BMAPI_ENTIRE;
bmapi_flags = XFS_BMAPI_ENTIRE;
/* Attempt non-blocking lock */
if (flags & BMAP_TRYLOCK) {
if (flags & BMAPI_TRYLOCK) {
if (!XFS_ILOCK_NOWAIT(mp, io, lockmode))
return XFS_ERROR(EAGAIN);
} else {
XFS_ILOCK(mp, io, lockmode);
}
break;
case BMAP_UNWRITTEN:
case BMAPI_UNWRITTEN:
goto phase2;
case BMAP_DEVICE:
case BMAPI_DEVICE:
lockmode = XFS_LCK_MAP_SHARED(mp, io);
pbmapp->pbm_target = io->io_flags & XFS_IOCORE_RT ?
iomapp->iomap_target = io->io_flags & XFS_IOCORE_RT ?
mp->m_rtdev_targp : mp->m_ddev_targp;
error = 0;
*npbmaps = 1;
*niomaps = 1;
goto out;
default:
BUG();
......@@ -192,30 +190,30 @@ xfs_iomap(
offset_fsb = XFS_B_TO_FSBT(mp, offset);
error = XFS_BMAPI(mp, NULL, io, offset_fsb,
(xfs_filblks_t)(end_fsb - offset_fsb) ,
bmap_flags, NULL, 0, &imap,
(xfs_filblks_t)(end_fsb - offset_fsb),
bmapi_flags, NULL, 0, &imap,
&nimaps, NULL);
if (error)
goto out;
phase2:
switch (flags & (BMAP_WRITE|BMAP_ALLOCATE|BMAP_UNWRITTEN)) {
case BMAP_WRITE:
switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE|BMAPI_UNWRITTEN)) {
case BMAPI_WRITE:
/* If we found an extent, return it */
if (nimaps && (imap.br_startblock != HOLESTARTBLOCK))
break;
if (flags & (BMAP_DIRECT|BMAP_MMAP)) {
if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) {
error = XFS_IOMAP_WRITE_DIRECT(mp, io, offset,
count, flags, &imap, &nimaps, nimaps);
} else {
error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count,
flags, &imap, &nimaps);
}
new = 1;
iomap_flags = IOMAP_NEW;
break;
case BMAP_ALLOCATE:
case BMAPI_ALLOCATE:
/* If we found an extent, return it */
XFS_IUNLOCK(mp, io, lockmode);
lockmode = 0;
......@@ -225,7 +223,7 @@ xfs_iomap(
error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, &imap, &nimaps);
break;
case BMAP_UNWRITTEN:
case BMAPI_UNWRITTEN:
lockmode = 0;
error = XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count);
nimaps = 0;
......@@ -233,10 +231,10 @@ xfs_iomap(
}
if (nimaps) {
*npbmaps = _xfs_imap_to_bmap(io, offset, new, &imap,
pbmapp, nimaps, *npbmaps);
} else if (npbmaps) {
*npbmaps = 0;
*niomaps = xfs_imap_to_bmap(io, offset, &imap,
iomapp, nimaps, *niomaps, iomap_flags);
} else if (niomaps) {
*niomaps = 0;
}
out:
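For context, a minimal caller-side sketch of the renamed interface (an assumption, not code from this commit): mapping a range for read passes BMAPI_READ and receives xfs_iomap_t results back through the count pointer.

STATIC int
example_map_for_read(
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	ssize_t		count,
	xfs_iomap_t	*iomap)
{
	int	niomaps = 1;

	return xfs_iomap(io, offset, count, BMAPI_READ, iomap, &niomaps);
}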
......@@ -251,29 +249,25 @@ xfs_flush_space(
int *fsynced,
int *ioflags)
{
vnode_t *vp = XFS_ITOV(ip);
switch (*fsynced) {
case 0:
if (ip->i_delayed_blks) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
filemap_fdatawrite(LINVFS_GET_IP(vp)->i_mapping);
xfs_flush_inode(ip);
xfs_ilock(ip, XFS_ILOCK_EXCL);
*fsynced = 1;
} else {
*ioflags |= BMAP_SYNC;
*ioflags |= BMAPI_SYNC;
*fsynced = 2;
}
return 0;
case 1:
*fsynced = 2;
*ioflags |= BMAP_SYNC;
*ioflags |= BMAPI_SYNC;
return 0;
case 2:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
sync_blockdev(vp->v_vfsp->vfs_super->s_bdev);
xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
XFS_LOG_FORCE|XFS_LOG_SYNC);
xfs_flush_device(ip);
xfs_ilock(ip, XFS_ILOCK_EXCL);
*fsynced = 3;
return 0;
......@@ -400,7 +394,7 @@ xfs_iomap_write_direct(
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
if (!(flags & BMAP_MMAP) && (offset < ip->i_d.di_size || rt))
if (!(flags & BMAPI_MMAP) && (offset < ip->i_d.di_size || rt))
bmapi_flag |= XFS_BMAPI_PREALLOC;
/*
......@@ -499,7 +493,7 @@ xfs_iomap_write_delay(
* We don't bother with this for sync writes, because we need
* to minimize the amount we write for good performance.
*/
if (!(ioflag & BMAP_SYNC) && ((offset + count) > ip->i_d.di_size)) {
if (!(ioflag & BMAPI_SYNC) && ((offset + count) > ip->i_d.di_size)) {
xfs_off_t aligned_offset;
unsigned int iosize;
xfs_fileoff_t ioalign;
......
......@@ -110,7 +110,7 @@ linvfs_mknod(
vattr_t va;
vnode_t *vp = NULL, *dvp = LINVFS_GET_VP(dir);
xfs_acl_t *default_acl = NULL;
xattr_exists_t test_default_acl = _ACL_DEFAULT_EXISTS;
attrexists_t test_default_acl = _ACL_DEFAULT_EXISTS;
int error;
/*
......@@ -552,61 +552,6 @@ linvfs_truncate(
block_truncate_page(inode->i_mapping, inode->i_size, linvfs_get_block);
}
/*
* Extended attributes interfaces
*/
#define SYSTEM_NAME "system." /* VFS shared names/values */
#define ROOT_NAME "trusted." /* root's own names/values */
#define USER_NAME "user." /* user's own names/values */
STATIC xattr_namespace_t xfs_namespace_array[] = {
{ .name= SYSTEM_NAME, .namelen= sizeof(SYSTEM_NAME)-1,.exists= NULL },
{ .name= ROOT_NAME, .namelen= sizeof(ROOT_NAME)-1, .exists= NULL },
{ .name= USER_NAME, .namelen= sizeof(USER_NAME)-1, .exists= NULL },
{ .name= NULL }
};
xattr_namespace_t *xfs_namespaces = &xfs_namespace_array[0];
#define POSIXACL_ACCESS "posix_acl_access"
#define POSIXACL_ACCESS_SIZE (sizeof(POSIXACL_ACCESS)-1)
#define POSIXACL_DEFAULT "posix_acl_default"
#define POSIXACL_DEFAULT_SIZE (sizeof(POSIXACL_DEFAULT)-1)
#define POSIXCAP "posix_capabilities"
#define POSIXCAP_SIZE (sizeof(POSIXCAP)-1)
#define POSIXMAC "posix_mac"
#define POSIXMAC_SIZE (sizeof(POSIXMAC)-1)
STATIC xattr_namespace_t sys_namespace_array[] = {
{ .name= POSIXACL_ACCESS,
.namelen= POSIXACL_ACCESS_SIZE, .exists= _ACL_ACCESS_EXISTS },
{ .name= POSIXACL_DEFAULT,
.namelen= POSIXACL_DEFAULT_SIZE, .exists= _ACL_DEFAULT_EXISTS },
{ .name= POSIXCAP,
.namelen= POSIXCAP_SIZE, .exists= _CAP_EXISTS },
{ .name= POSIXMAC,
.namelen= POSIXMAC_SIZE, .exists= _MAC_EXISTS },
{ .name= NULL }
};
/*
* Some checks to prevent people abusing EAs to get over quota:
* - Don't allow modifying user EAs on devices/symlinks;
* - Don't allow modifying user EAs if sticky bit set;
*/
STATIC int
capable_user_xattr(
struct inode *inode)
{
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) &&
!capable(CAP_SYS_ADMIN))
return 0;
if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
(current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
return 0;
return 1;
}
STATIC int
linvfs_setxattr(
struct dentry *dentry,
......@@ -615,59 +560,27 @@ linvfs_setxattr(
size_t size,
int flags)
{
struct inode *inode = dentry->d_inode;
vnode_t *vp = LINVFS_GET_VP(inode);
char *p = (char *)name;
vnode_t *vp = LINVFS_GET_VP(dentry->d_inode);
char *attr = (char *)name;
attrnames_t *namesp;
int xflags = 0;
int error;
if (strncmp(name, xfs_namespaces[SYSTEM_NAMES].name,
xfs_namespaces[SYSTEM_NAMES].namelen) == 0) {
error = -EINVAL;
if (flags & XATTR_CREATE)
return error;
error = -EOPNOTSUPP;
p += xfs_namespaces[SYSTEM_NAMES].namelen;
if (strcmp(p, POSIXACL_ACCESS) == 0)
error = xfs_acl_vset(vp, (void *) data, size,
_ACL_TYPE_ACCESS);
else if (strcmp(p, POSIXACL_DEFAULT) == 0)
error = xfs_acl_vset(vp, (void *) data, size,
_ACL_TYPE_DEFAULT);
else if (strcmp(p, POSIXCAP) == 0)
error = xfs_cap_vset(vp, (void *) data, size);
if (!error)
error = vn_revalidate(vp);
namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
if (!namesp)
return -EOPNOTSUPP;
attr += namesp->attr_namelen;
error = namesp->attr_capable(vp, NULL);
if (error)
return error;
}
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
/* Convert Linux syscall to XFS internal ATTR flags */
if (flags & XATTR_CREATE)
xflags |= ATTR_CREATE;
if (flags & XATTR_REPLACE)
xflags |= ATTR_REPLACE;
if (strncmp(name, xfs_namespaces[ROOT_NAMES].name,
xfs_namespaces[ROOT_NAMES].namelen) == 0) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
xflags |= ATTR_ROOT;
p += xfs_namespaces[ROOT_NAMES].namelen;
VOP_ATTR_SET(vp, p, (void *) data, size, xflags, NULL, error);
return -error;
}
if (strncmp(name, xfs_namespaces[USER_NAMES].name,
xfs_namespaces[USER_NAMES].namelen) == 0) {
if (!capable_user_xattr(inode))
return -EPERM;
p += xfs_namespaces[USER_NAMES].namelen;
VOP_ATTR_SET(vp, p, (void *) data, size, xflags, NULL, error);
return -error;
}
return -EOPNOTSUPP;
xflags |= namesp->attr_flag;
return namesp->attr_set(vp, attr, (void *)data, size, xflags);
}
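The attrnames_t table driving the dispatch above is declared in the shared attribute code rather than in these hunks; the rough shape below is inferred from the fields and calls used here, so any member not referenced above (attr_name in particular) is an assumption.

typedef struct example_attrnames {
	char		*attr_name;	/* namespace prefix, e.g. "user." (name assumed) */
	unsigned int	attr_namelen;	/* prefix length skipped above */
	int		attr_flag;	/* ATTR_* flag OR'd into xflags */
	int		(*attr_capable)(struct vnode *, struct cred *);
	int		(*attr_get)(struct vnode *, char *, void *, size_t, int);
	int		(*attr_set)(struct vnode *, char *, void *, size_t, int);
	int		(*attr_remove)(struct vnode *, char *, int);
} example_attrnames_t;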
STATIC ssize_t
......@@ -677,53 +590,27 @@ linvfs_getxattr(
void *data,
size_t size)
{
struct inode *inode = dentry->d_inode;
vnode_t *vp = LINVFS_GET_VP(inode);
char *p = (char *)name;
vnode_t *vp = LINVFS_GET_VP(dentry->d_inode);
char *attr = (char *)name;
attrnames_t *namesp;
int xflags = 0;
ssize_t error;
if (strncmp(name, xfs_namespaces[SYSTEM_NAMES].name,
xfs_namespaces[SYSTEM_NAMES].namelen) == 0) {
error = -EOPNOTSUPP;
p += xfs_namespaces[SYSTEM_NAMES].namelen;
if (strcmp(p, POSIXACL_ACCESS) == 0)
error = xfs_acl_vget(vp, data, size, _ACL_TYPE_ACCESS);
else if (strcmp(p, POSIXACL_DEFAULT) == 0)
error = xfs_acl_vget(vp, data, size, _ACL_TYPE_DEFAULT);
else if (strcmp(p, POSIXCAP) == 0)
error = xfs_cap_vget(vp, data, size);
namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
if (!namesp)
return -EOPNOTSUPP;
attr += namesp->attr_namelen;
error = namesp->attr_capable(vp, NULL);
if (error)
return error;
}
/* Convert Linux syscall to XFS internal ATTR flags */
if (!size) {
xflags |= ATTR_KERNOVAL;
data = NULL;
}
if (strncmp(name, xfs_namespaces[ROOT_NAMES].name,
xfs_namespaces[ROOT_NAMES].namelen) == 0) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
xflags |= ATTR_ROOT;
p += xfs_namespaces[ROOT_NAMES].namelen;
VOP_ATTR_GET(vp, p, data, (int *)&size, xflags, NULL, error);
if (!error)
error = -size;
return -error;
}
if (strncmp(name, xfs_namespaces[USER_NAMES].name,
xfs_namespaces[USER_NAMES].namelen) == 0) {
p += xfs_namespaces[USER_NAMES].namelen;
if (!capable_user_xattr(inode))
return -EPERM;
VOP_ATTR_GET(vp, p, data, (int *)&size, xflags, NULL, error);
if (!error)
error = -size;
return -error;
}
return -EOPNOTSUPP;
xflags |= namesp->attr_flag;
return namesp->attr_get(vp, attr, (void *)data, size, xflags);
}
STATIC ssize_t
......@@ -732,40 +619,18 @@ linvfs_listxattr(
char *data,
size_t size)
{
attrlist_cursor_kern_t cursor;
xattr_namespace_t *sys;
vnode_t *vp = LINVFS_GET_VP(dentry->d_inode);
char *k = data;
int xflags = ATTR_KERNAMELS;
int result = 0;
ssize_t error;
int error, xflags = ATTR_KERNAMELS;
ssize_t result;
if (!size)
xflags |= ATTR_KERNOVAL;
if (capable(CAP_SYS_ADMIN))
xflags |= ATTR_KERNFULLS;
memset(&cursor, 0, sizeof(cursor));
VOP_ATTR_LIST(vp, data, size, xflags, &cursor, NULL, error);
if (error > 0)
return -error;
result += -error;
k += result; /* advance start of our buffer */
for (sys = &sys_namespace_array[0]; sys->name != NULL; sys++) {
if (sys->exists == NULL || !sys->exists(vp))
continue;
result += xfs_namespaces[SYSTEM_NAMES].namelen;
result += sys->namelen + 1;
if (size) {
if (result > size)
return -ERANGE;
strcpy(k, xfs_namespaces[SYSTEM_NAMES].name);
k += xfs_namespaces[SYSTEM_NAMES].namelen;
strcpy(k, sys->name);
k += sys->namelen + 1;
}
}
error = attr_generic_list(vp, data, size, xflags, &result);
if (error < 0)
return error;
return result;
}
......@@ -774,51 +639,25 @@ linvfs_removexattr(
struct dentry *dentry,
const char *name)
{
struct inode *inode = dentry->d_inode;
vnode_t *vp = LINVFS_GET_VP(inode);
char *p = (char *)name;
vnode_t *vp = LINVFS_GET_VP(dentry->d_inode);
char *attr = (char *)name;
attrnames_t *namesp;
int xflags = 0;
int error;
if (strncmp(name, xfs_namespaces[SYSTEM_NAMES].name,
xfs_namespaces[SYSTEM_NAMES].namelen) == 0) {
error = -EOPNOTSUPP;
p += xfs_namespaces[SYSTEM_NAMES].namelen;
if (strcmp(p, POSIXACL_ACCESS) == 0)
error = xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
else if (strcmp(p, POSIXACL_DEFAULT) == 0)
error = xfs_acl_vremove(vp, _ACL_TYPE_DEFAULT);
else if (strcmp(p, POSIXCAP) == 0)
error = xfs_cap_vremove(vp);
namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
if (!namesp)
return -EOPNOTSUPP;
attr += namesp->attr_namelen;
error = namesp->attr_capable(vp, NULL);
if (error)
return error;
}
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
if (strncmp(name, xfs_namespaces[ROOT_NAMES].name,
xfs_namespaces[ROOT_NAMES].namelen) == 0) {
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
xflags |= ATTR_ROOT;
p += xfs_namespaces[ROOT_NAMES].namelen;
VOP_ATTR_REMOVE(vp, p, xflags, NULL, error);
return -error;
}
if (strncmp(name, xfs_namespaces[USER_NAMES].name,
xfs_namespaces[USER_NAMES].namelen) == 0) {
p += xfs_namespaces[USER_NAMES].namelen;
if (!capable_user_xattr(inode))
return -EPERM;
VOP_ATTR_REMOVE(vp, p, xflags, NULL, error);
return -error;
}
return -EOPNOTSUPP;
xflags |= namesp->attr_flag;
return namesp->attr_remove(vp, attr, xflags);
}
struct inode_operations linvfs_file_inode_operations =
{
struct inode_operations linvfs_file_inode_operations = {
.permission = linvfs_permission,
.truncate = linvfs_truncate,
.getattr = linvfs_getattr,
......@@ -829,8 +668,7 @@ struct inode_operations linvfs_file_inode_operations =
.removexattr = linvfs_removexattr,
};
struct inode_operations linvfs_dir_inode_operations =
{
struct inode_operations linvfs_dir_inode_operations = {
.create = linvfs_create,
.lookup = linvfs_lookup,
.link = linvfs_link,
......@@ -849,8 +687,7 @@ struct inode_operations linvfs_dir_inode_operations =
.removexattr = linvfs_removexattr,
};
struct inode_operations linvfs_symlink_inode_operations =
{
struct inode_operations linvfs_symlink_inode_operations = {
.readlink = linvfs_readlink,
.follow_link = linvfs_follow_link,
.permission = linvfs_permission,
......
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -32,30 +32,6 @@
#ifndef __XFS_IOPS_H__
#define __XFS_IOPS_H__
/*
* Extended system attributes.
* So far only POSIX ACLs are supported, but this will need to
* grow in time (capabilities, mandatory access control, etc).
*/
#define XFS_SYSTEM_NAMESPACE SYSTEM_POSIXACL
/*
* Define a table of the namespaces XFS supports
*/
typedef int (*xattr_exists_t)(vnode_t *);
typedef struct xattr_namespace {
char *name;
unsigned int namelen;
xattr_exists_t exists;
} xattr_namespace_t;
#define SYSTEM_NAMES 0
#define ROOT_NAMES 1
#define USER_NAMES 2
extern struct xattr_namespace *xfs_namespaces;
extern struct inode_operations linvfs_file_inode_operations;
extern struct inode_operations linvfs_dir_inode_operations;
extern struct inode_operations linvfs_symlink_inode_operations;
......@@ -69,4 +45,7 @@ extern struct address_space_operations linvfs_aops;
extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern void linvfs_unwritten_done(struct buffer_head *, int);
extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *,
int, unsigned int, unsigned long);
#endif /* __XFS_IOPS_H__ */
......@@ -69,9 +69,11 @@
#include <pagebuf/page_buf.h>
#ifndef STATIC
#define STATIC static
#endif
/*
* Feature macros (disable/enable)
*/
#undef HAVE_REFCACHE /* reference cache not needed for NFS in 2.6 */
#define HAVE_SENDFILE /* sendfile(2) exists in 2.6, but not in 2.4 */
/*
* State flag for unwritten extent buffers.
......@@ -100,6 +102,11 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
#define xfs_inherit_nodump xfs_params.inherit_nodump.val
#define xfs_inherit_noatime xfs_params.inherit_noatim.val
#define current_cpu() smp_processor_id()
#define current_pid() (current->pid)
#define current_fsuid(cred) (current->fsuid)
#define current_fsgid(cred) (current->fsgid)
#define NBPP PAGE_SIZE
#define DPPSHFT (PAGE_SHIFT - 9)
#define NDPP (1 << (PAGE_SHIFT - 9))
......@@ -200,6 +207,11 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
#define howmany(x, y) (((x)+((y)-1))/(y))
#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
static inline void xfs_stack_trace(void)
{
dump_stack();
}
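The new current_pid()/current_fsuid()/current_fsgid() wrappers let common XFS code keep passing a cred pointer while the Linux port reads the current task. A hypothetical use, purely illustrative:

static inline uid_t example_new_inode_uid(struct cred *credp)
{
	return current_fsuid(credp);	/* expands to current->fsuid here */
}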
/* Move the kernel do_div definition off to one side */
#if defined __i386__
......
......@@ -71,10 +71,76 @@
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include <linux/capability.h>
#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
int tag,
xfs_iocore_t *io,
const struct iovec *iovp,
size_t segs,
loff_t offset,
int ioflags)
{
xfs_inode_t *ip = XFS_IO_INODE(io);
if (ip->i_rwtrace == NULL)
return;
ktrace_enter(ip->i_rwtrace,
(void *)(unsigned long)tag,
(void *)ip,
(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
(void *)(__psint_t)iovp,
(void *)((unsigned long)segs),
(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
(void *)((unsigned long)(offset & 0xffffffff)),
(void *)((unsigned long)ioflags),
(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
(void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL);
}
void
xfs_inval_cached_trace(
xfs_iocore_t *io,
xfs_off_t offset,
xfs_off_t len,
xfs_off_t first,
xfs_off_t last)
{
xfs_inode_t *ip = XFS_IO_INODE(io);
if (ip->i_rwtrace == NULL)
return;
ktrace_enter(ip->i_rwtrace,
(void *)(__psint_t)XFS_INVAL_CACHED,
(void *)ip,
(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
(void *)((unsigned long)(offset & 0xffffffff)),
(void *)((unsigned long)((len >> 32) & 0xffffffff)),
(void *)((unsigned long)(len & 0xffffffff)),
(void *)((unsigned long)((first >> 32) & 0xffffffff)),
(void *)((unsigned long)(first & 0xffffffff)),
(void *)((unsigned long)((last >> 32) & 0xffffffff)),
(void *)((unsigned long)(last & 0xffffffff)),
(void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL);
}
#endif
/*
* xfs_iozero
*
......@@ -142,6 +208,59 @@ xfs_iozero(
return (-status);
}
/*
* xfs_inval_cached_pages
*
* This routine is responsible for keeping direct I/O and buffered I/O
* somewhat coherent. From here we make sure that we're at least
* temporarily holding the inode I/O lock exclusively and then call
* the page cache to flush and invalidate any cached pages. If there
* are no cached pages this routine will be very quick.
*/
void
xfs_inval_cached_pages(
vnode_t *vp,
xfs_iocore_t *io,
xfs_off_t offset,
int write,
int relock)
{
xfs_mount_t *mp;
if (!VN_CACHED(vp)) {
return;
}
mp = io->io_mount;
/*
* We need to get the I/O lock exclusively in order
* to safely invalidate pages and mappings.
*/
if (relock) {
XFS_IUNLOCK(mp, io, XFS_IOLOCK_SHARED);
XFS_ILOCK(mp, io, XFS_IOLOCK_EXCL);
}
/* Writing beyond EOF creates a hole that must be zeroed */
if (write && (offset > XFS_SIZE(mp, io))) {
xfs_fsize_t isize;
XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
isize = XFS_SIZE(mp, io);
if (offset > isize) {
xfs_zero_eof(vp, io, offset, isize, offset);
}
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
}
xfs_inval_cached_trace(io, offset, -1, ctooff(offtoct(offset)), -1);
VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(offset)), -1, FI_REMAPF_LOCKED);
if (relock) {
XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
}
}
ssize_t /* bytes read, or (-) error */
xfs_read(
bhv_desc_t *bdp,
......@@ -684,9 +803,13 @@ xfs_write(
retry:
if (ioflags & IO_ISDIRECT) {
xfs_inval_cached_pages(vp, &xip->i_iocore, *offset, 1, 1);
xfs_inval_cached_pages(vp, io, *offset, 1, 1);
xfs_rw_enter_trace(XFS_DIOWR_ENTER,
io, iovp, segs, *offset, ioflags);
} else {
xfs_rw_enter_trace(XFS_WRITE_ENTER,
io, iovp, segs, *offset, ioflags);
}
ret = generic_file_aio_write_nolock(iocb, iovp, segs, offset);
if ((ret == -ENOSPC) &&
......@@ -702,7 +825,6 @@ xfs_write(
xfs_rwlock(bdp, locktype);
*offset = xip->i_d.di_size;
goto retry;
}
if (*offset > xip->i_d.di_size) {
......@@ -846,8 +968,8 @@ xfs_bmap(bhv_desc_t *bdp,
xfs_off_t offset,
ssize_t count,
int flags,
page_buf_bmap_t *pbmapp,
int *npbmaps)
xfs_iomap_t *iomapp,
int *niomaps)
{
xfs_inode_t *ip = XFS_BHVTOI(bdp);
xfs_iocore_t *io = &ip->i_iocore;
......@@ -856,7 +978,7 @@ xfs_bmap(bhv_desc_t *bdp,
ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
return xfs_iomap(io, offset, count, flags, pbmapp, npbmaps);
return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}
/*
......
......@@ -39,21 +39,59 @@ struct xfs_iocore;
struct xfs_inode;
struct xfs_bmbt_irec;
struct page_buf_s;
struct page_buf_bmap_s;
struct xfs_iomap;
#if defined(XFS_RW_TRACE)
/*
* Defines for the trace mechanisms in xfs_lrw.c.
*/
#define XFS_RW_KTRACE_SIZE 64
#define XFS_STRAT_KTRACE_SIZE 64
#define XFS_STRAT_GTRACE_SIZE 512
#define XFS_READ_ENTER 1
#define XFS_WRITE_ENTER 2
#define XFS_IOMAP_READ_ENTER 3
#define XFS_IOMAP_WRITE_ENTER 4
#define XFS_IOMAP_READ_MAP 5
#define XFS_IOMAP_WRITE_MAP 6
#define XFS_IOMAP_WRITE_NOSPACE 7
#define XFS_ITRUNC_START 8
#define XFS_ITRUNC_FINISH1 9
#define XFS_ITRUNC_FINISH2 10
#define XFS_CTRUNC1 11
#define XFS_CTRUNC2 12
#define XFS_CTRUNC3 13
#define XFS_CTRUNC4 14
#define XFS_CTRUNC5 15
#define XFS_CTRUNC6 16
#define XFS_BUNMAPI 17
#define XFS_INVAL_CACHED 18
#define XFS_DIORD_ENTER 19
#define XFS_DIOWR_ENTER 20
extern void xfs_rw_enter_trace(int, struct xfs_iocore *,
const struct iovec *, size_t, loff_t, int);
extern void xfs_inval_cached_trace(struct xfs_iocore *,
xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t);
#else
#define xfs_rw_enter_trace(tag, io, iovec, segs, offset, ioflags)
#define xfs_inval_cached_trace(io, offset, len, first, last)
#endif
/*
* Maximum count of bmaps used by read and write paths.
*/
#define XFS_MAX_RW_NBMAPS 4
extern int xfs_bmap(struct bhv_desc *, xfs_off_t, ssize_t, int,
struct page_buf_bmap_s *, int *);
struct xfs_iomap *, int *);
extern int xfsbdstrat(struct xfs_mount *, struct page_buf_s *);
extern int xfs_bdstrat_cb(struct page_buf_s *);
extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t,
xfs_fsize_t, xfs_fsize_t);
extern void xfs_inval_cached_pages(struct vnode *, struct xfs_iocore *,
xfs_off_t, int, int);
extern ssize_t xfs_read(struct bhv_desc *, struct kiocb *,
const struct iovec *, unsigned int,
loff_t *, int, struct cred *);
......@@ -64,16 +102,6 @@ extern ssize_t xfs_sendfile(struct bhv_desc *, struct file *,
loff_t *, int, size_t, read_actor_t,
void *, struct cred *);
extern int xfs_iomap(struct xfs_iocore *, xfs_off_t, ssize_t, int,
struct page_buf_bmap_s *, int *);
extern int xfs_iomap_write_direct(struct xfs_inode *, loff_t, size_t,
int, struct xfs_bmbt_irec *, int *, int);
extern int xfs_iomap_write_delay(struct xfs_inode *, loff_t, size_t,
int, struct xfs_bmbt_irec *, int *);
extern int xfs_iomap_write_allocate(struct xfs_inode *,
struct xfs_bmbt_irec *, int *);
extern int xfs_iomap_write_unwritten(struct xfs_inode *, loff_t, size_t);
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
#define XFS_FSB_TO_DB_IO(io,fsb) \
......
......@@ -120,7 +120,7 @@ xfs_max_file_offset(
*/
#if BITS_PER_LONG == 32
# if defined(HAVE_SECTOR_T)
# if defined(CONFIG_LBD)
ASSERT(sizeof(sector_t) == 8);
pagefactor = PAGE_CACHE_SIZE;
bitshift = BITS_PER_LONG;
......@@ -241,6 +241,23 @@ xfs_initialize_vnode(
}
}
void
xfs_flush_inode(
xfs_inode_t *ip)
{
struct inode *inode = LINVFS_GET_IP(XFS_ITOV(ip));
filemap_fdatawrite(inode->i_mapping);
}
void
xfs_flush_device(
xfs_inode_t *ip)
{
sync_blockdev(XFS_ITOV(ip)->v_vfsp->vfs_super->s_bdev);
xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
int
xfs_blkdev_get(
xfs_mount_t *mp,
......@@ -860,15 +877,16 @@ init_xfs_fs( void )
{
int error;
struct sysinfo si;
static char message[] __initdata =
KERN_INFO "SGI XFS " XFS_VERSION_STRING " with "
XFS_BUILD_OPTIONS " enabled\n";
static char message[] __initdata = KERN_INFO \
XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n";
printk(message);
si_meminfo(&si);
xfs_physmem = si.totalram;
ktrace_init(64);
error = init_inodecache();
if (error < 0)
goto undo_inodecache;
......@@ -907,12 +925,12 @@ exit_xfs_fs( void )
vfs_exitdmapi();
pagebuf_terminate();
destroy_inodecache();
ktrace_uninit();
}
module_init(init_xfs_fs);
module_exit(exit_xfs_fs);
MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(
"SGI XFS " XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");
......@@ -76,10 +76,10 @@
# define XFS_BIGFS_STRING
#endif
#ifdef CONFIG_XFS_VNODE_TRACING
# define XFS_VNTRACE_STRING "VN-trace, "
#ifdef CONFIG_XFS_TRACE
# define XFS_TRACE_STRING "tracing, "
#else
# define XFS_VNTRACE_STRING
# define XFS_TRACE_STRING
#endif
#ifdef XFSDEBUG
......@@ -91,7 +91,7 @@
#define XFS_BUILD_OPTIONS XFS_ACL_STRING \
XFS_REALTIME_STRING \
XFS_BIGFS_STRING \
XFS_VNTRACE_STRING \
XFS_TRACE_STRING \
XFS_DBG_STRING /* DBG must be last */
#define LINVFS_GET_VFS(s) \
......@@ -99,6 +99,7 @@
#define LINVFS_SET_VFS(s, vfsp) \
((s)->s_fs_info = vfsp)
struct xfs_inode;
struct xfs_mount;
struct pb_target;
struct block_device;
......@@ -107,6 +108,9 @@ extern __uint64_t xfs_max_file_offset(unsigned int);
extern void xfs_initialize_vnode(bhv_desc_t *, vnode_t *, bhv_desc_t *, int);
extern void xfs_flush_inode(struct xfs_inode *);
extern void xfs_flush_device(struct xfs_inode *);
extern int xfs_blkdev_get(struct xfs_mount *, const char *,
struct block_device **);
extern void xfs_blkdev_put(struct block_device *);
......
......@@ -51,7 +51,7 @@ xfs_stats_clear_proc_handler(
int c, ret, *valp = ctl->data;
__uint32_t vn_active;
ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);
ret = proc_dointvec_minmax(ctl, write, filp, buffer, lenp);
if (!ret && write && *valp) {
printk("XFS Clearing xfsstats\n");
......
......@@ -39,6 +39,6 @@
#ifndef __XFS_VERSION_H__
#define __XFS_VERSION_H__
#define XFS_VERSION_STRING "for Linux"
#define XFS_VERSION_STRING "SGI XFS"
#endif /* __XFS_VERSION_H__ */
......@@ -134,7 +134,7 @@ vfs_root(
int
vfs_statvfs(
struct bhv_desc *bdp,
struct kstatfs *sp,
xfs_statfs_t *sp,
struct vnode *vp)
{
struct bhv_desc *next = bdp;
......
......@@ -33,6 +33,7 @@
#define __XFS_VFS_H__
#include <linux/vfs.h>
#include "xfs_fs.h"
struct fid;
struct cred;
......@@ -42,10 +43,12 @@ struct seq_file;
struct super_block;
struct xfs_mount_args;
typedef struct kstatfs xfs_statfs_t;
typedef struct vfs {
u_int vfs_flag; /* flags */
fsid_t vfs_fsid; /* file system ID */
fsid_t *vfs_altfsid; /* An ID fixed for life of FS */
xfs_fsid_t vfs_fsid; /* file system ID */
xfs_fsid_t *vfs_altfsid; /* An ID fixed for life of FS */
bhv_head_t vfs_bh; /* head of vfs behavior chain */
struct super_block *vfs_super; /* Linux superblock structure */
struct task_struct *vfs_sync_task;
......@@ -101,7 +104,7 @@ typedef int (*vfs_unmount_t)(bhv_desc_t *, int, struct cred *);
typedef int (*vfs_mntupdate_t)(bhv_desc_t *, int *,
struct xfs_mount_args *);
typedef int (*vfs_root_t)(bhv_desc_t *, struct vnode **);
typedef int (*vfs_statvfs_t)(bhv_desc_t *, struct kstatfs *, struct vnode *);
typedef int (*vfs_statvfs_t)(bhv_desc_t *, xfs_statfs_t *, struct vnode *);
typedef int (*vfs_sync_t)(bhv_desc_t *, int, struct cred *);
typedef int (*vfs_vget_t)(bhv_desc_t *, struct vnode **, struct fid *);
typedef int (*vfs_dmapiops_t)(bhv_desc_t *, caddr_t);
......@@ -168,7 +171,7 @@ extern int vfs_showargs(bhv_desc_t *, struct seq_file *);
extern int vfs_unmount(bhv_desc_t *, int, struct cred *);
extern int vfs_mntupdate(bhv_desc_t *, int *, struct xfs_mount_args *);
extern int vfs_root(bhv_desc_t *, struct vnode **);
extern int vfs_statvfs(bhv_desc_t *, struct kstatfs *, struct vnode *);
extern int vfs_statvfs(bhv_desc_t *, xfs_statfs_t *, struct vnode *);
extern int vfs_sync(bhv_desc_t *, int, struct cred *);
extern int vfs_vget(bhv_desc_t *, struct vnode **, struct fid *);
extern int vfs_dmapiops(bhv_desc_t *, caddr_t);
......
......@@ -98,7 +98,7 @@ vn_reclaim(
vp->v_type = VNON;
vp->v_fbhv = NULL;
#ifdef CONFIG_XFS_VNODE_TRACING
#ifdef XFS_VNODE_TRACE
ktrace_free(vp->v_trace);
vp->v_trace = NULL;
#endif
......@@ -154,9 +154,10 @@ vn_initialize(
/* Initialize the first behavior and the behavior chain head. */
vn_bhv_head_init(VN_BHV_HEAD(vp), "vnode");
#ifdef CONFIG_XFS_VNODE_TRACING
#ifdef XFS_VNODE_TRACE
vp->v_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP);
#endif /* CONFIG_XFS_VNODE_TRACING */
printk("Allocated VNODE_TRACE at 0x%p\n", vp->v_trace);
#endif /* XFS_VNODE_TRACE */
vn_trace_exit(vp, "vn_initialize", (inst_t *)__return_address);
return vp;
......@@ -392,7 +393,7 @@ vn_remove(
}
#ifdef CONFIG_XFS_VNODE_TRACING
#ifdef XFS_VNODE_TRACE
#define KTRACE_ENTER(vp, vk, s, line, ra) \
ktrace_enter( (vp)->v_trace, \
......@@ -439,4 +440,4 @@ vn_trace_rele(vnode_t *vp, char *file, int line, inst_t *ra)
{
KTRACE_ENTER(vp, VNODE_KTRACE_RELE, file, line, ra);
}
#endif /* CONFIG_XFS_VNODE_TRACING */
#endif /* XFS_VNODE_TRACE */
......@@ -62,7 +62,7 @@
struct uio;
struct file;
struct vattr;
struct page_buf_bmap_s;
struct xfs_iomap;
struct attrlist_cursor_kern;
/*
......@@ -87,7 +87,7 @@ typedef struct vnode {
vn_bhv_head_t v_bh; /* behavior head */
spinlock_t v_lock; /* VN_LOCK/VN_UNLOCK */
struct inode v_inode; /* Linux inode */
#ifdef CONFIG_XFS_VNODE_TRACING
#ifdef XFS_VNODE_TRACE
struct ktrace *v_trace; /* trace header structure */
#endif
} vnode_t;
......@@ -226,7 +226,7 @@ typedef int (*vop_release_t)(bhv_desc_t *);
typedef int (*vop_rwlock_t)(bhv_desc_t *, vrwlock_t);
typedef void (*vop_rwunlock_t)(bhv_desc_t *, vrwlock_t);
typedef int (*vop_bmap_t)(bhv_desc_t *, xfs_off_t, ssize_t, int,
struct page_buf_bmap_s *, int *);
struct xfs_iomap *, int *);
typedef int (*vop_reclaim_t)(bhv_desc_t *);
typedef int (*vop_attr_get_t)(bhv_desc_t *, char *, char *, int *, int,
struct cred *);
......@@ -545,21 +545,17 @@ static inline int vn_count(struct vnode *vp)
extern vnode_t *vn_hold(struct vnode *);
extern void vn_rele(struct vnode *);
#if defined(CONFIG_XFS_VNODE_TRACING)
#if defined(XFS_VNODE_TRACE)
#define VN_HOLD(vp) \
((void)vn_hold(vp), \
((void)vn_hold(vp), \
vn_trace_hold(vp, __FILE__, __LINE__, (inst_t *)__return_address))
#define VN_RELE(vp) \
(vn_trace_rele(vp, __FILE__, __LINE__, (inst_t *)__return_address), \
iput(LINVFS_GET_IP(vp)))
#else /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */
#else
#define VN_HOLD(vp) ((void)vn_hold(vp))
#define VN_RELE(vp) (iput(LINVFS_GET_IP(vp)))
#endif /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */
#endif
/*
* Vname handling macros.
......@@ -590,6 +586,13 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
spin_unlock(&vp->v_lock);
}
/*
* Update modify/access/change times on the vnode
*/
#define VN_MTIMESET(vp, tvp) (LINVFS_GET_IP(vp)->i_mtime = *(tvp))
#define VN_ATIMESET(vp, tvp) (LINVFS_GET_IP(vp)->i_atime = *(tvp))
#define VN_CTIMESET(vp, tvp) (LINVFS_GET_IP(vp)->i_ctime = *(tvp))
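A hypothetical caller of the new timestamp macros, assuming the timestamp argument matches the type of the Linux inode's i_mtime:

static inline void example_touch_mtime(vnode_t *vp, struct timespec *tvp)
{
	VN_MTIMESET(vp, tvp);		/* LINVFS_GET_IP(vp)->i_mtime = *tvp */
}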
/*
* Some useful predicates.
*/
......@@ -617,13 +620,12 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
#define FSYNC_INVAL 0x2 /* flush and invalidate cached data */
#define FSYNC_DATA 0x4 /* synchronous fsync of data only */
#if (defined(CONFIG_XFS_VNODE_TRACING))
#define VNODE_TRACE_SIZE 16 /* number of trace entries */
/*
* Tracing entries.
* Tracking vnode activity.
*/
#if defined(XFS_VNODE_TRACE)
#define VNODE_TRACE_SIZE 16 /* number of trace entries */
#define VNODE_KTRACE_ENTRY 1
#define VNODE_KTRACE_EXIT 2
#define VNODE_KTRACE_HOLD 3
......@@ -635,18 +637,16 @@ extern void vn_trace_exit(struct vnode *, char *, inst_t *);
extern void vn_trace_hold(struct vnode *, char *, int, inst_t *);
extern void vn_trace_ref(struct vnode *, char *, int, inst_t *);
extern void vn_trace_rele(struct vnode *, char *, int, inst_t *);
#define VN_TRACE(vp) \
vn_trace_ref(vp, __FILE__, __LINE__, (inst_t *)__return_address)
#else /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */
#else
#define vn_trace_entry(a,b,c)
#define vn_trace_exit(a,b,c)
#define vn_trace_hold(a,b,c,d)
#define vn_trace_ref(a,b,c,d)
#define vn_trace_rele(a,b,c,d)
#define VN_TRACE(vp)
#endif /* ! (defined(CONFIG_XFS_VNODE_TRACING)) */
#endif
#endif /* __XFS_VNODE_H__ */
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -47,11 +47,6 @@
#include <linux/buffer_head.h>
#include <linux/uio.h>
/*
* Turn this on to get pagebuf lock ownership
#define PAGEBUF_LOCK_TRACKING
*/
/*
* Base types
*/
......@@ -61,8 +56,6 @@ typedef loff_t page_buf_daddr_t;
#define PAGE_BUF_DADDR_NULL ((page_buf_daddr_t) (-1LL))
typedef size_t page_buf_dsize_t; /* size of buffer in blocks */
#define page_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE)
#define page_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
#define page_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT)
......@@ -74,29 +67,6 @@ typedef enum page_buf_rw_e {
PBRW_ZERO = 3 /* Zero target memory */
} page_buf_rw_t;
typedef enum { /* pbm_flags values */
PBMF_EOF = 0x01, /* mapping contains EOF */
PBMF_HOLE = 0x02, /* mapping covers a hole */
PBMF_DELAY = 0x04, /* mapping covers delalloc region */
PBMF_UNWRITTEN = 0x20, /* mapping covers allocated */
/* but uninitialized file data */
PBMF_NEW = 0x40 /* just allocated */
} bmap_flags_t;
typedef enum {
/* base extent manipulation calls */
BMAP_READ = (1 << 0), /* read extents */
BMAP_WRITE = (1 << 1), /* create extents */
BMAP_ALLOCATE = (1 << 2), /* delayed allocate to real extents */
BMAP_UNWRITTEN = (1 << 3), /* unwritten extents to real extents */
/* modifiers */
BMAP_IGNSTATE = (1 << 4), /* ignore unwritten state on read */
BMAP_DIRECT = (1 << 5), /* direct instead of buffered write */
BMAP_MMAP = (1 << 6), /* allocate for mmap write */
BMAP_SYNC = (1 << 7), /* sync write */
BMAP_TRYLOCK = (1 << 8), /* non-blocking request */
BMAP_DEVICE = (1 << 9), /* we only want to know the device */
} bmapi_flags_t;
typedef enum page_buf_flags_e { /* pb_flags values */
PBF_READ = (1 << 0), /* buffer intended for reading from device */
......@@ -123,12 +93,13 @@ typedef enum page_buf_flags_e { /* pb_flags values */
_PBF_PRIVATE_BH = (1 << 17), /* do not use public buffer heads */
_PBF_ALL_PAGES_MAPPED = (1 << 18), /* all pages in range mapped */
_PBF_ADDR_ALLOCATED = (1 << 19), /* pb_addr space was allocated */
_PBF_MEM_ALLOCATED = (1 << 20), /* pb_mem+underlying pages alloc'd */
_PBF_MEM_ALLOCATED = (1 << 20), /* underlying pages are allocated */
_PBF_MEM_SLAB = (1 << 21), /* underlying pages are slab allocated */
PBF_FORCEIO = (1 << 21),
PBF_FLUSH = (1 << 22), /* flush disk write cache */
PBF_READ_AHEAD = (1 << 23),
PBF_RUN_QUEUES = (1 << 24), /* run block device task queue */
PBF_FORCEIO = (1 << 22), /* ignore any cache state */
PBF_FLUSH = (1 << 23), /* flush disk write cache */
PBF_READ_AHEAD = (1 << 24), /* asynchronous read-ahead */
PBF_RUN_QUEUES = (1 << 25), /* run block device task queue */
} page_buf_flags_t;
......@@ -145,36 +116,6 @@ typedef struct pb_target {
size_t pbr_smask;
} pb_target_t;
/*
* page_buf_bmap_t: File system I/O map
*
* The pbm_bn, pbm_offset and pbm_length fields are expressed in disk blocks.
* The pbm_length field specifies the size of the underlying backing store
* for the particular mapping.
*
* The pbm_bsize, pbm_size and pbm_delta fields are in bytes and indicate
* the size of the mapping, the number of bytes that are valid to access
* (read or write), and the offset into the mapping, given the offset
* supplied to the file I/O map routine. pbm_delta is the offset of the
* desired data from the beginning of the mapping.
*
* When a request is made to read beyond the logical end of the object,
* pbm_size may be set to 0, but pbm_offset and pbm_length should be set to
* the actual amount of underlying storage that has been allocated, if any.
*/
typedef struct page_buf_bmap_s {
page_buf_daddr_t pbm_bn; /* block number in file system */
pb_target_t *pbm_target; /* device to do I/O to */
loff_t pbm_offset; /* byte offset of mapping in file */
size_t pbm_delta; /* offset of request into bmap */
size_t pbm_bsize; /* size of this mapping in bytes */
bmap_flags_t pbm_flags; /* options flags for mapping */
} page_buf_bmap_t;
typedef page_buf_bmap_t pb_bmap_t;
/*
* page_buf_t: Buffer structure for page cache-based buffers
*
......@@ -381,4 +322,19 @@ extern void pagebuf_delwri_dequeue(
extern int pagebuf_init(void);
extern void pagebuf_terminate(void);
#ifdef PAGEBUF_TRACE
extern ktrace_t *pagebuf_trace_buf;
extern void pagebuf_trace(
page_buf_t *, /* buffer being traced */
char *, /* description of operation */
void *, /* arbitrary diagnostic value */
void *); /* return address */
#else
# define pagebuf_trace(pb, id, ptr, ra) do { } while (0)
#endif
#define pagebuf_target_name(target) \
({ char __b[BDEVNAME_SIZE]; bdevname((target)->pbr_bdev, __b); __b; })
#endif /* __PAGE_BUF_H__ */
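An illustrative call site (not from this commit): because the stub macro above compiles pagebuf_trace() away when PAGEBUF_TRACE is unset, callers need no #ifdef of their own. The description string is arbitrary.

static inline void example_trace_pb_get(page_buf_t *pb)
{
	pagebuf_trace(pb, "get", NULL, __builtin_return_address(0));
}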
/*
* Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
/*
* Written by Steve Lord at SGI
*/
#ifndef __PAGE_BUF_PRIVATE_H__
#define __PAGE_BUF_PRIVATE_H__
#include <linux/percpu.h>
#include "page_buf.h"
#define _PAGE_BUF_INTERNAL_
#define PB_DEFINE_TRACES
#include "page_buf_trace.h"
#ifdef PAGEBUF_LOCK_TRACKING
#define PB_SET_OWNER(pb) (pb->pb_last_holder = current->pid)
#define PB_CLEAR_OWNER(pb) (pb->pb_last_holder = -1)
#define PB_GET_OWNER(pb) (pb->pb_last_holder)
#else
#define PB_SET_OWNER(pb)
#define PB_CLEAR_OWNER(pb)
#define PB_GET_OWNER(pb)
#endif /* PAGEBUF_LOCK_TRACKING */
/* Tracing utilities for pagebuf */
typedef struct {
int event;
unsigned long pb;
page_buf_flags_t flags;
unsigned short hold;
unsigned short lock_value;
void *task;
void *misc;
void *ra;
loff_t offset;
size_t size;
} pagebuf_trace_t;
struct pagebuf_trace_buf {
pagebuf_trace_t *buf;
volatile int start;
volatile int end;
};
#define PB_TRACE_BUFSIZE 1024
#define CIRC_INC(i) (((i) + 1) & (PB_TRACE_BUFSIZE - 1))
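A sketch (assumption only; the real producer lives in the pagebuf code, not in these hunks) of how the start/end indices and CIRC_INC() above cooperate to overwrite the oldest entry once the ring fills:

static inline void example_pb_trace_record(
	struct pagebuf_trace_buf	*tb,
	pagebuf_trace_t			*ev)
{
	tb->buf[tb->end] = *ev;
	tb->end = CIRC_INC(tb->end);
	if (tb->end == tb->start)
		tb->start = CIRC_INC(tb->start);	/* drop oldest entry */
}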
/*
* Tunable pagebuf parameters
*/
typedef struct pb_sysctl_val {
int min;
int val;
int max;
} pb_sysctl_val_t;
typedef struct pagebuf_param {
pb_sysctl_val_t flush_interval; /* interval between runs of the
* delwri flush daemon. */
pb_sysctl_val_t age_buffer; /* time for buffer to age before
* we flush it. */
pb_sysctl_val_t stats_clear; /* clear the pagebuf stats */
pb_sysctl_val_t debug; /* debug tracing on or off */
} pagebuf_param_t;
enum {
PB_FLUSH_INT = 1,
PB_FLUSH_AGE = 2,
PB_STATS_CLEAR = 3,
PB_DEBUG = 4
};
extern pagebuf_param_t pb_params;
/*
* Pagebuf statistics
*/
struct pbstats {
u_int32_t pb_get;
u_int32_t pb_create;
u_int32_t pb_get_locked;
u_int32_t pb_get_locked_waited;
u_int32_t pb_busy_locked;
u_int32_t pb_miss_locked;
u_int32_t pb_page_retries;
u_int32_t pb_page_found;
u_int32_t pb_get_read;
};
DECLARE_PER_CPU(struct pbstats, pbstats);
/* We don't disable preemption; we're not too worried about
 * poking the wrong CPU's stat for now. */
#define PB_STATS_INC(count) (__get_cpu_var(pbstats).count++)
#ifndef STATIC
# define STATIC static
#endif
#endif /* __PAGE_BUF_PRIVATE_H__ */
......@@ -124,7 +124,7 @@ xfs_qm_dqinit(
initnsema(&dqp->q_flock, 1, "fdq");
sv_init(&dqp->q_pinwait, SV_DEFAULT, "pdq");
#ifdef DQUOT_TRACING
#ifdef XFS_DQUOT_TRACE
dqp->q_trace = ktrace_alloc(DQUOT_TRACE_SIZE, KM_SLEEP);
xfs_dqtrace_entry(dqp, "DQINIT");
#endif
......@@ -148,7 +148,7 @@ xfs_qm_dqinit(
dqp->q_hash = 0;
ASSERT(dqp->dq_flnext == dqp->dq_flprev);
#ifdef DQUOT_TRACING
#ifdef XFS_DQUOT_TRACE
ASSERT(dqp->q_trace);
xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
#endif
......@@ -173,7 +173,7 @@ xfs_qm_dqdestroy(
freesema(&dqp->q_flock);
sv_destroy(&dqp->q_pinwait);
#ifdef DQUOT_TRACING
#ifdef XFS_DQUOT_TRACE
if (dqp->q_trace)
ktrace_free(dqp->q_trace);
dqp->q_trace = NULL;
......@@ -201,20 +201,20 @@ xfs_qm_dqinit_core(
}
#ifdef DQUOT_TRACING
#ifdef XFS_DQUOT_TRACE
/*
* Dquot tracing for debugging.
*/
/* ARGSUSED */
void
xfs_dqtrace_entry__(
xfs_dquot_t *dqp,
char *func,
void *retaddr,
xfs_inode_t *ip)
__xfs_dqtrace_entry(
xfs_dquot_t *dqp,
char *func,
void *retaddr,
xfs_inode_t *ip)
{
xfs_dquot_t *udqp = NULL;
int ino;
xfs_dquot_t *udqp = NULL;
xfs_ino_t ino = 0;
ASSERT(dqp->q_trace);
if (ip) {
......@@ -227,13 +227,19 @@ xfs_dqtrace_entry__(
(void *)(__psint_t)dqp->q_nrefs,
(void *)(__psint_t)dqp->dq_flags,
(void *)(__psint_t)dqp->q_res_bcount,
(void *)(__psint_t)INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_icount, ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_blk_hardlimit, ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_blk_softlimit, ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_ino_hardlimit, ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_ino_softlimit, ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_id, ARCH_CONVERT), /* 11 */
(void *)(__psint_t)INT_GET(dqp->q_core.d_bcount,
ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_icount,
ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_blk_hardlimit,
ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_blk_softlimit,
ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_ino_hardlimit,
ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_ino_softlimit,
ARCH_CONVERT),
(void *)(__psint_t)INT_GET(dqp->q_core.d_id, ARCH_CONVERT),
(void *)(__psint_t)current_pid(),
(void *)(__psint_t)ino,
(void *)(__psint_t)retaddr,
......@@ -751,7 +757,6 @@ xfs_qm_idtodq(
}
*O_dqpp = dqp;
ASSERT(! XFS_DQ_IS_LOCKED(dqp));
return (0);
error0:
......@@ -1000,7 +1005,6 @@ xfs_qm_dqget(
/*
* Dquot lock comes after hashlock in the lock ordering
*/
ASSERT(! XFS_DQ_IS_LOCKED(dqp));
if (ip) {
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (! XFS_IS_DQTYPE_ON(mp, type)) {
......@@ -1504,7 +1508,7 @@ xfs_qm_dqpurge(
*/
ASSERT(XFS_DQ_IS_ON_FREELIST(dqp));
dqp->q_mount = NULL;;
dqp->q_mount = NULL;
dqp->q_hash = NULL;
dqp->dq_flags = XFS_DQ_INACTIVE;
memset(&dqp->q_core, 0, sizeof(dqp->q_core));
......
......@@ -99,7 +99,7 @@ typedef struct xfs_dquot {
sema_t q_flock; /* flush lock */
uint q_pincount; /* pin count for this dquot */
sv_t q_pinwait; /* sync var for pinning */
#ifdef DQUOT_TRACING
#ifdef XFS_DQUOT_TRACE
struct ktrace *q_trace; /* trace header structure */
#endif
} xfs_dquot_t;
......@@ -175,23 +175,25 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
#define XFS_IS_THIS_QUOTA_OFF(d) (! (XFS_QM_ISUDQ(d) ? \
(XFS_IS_UQUOTA_ON((d)->q_mount)) : \
(XFS_IS_GQUOTA_ON((d)->q_mount))))
#ifdef DQUOT_TRACING
#ifdef XFS_DQUOT_TRACE
/*
* Dquot Tracing stuff.
*/
#define DQUOT_TRACE_SIZE 64
#define DQUOT_KTRACE_ENTRY 1
extern void __xfs_dqtrace_entry(xfs_dquot_t *dqp, char *func,
void *, xfs_inode_t *);
#define xfs_dqtrace_entry_ino(a,b,ip) \
xfs_dqtrace_entry__((a), (b), (void*)__return_address, (ip))
__xfs_dqtrace_entry((a), (b), (void*)__return_address, (ip))
#define xfs_dqtrace_entry(a,b) \
xfs_dqtrace_entry__((a), (b), (void*)__return_address, NULL)
extern void xfs_dqtrace_entry__(xfs_dquot_t *dqp, char *func,
void *, xfs_inode_t *);
__xfs_dqtrace_entry((a), (b), (void*)__return_address, NULL)
#else
#define xfs_dqtrace_entry(a,b)
#define xfs_dqtrace_entry_ino(a,b,ip)
#endif
#ifdef QUOTADEBUG
extern void xfs_qm_dqprint(xfs_dquot_t *);
#else
......
......@@ -82,6 +82,7 @@ EXPORT_SYMBOL(xfs_Gqm); /* used by xfsidbg */
kmem_zone_t *qm_dqzone;
kmem_zone_t *qm_dqtrxzone;
kmem_shaker_t xfs_qm_shaker;
STATIC void xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
......@@ -112,8 +113,6 @@ extern mutex_t qcheck_lock;
#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
#endif
struct shrinker *xfs_qm_shrinker;
/*
* Initialize the XQM structure.
* Note that there is not one quota manager per file system.
......@@ -163,7 +162,7 @@ xfs_Gqm_init(void)
} else
xqm->qm_dqzone = qm_dqzone;
xfs_qm_shrinker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);
xfs_qm_shaker = kmem_shake_register(xfs_qm_shake);
/*
* The t_dqinfo portion of transactions.
......@@ -195,8 +194,7 @@ xfs_qm_destroy(
ASSERT(xqm != NULL);
ASSERT(xqm->qm_nrefs == 0);
remove_shrinker(xfs_qm_shrinker);
kmem_shake_deregister(xfs_qm_shaker);
hsize = xqm->qm_dqhashmask + 1;
for (i = 0; i < hsize; i++) {
xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
......@@ -806,7 +804,6 @@ xfs_qm_dqattach_one(
ASSERT(XFS_DQ_IS_LOCKED(dqp));
if (! dolock) {
xfs_dqunlock(dqp);
ASSERT(!udqhint || !XFS_DQ_IS_LOCKED(udqhint));
goto done;
}
if (! udqhint)
......@@ -814,7 +811,6 @@ xfs_qm_dqattach_one(
ASSERT(udqhint);
ASSERT(dolock);
ASSERT(! XFS_DQ_IS_LOCKED(udqhint));
ASSERT(XFS_DQ_IS_LOCKED(dqp));
if (! xfs_qm_dqlock_nowait(udqhint)) {
xfs_dqunlock(dqp);
......@@ -826,14 +822,10 @@ xfs_qm_dqattach_one(
if (udqhint) {
if (dolock)
ASSERT(XFS_DQ_IS_LOCKED(udqhint));
else
ASSERT(! XFS_DQ_IS_LOCKED(udqhint));
}
if (! error) {
if (dolock)
ASSERT(XFS_DQ_IS_LOCKED(dqp));
else
ASSERT(! XFS_DQ_IS_LOCKED(dqp));
}
#endif
return (error);
......@@ -860,9 +852,6 @@ xfs_qm_dqattach_grouphint(
if (locked) {
ASSERT(XFS_DQ_IS_LOCKED(udq));
ASSERT(XFS_DQ_IS_LOCKED(gdq));
} else {
ASSERT(! XFS_DQ_IS_LOCKED(udq));
ASSERT(! XFS_DQ_IS_LOCKED(gdq));
}
#endif
if (! locked)
......@@ -890,15 +879,12 @@ xfs_qm_dqattach_grouphint(
*/
xfs_qm_dqrele(tmp);
ASSERT(! XFS_DQ_IS_LOCKED(udq));
ASSERT(! XFS_DQ_IS_LOCKED(gdq));
xfs_dqlock(udq);
xfs_dqlock(gdq);
} else {
ASSERT(XFS_DQ_IS_LOCKED(udq));
if (! locked) {
ASSERT(! XFS_DQ_IS_LOCKED(gdq));
xfs_dqlock(gdq);
}
}
......@@ -1006,14 +992,10 @@ xfs_qm_dqattach(
if (ip->i_udquot) {
if (flags & XFS_QMOPT_DQLOCK)
ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot));
else
ASSERT(! XFS_DQ_IS_LOCKED(ip->i_udquot));
}
if (ip->i_gdquot) {
if (flags & XFS_QMOPT_DQLOCK)
ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot));
else
ASSERT(! XFS_DQ_IS_LOCKED(ip->i_gdquot));
}
if (XFS_IS_UQUOTA_ON(mp))
ASSERT(ip->i_udquot);
......@@ -1756,7 +1738,10 @@ xfs_qm_dqusage_adjust(
xfs_trans_t *tp, /* transaction pointer - NULL */
xfs_ino_t ino, /* inode number to get data for */
void *buffer, /* not used */
int ubsize, /* not used */
void *private_data, /* not used */
xfs_daddr_t bno, /* starting block of inode cluster */
int *ubused, /* not used */
void *dip, /* on-disk inode pointer (not used) */
int *res) /* result code value */
{
......@@ -1920,7 +1905,7 @@ xfs_qm_quotacheck(
* adjusting the corresponding dquot counters in core.
*/
if ((error = xfs_bulkstat(mp, NULL, &lastino, &count,
xfs_qm_dqusage_adjust,
xfs_qm_dqusage_adjust, NULL,
structsz, NULL,
BULKSTAT_FG_IGET|BULKSTAT_FG_VFSLOCKED,
&done)))
......@@ -2091,7 +2076,7 @@ xfs_qm_shake_freelist(
xfs_dqunlock(dqp);
xfs_qm_freelist_unlock(xfs_Gqm);
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
goto out;
return (nreclaimed);
XQM_STATS_INC(xqmstats.xs_qm_dqwants);
goto tryagain;
}
......@@ -2166,7 +2151,7 @@ xfs_qm_shake_freelist(
XFS_DQ_HASH_UNLOCK(hash);
xfs_qm_freelist_unlock(xfs_Gqm);
if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
goto out;
return (nreclaimed);
goto tryagain;
}
xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
......@@ -2191,14 +2176,12 @@ xfs_qm_shake_freelist(
dqp = nextdqp;
}
xfs_qm_freelist_unlock(xfs_Gqm);
out:
return nreclaimed;
return (nreclaimed);
}
/*
* The shake manager routine called by shaked() when memory is
* running low.
* The kmem_shake interface is invoked when memory is running low.
*/
/* ARGSUSED */
STATIC int
......@@ -2206,10 +2189,10 @@ xfs_qm_shake(int nr_to_scan, unsigned int gfp_mask)
{
int ndqused, nfree, n;
if (!(gfp_mask & __GFP_WAIT))
return 0;
if (!kmem_shake_allow(gfp_mask))
return (0);
if (!xfs_Gqm)
return 0;
return (0);
nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
/* incore dquots in all f/s's */
......@@ -2218,7 +2201,7 @@ xfs_qm_shake(int nr_to_scan, unsigned int gfp_mask)
ASSERT(ndqused >= 0);
if (nfree <= ndqused && nfree < ndquot)
return 0;
return (0);
ndqused *= xfs_Gqm->qm_dqfree_ratio; /* target # of free dquots */
n = nfree - ndqused - ndquot; /* # over target */
......
......@@ -1301,7 +1301,10 @@ xfs_qm_internalqcheck_adjust(
xfs_trans_t *tp, /* transaction pointer */
xfs_ino_t ino, /* inode number to get data for */
void *buffer, /* not used */
int ubsize, /* not used */
void *private_data, /* not used */
xfs_daddr_t bno, /* starting block of inode cluster */
int *ubused, /* not used */
void *dip, /* not used */
int *res) /* bulkstat result code */
{
......@@ -1403,7 +1406,7 @@ xfs_qm_internalqcheck(
* adjusting the corresponding dquot counters
*/
if ((error = xfs_bulkstat(mp, NULL, &lastino, &count,
xfs_qm_internalqcheck_adjust,
xfs_qm_internalqcheck_adjust, NULL,
0, NULL, BULKSTAT_FG_IGET, &done))) {
break;
}
......
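The hunks above widen the bulkstat formatter callbacks (extra ubsize, private_data and ubused arguments) and pass a NULL private_data through xfs_bulkstat(). A minimal formatter matching the widened signature might look like the sketch below; the function name is an assumption, not code from this commit, and BULKSTAT_RV_NOTHING is assumed to be the usual "nothing emitted" result code from xfs_itable.h.
/* Illustrative sketch only -- not part of this commit. */
STATIC int
example_bulkstat_formatter(
	xfs_trans_t	*tp,		/* transaction pointer, may be NULL */
	xfs_ino_t	ino,		/* inode number being visited */
	void		*buffer,	/* user buffer (ignored here) */
	int		ubsize,		/* size of user buffer (ignored) */
	void		*private_data,	/* caller's cookie (ignored) */
	xfs_daddr_t	bno,		/* starting block of inode cluster */
	int		*ubused,	/* bytes of user buffer used (ignored) */
	void		*dip,		/* on-disk inode pointer (ignored) */
	int		*res)		/* result code value */
{
	*res = BULKSTAT_RV_NOTHING;	/* assumed: report that nothing was emitted */
	return 0;
}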
......@@ -835,13 +835,6 @@ xfs_trans_reserve_quota_nblks(
ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
#ifdef QUOTADEBUG
if (ip->i_udquot)
ASSERT(! XFS_DQ_IS_LOCKED(ip->i_udquot));
if (ip->i_gdquot)
ASSERT(! XFS_DQ_IS_LOCKED(ip->i_gdquot));
#endif
ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
ASSERT((type & ~XFS_QMOPT_FORCE_RES) == XFS_TRANS_DQ_RES_RTBLKS ||
......
......@@ -44,6 +44,10 @@
extern void icmn_err(int, char *, va_list);
extern void cmn_err(int, char *, ...);
#ifndef STATIC
# define STATIC static
#endif
#ifdef DEBUG
# ifdef lint
# define ASSERT(EX) ((void)0) /* avoid "constant in conditional" babble */
......
......@@ -165,4 +165,25 @@ kmem_zone_free(kmem_zone_t *zone, void *ptr)
kmem_cache_free(zone, ptr);
}
typedef struct shrinker *kmem_shaker_t;
typedef int (*kmem_shake_func_t)(int, unsigned int);
static __inline kmem_shaker_t
kmem_shake_register(kmem_shake_func_t sfunc)
{
return set_shrinker(DEFAULT_SEEKS, sfunc);
}
static __inline void
kmem_shake_deregister(kmem_shaker_t shrinker)
{
remove_shrinker(shrinker);
}
static __inline int
kmem_shake_allow(unsigned int gfp_mask)
{
return (gfp_mask & __GFP_WAIT);
}
#endif /* __XFS_SUPPORT_KMEM_H__ */
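As a usage sketch for the wrappers above (assumed names, not code from this commit), a subsystem registers its shake callback once at init time and unhooks it at teardown:
/* Illustrative sketch only -- the example_* names are assumptions. */
static kmem_shaker_t	example_shaker;

static int
example_shake(int nr_to_scan, unsigned int gfp_mask)
{
	if (!kmem_shake_allow(gfp_mask))	/* only shake for __GFP_WAIT callers */
		return 0;
	return 0;				/* stub: nothing reclaimable to report */
}

static void
example_shake_init(void)
{
	example_shaker = kmem_shake_register(example_shake);
}

static void
example_shake_uninit(void)
{
	kmem_shake_deregister(example_shaker);
}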
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -40,8 +40,6 @@
#include "debug.h"
#include "ktrace.h"
#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING))
static kmem_zone_t *ktrace_hdr_zone;
static kmem_zone_t *ktrace_ent_zone;
static int ktrace_zentries;
......@@ -121,7 +119,6 @@ ktrace_alloc(int nentries, int sleep)
ktp->kt_nentries = nentries;
ktp->kt_index = 0;
ktp->kt_rollover = 0;
return ktp;
}
......@@ -182,6 +179,7 @@ ktrace_enter(
void *val15)
{
static lock_t wrap_lock = SPIN_LOCK_UNLOCKED;
unsigned long flags;
int index;
ktrace_entry_t *ktep;
......@@ -190,11 +188,11 @@ ktrace_enter(
/*
* Grab an entry by pushing the index up to the next one.
*/
spin_lock(&wrap_lock);
spin_lock_irqsave(&wrap_lock, flags);
index = ktp->kt_index;
if (++ktp->kt_index == ktp->kt_nentries)
ktp->kt_index = 0;
spin_unlock(&wrap_lock);
spin_unlock_irqrestore(&wrap_lock, flags);
if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
ktp->kt_rollover = 1;
......@@ -235,7 +233,6 @@ ktrace_nentries(
return (ktp->kt_rollover ? ktp->kt_nentries : ktp->kt_index);
}
/*
* ktrace_first()
*
......@@ -276,7 +273,7 @@ ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
}
return ktep;
}
EXPORT_SYMBOL(ktrace_first);
/*
* ktrace_next()
......@@ -311,11 +308,7 @@ ktrace_next(
return ktep;
}
#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING))
EXPORT_SYMBOL(ktrace_first);
EXPORT_SYMBOL(ktrace_next);
#endif
/*
* ktrace_skip()
......@@ -323,7 +316,6 @@ EXPORT_SYMBOL(ktrace_next);
* Skip the next "count" entries and return the entry after that.
* Return NULL if this causes us to iterate past the beginning again.
*/
ktrace_entry_t *
ktrace_skip(
ktrace_t *ktp,
......@@ -362,18 +354,3 @@ ktrace_skip(
}
return ktep;
}
#else
ktrace_t *
ktrace_alloc(int nentries, int sleep)
{
/*
* KM_SLEEP callers don't expect failure.
*/
if (sleep & KM_SLEEP)
panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
return NULL;
}
#endif
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -32,6 +32,7 @@
#ifndef __XFS_SUPPORT_KTRACE_H__
#define __XFS_SUPPORT_KTRACE_H__
#include <support/spin.h>
/*
* Trace buffer entry structure.
......@@ -59,16 +60,13 @@ typedef struct ktrace_snap {
int ks_index; /* current index */
} ktrace_snap_t;
/*
* Exported interfaces.
*/
extern ktrace_t *ktrace_alloc(int, int);
#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING))
#ifdef CONFIG_XFS_TRACE
extern void ktrace_init(int zentries);
extern void ktrace_uninit(void);
extern ktrace_t *ktrace_alloc(int, int);
extern void ktrace_free(ktrace_t *);
extern void ktrace_enter(
......@@ -96,10 +94,8 @@ extern ktrace_entry_t *ktrace_next(ktrace_t *, ktrace_snap_t *);
extern ktrace_entry_t *ktrace_skip(ktrace_t *, int, ktrace_snap_t *);
#else
#define ktrace_init(x) do { } while (0)
#define ktrace_uninit() do { } while (0)
#endif /* CONFIG_XFS_TRACE */
#define ktrace_free(ktp)
#define ktrace_enter(ktp,v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15)
#endif
#endif /* __XFS_SUPPORT_KTRACE_H__ */
#endif /* __XFS_SUPPORT_KTRACE_H__ */
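A minimal usage sketch for the ktrace interface above (assumed names, not code from this commit): allocate a ring buffer, record sixteen pointer-sized values per event, and free the buffer when tracing is torn down.
/* Illustrative sketch only -- the example_* names are assumptions. */
static ktrace_t	*example_trace_buf;

static void
example_trace_setup(void)
{
	example_trace_buf = ktrace_alloc(64, KM_SLEEP);
}

static void
example_trace_event(int op, void *obj)
{
	ktrace_enter(example_trace_buf,
		     (void *)(unsigned long)op, obj,
		     NULL, NULL, NULL, NULL, NULL, NULL, NULL,
		     NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}

static void
example_trace_teardown(void)
{
	ktrace_free(example_trace_buf);
}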
/*
* Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2001-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -102,6 +102,8 @@ extern struct kmem_zone *xfs_acl_zone;
#define xfs_acl_vset(v,p,sz,t) (-EOPNOTSUPP)
#define xfs_acl_vget(v,p,sz,t) (-EOPNOTSUPP)
#define xfs_acl_vremove(v,t) (-EOPNOTSUPP)
#define xfs_acl_vhasacl_access(v) (0)
#define xfs_acl_vhasacl_default(v) (0)
#define _ACL_DECL(a) ((void)0)
#define _ACL_ALLOC(a) (1) /* successfully allocate nothing */
#define _ACL_FREE(a) ((void)0)
......
......@@ -54,13 +54,6 @@
#include "xfs_error.h"
#if defined(DEBUG)
/*
* Allocation tracing.
*/
ktrace_t *xfs_alloc_trace_buf;
#endif
#define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
#define XFSA_FIXUP_BNO_OK 1
......@@ -73,6 +66,8 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_extlen_t len);
#if defined(XFS_ALLOC_TRACE)
ktrace_t *xfs_alloc_trace_buf;
#define TRACE_ALLOC(s,a) \
xfs_alloc_trace_alloc(fname, s, a, __LINE__)
#define TRACE_FREE(s,a,b,x,f) \
......@@ -85,8 +80,6 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_alloc_trace_busy(fname, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
#define TRACE_BUSYSEARCH(fname,s,ag,agb,l,sl,tp) \
xfs_alloc_trace_busy(fname, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
#else
#define TRACE_ALLOC(s,a)
#define TRACE_FREE(s,a,b,x,f)
......
......@@ -95,6 +95,13 @@ typedef struct xfs_alloc_arg {
#ifdef __KERNEL__
#if defined(XFS_ALLOC_TRACE)
/*
* Allocation tracing buffer size.
*/
#define XFS_ALLOC_TRACE_SIZE 4096
extern ktrace_t *xfs_alloc_trace_buf;
/*
* Types for alloc tracing.
*/
......@@ -104,25 +111,8 @@ typedef struct xfs_alloc_arg {
#define XFS_ALLOC_KTRACE_BUSY 4
#define XFS_ALLOC_KTRACE_UNBUSY 5
#define XFS_ALLOC_KTRACE_BUSYSEARCH 6
/*
* Allocation tracing buffer size.
*/
#define XFS_ALLOC_TRACE_SIZE 4096
#ifdef XFS_ALL_TRACE
#define XFS_ALLOC_TRACE
#endif
#if !defined(DEBUG)
#undef XFS_ALLOC_TRACE
#endif
/*
* Prototypes for visible xfs_alloc.c routines
*/
/*
* Compute and fill in value of m_ag_maxlevels.
*/
......
......@@ -53,16 +53,18 @@
#define ARCH_NOCONVERT 1
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define ARCH_CONVERT 0
# define ARCH_CONVERT 0
#else
#define ARCH_CONVERT ARCH_NOCONVERT
# define ARCH_CONVERT ARCH_NOCONVERT
#endif
/* generic swapping macros */
#ifndef HAVE_SWABMACROS
#define INT_SWAP16(type,var) ((typeof(type))(__swab16((__u16)(var))))
#define INT_SWAP32(type,var) ((typeof(type))(__swab32((__u32)(var))))
#define INT_SWAP64(type,var) ((typeof(type))(__swab64((__u64)(var))))
#endif
#define INT_SWAP(type, var) \
((sizeof(type) == 8) ? INT_SWAP64(type,var) : \
......
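To make the byte-order selection concrete (an illustration, not part of this commit, assuming the elided branches of INT_SWAP pick INT_SWAP32/INT_SWAP16 for 4- and 2-byte fields): on a little-endian host ARCH_CONVERT is 0, so a 32-bit on-disk value such as 0x01020304 is byte-reversed via __swab32 to 0x04030201, while on big-endian hosts ARCH_CONVERT == ARCH_NOCONVERT and the conversion is a no-op.
/* Illustrative sketch only -- example_swap32 is an assumed name. */
static __inline __u32
example_swap32(__u32 ondisk)
{
	/* sizeof(__u32) == 4, so INT_SWAP expands to __swab32() here */
	return INT_SWAP(__u32, ondisk);
}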
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -63,6 +63,7 @@
#include "xfs_quota.h"
#include "xfs_rw.h"
#include "xfs_trans_space.h"
#include "xfs_acl.h"
/*
* xfs_attr.c
......@@ -2234,7 +2235,8 @@ xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context)
(__psunsigned_t)context->count,
(__psunsigned_t)context->firstu,
(__psunsigned_t)
(context->count > 0)
((context->count > 0) &&
!(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
? (ATTR_ENTRY(context->alist,
context->count-1)->a_valuelen)
: 0,
......@@ -2262,7 +2264,8 @@ xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
(__psunsigned_t)context->count,
(__psunsigned_t)context->firstu,
(__psunsigned_t)
(context->count > 0)
((context->count > 0) &&
!(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
? (ATTR_ENTRY(context->alist,
context->count-1)->a_valuelen)
: 0,
......@@ -2290,7 +2293,8 @@ xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
(__psunsigned_t)context->count,
(__psunsigned_t)context->firstu,
(__psunsigned_t)
(context->count > 0)
((context->count > 0) &&
!(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
? (ATTR_ENTRY(context->alist,
context->count-1)->a_valuelen)
: 0,
......@@ -2318,7 +2322,8 @@ xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
(__psunsigned_t)context->count,
(__psunsigned_t)context->firstu,
(__psunsigned_t)
(context->count > 0)
((context->count > 0) &&
!(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
? (ATTR_ENTRY(context->alist,
context->count-1)->a_valuelen)
: 0,
......@@ -2353,3 +2358,307 @@ xfs_attr_trace_enter(int type, char *where,
(void *)a14, (void *)a15);
}
#endif /* XFS_ATTR_TRACE */
/*========================================================================
* System (pseudo) namespace attribute interface routines.
*========================================================================*/
STATIC int
posix_acl_access_set(
vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
return xfs_acl_vset(vp, data, size, _ACL_TYPE_ACCESS);
}
STATIC int
posix_acl_access_remove(
struct vnode *vp, char *name, int xflags)
{
return xfs_acl_vremove(vp, _ACL_TYPE_ACCESS);
}
STATIC int
posix_acl_access_get(
vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
return xfs_acl_vget(vp, data, size, _ACL_TYPE_ACCESS);
}
STATIC int
posix_acl_access_exists(
vnode_t *vp)
{
return xfs_acl_vhasacl_access(vp);
}
STATIC int
posix_acl_default_set(
vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
return xfs_acl_vset(vp, data, size, _ACL_TYPE_DEFAULT);
}
STATIC int
posix_acl_default_get(
vnode_t *vp, char *name, void *data, size_t size, int xflags)
{
return xfs_acl_vget(vp, data, size, _ACL_TYPE_DEFAULT);
}
STATIC int
posix_acl_default_remove(
struct vnode *vp, char *name, int xflags)
{
return xfs_acl_vremove(vp, _ACL_TYPE_DEFAULT);
}
STATIC int
posix_acl_default_exists(
vnode_t *vp)
{
return xfs_acl_vhasacl_default(vp);
}
struct attrnames posix_acl_access = {
.attr_name = "posix_acl_access",
.attr_namelen = sizeof("posix_acl_access") - 1,
.attr_get = posix_acl_access_get,
.attr_set = posix_acl_access_set,
.attr_remove = posix_acl_access_remove,
.attr_exists = posix_acl_access_exists,
};
struct attrnames posix_acl_default = {
.attr_name = "posix_acl_default",
.attr_namelen = sizeof("posix_acl_default") - 1,
.attr_get = posix_acl_default_get,
.attr_set = posix_acl_default_set,
.attr_remove = posix_acl_default_remove,
.attr_exists = posix_acl_default_exists,
};
struct attrnames *attr_system_names[] =
{ &posix_acl_access, &posix_acl_default };
/*========================================================================
* Namespace-prefix-style attribute name interface routines.
*========================================================================*/
STATIC int
attr_generic_set(
struct vnode *vp, char *name, void *data, size_t size, int xflags)
{
int error;
VOP_ATTR_SET(vp, name, data, size, xflags, NULL, error);
return -error;
}
STATIC int
attr_generic_get(
struct vnode *vp, char *name, void *data, size_t size, int xflags)
{
int error, asize = size;
VOP_ATTR_GET(vp, name, data, &asize, xflags, NULL, error);
if (!error)
return asize;
return -error;
}
STATIC int
attr_generic_remove(
struct vnode *vp, char *name, int xflags)
{
int error;
VOP_ATTR_REMOVE(vp, name, xflags, NULL, error);
return -error;
}
STATIC int
attr_generic_listadd(
attrnames_t *prefix,
attrnames_t *namesp,
void *data,
size_t size,
ssize_t *result)
{
char *p = data + *result;
*result += prefix->attr_namelen;
*result += namesp->attr_namelen + 1;
if (!size)
return 0;
if (*result > size)
return -ERANGE;
strcpy(p, prefix->attr_name);
p += prefix->attr_namelen;
strcpy(p, namesp->attr_name);
p += namesp->attr_namelen + 1;
return 0;
}
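As a worked example of the accounting above (not part of this commit): listing the system-namespace access ACL grows *result by attr_system.attr_namelen + posix_acl_access.attr_namelen + 1 = 7 + 16 + 1 = 24 bytes, i.e. "system.posix_acl_access" plus its trailing NUL; when the caller passes size == 0 only that length is accumulated, and -ERANGE is returned only once a non-zero buffer turns out to be too small.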
STATIC int
attr_system_list(
struct vnode *vp,
void *data,
size_t size,
ssize_t *result)
{
attrnames_t *namesp;
int i, error = 0;
for (i = 0; i < ATTR_SYSCOUNT; i++) {
namesp = attr_system_names[i];
if (!namesp->attr_exists || !namesp->attr_exists(vp))
continue;
error = attr_generic_listadd(&attr_system, namesp,
data, size, result);
if (error)
break;
}
return error;
}
int
attr_generic_list(
struct vnode *vp, void *data, size_t size, int xflags, ssize_t *result)
{
attrlist_cursor_kern_t cursor = { 0 };
int error;
VOP_ATTR_LIST(vp, data, size, xflags, &cursor, NULL, error);
if (error > 0)
return -error;
*result = -error;
return attr_system_list(vp, data, size, result);
}
attrnames_t *
attr_lookup_namespace(
char *name,
struct attrnames **names,
int nnames)
{
int i;
for (i = 0; i < nnames; i++)
if (!strncmp(name, names[i]->attr_name, names[i]->attr_namelen))
return names[i];
return NULL;
}
/*
* Some checks to prevent people abusing EAs to get over quota:
* - Don't allow modifying user EAs on devices/symlinks;
* - Don't allow modifying user EAs if sticky bit set;
*/
STATIC int
attr_user_capable(
struct vnode *vp,
cred_t *cred)
{
struct inode *inode = LINVFS_GET_IP(vp);
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
(current_fsuid(cred) != inode->i_uid) && !capable(CAP_FOWNER))
return -EPERM;
return 0;
}
STATIC int
attr_trusted_capable(
struct vnode *vp,
cred_t *cred)
{
struct inode *inode = LINVFS_GET_IP(vp);
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
}
STATIC int
attr_system_set(
struct vnode *vp, char *name, void *data, size_t size, int xflags)
{
attrnames_t *namesp;
int error;
if (xflags & ATTR_CREATE)
return -EINVAL;
namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT);
if (!namesp)
return -EOPNOTSUPP;
error = namesp->attr_set(vp, name, data, size, xflags);
if (!error)
error = vn_revalidate(vp);
return error;
}
STATIC int
attr_system_get(
struct vnode *vp, char *name, void *data, size_t size, int xflags)
{
attrnames_t *namesp;
namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT);
if (!namesp)
return -EOPNOTSUPP;
return namesp->attr_get(vp, name, data, size, xflags);
}
STATIC int
attr_system_remove(
struct vnode *vp, char *name, int xflags)
{
attrnames_t *namesp;
namesp = attr_lookup_namespace(name, attr_system_names, ATTR_SYSCOUNT);
if (!namesp)
return -EOPNOTSUPP;
return namesp->attr_remove(vp, name, xflags);
}
struct attrnames attr_system = {
.attr_name = "system.",
.attr_namelen = sizeof("system.") - 1,
.attr_flag = ATTR_SYSTEM,
.attr_get = attr_system_get,
.attr_set = attr_system_set,
.attr_remove = attr_system_remove,
.attr_capable = (attrcapable_t)fs_noerr,
};
struct attrnames attr_trusted = {
.attr_name = "trusted.",
.attr_namelen = sizeof("trusted.") - 1,
.attr_flag = ATTR_ROOT,
.attr_get = attr_generic_get,
.attr_set = attr_generic_set,
.attr_remove = attr_generic_remove,
.attr_capable = attr_trusted_capable,
};
struct attrnames attr_user = {
.attr_name = "user.",
.attr_namelen = sizeof("user.") - 1,
.attr_get = attr_generic_get,
.attr_set = attr_generic_set,
.attr_remove = attr_generic_remove,
.attr_capable = attr_user_capable,
};
struct attrnames *attr_namespaces[] =
{ &attr_system, &attr_trusted, &attr_user };
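A sketch of how a caller can consult this registry (illustrative only, not code from this commit; prefix stripping and credential checks are elided):
/* Illustrative sketch only -- example_getxattr is an assumed name. */
STATIC int
example_getxattr(struct vnode *vp, char *name, void *data, size_t size)
{
	attrnames_t	*namesp;

	namesp = attr_lookup_namespace(name, attr_namespaces, ATTR_NAMECOUNT);
	if (!namesp)
		return -EOPNOTSUPP;
	/* each namespace supplies its own get/set/remove/capable operations */
	return namesp->attr_get(vp, name, data, size, namesp->attr_flag);
}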
/*
* Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000, 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -45,22 +45,50 @@
* as possible so as to fit into the literal area of the inode.
*/
#ifdef XFS_ALL_TRACE
#define XFS_ATTR_TRACE
#endif
#if !defined(DEBUG)
#undef XFS_ATTR_TRACE
#endif
/*========================================================================
* External interfaces
*========================================================================*/
#define ATTR_ROOT 0x0002 /* use attrs in root namespace, not user */
struct cred;
struct vnode;
typedef int (*attrset_t)(struct vnode *, char *, void *, size_t, int);
typedef int (*attrget_t)(struct vnode *, char *, void *, size_t, int);
typedef int (*attrremove_t)(struct vnode *, char *, int);
typedef int (*attrexists_t)(struct vnode *);
typedef int (*attrcapable_t)(struct vnode *, struct cred *);
typedef struct attrnames {
char * attr_name;
unsigned int attr_namelen;
unsigned int attr_flag;
attrget_t attr_get;
attrset_t attr_set;
attrremove_t attr_remove;
attrexists_t attr_exists;
attrcapable_t attr_capable;
} attrnames_t;
#define ATTR_NAMECOUNT 3
extern struct attrnames attr_user;
extern struct attrnames attr_system;
extern struct attrnames attr_trusted;
extern struct attrnames *attr_namespaces[ATTR_NAMECOUNT];
#define ATTR_SYSCOUNT 2
extern struct attrnames posix_acl_access;
extern struct attrnames posix_acl_default;
extern struct attrnames *attr_system_names[ATTR_SYSCOUNT];
extern attrnames_t *attr_lookup_namespace(char *, attrnames_t **, int);
extern int attr_generic_list(struct vnode *, void *, size_t, int, ssize_t *);
#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */
#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
#define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */
#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */
#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */
#define ATTR_SYSTEM 0x0100 /* use attrs in system (pseudo) namespace */
#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
#define ATTR_KERNAMELS 0x4000 /* [kernel] list attr names (simple list) */
......@@ -135,11 +163,8 @@ typedef struct attrlist_cursor_kern {
* Function prototypes for the kernel.
*========================================================================*/
struct cred;
struct vnode;
struct xfs_inode;
struct attrlist_cursor_kern;
struct xfs_ext_attr;
struct xfs_da_args;
/*
......
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -444,8 +444,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
< context->bufsize) {
for (i = 0, sfe = &sf->list[0];
i < INT_GET(sf->hdr.count, ARCH_CONVERT); i++) {
int ns = (sfe->flags & XFS_ATTR_ROOT)?
ROOT_NAMES : USER_NAMES;
attrnames_t *namesp;
namesp = (sfe->flags & XFS_ATTR_ROOT) ? &attr_trusted :
&attr_user;
if (((context->flags & ATTR_ROOT) != 0) !=
((sfe->flags & XFS_ATTR_ROOT) != 0) &&
!(context->flags & ATTR_KERNFULLS)) {
......@@ -454,11 +456,11 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
}
if (context->flags & ATTR_KERNOVAL) {
ASSERT(context->flags & ATTR_KERNAMELS);
context->count += xfs_namespaces[ns].namelen +
context->count += namesp->attr_namelen +
INT_GET(sfe->namelen, ARCH_CONVERT) + 1;
}
else {
if (xfs_attr_put_listent(context, ns,
if (xfs_attr_put_listent(context, namesp,
(char *)sfe->nameval,
(int)sfe->namelen,
(int)INT_GET(sfe->valuelen,
......@@ -544,18 +546,22 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
* Loop putting entries into the user buffer.
*/
for ( ; i < nsbuf; i++, sbp++) {
int ns = (sbp->flags & XFS_ATTR_ROOT)? ROOT_NAMES:USER_NAMES;
attrnames_t *namesp;
namesp = (sfe->flags & XFS_ATTR_ROOT) ? &attr_trusted :
&attr_user;
if (cursor->hashval != INT_GET(sbp->hash, ARCH_CONVERT)) {
cursor->hashval = INT_GET(sbp->hash, ARCH_CONVERT);
cursor->offset = 0;
}
if (context->flags & ATTR_KERNOVAL) {
ASSERT(context->flags & ATTR_KERNAMELS);
context->count += xfs_namespaces[ns].namelen
+ sbp->namelen + 1;
context->count += namesp->attr_namelen +
sbp->namelen + 1;
}
else {
if (xfs_attr_put_listent(context, ns,
if (xfs_attr_put_listent(context, namesp,
sbp->name, sbp->namelen,
INT_GET(sbp->valuelen, ARCH_CONVERT)))
break;
......@@ -2270,7 +2276,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
retval = 0;
for ( ; (i < INT_GET(leaf->hdr.count, ARCH_CONVERT))
&& (retval == 0); entry++, i++) {
int ns = (entry->flags & XFS_ATTR_ROOT)? ROOT_NAMES:USER_NAMES;
attrnames_t *namesp;
if (INT_GET(entry->hashval, ARCH_CONVERT) != cursor->hashval) {
cursor->hashval = INT_GET(entry->hashval, ARCH_CONVERT);
......@@ -2284,14 +2290,17 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
!(context->flags & ATTR_KERNFULLS))
continue; /* skip non-matching entries */
namesp = (entry->flags & XFS_ATTR_ROOT) ? &attr_trusted :
&attr_user;
if (entry->flags & XFS_ATTR_LOCAL) {
name_loc = XFS_ATTR_LEAF_NAME_LOCAL(leaf, i);
if (context->flags & ATTR_KERNOVAL) {
ASSERT(context->flags & ATTR_KERNAMELS);
context->count += xfs_namespaces[ns].namelen
+ (int)name_loc->namelen + 1;
context->count += namesp->attr_namelen +
(int)name_loc->namelen + 1;
} else {
retval = xfs_attr_put_listent(context, ns,
retval = xfs_attr_put_listent(context, namesp,
(char *)name_loc->nameval,
(int)name_loc->namelen,
(int)INT_GET(name_loc->valuelen,
......@@ -2301,10 +2310,10 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
name_rmt = XFS_ATTR_LEAF_NAME_REMOTE(leaf, i);
if (context->flags & ATTR_KERNOVAL) {
ASSERT(context->flags & ATTR_KERNAMELS);
context->count += xfs_namespaces[ns].namelen
+ (int)name_rmt->namelen + 1;
context->count += namesp->attr_namelen +
(int)name_rmt->namelen + 1;
} else {
retval = xfs_attr_put_listent(context, ns,
retval = xfs_attr_put_listent(context, namesp,
(char *)name_rmt->name,
(int)name_rmt->namelen,
(int)INT_GET(name_rmt->valuelen,
......@@ -2333,7 +2342,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
/*ARGSUSED*/
int
xfs_attr_put_listent(xfs_attr_list_context_t *context,
int ns, char *name, int namelen, int valuelen)
attrnames_t *namesp, char *name, int namelen, int valuelen)
{
attrlist_ent_t *aep;
int arraytop;
......@@ -2341,23 +2350,21 @@ xfs_attr_put_listent(xfs_attr_list_context_t *context,
ASSERT(!(context->flags & ATTR_KERNOVAL));
if (context->flags & ATTR_KERNAMELS) {
char *offset;
xattr_namespace_t *nsp;
ASSERT(context->count >= 0);
nsp = &xfs_namespaces[ns];
arraytop = context->count + nsp->namelen + namelen+1;
arraytop = context->count + namesp->attr_namelen + namelen + 1;
if (arraytop > context->firstu) {
context->count = -1; /* insufficient space */
return(1);
}
offset = (char *)context->alist + context->count;
strncpy(offset, nsp->name, nsp->namelen); /* namespace */
offset += nsp->namelen;
strncpy(offset, namesp->attr_name, namesp->attr_namelen);
offset += namesp->attr_namelen;
strncpy(offset, name, namelen); /* real name */
offset += namelen;
*offset = '\0';
context->count += nsp->namelen + namelen + 1;
context->count += namesp->attr_namelen + namelen + 1;
return(0);
}
......
/*
* Copyright (c) 2000, 2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000, 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -44,6 +44,7 @@
struct attrlist;
struct attrlist_cursor_kern;
struct attrnames;
struct xfs_dabuf;
struct xfs_da_args;
struct xfs_da_state;
......@@ -128,7 +129,7 @@ typedef struct xfs_attr_leaf_name_remote xfs_attr_leaf_name_remote_t;
* on the system call, they are "or"ed together for various operations.
*/
#define XFS_ATTR_LOCAL_BIT 0 /* attr is stored locally */
#define XFS_ATTR_ROOT_BIT 1 /* limit access to attr to userid 0 */
#define XFS_ATTR_ROOT_BIT 1 /* limit access to trusted attrs */
#define XFS_ATTR_INCOMPLETE_BIT 7 /* attr in middle of create/delete */
#define XFS_ATTR_LOCAL (1 << XFS_ATTR_LOCAL_BIT)
#define XFS_ATTR_ROOT (1 << XFS_ATTR_ROOT_BIT)
......@@ -299,7 +300,8 @@ int xfs_attr_leaf_newentsize(struct xfs_da_args *args, int blocksize,
int *local);
int xfs_attr_leaf_entsize(struct xfs_attr_leafblock *leaf, int index);
int xfs_attr_put_listent(struct xfs_attr_list_context *context,
int ns, char *name, int namelen, int valuelen);
struct attrnames *, char *name, int namelen,
int valuelen);
int xfs_attr_rolltrans(struct xfs_trans **transp, struct xfs_inode *dp);
#endif /* __XFS_ATTR_LEAF_H__ */
......@@ -104,14 +104,7 @@ int xfs_attr_sf_totsize(struct xfs_inode *dp);
(INT_GET(((xfs_attr_shortform_t *)((dp)->i_afp->if_u1.if_data))->hdr.totsize, ARCH_CONVERT))
#endif
#ifdef XFS_ALL_TRACE
#define XFS_ATTR_TRACE
#endif
#if !defined(DEBUG)
#undef XFS_ATTR_TRACE
#endif
#if defined(XFS_ATTR_TRACE)
/*
* Kernel tracing support for attribute lists
*/
......@@ -121,6 +114,7 @@ struct xfs_da_node_entry;
struct xfs_attr_leafblock;
#define XFS_ATTR_TRACE_SIZE 4096 /* size of global trace buffer */
extern ktrace_t *xfs_attr_trace_buf;
/*
* Trace record types.
......@@ -130,8 +124,6 @@ struct xfs_attr_leafblock;
#define XFS_ATTR_KTRACE_L_CB 3 /* context, btree */
#define XFS_ATTR_KTRACE_L_CL 4 /* context, leaf */
#if defined(XFS_ATTR_TRACE)
void xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context);
void xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
struct xfs_da_intnode *node);
......
......@@ -68,9 +68,6 @@
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#ifdef DEBUG
ktrace_t *xfs_bmap_trace_buf;
#endif
#ifdef XFSDEBUG
STATIC void
......@@ -404,7 +401,7 @@ xfs_bmap_validate_ret(
#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */
#if defined(DEBUG) && defined(XFS_RW_TRACE)
#if defined(XFS_RW_TRACE)
STATIC void
xfs_bunmap_trace(
xfs_inode_t *ip,
......@@ -414,7 +411,7 @@ xfs_bunmap_trace(
inst_t *ra);
#else
#define xfs_bunmap_trace(ip, bno, len, flags, ra)
#endif /* DEBUG && XFS_RW_TRACE */
#endif /* XFS_RW_TRACE */
STATIC int
xfs_bmap_count_tree(
......@@ -3543,6 +3540,8 @@ xfs_bmap_search_extents(
#ifdef XFS_BMAP_TRACE
ktrace_t *xfs_bmap_trace_buf;
/*
* Add a bmap trace buffer entry. Base routine for the others.
*/
......@@ -3575,14 +3574,14 @@ xfs_bmap_trace_addentry(
(void *)(__psint_t)cnt,
(void *)(__psunsigned_t)(ip->i_ino >> 32),
(void *)(__psunsigned_t)(unsigned)ip->i_ino,
(void *)(__psunsigned_t)(INT_GET(r1->l0, ARCH_CONVERT) >> 32),
(void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l0, ARCH_CONVERT)),
(void *)(__psunsigned_t)(INT_GET(r1->l1, ARCH_CONVERT) >> 32),
(void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l1, ARCH_CONVERT)),
(void *)(__psunsigned_t)(INT_GET(r2->l0, ARCH_CONVERT) >> 32),
(void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l0, ARCH_CONVERT)),
(void *)(__psunsigned_t)(INT_GET(r2->l1, ARCH_CONVERT) >> 32),
(void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l1, ARCH_CONVERT))
(void *)(__psunsigned_t)(r1->l0 >> 32),
(void *)(__psunsigned_t)(unsigned)(r1->l0),
(void *)(__psunsigned_t)(r1->l1 >> 32),
(void *)(__psunsigned_t)(unsigned)(r1->l1),
(void *)(__psunsigned_t)(r2->l0 >> 32),
(void *)(__psunsigned_t)(unsigned)(r2->l0),
(void *)(__psunsigned_t)(r2->l1 >> 32),
(void *)(__psunsigned_t)(unsigned)(r2->l1)
);
ASSERT(ip->i_xtrace);
ktrace_enter(ip->i_xtrace,
......@@ -3592,14 +3591,14 @@ xfs_bmap_trace_addentry(
(void *)(__psint_t)cnt,
(void *)(__psunsigned_t)(ip->i_ino >> 32),
(void *)(__psunsigned_t)(unsigned)ip->i_ino,
(void *)(__psunsigned_t)(INT_GET(r1->l0, ARCH_CONVERT) >> 32),
(void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l0, ARCH_CONVERT)),
(void *)(__psunsigned_t)(INT_GET(r1->l1, ARCH_CONVERT) >> 32),
(void *)(__psunsigned_t)(unsigned)(INT_GET(r1->l1, ARCH_CONVERT)),
(void *)(__psunsigned_t)(INT_GET(r2->l0, ARCH_CONVERT) >> 32),
(void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l0, ARCH_CONVERT)),
(void *)(__psunsigned_t)(INT_GET(r2->l1, ARCH_CONVERT) >> 32),
(void *)(__psunsigned_t)(unsigned)(INT_GET(r2->l1, ARCH_CONVERT))
(void *)(__psunsigned_t)(r1->l0 >> 32),
(void *)(__psunsigned_t)(unsigned)(r1->l0),
(void *)(__psunsigned_t)(r1->l1 >> 32),
(void *)(__psunsigned_t)(unsigned)(r1->l1),
(void *)(__psunsigned_t)(r2->l0 >> 32),
(void *)(__psunsigned_t)(unsigned)(r2->l0),
(void *)(__psunsigned_t)(r2->l1 >> 32),
(void *)(__psunsigned_t)(unsigned)(r2->l1)
);
}
......@@ -3722,7 +3721,7 @@ xfs_bmap_worst_indlen(
return rval;
}
#if defined(DEBUG) && defined(XFS_RW_TRACE)
#if defined(XFS_RW_TRACE)
STATIC void
xfs_bunmap_trace(
xfs_inode_t *ip,
......@@ -3742,7 +3741,7 @@ xfs_bunmap_trace(
(void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff),
(void *)(__psint_t)len,
(void *)(__psint_t)flags,
(void *)(__psint_t)private.p_cpuid,
(void *)(unsigned long)current_cpu(),
(void *)ra,
(void *)0,
(void *)0,
......
......@@ -90,26 +90,6 @@ int xfs_bmapi_aflag(int w);
#define DELAYSTARTBLOCK ((xfs_fsblock_t)-1LL)
#define HOLESTARTBLOCK ((xfs_fsblock_t)-2LL)
/*
* Trace operations for bmap extent tracing
*/
#define XFS_BMAP_KTRACE_DELETE 1
#define XFS_BMAP_KTRACE_INSERT 2
#define XFS_BMAP_KTRACE_PRE_UP 3
#define XFS_BMAP_KTRACE_POST_UP 4
#define XFS_BMAP_TRACE_SIZE 4096 /* size of global trace buffer */
#define XFS_BMAP_KTRACE_SIZE 32 /* size of per-inode trace buffer */
#if defined(XFS_ALL_TRACE)
#define XFS_BMAP_TRACE
#endif
#if !defined(DEBUG)
#undef XFS_BMAP_TRACE
#endif
#if XFS_WANT_FUNCS || (XFS_WANT_SPACE && XFSSO_XFS_BMAP_INIT)
void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp);
#define XFS_BMAP_INIT(flp,fbp) xfs_bmap_init(flp,fbp)
......@@ -142,6 +122,33 @@ typedef struct xfs_bmalloca {
} xfs_bmalloca_t;
#ifdef __KERNEL__
#if defined(XFS_BMAP_TRACE)
/*
* Trace operations for bmap extent tracing
*/
#define XFS_BMAP_KTRACE_DELETE 1
#define XFS_BMAP_KTRACE_INSERT 2
#define XFS_BMAP_KTRACE_PRE_UP 3
#define XFS_BMAP_KTRACE_POST_UP 4
#define XFS_BMAP_TRACE_SIZE 4096 /* size of global trace buffer */
#define XFS_BMAP_KTRACE_SIZE 32 /* size of per-inode trace buffer */
extern ktrace_t *xfs_bmap_trace_buf;
/*
* Add bmap trace insert entries for all the contents of the extent list.
*/
void
xfs_bmap_trace_exlist(
char *fname, /* function name */
struct xfs_inode *ip, /* incore inode pointer */
xfs_extnum_t cnt, /* count of entries in list */
int whichfork); /* data or attr fork */
#else
#define xfs_bmap_trace_exlist(f,ip,c,w)
#endif
/*
* Convert inode from non-attributed to attributed.
* Must not be in a transaction, ip must not be locked.
......@@ -260,20 +267,6 @@ xfs_bmap_read_extents(
struct xfs_inode *ip, /* incore inode */
int whichfork); /* data or attr fork */
#if defined(XFS_BMAP_TRACE)
/*
* Add bmap trace insert entries for all the contents of the extent list.
*/
void
xfs_bmap_trace_exlist(
char *fname, /* function name */
struct xfs_inode *ip, /* incore inode pointer */
xfs_extnum_t cnt, /* count of entries in list */
int whichfork); /* data or attr fork */
#else
#define xfs_bmap_trace_exlist(f,ip,c,w)
#endif
/*
* Map file blocks to filesystem blocks.
* File range is given by the bno/len pair.
......@@ -375,7 +368,7 @@ xfs_bmap_eof(
int
xfs_bmap_count_blocks(
xfs_trans_t *tp,
xfs_inode_t *ip,
struct xfs_inode *ip,
int whichfork,
int *count);
......
......@@ -81,6 +81,13 @@ STATIC int xfs_bmbt_updkey(xfs_btree_cur_t *, xfs_bmbt_key_t *, int);
#if defined(XFS_BMBT_TRACE)
static char ARGS[] = "args";
static char ENTRY[] = "entry";
static char ERROR[] = "error";
#undef EXIT
static char EXIT[] = "exit";
/*
* Add a trace buffer entry for the arguments given to the routine,
* generic form.
......@@ -305,11 +312,6 @@ xfs_bmbt_trace_cursor(
xfs_bmbt_trace_argik(fname, c, i, k, __LINE__)
#define XFS_BMBT_TRACE_CURSOR(c,s) \
xfs_bmbt_trace_cursor(fname, c, s, __LINE__)
static char ARGS[] = "args";
static char ENTRY[] = "entry";
static char ERROR[] = "error";
#undef EXIT
static char EXIT[] = "exit";
#else
#define XFS_BMBT_TRACE_ARGBI(c,b,i)
#define XFS_BMBT_TRACE_ARGBII(c,b,i,j)
......
......@@ -435,6 +435,10 @@ int xfs_bmap_sanity_check(struct xfs_mount *mp, xfs_bmbt_block_t *bb,
INT_GET((bb)->bb_numrecs, ARCH_CONVERT) <= (mp)->m_bmap_dmxr[(level) != 0])
#endif
#ifdef __KERNEL__
#if defined(XFS_BMBT_TRACE)
/*
* Trace buffer entry types.
*/
......@@ -449,18 +453,9 @@ int xfs_bmap_sanity_check(struct xfs_mount *mp, xfs_bmbt_block_t *bb,
#define XFS_BMBT_TRACE_SIZE 4096 /* size of global trace buffer */
#define XFS_BMBT_KTRACE_SIZE 32 /* size of per-inode trace buffer */
#if defined(XFS_ALL_TRACE)
#define XFS_BMBT_TRACE
extern ktrace_t *xfs_bmbt_trace_buf;
#endif
#if !defined(DEBUG)
#undef XFS_BMBT_TRACE
#endif
#ifdef __KERNEL__
/*
* Prototypes for xfs_bmap.c to call.
*/
......
......@@ -181,7 +181,7 @@ extern inline xfs_caddr_t xfs_buf_offset(page_buf_t *bp, size_t offset)
#define XFS_BUF_SET_VTYPE(bp, type)
#define XFS_BUF_SET_REF(bp, ref)
#define XFS_BUF_ISPINNED(bp) pagebuf_ispin(bp)
#define XFS_BUF_ISPINNED(bp) pagebuf_ispin(bp)
#define XFS_BUF_VALUSEMA(bp) pagebuf_lock_value(bp)
#define XFS_BUF_CPSEMA(bp) (pagebuf_cond_lock(bp) == 0)
......@@ -191,13 +191,11 @@ extern inline xfs_caddr_t xfs_buf_offset(page_buf_t *bp, size_t offset)
/* setup the buffer target from a buftarg structure */
#define XFS_BUF_SET_TARGET(bp, target) \
(bp)->pb_target = (target)
(bp)->pb_target = (target)
#define XFS_BUF_TARGET(bp) ((bp)->pb_target)
#define XFS_BUFTARG_NAME(target) \
pagebuf_target_name(target)
#define XFS_BUFTARG_NAME(target) \
({ char __b[BDEVNAME_SIZE]; bdevname((target->pbr_bdev), __b); __b; })
#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
#define XFS_BUF_SET_VTYPE(bp, type)
#define XFS_BUF_SET_REF(bp, ref)
......@@ -231,18 +229,11 @@ static inline void xfs_buf_relse(page_buf_t *bp)
pagebuf_rele(bp);
}
#define xfs_bpin(bp) pagebuf_pin(bp)
#define xfs_bunpin(bp) pagebuf_unpin(bp)
#ifdef PAGEBUF_TRACE
# define PB_DEFINE_TRACES
# include <pagebuf/page_buf_trace.h>
# define xfs_buftrace(id, bp) PB_TRACE(bp, PB_TRACE_REC(external), (void *)id)
#else
# define xfs_buftrace(id, bp) do { } while (0)
#endif
#define xfs_buftrace(id, bp) \
pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
#define xfs_biodone(pb) \
pagebuf_iodone(pb, (pb->pb_flags & PBF_FS_DATAIOD), 0)
......
......@@ -1207,13 +1207,14 @@ xfs_buf_item_trace(
(void *)((unsigned long)bip->bli_flags),
(void *)((unsigned long)bip->bli_recur),
(void *)((unsigned long)atomic_read(&bip->bli_refcount)),
(void *)XFS_BUF_ADDR(bp),
(void *)((unsigned long)
(0xFFFFFFFF & XFS_BUF_ADDR(bp) >> 32)),
(void *)((unsigned long)(0xFFFFFFFF & XFS_BUF_ADDR(bp))),
(void *)((unsigned long)XFS_BUF_COUNT(bp)),
(void *)((unsigned long)(0xFFFFFFFF & (XFS_BFLAGS(bp) >> 32))),
(void *)((unsigned long)(0xFFFFFFFF & XFS_BFLAGS(bp))),
(void *)((unsigned long)XFS_BUF_BFLAGS(bp)),
XFS_BUF_FSPRIVATE(bp, void *),
XFS_BUF_FSPRIVATE2(bp, void *),
(void *)((unsigned long)bp->b_pincount),
(void *)(unsigned long)XFS_BUF_ISPINNED(bp),
(void *)XFS_BUF_IODONE_FUNC(bp),
(void *)((unsigned long)(XFS_BUF_VALUSEMA(bp))),
(void *)bip->bli_item.li_desc,
......
......@@ -104,6 +104,15 @@ typedef struct xfs_buf_log_format_t {
struct xfs_buf;
struct ktrace;
struct xfs_mount;
struct xfs_buf_log_item;
#if defined(XFS_BLI_TRACE)
#define XFS_BLI_TRACE_SIZE 32
void xfs_buf_item_trace(char *, struct xfs_buf_log_item *);
#else
#define xfs_buf_item_trace(id, bip)
#endif
/*
* This is the in core log item structure used to track information
......@@ -116,7 +125,7 @@ typedef struct xfs_buf_log_item {
unsigned int bli_flags; /* misc flags */
unsigned int bli_recur; /* lock recursion count */
atomic_t bli_refcount; /* cnt of tp refs */
#ifdef DEBUG
#ifdef XFS_BLI_TRACE
struct ktrace *bli_trace; /* event trace buf */
#endif
#ifdef XFS_TRANS_DEBUG
......@@ -137,23 +146,6 @@ typedef struct xfs_buf_cancel {
struct xfs_buf_cancel *bc_next;
} xfs_buf_cancel_t;
#define XFS_BLI_TRACE_SIZE 32
#if defined(XFS_ALL_TRACE)
#define XFS_BLI_TRACE
#endif
#if !defined(DEBUG)
#undef XFS_BLI_TRACE
#endif
#if defined(XFS_BLI_TRACE)
void xfs_buf_item_trace(char *, xfs_buf_log_item_t *);
#else
#define xfs_buf_item_trace(id, bip)
#endif
void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
void xfs_buf_item_relse(struct xfs_buf *);
void xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
......
......@@ -66,13 +66,6 @@
#include "xfs_error.h"
#include "xfs_bit.h"
#if defined(XFSDEBUG) && defined(CONFIG_KDB)
#undef xfs_buftrace
#define xfs_buftrace(A,B) \
printk(" xfs_buftrace : %s (0x%p)\n", A, B); \
BUG();
#endif
/*
* xfs_da_btree.c
*
......
......@@ -153,12 +153,12 @@ xfs_swapext(
if ((error = _MAC_XFS_IACCESS(tip, MACWRITE, NULL))) {
goto error0;
}
if ((current->fsuid != ip->i_d.di_uid) &&
if ((current_fsuid(cred) != ip->i_d.di_uid) &&
(error = xfs_iaccess(ip, S_IWUSR, NULL)) &&
!capable_cred(NULL, CAP_FOWNER)) {
goto error0;
}
if ((current->fsuid != tip->i_d.di_uid) &&
if ((current_fsuid(cred) != tip->i_d.di_uid) &&
(error = xfs_iaccess(tip, S_IWUSR, NULL)) &&
!capable_cred(NULL, CAP_FOWNER)) {
goto error0;
......
......@@ -1093,10 +1093,10 @@ void
xfs_dir_trace_g_du(char *where, xfs_inode_t *dp, uio_t *uio)
{
xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DU, where,
(__psunsigned_t)dp, (__psunsigned_t)dp->i_mount,
(__psunsigned_t)(uio->uio_offset >> 32),
(__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF),
(__psunsigned_t)uio->uio_resid,
(void *)dp, (void *)dp->i_mount,
(void *)((unsigned long)(uio->uio_offset >> 32)),
(void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
(void *)(unsigned long)uio->uio_resid,
NULL, NULL, NULL, NULL, NULL, NULL, NULL);
}
......@@ -1107,11 +1107,11 @@ void
xfs_dir_trace_g_dub(char *where, xfs_inode_t *dp, uio_t *uio, xfs_dablk_t bno)
{
xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUB, where,
(__psunsigned_t)dp, (__psunsigned_t)dp->i_mount,
(__psunsigned_t)(uio->uio_offset >> 32),
(__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF),
(__psunsigned_t)uio->uio_resid,
(__psunsigned_t)bno,
(void *)dp, (void *)dp->i_mount,
(void *)((unsigned long)(uio->uio_offset >> 32)),
(void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
(void *)(unsigned long)uio->uio_resid,
(void *)(unsigned long)bno,
NULL, NULL, NULL, NULL, NULL, NULL);
}
......@@ -1122,15 +1122,21 @@ void
xfs_dir_trace_g_dun(char *where, xfs_inode_t *dp, uio_t *uio,
xfs_da_intnode_t *node)
{
int last = INT_GET(node->hdr.count, ARCH_CONVERT) - 1;
xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUN, where,
(__psunsigned_t)dp, (__psunsigned_t)dp->i_mount,
(__psunsigned_t)(uio->uio_offset >> 32),
(__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF),
(__psunsigned_t)uio->uio_resid,
(__psunsigned_t)INT_GET(node->hdr.info.forw, ARCH_CONVERT),
(__psunsigned_t)INT_GET(node->hdr.count, ARCH_CONVERT),
(__psunsigned_t)INT_GET(node->btree[0].hashval, ARCH_CONVERT),
(__psunsigned_t)INT_GET(node->btree[INT_GET(node->hdr.count, ARCH_CONVERT)-1].hashval, ARCH_CONVERT),
(void *)dp, (void *)dp->i_mount,
(void *)((unsigned long)(uio->uio_offset >> 32)),
(void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
(void *)(unsigned long)uio->uio_resid,
(void *)(unsigned long)
INT_GET(node->hdr.info.forw, ARCH_CONVERT),
(void *)(unsigned long)
INT_GET(node->hdr.count, ARCH_CONVERT),
(void *)(unsigned long)
INT_GET(node->btree[0].hashval, ARCH_CONVERT),
(void *)(unsigned long)
INT_GET(node->btree[last].hashval, ARCH_CONVERT),
NULL, NULL, NULL);
}
......@@ -1141,15 +1147,21 @@ void
xfs_dir_trace_g_dul(char *where, xfs_inode_t *dp, uio_t *uio,
xfs_dir_leafblock_t *leaf)
{
int last = INT_GET(leaf->hdr.count, ARCH_CONVERT) - 1;
xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUL, where,
(__psunsigned_t)dp, (__psunsigned_t)dp->i_mount,
(__psunsigned_t)(uio->uio_offset >> 32),
(__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF),
(__psunsigned_t)uio->uio_resid,
(__psunsigned_t)INT_GET(leaf->hdr.info.forw, ARCH_CONVERT),
(__psunsigned_t)INT_GET(leaf->hdr.count, ARCH_CONVERT),
(__psunsigned_t)INT_GET(leaf->entries[0].hashval, ARCH_CONVERT),
(__psunsigned_t)INT_GET(leaf->entries[ INT_GET(leaf->hdr.count, ARCH_CONVERT)-1 ].hashval, ARCH_CONVERT),
(void *)dp, (void *)dp->i_mount,
(void *)((unsigned long)(uio->uio_offset >> 32)),
(void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
(void *)(unsigned long)uio->uio_resid,
(void *)(unsigned long)
INT_GET(leaf->hdr.info.forw, ARCH_CONVERT),
(void *)(unsigned long)
INT_GET(leaf->hdr.count, ARCH_CONVERT),
(void *)(unsigned long)
INT_GET(leaf->entries[0].hashval, ARCH_CONVERT),
(void *)(unsigned long)
INT_GET(leaf->entries[last].hashval, ARCH_CONVERT),
NULL, NULL, NULL);
}
......@@ -1161,11 +1173,12 @@ xfs_dir_trace_g_due(char *where, xfs_inode_t *dp, uio_t *uio,
xfs_dir_leaf_entry_t *entry)
{
xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUE, where,
(__psunsigned_t)dp, (__psunsigned_t)dp->i_mount,
(__psunsigned_t)(uio->uio_offset >> 32),
(__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF),
(__psunsigned_t)uio->uio_resid,
(__psunsigned_t)INT_GET(entry->hashval, ARCH_CONVERT),
(void *)dp, (void *)dp->i_mount,
(void *)((unsigned long)(uio->uio_offset >> 32)),
(void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
(void *)(unsigned long)uio->uio_resid,
(void *)(unsigned long)
INT_GET(entry->hashval, ARCH_CONVERT),
NULL, NULL, NULL, NULL, NULL, NULL);
}
......@@ -1176,12 +1189,12 @@ void
xfs_dir_trace_g_duc(char *where, xfs_inode_t *dp, uio_t *uio, xfs_off_t cookie)
{
xfs_dir_trace_enter(XFS_DIR_KTRACE_G_DUC, where,
(__psunsigned_t)dp, (__psunsigned_t)dp->i_mount,
(__psunsigned_t)(uio->uio_offset >> 32),
(__psunsigned_t)(uio->uio_offset & 0xFFFFFFFF),
(__psunsigned_t)uio->uio_resid,
(__psunsigned_t)(cookie >> 32),
(__psunsigned_t)(cookie & 0xFFFFFFFF),
(void *)dp, (void *)dp->i_mount,
(void *)((unsigned long)(uio->uio_offset >> 32)),
(void *)((unsigned long)(uio->uio_offset & 0xFFFFFFFF)),
(void *)(unsigned long)uio->uio_resid,
(void *)((unsigned long)(cookie >> 32)),
(void *)((unsigned long)(cookie & 0xFFFFFFFF)),
NULL, NULL, NULL, NULL, NULL);
}
......@@ -1191,15 +1204,15 @@ xfs_dir_trace_g_duc(char *where, xfs_inode_t *dp, uio_t *uio, xfs_off_t cookie)
*/
void
xfs_dir_trace_enter(int type, char *where,
__psunsigned_t a0, __psunsigned_t a1,
__psunsigned_t a2, __psunsigned_t a3,
__psunsigned_t a4, __psunsigned_t a5,
__psunsigned_t a6, __psunsigned_t a7,
__psunsigned_t a8, __psunsigned_t a9,
__psunsigned_t a10, __psunsigned_t a11)
void * a0, void * a1,
void * a2, void * a3,
void * a4, void * a5,
void * a6, void * a7,
void * a8, void * a9,
void * a10, void * a11)
{
ASSERT(xfs_dir_trace_buf);
ktrace_enter(xfs_dir_trace_buf, (void *)((__psunsigned_t)type),
ktrace_enter(xfs_dir_trace_buf, (void *)(unsigned long)type,
(void *)where,
(void *)a0, (void *)a1, (void *)a2,
(void *)a3, (void *)a4, (void *)a5,
......
......@@ -43,14 +43,6 @@
* as possible so as to fit into the literal area of the inode.
*/
#ifdef XFS_ALL_TRACE
#define XFS_DIR_TRACE
#endif
#if !defined(DEBUG)
#undef XFS_DIR_TRACE
#endif
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
......
......@@ -36,20 +36,15 @@
* Tracing for xfs v2 directories.
*/
#if defined(XFS_DIR2_TRACE)
struct ktrace;
struct xfs_dabuf;
struct xfs_da_args;
#ifdef XFS_ALL_TRACE
#define XFS_DIR2_TRACE
#endif /* XFS_ALL_TRACE */
#if !defined(DEBUG)
#undef XFS_DIR2_TRACE
#endif /* !DEBUG */
#define XFS_DIR2_GTRACE_SIZE 4096 /* global buffer */
#define XFS_DIR2_KTRACE_SIZE 32 /* per-inode buffer */
extern struct ktrace *xfs_dir2_trace_buf;
#define XFS_DIR2_KTRACE_ARGS 1 /* args only */
#define XFS_DIR2_KTRACE_ARGS_B 2 /* args + buffer */
......@@ -60,8 +55,6 @@ struct xfs_da_args;
#define XFS_DIR2_KTRACE_ARGS_SB 7 /* args, int, buffer */
#define XFS_DIR2_KTRACE_ARGS_BIBII 8 /* args, buf/int/buf/int/int */
#ifdef XFS_DIR2_TRACE
void xfs_dir2_trace_args(char *where, struct xfs_da_args *args);
void xfs_dir2_trace_args_b(char *where, struct xfs_da_args *args,
struct xfs_dabuf *bp);
......@@ -90,6 +83,4 @@ void xfs_dir2_trace_args_sb(char *where, struct xfs_da_args *args, int s,
#endif /* XFS_DIR2_TRACE */
extern struct ktrace *xfs_dir2_trace_buf;
#endif /* __XFS_DIR2_TRACE_H__ */
......@@ -175,14 +175,6 @@ typedef enum {
DM_FLAGS_NDELAY : 0)
#define AT_DELAY_FLAG(f) ((f&ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0)
/*
* Macros to turn caller specified delay/block flags into
* dm_send_xxxx_event flag DM_FLAGS_NDELAY.
*/
#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
DM_FLAGS_NDELAY : 0)
extern struct bhv_vfsops xfs_dmops;
......
......@@ -43,8 +43,6 @@
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#ifndef CONFIG_XFS_DMAPI
xfs_dmops_t xfs_dmcore_xfs = {
.xfs_send_data = (xfs_send_data_t)fs_nosys,
.xfs_send_mmap = (xfs_send_mmap_t)fs_noerr,
......@@ -52,4 +50,3 @@ xfs_dmops_t xfs_dmcore_xfs = {
.xfs_send_namesp = (xfs_send_namesp_t)fs_nosys,
.xfs_send_unmount = (xfs_send_unmount_t)fs_noval,
};
#endif /* CONFIG_XFS_DMAPI */
......@@ -61,6 +61,7 @@
#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
STATIC xfs_fsize_t
......