Commit 07c65489 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Nathan Scott

[XFS] Separate the quota source into its own subdirectory ala dmapi.

Push a bunch of quota- and dmapi-specific code down into these
subdirs which previously was compiled into the core XFS code,
and don't descend into these subdirs if options config'd off.

SGI Modid: 2.5.x-xfs:slinx:141850a
parent 07f08be5
......@@ -29,8 +29,6 @@
#
# http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
#
# Makefile for XFS on Linux.
#
EXTRA_CFLAGS += -Ifs/xfs -funsigned-char
......@@ -44,18 +42,22 @@ endif
obj-$(CONFIG_XFS_FS) += xfs.o
xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \
xfs-$(CONFIG_XFS_QUOTA) += $(addprefix quota/, \
xfs_dquot.o \
xfs_dquot_item.o \
xfs_trans_dquot.o \
xfs_qm_syscalls.o \
xfs_qmops.o \
xfs_qm.o
xfs_qm_bhv.o \
xfs_qm.o)
ifeq ($(CONFIG_XFS_QUOTA),y)
xfs-$(CONFIG_PROC_FS) += quota/xfs_qm_stats.o
endif
xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
xfs-$(CONFIG_FS_POSIX_CAP) += xfs_cap.o
xfs-$(CONFIG_FS_POSIX_MAC) += xfs_mac.o
xfs-$(CONFIG_XFS_POSIX_CAP) += xfs_cap.o
xfs-$(CONFIG_XFS_POSIX_MAC) += xfs_mac.o
xfs-$(CONFIG_PROC_FS) += linux/xfs_stats.o
xfs-$(CONFIG_SYSCTL) += linux/xfs_sysctl.o
......@@ -137,6 +139,10 @@ xfs-y += $(addprefix support/, \
qsort.o \
uuid.o)
# Quota and DMAPI stubs
xfs-y += xfs_dmops.o \
xfs_qmops.o
# If both xfs and kdb modules are built in then xfsidbg is built in. If xfs is
# a module and kdb modules are being compiled then xfsidbg must be a module, to
# follow xfs. If xfs is built in then xfsidbg tracks the kdb module state.
......
......@@ -299,7 +299,9 @@ linvfs_file_mmap(
int error;
if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) {
error = -xfs_dm_send_mmap_event(vma, 0);
xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
error = -XFS_SEND_MMAP(mp, vma, 0);
if (error)
return error;
}
......@@ -345,8 +347,10 @@ linvfs_mprotect(
if ((vp->v_type == VREG) && (vp->v_vfsp->vfs_flag & VFS_DMI)) {
if ((vma->vm_flags & VM_MAYSHARE) &&
(newflags & PROT_WRITE) && !(vma->vm_flags & PROT_WRITE)){
error = xfs_dm_send_mmap_event(vma, VM_WRITE);
(newflags & PROT_WRITE) && !(vma->vm_flags & PROT_WRITE)) {
xfs_mount_t *mp = XFS_VFSTOM(vp->v_vfsp);
error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
}
}
return error;
......
......@@ -58,17 +58,7 @@ spinlock_t Atomic_spin = SPIN_LOCK_UNLOCKED;
*/
cred_t sys_cred_val, *sys_cred = &sys_cred_val;
/*
* The global quota manager. There is only one of these for the entire
* system, _not_ one per file system. XQM keeps track of the overall
* quota functionality, including maintaining the freelist and hash
* tables of dquots.
*/
struct xfs_qm *xfs_Gqm;
mutex_t xfs_Gqm_lock;
/* Export XFS symbols used by xfsidbg */
EXPORT_SYMBOL(xfs_Gqm);
EXPORT_SYMBOL(xfs_next_bit);
EXPORT_SYMBOL(xfs_contig_bits);
EXPORT_SYMBOL(xfs_bmbt_get_all);
......
......@@ -43,7 +43,4 @@ extern unsigned long xfs_physmem;
extern struct cred *sys_cred;
extern struct xfs_qm *xfs_Gqm;
extern mutex_t xfs_Gqm_lock;
#endif /* __XFS_GLOBALS_H__ */
......@@ -258,11 +258,10 @@ xfs_iomap_write_direct(
* the ilock across a disk read.
*/
if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
error = XFS_QM_DQATTACH(ip->i_mount, ip, XFS_QMOPT_ILOCKED);
if (error)
return XFS_ERROR(error);
}
}
maps = min(XFS_WRITE_IMAPS, *nmaps);
nimaps = maps;
......@@ -291,7 +290,7 @@ xfs_iomap_write_direct(
* determine if reserving space on
* the data or realtime partition.
*/
if ((rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
if ((rt = XFS_IS_REALTIME_INODE(ip))) {
int sbrtextsize, iprtextsize;
sbrtextsize = mp->m_sb.sb_rextsize;
......@@ -333,12 +332,10 @@ xfs_iomap_write_direct(
goto error_out; /* Don't return in above if .. trans ..,
need lock to return */
if (XFS_IS_QUOTA_ON(mp)) {
if (xfs_trans_reserve_blkquota(tp, ip, resblks)) {
if (XFS_TRANS_RESERVE_BLKQUOTA(mp, tp, ip, resblks)) {
error = (EDQUOT);
goto error1;
}
}
nimaps = 1;
bmapi_flag = XFS_BMAPI_WRITE;
......@@ -422,11 +419,9 @@ xfs_iomap_write_delay(
* the ilock across a disk read.
*/
if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
if (error)
return XFS_ERROR(error);
}
}
retry:
isize = ip->i_d.di_size;
......@@ -538,11 +533,8 @@ xfs_iomap_write_allocate(
* Make sure that the dquots are there.
*/
if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, 0))) {
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
return XFS_ERROR(error);
}
}
offset_fsb = map->br_startoff;
count_fsb = map->br_blockcount;
......
......@@ -121,7 +121,8 @@ xfs_read(
xfs_mount_t *mp;
vnode_t *vp;
unsigned long seg;
int direct = filp->f_flags & O_DIRECT;
int direct = (filp->f_flags & O_DIRECT);
int invisible = (filp->f_mode & FINVIS);
ip = XFS_BHVTOI(bdp);
vp = BHV_TO_VNODE(bdp);
......@@ -180,13 +181,12 @@ xfs_read(
xfs_ilock(ip, XFS_IOLOCK_SHARED);
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
!(filp->f_mode & FINVIS)) {
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) && !invisible) {
int error;
vrwlock_t locktype = VRWLOCK_READ;
error = xfs_dm_send_data_event(DM_EVENT_READ, bdp, *offp,
size, FILP_DELAY_FLAG(filp), &locktype);
error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offp, size,
FILP_DELAY_FLAG(filp), &locktype);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return -error;
......@@ -198,7 +198,7 @@ xfs_read(
XFS_STATS_ADD(xfsstats.xs_read_bytes, ret);
if (!(filp->f_mode & FINVIS))
if (!invisible)
xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
return ret;
......@@ -217,11 +217,13 @@ xfs_sendfile(
ssize_t ret;
xfs_fsize_t n;
xfs_inode_t *ip;
xfs_mount_t *mp;
vnode_t *vp;
int invisible = (filp->f_mode & FINVIS);
ip = XFS_BHVTOI(bdp);
vp = BHV_TO_VNODE(bdp);
mp = ip->i_mount;
vn_trace_entry(vp, "xfs_sendfile", (inst_t *)__return_address);
XFS_STATS_INC(xfsstats.xs_read_calls);
......@@ -241,8 +243,8 @@ xfs_sendfile(
vrwlock_t locktype = VRWLOCK_READ;
int error;
error = xfs_dm_send_data_event(DM_EVENT_READ, bdp, *offp,
count, FILP_DELAY_FLAG(filp), &locktype);
error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offp, count,
FILP_DELAY_FLAG(filp), &locktype);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return -error;
......@@ -493,7 +495,8 @@ xfs_write(
vnode_t *vp;
unsigned long seg;
int iolock;
int direct = file->f_flags & O_DIRECT;
int direct = (file->f_flags & O_DIRECT);
int invisible = (file->f_mode & FINVIS);
int eventsent = 0;
vrwlock_t locktype;
......@@ -573,11 +576,11 @@ xfs_write(
}
if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
!(file->f_mode & FINVIS) && !eventsent)) {
!invisible && !eventsent)) {
loff_t savedsize = *offset;
xfs_iunlock(xip, XFS_ILOCK_EXCL);
error = xfs_dm_send_data_event(DM_EVENT_WRITE, bdp,
error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, bdp,
*offset, size,
FILP_DELAY_FLAG(file), &locktype);
if (error) {
......@@ -588,12 +591,11 @@ xfs_write(
eventsent = 1;
/*
* The iolock was dropped and reacquired in
* xfs_dm_send_data_event so we have to recheck the size
* when appending. We will only "goto start;" once,
* since having sent the event prevents another call
* to xfs_dm_send_data_event, which is what
* allows the size to change in the first place.
* The iolock was dropped and reacquired in XFS_SEND_DATA
* so we have to recheck the size when appending.
* We will only "goto start;" once, since having sent the
* event prevents another call to XFS_SEND_DATA, which is
* what allows the size to change in the first place.
*/
if ((file->f_flags & O_APPEND) &&
savedsize != xip->i_d.di_size) {
......@@ -608,10 +610,8 @@ xfs_write(
*
* We must update xfs' times since revalidate will overcopy xfs.
*/
if (size) {
if (!(file->f_mode & FINVIS))
if (size && !invisible)
xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
}
/*
* If the offset is beyond the size of the file, we have a couple
......@@ -658,11 +658,10 @@ xfs_write(
ret = generic_file_write_nolock(file, iovp, segs, offset);
if ((ret == -ENOSPC) &&
DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
!(file->f_mode & FINVIS)) {
DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) && !invisible) {
xfs_rwunlock(bdp, locktype);
error = dm_send_namesp_event(DM_EVENT_NOSPACE, bdp,
error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, bdp,
DM_RIGHT_NULL, bdp, DM_RIGHT_NULL, NULL, NULL,
0, 0, 0); /* Delay flag intentionally unused */
if (error)
......
/*
* Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -61,7 +61,6 @@ xfs_read_xfsstats(
{ "xstrat", XFSSTAT_END_WRITE_CONVERT },
{ "rw", XFSSTAT_END_READ_WRITE_OPS },
{ "attr", XFSSTAT_END_ATTRIBUTE_OPS },
{ "qm", XFSSTAT_END_QUOTA_OPS },
{ "icluster", XFSSTAT_END_INODE_CLUSTER },
{ "vnodes", XFSSTAT_END_VNODE_OPS },
};
......@@ -95,50 +94,17 @@ xfs_read_xfsstats(
return len;
}
STATIC int
xfs_read_xfsquota(
char *buffer,
char **start,
off_t offset,
int count,
int *eof,
void *data)
{
int len;
/* maximum; incore; ratio free to inuse; freelist */
len = sprintf(buffer, "%d\t%d\t%d\t%u\n",
ndquot,
xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0,
xfs_Gqm? xfs_Gqm->qm_dqfreelist.qh_nelems : 0);
if (offset >= len) {
*start = buffer;
*eof = 1;
return 0;
}
*start = buffer + offset;
if ((len -= offset) > count)
return count;
*eof = 1;
return len;
}
void
xfs_init_procfs(void)
{
if (!proc_mkdir("fs/xfs", 0))
return;
create_proc_read_entry("fs/xfs/stat", 0, 0, xfs_read_xfsstats, NULL);
create_proc_read_entry("fs/xfs/xqm", 0, 0, xfs_read_xfsquota, NULL);
}
void
xfs_cleanup_procfs(void)
{
remove_proc_entry("fs/xfs/stat", NULL);
remove_proc_entry("fs/xfs/xqm", NULL);
remove_proc_entry("fs/xfs", NULL);
}
......@@ -107,16 +107,7 @@ struct xfsstats {
__uint32_t xs_attr_set;
__uint32_t xs_attr_remove;
__uint32_t xs_attr_list;
# define XFSSTAT_END_QUOTA_OPS (XFSSTAT_END_ATTRIBUTE_OPS+8)
__uint32_t xs_qm_dqreclaims;
__uint32_t xs_qm_dqreclaim_misses;
__uint32_t xs_qm_dquot_dups;
__uint32_t xs_qm_dqcachemisses;
__uint32_t xs_qm_dqcachehits;
__uint32_t xs_qm_dqwants;
__uint32_t xs_qm_dqshake_reclaims;
__uint32_t xs_qm_dqinact_reclaims;
# define XFSSTAT_END_INODE_CLUSTER (XFSSTAT_END_QUOTA_OPS+3)
# define XFSSTAT_END_INODE_CLUSTER (XFSSTAT_END_ATTRIBUTE_OPS+3)
__uint32_t xs_iflush_count;
__uint32_t xs_icluster_flushcnt;
__uint32_t xs_icluster_flushinode;
......
......@@ -953,33 +953,27 @@ init_xfs_fs( void )
error = init_inodecache();
if (error < 0)
goto undo_inodecache;
error = pagebuf_init();
if (error < 0)
goto undo_pagebuf;
vn_init();
xfs_init();
error = vfs_initdmapi();
if (error < 0)
goto undo_dmapi;
error = vfs_initquota();
if (error < 0)
goto undo_quota;
vfs_initdmapi();
vfs_initquota();
error = register_filesystem(&xfs_fs_type);
if (error)
goto undo_fs;
goto undo_register;
return 0;
undo_fs:
vfs_exitquota();
undo_quota:
vfs_exitdmapi();
undo_dmapi:
undo_register:
pagebuf_terminate();
undo_pagebuf:
destroy_inodecache();
undo_inodecache:
return error;
}
......
......@@ -32,42 +32,38 @@
#ifndef __XFS_SUPER_H__
#define __XFS_SUPER_H__
#ifdef CONFIG_XFS_POSIX_ACL
# define XFS_ACL_STRING "ACLs, "
# define set_posix_acl_flag(sb) ((sb)->s_flags |= MS_POSIXACL)
#else
# define XFS_ACL_STRING
# define set_posix_acl_flag(sb) do { } while (0)
#endif
#ifdef CONFIG_XFS_DMAPI
# define XFS_DMAPI_STRING "DMAPI, "
# define vfs_insertdmapi(vfs) vfs_insertops(vfsp, &xfs_dmops_xfs)
# define vfs_initdmapi() (0) /* temporarily */
# define vfs_exitdmapi() do { } while (0) /* temporarily */
# define vfs_insertdmapi(vfs) vfs_insertops(vfsp, &xfs_dmops)
# define vfs_initdmapi() xfs_dm_init()
# define vfs_exitdmapi() xfs_dm_exit()
#else
# define XFS_DMAPI_STRING
# define vfs_insertdmapi(vfs) do { } while (0)
# define vfs_initdmapi() (0)
# define vfs_initdmapi() do { } while (0)
# define vfs_exitdmapi() do { } while (0)
#endif
#ifdef CONFIG_XFS_QUOTA
# define XFS_QUOTA_STRING "quota, "
# define vfs_insertquota(vfs) vfs_insertops(vfsp, &xfs_qmops_xfs)
# define vfs_initquota() (0) /* temporarily */
# define vfs_exitquota() do { } while (0) /* temporarily */
# define vfs_insertquota(vfs) vfs_insertops(vfsp, &xfs_qmops)
# define vfs_initquota() xfs_qm_init()
# define vfs_exitquota() xfs_qm_exit()
#else
# define XFS_QUOTA_STRING
# define vfs_insertquota(vfs) do { } while (0)
# define vfs_initquota() (0)
# define vfs_initquota() do { } while (0)
# define vfs_exitquota() do { } while (0)
#endif
#ifdef CONFIG_XFS_POSIX_ACL
# define XFS_ACL_STRING "ACLs, "
# define set_posix_acl_flag(sb) ((sb)->s_flags |= MS_POSIXACL)
#else
# define XFS_ACL_STRING
# define set_posix_acl_flag(sb) do { } while (0)
#endif
#ifdef CONFIG_XFS_RT
# define XFS_RT_STRING "realtime, "
# define XFS_REALTIME_STRING "realtime, "
#else
# define XFS_RT_STRING
# define XFS_REALTIME_STRING
#endif
#ifdef CONFIG_XFS_VNODE_TRACING
......@@ -82,9 +78,9 @@
# define XFS_DBG_STRING "no debug"
#endif
#define XFS_BUILD_OPTIONS XFS_ACL_STRING XFS_DMAPI_STRING \
XFS_RT_STRING \
XFS_QUOTA_STRING XFS_VNTRACE_STRING \
#define XFS_BUILD_OPTIONS XFS_ACL_STRING \
XFS_REALTIME_STRING \
XFS_VNTRACE_STRING \
XFS_DBG_STRING /* DBG must be last */
#define LINVFS_GET_VFS(s) \
......
......@@ -222,7 +222,7 @@ vfs_deallocate(
void
vfs_insertops(
struct vfs *vfsp,
struct vfsops *vfsops)
struct bhv_vfsops *vfsops)
{
struct bhv_desc *bdp;
......
......@@ -165,9 +165,19 @@ extern int vfs_quotactl(bhv_desc_t *, int, int, caddr_t);
extern void vfs_init_vnode(bhv_desc_t *, struct vnode *, bhv_desc_t *, int);
extern void vfs_force_shutdown(bhv_desc_t *, int, char *, int);
typedef struct bhv_vfsops {
struct vfsops bhv_common;
void * bhv_custom;
} bhv_vfsops_t;
#define vfs_bhv_lookup(v, id) ( bhv_lookup_range(&(v)->vfs_bh, (id), (id)) )
#define vfs_bhv_custom(b) ( ((bhv_vfsops_t *)BHV_OPS(b))->bhv_custom )
#define vfs_bhv_set_custom(b,o) ( (b)->bhv_custom = (void *)(o))
#define vfs_bhv_clr_custom(b) ( (b)->bhv_custom = NULL )
extern vfs_t *vfs_allocate(void);
extern void vfs_deallocate(vfs_t *);
extern void vfs_insertops(vfs_t *, vfsops_t *);
extern void vfs_insertops(vfs_t *, bhv_vfsops_t *);
extern void vfs_insertbhv(vfs_t *, bhv_desc_t *, vfsops_t *, void *);
extern void bhv_insert_all_vfsops(struct vfs *);
......
......@@ -146,6 +146,7 @@ XFS_DQ_IS_LOCKED(xfs_dquot_t *dqp)
}
#endif
/*
* The following three routines simply manage the q_flock
* semaphore embedded in the dquot. This semaphore synchronizes
......@@ -197,7 +198,6 @@ extern void xfs_qm_dqprint(xfs_dquot_t *);
#define xfs_qm_dqprint(a)
#endif
extern xfs_dquot_t *xfs_qm_dqinit(xfs_mount_t *, xfs_dqid_t, uint);
extern void xfs_qm_dqdestroy(xfs_dquot_t *);
extern int xfs_qm_dqflush(xfs_dquot_t *, uint);
extern int xfs_qm_dqpurge(xfs_dquot_t *, uint);
......@@ -208,5 +208,13 @@ extern void xfs_qm_dqflock_pushbuf_wait(xfs_dquot_t *dqp);
extern void xfs_qm_adjust_dqtimers(xfs_mount_t *,
xfs_disk_dquot_t *);
extern int xfs_qm_dqwarn(xfs_disk_dquot_t *, uint);
extern int xfs_qm_dqget(xfs_mount_t *, xfs_inode_t *,
xfs_dqid_t, uint, uint, xfs_dquot_t **);
extern void xfs_qm_dqput(xfs_dquot_t *);
extern void xfs_qm_dqrele(xfs_dquot_t *);
extern void xfs_dqlock(xfs_dquot_t *);
extern void xfs_dqlock2(xfs_dquot_t *, xfs_dquot_t *);
extern void xfs_dqunlock(xfs_dquot_t *);
extern void xfs_dqunlock_nonotify(xfs_dquot_t *);
#endif /* __XFS_DQUOT_H__ */
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -31,7 +31,7 @@
*/
#include <xfs.h>
#include <xfs_quota_priv.h>
#include "xfs_qm.h"
/*
......
/*
* Copyright (c) 2000-2001 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -32,58 +32,22 @@
#ifndef __XFS_DQUOT_ITEM_H__
#define __XFS_DQUOT_ITEM_H__
/*
* These are the structures used to lay out dquots and quotaoff
* records on the log. Quite similar to those of inodes.
*/
/*
* log format struct for dquots.
* The first two fields must be the type and size fitting into
* 32 bits : log_recovery code assumes that.
*/
typedef struct xfs_dq_logformat {
__uint16_t qlf_type; /* dquot log item type */
__uint16_t qlf_size; /* size of this item */
xfs_dqid_t qlf_id; /* usr/grp id number : 32 bits */
__int64_t qlf_blkno; /* blkno of dquot buffer */
__int32_t qlf_len; /* len of dquot buffer */
__uint32_t qlf_boffset; /* off of dquot in buffer */
} xfs_dq_logformat_t;
/*
* log format struct for QUOTAOFF records.
* The first two fields must be the type and size fitting into
* 32 bits : log_recovery code assumes that.
* We write two LI_QUOTAOFF logitems per quotaoff, the last one keeps a pointer
* to the first and ensures that the first logitem is taken out of the AIL
* only when the last one is securely committed.
*/
typedef struct xfs_qoff_logformat {
unsigned short qf_type; /* quotaoff log item type */
unsigned short qf_size; /* size of this item */
unsigned int qf_flags; /* USR and/or GRP */
char qf_pad[12]; /* padding for future */
} xfs_qoff_logformat_t;
#ifdef __KERNEL__
struct xfs_dquot;
struct xfs_trans;
struct xfs_mount;
struct xfs_qoff_logitem;
typedef struct xfs_dq_logitem {
xfs_log_item_t qli_item; /* common portion */
struct xfs_dquot *qli_dquot; /* dquot ptr */
xfs_lsn_t qli_flush_lsn; /* lsn at last flush */
unsigned short qli_pushbuf_flag; /* one bit used in push_ail */
unsigned short qli_pushbuf_flag; /* 1 bit used in push_ail */
#ifdef DEBUG
uint64_t qli_push_owner;
#endif
xfs_dq_logformat_t qli_format; /* logged structure */
} xfs_dq_logitem_t;
typedef struct xfs_qoff_logitem {
xfs_log_item_t qql_item; /* common portion */
struct xfs_qoff_logitem *qql_start_lip; /* qoff-start logitem, if any */
......@@ -93,12 +57,10 @@ typedef struct xfs_qoff_logitem {
extern void xfs_qm_dquot_logitem_init(struct xfs_dquot *);
extern xfs_qoff_logitem_t *xfs_qm_qoff_logitem_init(struct xfs_mount *,
xfs_qoff_logitem_t *, uint);
struct xfs_qoff_logitem *, uint);
extern xfs_qoff_logitem_t *xfs_trans_get_qoff_item(struct xfs_trans *,
xfs_qoff_logitem_t *, uint);
struct xfs_qoff_logitem *, uint);
extern void xfs_trans_log_quotaoff_item(struct xfs_trans *,
xfs_qoff_logitem_t *);
#endif /* __KERNEL__ */
struct xfs_qoff_logitem *);
#endif /* __XFS_DQUOT_ITEM_H__ */
This diff is collapsed.
......@@ -32,10 +32,16 @@
#ifndef __XFS_QM_H__
#define __XFS_QM_H__
struct xfs_dqhash;
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_quota_priv.h"
#include "xfs_qm_stats.h"
struct xfs_qm;
struct xfs_inode;
struct xfs_dquot;
extern mutex_t xfs_Gqm_lock;
extern struct xfs_qm *xfs_Gqm;
extern kmem_zone_t *qm_dqzone;
extern kmem_zone_t *qm_dqtrxzone;
......@@ -136,24 +142,13 @@ typedef struct xfs_quotainfo {
} xfs_quotainfo_t;
/*
* The structure kept inside the xfs_trans_t keep track of dquot changes
* within a transaction and apply them later.
*/
typedef struct xfs_dqtrx {
struct xfs_dquot *qt_dquot; /* the dquot this refers to */
ulong qt_blk_res; /* blks reserved on a dquot */
ulong qt_blk_res_used; /* blks used from the reservation */
ulong qt_ino_res; /* inode reserved on a dquot */
ulong qt_ino_res_used; /* inodes used from the reservation */
long qt_bcount_delta; /* dquot blk count changes */
long qt_delbcnt_delta; /* delayed dquot blk count changes */
long qt_icount_delta; /* dquot inode count changes */
ulong qt_rtblk_res; /* # blks reserved on a dquot */
ulong qt_rtblk_res_used;/* # blks used from reservation */
long qt_rtbcount_delta;/* dquot realtime blk changes */
long qt_delrtb_delta; /* delayed RT blk count changes */
} xfs_dqtrx_t;
extern xfs_dqtrxops_t xfs_trans_dquot_ops;
extern void xfs_trans_mod_dquot(xfs_trans_t *, xfs_dquot_t *, uint, long);
extern int xfs_trans_reserve_quota_bydquots(xfs_trans_t *, xfs_mount_t *,
xfs_dquot_t *, xfs_dquot_t *, long, long, uint);
extern void xfs_trans_dqjoin(xfs_trans_t *, xfs_dquot_t *);
extern void xfs_trans_log_dquot(xfs_trans_t *, xfs_dquot_t *);
/*
* We keep the usr and grp dquots separately so that locking will be easier
......@@ -184,9 +179,33 @@ typedef struct xfs_dquot_acct {
extern int xfs_qm_init_quotainfo(xfs_mount_t *);
extern void xfs_qm_destroy_quotainfo(xfs_mount_t *);
extern int xfs_qm_mount_quotas(xfs_mount_t *);
extern void xfs_qm_mount_quotainit(xfs_mount_t *, uint);
extern void xfs_qm_unmount_quotadestroy(xfs_mount_t *);
extern int xfs_qm_unmount_quotas(xfs_mount_t *);
extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
extern int xfs_qm_sync(xfs_mount_t *, short);
/* dquot stuff */
extern void xfs_qm_dqunlink(xfs_dquot_t *);
extern boolean_t xfs_qm_dqalloc_incore(xfs_dquot_t **);
extern int xfs_qm_write_sb_changes(xfs_mount_t *, __int64_t);
extern int xfs_qm_dqattach(xfs_inode_t *, uint);
extern void xfs_qm_dqdetach(xfs_inode_t *);
extern int xfs_qm_dqpurge_all(xfs_mount_t *, uint);
extern void xfs_qm_dqrele_all_inodes(xfs_mount_t *, uint);
/* vop stuff */
extern int xfs_qm_vop_dqalloc(xfs_mount_t *, xfs_inode_t *,
uid_t, gid_t, uint,
xfs_dquot_t **, xfs_dquot_t **);
extern void xfs_qm_vop_dqattach_and_dqmod_newinode(
xfs_trans_t *, xfs_inode_t *,
xfs_dquot_t *, xfs_dquot_t *);
extern int xfs_qm_vop_rename_dqattach(xfs_inode_t **);
extern xfs_dquot_t * xfs_qm_vop_chown(xfs_trans_t *, xfs_inode_t *,
xfs_dquot_t **, xfs_dquot_t *);
extern int xfs_qm_vop_chown_reserve(xfs_trans_t *, xfs_inode_t *,
xfs_dquot_t *, xfs_dquot_t *, uint);
/* list stuff */
extern void xfs_qm_freelist_init(xfs_frlist_t *);
......@@ -207,10 +226,4 @@ extern int xfs_qm_internalqcheck(xfs_mount_t *);
#define xfs_qm_internalqcheck(mp) (0)
#endif
#ifdef QUOTADEBUG
extern void xfs_qm_freelist_print(xfs_frlist_t *, char *);
#else
#define xfs_qm_freelist_print(a, b) do { } while (0)
#endif
#endif /* __XFS_QM_H__ */
This diff is collapsed.
/*
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
#include <xfs.h>
#include <linux/proc_fs.h>
#include "xfs_qm.h"
struct xqmstats xqmstats;
/*
 * /proc/fs/xfs/xqm read handler: report global dquot cache state.
 *
 * Output fields (tab separated): system-wide dquot maximum (ndquot),
 * incore dquot count, free-to-inuse ratio, and freelist length.
 * xfs_Gqm is NULL until the quota manager is initialized, so each
 * of the last three fields falls back to 0 in that case.
 */
STATIC int
xfs_qm_read_xfsquota(
	char		*buffer,
	char		**start,
	off_t		offset,
	int		count,
	int		*eof,
	void		*data)
{
	int		len;

	/* maximum; incore; ratio free to inuse; freelist */
	len = sprintf(buffer, "%d\t%d\t%d\t%u\n",
			ndquot,
			xfs_Gqm? atomic_read(&xfs_Gqm->qm_totaldquots) : 0,
			xfs_Gqm? xfs_Gqm->qm_dqfree_ratio : 0,
			xfs_Gqm? xfs_Gqm->qm_dqfreelist.qh_nelems : 0);

	/*
	 * Standard read_proc bookkeeping: the caller keeps reading until
	 * *eof is set.  The whole report was formatted in one go above,
	 * so signal EOF as soon as the remainder fits in 'count' bytes.
	 */
	if (offset >= len) {
		*start = buffer;
		*eof = 1;
		return 0;
	}
	*start = buffer + offset;
	/* more than one chunk left: return a full chunk, no EOF yet */
	if ((len -= offset) > count)
		return count;
	*eof = 1;
	return len;
}
/*
 * /proc/fs/xfs/xqmstat read handler: dump the eight cumulative XQM
 * performance counters from the global 'xqmstats' on a single line,
 * prefixed with "qm", in declaration order.
 */
STATIC int
xfs_qm_read_stats(
	char		*buffer,
	char		**start,
	off_t		offset,
	int		count,
	int		*eof,
	void		*data)
{
	int		len;

	/* quota performance statistics */
	len = sprintf(buffer, "qm %u %u %u %u %u %u %u %u\n",
			xqmstats.xs_qm_dqreclaims,
			xqmstats.xs_qm_dqreclaim_misses,
			xqmstats.xs_qm_dquot_dups,
			xqmstats.xs_qm_dqcachemisses,
			xqmstats.xs_qm_dqcachehits,
			xqmstats.xs_qm_dqwants,
			xqmstats.xs_qm_dqshake_reclaims,
			xqmstats.xs_qm_dqinact_reclaims);

	/*
	 * Standard read_proc bookkeeping: set *eof and return the short
	 * length once the remaining data fits into the caller's buffer.
	 */
	if (offset >= len) {
		*start = buffer;
		*eof = 1;
		return 0;
	}
	*start = buffer + offset;
	if ((len -= offset) > count)
		return count;
	*eof = 1;
	return len;
}
void
xfs_qm_init_procfs(void)
{
create_proc_read_entry("fs/xfs/xqmstat", 0, 0, xfs_qm_read_stats, NULL);
create_proc_read_entry("fs/xfs/xqm", 0, 0, xfs_qm_read_xfsquota, NULL);
}
/*
 * Tear down the /proc/fs/xfs entries installed by xfs_qm_init_procfs.
 */
void
xfs_qm_cleanup_procfs(void)
{
	static const char	*proc_entries[] = {
		"fs/xfs/xqm",
		"fs/xfs/xqmstat",
	};
	int			i;

	for (i = 0; i < 2; i++)
		remove_proc_entry(proc_entries[i], NULL);
}
/*
* Copyright (c) 2002 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
#ifndef __XFS_QM_STATS_H__
#define __XFS_QM_STATS_H__
#if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)
/*
* XQM global statistics
*/
/*
 * XQM global statistics: eight cumulative __uint32_t counters, bumped
 * via XQM_STATS_INC() and reported through /proc/fs/xfs/xqmstat by
 * xfs_qm_read_stats() in the order declared here.  (Exact event
 * semantics live with the increment sites in the quota manager —
 * verify there before relying on a particular counter's meaning.)
 */
struct xqmstats {
	__uint32_t		xs_qm_dqreclaims;
	__uint32_t		xs_qm_dqreclaim_misses;
	__uint32_t		xs_qm_dquot_dups;
	__uint32_t		xs_qm_dqcachemisses;
	__uint32_t		xs_qm_dqcachehits;
	__uint32_t		xs_qm_dqwants;
	__uint32_t		xs_qm_dqshake_reclaims;
	__uint32_t		xs_qm_dqinact_reclaims;
};
extern struct xqmstats xqmstats;
# define XQM_STATS_INC(count) ( (count)++ )
extern void xfs_qm_init_procfs(void);
extern void xfs_qm_cleanup_procfs(void);
#else
# define XQM_STATS_INC(count) do { } while (0)
/* No-op stub when procfs stats are compiled out.  (Dropped the stray
 * ';' after the body — an empty file-scope declaration is invalid in
 * strict ISO C and triggers -Wpedantic warnings.) */
static __inline void xfs_qm_init_procfs(void) { }
/* No-op stub when procfs stats are compiled out.  (Dropped the stray
 * ';' after the body — an empty file-scope declaration is invalid in
 * strict ISO C and triggers -Wpedantic warnings.) */
static __inline void xfs_qm_cleanup_procfs(void) { }
#endif
#endif /* __XFS_QM_STATS_H__ */
......@@ -31,7 +31,7 @@
*/
#include <xfs.h>
#include <xfs_quota_priv.h>
#include "xfs_qm.h"
#ifdef DEBUG
# define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args)
......
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -67,8 +67,8 @@
#define XQMLCK(h) (mutex_lock(&((h)->qh_lock), PINOD))
#define XQMUNLCK(h) (mutex_unlock(&((h)->qh_lock)))
#ifdef DEBUG
static inline int
XQMISLCKD(xfs_dqhash_t *h)
struct xfs_dqhash;
static inline int XQMISLCKD(struct xfs_dqhash *h)
{
if (mutex_trylock(&h->qh_lock)) {
mutex_unlock(&h->qh_lock);
......
......@@ -31,8 +31,9 @@
*/
#include <xfs.h>
#include <xfs_quota_priv.h>
#include "xfs_qm.h"
STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *);
/*
* Add the locked dquot to the transaction.
......@@ -95,7 +96,7 @@ xfs_trans_log_dquot(
* Carry forward whatever is left of the quota blk reservation to
* the spanky new transaction
*/
void
STATIC void
xfs_trans_dup_dqinfo(
xfs_trans_t *otp,
xfs_trans_t *ntp)
......@@ -104,6 +105,9 @@ xfs_trans_dup_dqinfo(
int i,j;
xfs_dqtrx_t *oqa, *nqa;
if (!otp->t_dqinfo)
return;
xfs_trans_alloc_dqinfo(ntp);
oqa = otp->t_dqinfo->dqa_usrdquots;
nqa = ntp->t_dqinfo->dqa_usrdquots;
......@@ -155,15 +159,23 @@ xfs_trans_mod_dquot_byino(
uint field,
long delta)
{
xfs_mount_t *mp;
ASSERT(tp);
mp = tp->t_mountp;
if (!XFS_IS_QUOTA_ON(mp) ||
ip->i_ino == mp->m_sb.sb_uquotino ||
ip->i_ino == mp->m_sb.sb_gquotino)
return;
if (tp->t_dqinfo == NULL)
xfs_trans_alloc_dqinfo(tp);
if (XFS_IS_UQUOTA_ON(tp->t_mountp) && ip->i_udquot) {
if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) {
(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
}
if (XFS_IS_GQUOTA_ON(tp->t_mountp) && ip->i_gdquot) {
if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot) {
(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
}
}
......@@ -318,7 +330,7 @@ xfs_trans_dqlockedjoin(
* xfs_trans_apply_sb_deltas().
* Go thru all the dquots belonging to this transaction and modify the
* INCORE dquot to reflect the actual usages.
* Unreserve just the reservations done by this transaction
* Unreserve just the reservations done by this transaction.
* dquot is still left locked at exit.
*/
void
......@@ -332,6 +344,9 @@ xfs_trans_apply_dquot_deltas(
long totalbdelta;
long totalrtbdelta;
if (! (tp->t_flags & XFS_TRANS_DQ_DIRTY))
return;
ASSERT(tp->t_dqinfo);
qa = tp->t_dqinfo->dqa_usrdquots;
for (j = 0; j < 2; j++) {
......@@ -481,13 +496,15 @@ xfs_trans_apply_dquot_deltas(
#ifdef QUOTADEBUG
if (qtrx->qt_rtblk_res != 0)
printk("RT res %d for 0x%p\n",
(int) qtrx->qt_rtblk_res,
dqp);
cmn_err(CE_DEBUG, "RT res %d for 0x%p\n",
(int) qtrx->qt_rtblk_res, dqp);
#endif
ASSERT(dqp->q_res_bcount >= INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
ASSERT(dqp->q_res_icount >= INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));
ASSERT(dqp->q_res_rtbcount >= INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
ASSERT(dqp->q_res_bcount >=
INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT));
ASSERT(dqp->q_res_icount >=
INT_GET(dqp->q_core.d_icount, ARCH_CONVERT));
ASSERT(dqp->q_res_rtbcount >=
INT_GET(dqp->q_core.d_rtbcount, ARCH_CONVERT));
}
/*
* Do the group quotas next
......@@ -503,7 +520,7 @@ xfs_trans_apply_dquot_deltas(
* we simply throw those away, since that's the expected behavior
* when a transaction is curtailed without a commit.
*/
void
STATIC void
xfs_trans_unreserve_and_mod_dquots(
xfs_trans_t *tp)
{
......@@ -512,7 +529,9 @@ xfs_trans_unreserve_and_mod_dquots(
xfs_dqtrx_t *qtrx, *qa;
boolean_t locked;
ASSERT(tp->t_dqinfo);
if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
return;
qa = tp->t_dqinfo->dqa_usrdquots;
for (j = 0; j < 2; j++) {
......@@ -604,8 +623,8 @@ xfs_trans_dqresv(
!INT_ISZERO(dqp->q_core.d_id, ARCH_CONVERT) &&
XFS_IS_QUOTA_ENFORCED(dqp->q_mount)) {
#ifdef QUOTADEBUG
printk("BLK Res: nblks=%ld + resbcount=%Ld > hardlimit=%Ld?\n",
nblks, *resbcountp, hardlimit);
cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld"
" > hardlimit=%Ld?", nblks, *resbcountp, hardlimit);
#endif
if (nblks > 0) {
/*
......@@ -713,6 +732,7 @@ xfs_trans_dqresv(
int
xfs_trans_reserve_quota_bydquots(
xfs_trans_t *tp,
xfs_mount_t *mp,
xfs_dquot_t *udqp,
xfs_dquot_t *gdqp,
long nblks,
......@@ -721,6 +741,9 @@ xfs_trans_reserve_quota_bydquots(
{
int resvd;
if (! XFS_IS_QUOTA_ON(mp))
return (0);
if (tp && tp->t_dqinfo == NULL)
xfs_trans_alloc_dqinfo(tp);
......@@ -760,9 +783,10 @@ xfs_trans_reserve_quota_bydquots(
*
* Returns 0 on success, EDQUOT or other errors otherwise
*/
int
STATIC int
xfs_trans_reserve_quota_nblks(
xfs_trans_t *tp,
xfs_mount_t *mp,
xfs_inode_t *ip,
long nblks,
long ninos,
......@@ -770,6 +794,12 @@ xfs_trans_reserve_quota_nblks(
{
int error;
if (!XFS_IS_QUOTA_ON(mp))
return (0);
ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
#ifdef QUOTADEBUG
if (ip->i_udquot)
ASSERT(! XFS_DQ_IS_LOCKED(ip->i_udquot));
......@@ -785,7 +815,7 @@ xfs_trans_reserve_quota_nblks(
/*
* Reserve nblks against these dquots, with trans as the mediator.
*/
error = xfs_trans_reserve_quota_bydquots(tp,
error = xfs_trans_reserve_quota_bydquots(tp, mp,
ip->i_udquot, ip->i_gdquot,
nblks, ninos,
type);
......@@ -836,17 +866,29 @@ xfs_trans_log_quotaoff_item(
lidp->lid_flags |= XFS_LID_DIRTY;
}
void
/*
 * Allocate and zero the per-transaction dquot bookkeeping area
 * (t_dqinfo) from the global quota manager's dqtrx zone.  Callers
 * invoke this lazily, only when t_dqinfo is still NULL; KM_SLEEP
 * means the allocation may block.
 */
STATIC void
xfs_trans_alloc_dqinfo(
xfs_trans_t *tp)
{
(tp)->t_dqinfo = kmem_zone_zalloc(xfs_Gqm->qm_dqtrxzone, KM_SLEEP);
}
void
/*
 * Release the transaction's dquot bookkeeping area, if one was ever
 * allocated, and clear the pointer so a second call is a no-op.
 */
STATIC void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (tp->t_dqinfo == NULL)
		return;
	kmem_zone_free(xfs_Gqm->qm_dqtrxzone, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}
/*
 * Operations vector hooking the transaction-dquot routines in this
 * file into the core XFS code.  The core's XFS_TRANS_* quota macros
 * apparently dispatch through these qo_* entries, so core XFS need
 * not reference the quota module directly.
 */
xfs_dqtrxops_t xfs_trans_dquot_ops = {
.qo_dup_dqinfo = xfs_trans_dup_dqinfo,
.qo_free_dqinfo = xfs_trans_free_dqinfo,
.qo_mod_dquot_byino = xfs_trans_mod_dquot_byino,
.qo_apply_dquot_deltas = xfs_trans_apply_dquot_deltas,
.qo_reserve_quota_nblks = xfs_trans_reserve_quota_nblks,
.qo_reserve_quota_bydquots = xfs_trans_reserve_quota_bydquots,
.qo_unreserve_and_mod_dquots = xfs_trans_unreserve_and_mod_dquots,
};
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
......@@ -66,6 +66,9 @@
#include <xfs_dir.h>
#include <xfs_dir2.h>
#include <xfs_imap.h>
#include <xfs_alloc.h>
#include <xfs_dmapi.h>
#include <xfs_quota.h>
#include <xfs_mount.h>
#include <xfs_alloc_btree.h>
#include <xfs_bmap_btree.h>
......@@ -77,17 +80,11 @@
#include <xfs_dir2_sf.h>
#include <xfs_dinode.h>
#include <xfs_inode.h>
#include <xfs_alloc.h>
#include <xfs_bmap.h>
#include <xfs_bit.h>
#include <xfs_rtalloc.h>
#include <xfs_error.h>
#include <xfs_quota.h>
#include <xfs_itable.h>
#include <xfs_dqblk.h>
#include <xfs_dquot_item.h>
#include <xfs_dquot.h>
#include <xfs_qm.h>
#include <xfs_rw.h>
#include <xfs_da_btree.h>
#include <xfs_dir_leaf.h>
......@@ -108,6 +105,5 @@
#include <xfs_trans_priv.h>
#include <xfs_trans_space.h>
#include <xfs_utils.h>
#include <xfs_dmapi.h>
#endif /* __XFS_H__ */
......@@ -197,10 +197,8 @@ xfs_attr_set(bhv_desc_t *bdp, char *name, char *value, int valuelen, int flags,
/*
* Attach the dquots to the inode.
*/
if (XFS_IS_QUOTA_ON(mp)) {
if ((error = xfs_qm_dqattach(dp, 0)))
if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
return (error);
}
/*
* If the inode doesn't have an attribute fork, add one.
......@@ -280,20 +278,14 @@ xfs_attr_set(bhv_desc_t *bdp, char *name, char *value, int valuelen, int flags,
}
xfs_ilock(dp, XFS_ILOCK_EXCL);
if (XFS_IS_QUOTA_ON(mp)) {
if (rsvd) {
error = xfs_trans_reserve_blkquota_force(args.trans,
dp, nblks);
} else {
error = xfs_trans_reserve_blkquota(args.trans,
dp, nblks);
}
error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, args.trans, dp, nblks, 0,
rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
XFS_QMOPT_RES_REGBLKS);
if (error) {
xfs_iunlock(dp, XFS_ILOCK_EXCL);
xfs_trans_cancel(args.trans, XFS_TRANS_RELEASE_LOG_RES);
return (error);
}
}
xfs_trans_ijoin(args.trans, dp, XFS_ILOCK_EXCL);
xfs_trans_ihold(args.trans, dp);
......@@ -483,12 +475,9 @@ xfs_attr_remove(bhv_desc_t *bdp, char *name, int flags, struct cred *cred)
/*
* Attach the dquots to the inode.
*/
if (XFS_IS_QUOTA_ON(mp)) {
if (XFS_NOT_DQATTACHED(mp, dp)) {
if ((error = xfs_qm_dqattach(dp, 0)))
if ((error = XFS_QM_DQATTACH(mp, dp, 0)))
return (error);
}
}
/*
* Start our first transaction of the day.
*
......
......@@ -2145,7 +2145,7 @@ xfs_bmap_alloc(
*/
mp = ap->ip->i_mount;
nullfb = ap->firstblock == NULLFSBLOCK;
rt = (ap->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && ap->userdata;
rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
#ifdef __KERNEL__
if (rt) {
......@@ -2467,14 +2467,10 @@ xfs_bmap_alloc(
* Adjust the disk quota also. This was reserved
* earlier.
*/
if (XFS_IS_QUOTA_ON(mp) &&
ap->ip->i_ino != mp->m_sb.sb_uquotino &&
ap->ip->i_ino != mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ?
XFS_TRANS_DQ_DELRTBCOUNT :
XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
XFS_TRANS_DQ_RTBCOUNT,
(long)ralen);
(long) ralen);
} else
ap->alen = 0;
#endif /* __KERNEL__ */
......@@ -2691,14 +2687,10 @@ xfs_bmap_alloc(
* Adjust the disk quota also. This was reserved
* earlier.
*/
if (XFS_IS_QUOTA_ON(mp) &&
ap->ip->i_ino != mp->m_sb.sb_uquotino &&
ap->ip->i_ino != mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
ap->wasdel ?
XFS_TRANS_DQ_DELBCOUNT :
XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
XFS_TRANS_DQ_BCOUNT,
(long)args.len);
(long) args.len);
} else {
ap->rval = NULLFSBLOCK;
ap->alen = 0;
......@@ -2755,10 +2747,7 @@ xfs_bmap_btree_to_extents(
return error;
xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
ip->i_d.di_nblocks--;
if (XFS_IS_QUOTA_ON(mp) &&
ip->i_ino != mp->m_sb.sb_uquotino &&
ip->i_ino != mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
xfs_trans_binval(tp, cbp);
if (cur->bc_bufs[0] == cbp)
cur->bc_bufs[0] = NULL;
......@@ -2854,9 +2843,6 @@ xfs_bmap_del_extent(
goto done;
do_fx = 0;
nblks = len * mp->m_sb.sb_rextsize;
if (XFS_IS_QUOTA_ON(mp) &&
ip->i_ino != mp->m_sb.sb_uquotino &&
ip->i_ino != mp->m_sb.sb_gquotino)
qfield = XFS_TRANS_DQ_RTBCOUNT;
}
/*
......@@ -2865,9 +2851,6 @@ xfs_bmap_del_extent(
else {
do_fx = 1;
nblks = del->br_blockcount;
if (XFS_IS_QUOTA_ON(mp) &&
ip->i_ino != mp->m_sb.sb_uquotino &&
ip->i_ino != mp->m_sb.sb_gquotino)
qfield = XFS_TRANS_DQ_BCOUNT;
}
/*
......@@ -3088,7 +3071,8 @@ xfs_bmap_del_extent(
* Adjust quota data.
*/
if (qfield)
xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks);
/*
* Account for change in delayed indirect blocks.
* Nothing to do for disk quota accounting here.
......@@ -3239,10 +3223,7 @@ xfs_bmap_extents_to_btree(
*firstblock = cur->bc_private.b.firstblock = args.fsbno;
cur->bc_private.b.allocated++;
ip->i_d.di_nblocks++;
if (XFS_IS_QUOTA_ON(mp) &&
ip->i_ino != mp->m_sb.sb_uquotino &&
ip->i_ino != mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
/*
* Fill in the child block.
......@@ -3385,11 +3366,8 @@ xfs_bmap_local_to_extents(
xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork);
XFS_IFORK_NEXT_SET(ip, whichfork, 1);
ip->i_d.di_nblocks = 1;
if (XFS_IS_QUOTA_ON(args.mp) &&
ip->i_ino != args.mp->m_sb.sb_uquotino &&
ip->i_ino != args.mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
1L);
XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip,
XFS_TRANS_DQ_BCOUNT, 1L);
flags |= XFS_ILOG_FEXT(whichfork);
} else
ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
......@@ -3772,19 +3750,14 @@ xfs_bmap_add_attrfork(
XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
goto error0;
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (XFS_IS_QUOTA_ON(mp)) {
if (rsvd) {
error = xfs_trans_reserve_blkquota_force(tp, ip, blks);
} else {
error = xfs_trans_reserve_blkquota(tp, ip, blks);
}
error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ?
XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
XFS_QMOPT_RES_REGBLKS);
if (error) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
return error;
}
}
if (XFS_IFORK_Q(ip))
goto error1;
if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
......@@ -4655,8 +4628,8 @@ xfs_bmapi(
cur = NULL;
if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
ASSERT(wr && tp);
if ((error = xfs_bmap_local_to_extents(tp, ip, firstblock, total,
&logflags, whichfork)))
if ((error = xfs_bmap_local_to_extents(tp, ip,
firstblock, total, &logflags, whichfork)))
goto error0;
}
if (wr && *firstblock == NULLFSBLOCK) {
......@@ -4730,9 +4703,8 @@ xfs_bmapi(
* We return EDQUOT if we haven't allocated
* blks already inside this loop;
*/
if (XFS_IS_QUOTA_ON(ip->i_mount) &&
xfs_trans_reserve_blkquota(NULL, ip,
(long)alen)) {
if (XFS_TRANS_RESERVE_BLKQUOTA(
mp, NULL, ip, (long)alen)) {
if (n == 0) {
*nmap = 0;
ASSERT(cur == NULL);
......@@ -4740,12 +4712,10 @@ xfs_bmapi(
}
break;
}
if (xfs_mod_incore_sb(ip->i_mount,
XFS_SBS_FDBLOCKS,
if (xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
-(alen + indlen), rsvd)) {
if (XFS_IS_QUOTA_ON(ip->i_mount))
xfs_trans_unreserve_blkquota(
NULL, ip, (long)alen);
XFS_TRANS_UNRESERVE_BLKQUOTA(
mp, NULL, ip, (long)alen);
break;
}
ip->i_delayed_blks += alen;
......@@ -4808,15 +4778,11 @@ xfs_bmapi(
alen = bma.alen;
aoff = bma.off;
ASSERT(*firstblock == NULLFSBLOCK ||
XFS_FSB_TO_AGNO(ip->i_mount,
*firstblock) ==
XFS_FSB_TO_AGNO(ip->i_mount,
bma.firstblock) ||
XFS_FSB_TO_AGNO(mp, *firstblock) ==
XFS_FSB_TO_AGNO(mp, bma.firstblock) ||
(flist->xbf_low &&
XFS_FSB_TO_AGNO(ip->i_mount,
*firstblock) <
XFS_FSB_TO_AGNO(ip->i_mount,
bma.firstblock)));
XFS_FSB_TO_AGNO(mp, *firstblock) <
XFS_FSB_TO_AGNO(mp, bma.firstblock)));
*firstblock = bma.firstblock;
if (cur)
cur->bc_private.b.firstblock =
......@@ -4824,7 +4790,7 @@ xfs_bmapi(
if (abno == NULLFSBLOCK)
break;
if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
cur = xfs_btree_init_cursor(ip->i_mount,
cur = xfs_btree_init_cursor(mp,
tp, NULL, 0, XFS_BTNUM_BMAP,
ip, whichfork);
cur->bc_private.b.firstblock =
......@@ -4941,7 +4907,7 @@ xfs_bmapi(
*/
ASSERT(mval->br_blockcount <= len);
if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
cur = xfs_btree_init_cursor(ip->i_mount,
cur = xfs_btree_init_cursor(mp,
tp, NULL, 0, XFS_BTNUM_BMAP,
ip, whichfork);
cur->bc_private.b.firstblock =
......@@ -5063,12 +5029,12 @@ xfs_bmapi(
if (cur) {
if (!error) {
ASSERT(*firstblock == NULLFSBLOCK ||
XFS_FSB_TO_AGNO(ip->i_mount, *firstblock) ==
XFS_FSB_TO_AGNO(ip->i_mount,
XFS_FSB_TO_AGNO(mp, *firstblock) ==
XFS_FSB_TO_AGNO(mp,
cur->bc_private.b.firstblock) ||
(flist->xbf_low &&
XFS_FSB_TO_AGNO(ip->i_mount, *firstblock) <
XFS_FSB_TO_AGNO(ip->i_mount,
XFS_FSB_TO_AGNO(mp, *firstblock) <
XFS_FSB_TO_AGNO(mp,
cur->bc_private.b.firstblock)));
*firstblock = cur->bc_private.b.firstblock;
}
......@@ -5378,16 +5344,11 @@ xfs_bunmapi(
ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
(int)del.br_blockcount, rsvd);
if (XFS_IS_QUOTA_ON(ip->i_mount)) {
ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
if (!isrt)
xfs_trans_unreserve_blkquota(NULL, ip,
(long)del.br_blockcount);
else
xfs_trans_unreserve_rtblkquota(NULL, ip,
(long)del.br_blockcount);
}
/* Unreserve our quota space */
XFS_TRANS_RESERVE_QUOTA_NBLKS(
mp, NULL, ip, -((long)del.br_blockcount), 0,
isrt ? XFS_QMOPT_RES_RTBLKS :
XFS_QMOPT_RES_REGBLKS);
ip->i_delayed_blks -= del.br_blockcount;
if (cur)
cur->bc_private.b.flags |=
......@@ -5556,8 +5517,7 @@ xfs_getbmap(
&& DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)
&& whichfork == XFS_DATA_FORK) {
error = xfs_dm_send_data_event(DM_EVENT_READ, bdp,
0, 0, 0, NULL);
error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, 0, 0, 0, NULL);
if (error)
return XFS_ERROR(error);
}
......@@ -5579,7 +5539,6 @@ xfs_getbmap(
ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
return XFS_ERROR(EINVAL);
if (whichfork == XFS_DATA_FORK) {
if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) {
prealloced = 1;
......@@ -5928,10 +5887,13 @@ xfs_check_block(
thispa = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
xfs_bmbt, block, j, dmxr);
}
if (INT_GET(*thispa, ARCH_CONVERT) == INT_GET(*pp, ARCH_CONVERT)) {
printk("xfs_check_block: thispa(%d) == pp(%d) %Ld\n",
j, i, INT_GET(*thispa, ARCH_CONVERT));
panic("xfs_check_block: ptrs are equal in node\n");
if (INT_GET(*thispa, ARCH_CONVERT) ==
INT_GET(*pp, ARCH_CONVERT)) {
cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
__FUNCTION__, j, i,
INT_GET(*thispa, ARCH_CONVERT));
panic("%s: ptrs are equal in node\n",
__FUNCTION__);
}
}
}
......@@ -6089,12 +6051,13 @@ xfs_bmap_check_leaf_extents(
return;
error0:
printk("at error0\n");
cmn_err(CE_WARN, "%s: at error0", __FUNCTION__);
if (bp_release)
xfs_trans_brelse(NULL, bp);
error_norelse:
printk("xfs_bmap_check_leaf_extents: BAD after btree leaves for %d extents\n", i);
panic("xfs_bmap_check_leaf_extents: CORRUPTED BTREE OR SOMETHING");
cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
i, __FUNCTION__);
panic("%s: CORRUPTED BTREE OR SOMETHING", __FUNCTION__);
return;
}
#endif
......
......@@ -629,8 +629,8 @@ xfs_bmbt_delrec(
xfs_bmbt_log_block(cur, lbp, XFS_BB_RIGHTSIB | XFS_BB_NUMRECS);
if (INT_GET(left->bb_rightsib, ARCH_CONVERT) != NULLDFSBNO) {
if ((error = xfs_btree_read_bufl(mp, cur->bc_tp,
INT_GET(left->bb_rightsib, ARCH_CONVERT), 0, &rrbp,
XFS_BMAP_BTREE_REF))) {
INT_GET(left->bb_rightsib, ARCH_CONVERT),
0, &rrbp, XFS_BMAP_BTREE_REF))) {
XFS_BMBT_TRACE_CURSOR(cur, ERROR);
goto error0;
}
......@@ -646,10 +646,7 @@ xfs_bmbt_delrec(
cur->bc_private.b.flist, mp);
cur->bc_private.b.ip->i_d.di_nblocks--;
xfs_trans_log_inode(cur->bc_tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
if (XFS_IS_QUOTA_ON(mp) &&
cur->bc_private.b.ip->i_ino != mp->m_sb.sb_uquotino &&
cur->bc_private.b.ip->i_ino != mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(cur->bc_tp, cur->bc_private.b.ip,
XFS_TRANS_MOD_DQUOT_BYINO(mp, cur->bc_tp, cur->bc_private.b.ip,
XFS_TRANS_DQ_BCOUNT, -1L);
xfs_trans_binval(cur->bc_tp, rbp);
if (bp != lbp) {
......@@ -988,11 +985,8 @@ xfs_bmbt_killroot(
xfs_bmap_add_free(XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(cbp)), 1,
cur->bc_private.b.flist, cur->bc_mp);
ip->i_d.di_nblocks--;
if (XFS_IS_QUOTA_ON(cur->bc_mp) &&
ip->i_ino != cur->bc_mp->m_sb.sb_uquotino &&
ip->i_ino != cur->bc_mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(cur->bc_tp, ip, XFS_TRANS_DQ_BCOUNT,
-1L);
XFS_TRANS_MOD_DQUOT_BYINO(cur->bc_mp, cur->bc_tp, ip,
XFS_TRANS_DQ_BCOUNT, -1L);
xfs_trans_binval(cur->bc_tp, cbp);
cur->bc_bufs[level - 1] = NULL;
INT_MOD(block->bb_level, ARCH_CONVERT, -1);
......@@ -1589,10 +1583,7 @@ xfs_bmbt_split(
cur->bc_private.b.allocated++;
cur->bc_private.b.ip->i_d.di_nblocks++;
xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
if (XFS_IS_QUOTA_ON(args.mp) &&
cur->bc_private.b.ip->i_ino != args.mp->m_sb.sb_uquotino &&
cur->bc_private.b.ip->i_ino != args.mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
XFS_TRANS_DQ_BCOUNT, 1L);
rbp = xfs_btree_get_bufl(args.mp, args.tp, args.fsbno, 0);
right = XFS_BUF_TO_BMBT_BLOCK(rbp);
......@@ -2390,10 +2381,7 @@ xfs_bmbt_newroot(
cur->bc_private.b.firstblock = args.fsbno;
cur->bc_private.b.allocated++;
cur->bc_private.b.ip->i_d.di_nblocks++;
if (XFS_IS_QUOTA_ON(args.mp) &&
cur->bc_private.b.ip->i_ino != args.mp->m_sb.sb_uquotino &&
cur->bc_private.b.ip->i_ino != args.mp->m_sb.sb_gquotino)
xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
XFS_TRANS_MOD_DQUOT_BYINO(args.mp, args.tp, cur->bc_private.b.ip,
XFS_TRANS_DQ_BCOUNT, 1L);
bp = xfs_btree_get_bufl(args.mp, cur->bc_tp, args.fsbno, 0);
cblock = XFS_BUF_TO_BMBT_BLOCK(bp);
......
......@@ -32,11 +32,6 @@
#ifndef __XFS_DMAPI_H__
#define __XFS_DMAPI_H__
#ifdef CONFIG_XFS_DMAPI
#include <dmapi/dmapi.h>
#include <dmapi/dmapi_kern.h>
/* Values used to define the on-disk version of dm_attrname_t. All
* on-disk attribute names start with the 8-byte string "SGI_DMI_".
*
......@@ -48,6 +43,42 @@
#define DMATTR_PREFIXLEN 8
#define DMATTR_PREFIXSTRING "SGI_DMI_"
/*
 * DMAPI (XDSM) event types.  The values are assigned explicitly and
 * presumably fixed by the DMAPI interface -- confirm against the XDSM
 * specification before renumbering.  Entries marked "not supported"
 * are defined for completeness only.
 */
typedef enum {
DM_EVENT_INVALID = -1,
DM_EVENT_CANCEL = 0, /* not supported */
DM_EVENT_MOUNT = 1,
DM_EVENT_PREUNMOUNT = 2,
DM_EVENT_UNMOUNT = 3,
DM_EVENT_DEBUT = 4, /* not supported */
DM_EVENT_CREATE = 5,
DM_EVENT_CLOSE = 6, /* not supported */
DM_EVENT_POSTCREATE = 7,
DM_EVENT_REMOVE = 8,
DM_EVENT_POSTREMOVE = 9,
DM_EVENT_RENAME = 10,
DM_EVENT_POSTRENAME = 11,
DM_EVENT_LINK = 12,
DM_EVENT_POSTLINK = 13,
DM_EVENT_SYMLINK = 14,
DM_EVENT_POSTSYMLINK = 15,
DM_EVENT_READ = 16,
DM_EVENT_WRITE = 17,
DM_EVENT_TRUNCATE = 18,
DM_EVENT_ATTRIBUTE = 19,
DM_EVENT_DESTROY = 20,
DM_EVENT_NOSPACE = 21,
DM_EVENT_USER = 22,
DM_EVENT_MAX = 23
} dm_eventtype_t;
/* Marker: this header provides dm_eventtype_t (guards other definitions). */
#define HAVE_DM_EVENTTYPE_T
/*
 * DMAPI access-right levels held on an object: none, shared, or
 * exclusive.
 */
typedef enum {
DM_RIGHT_NULL,
DM_RIGHT_SHARED,
DM_RIGHT_EXCL
} dm_right_t;
/* Marker: this header provides dm_right_t (guards other definitions). */
#define HAVE_DM_RIGHT_T
/* Defines for determining if an event message should be sent. */
#define DM_EVENT_ENABLED(vfsp, ip, event) ( \
unlikely ((vfsp)->vfs_flag & VFS_DMI) && \
......@@ -58,23 +89,6 @@
#define DM_EVENT_ENABLED_IO(vfsp, io, event) ( \
unlikely ((vfsp)->vfs_flag & VFS_DMI) && \
( ((io)->io_dmevmask & (1 << event)) || \
((io)->io_mount->m_dmevmask & (1 << event)) ) \
)
/*
* Macros to turn caller specified delay/block flags into
* dm_send_xxxx_event flag DM_FLAGS_NDELAY.
*/
#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
DM_FLAGS_NDELAY : 0)
#define AT_DELAY_FLAG(f) ((f&ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0)
/* events valid in dm_set_eventlist() when called with a filesystem handle.
These events are not persistent.
*/
#define DM_XFS_VALID_FS_EVENTS ( \
(1 << DM_EVENT_PREUNMOUNT) | \
......@@ -120,7 +134,6 @@
(1 << DM_EVENT_ATTRIBUTE) | \
(1 << DM_EVENT_DESTROY) )
/* Events supported by the XFS filesystem. */
#define DM_XFS_SUPPORTED_EVENTS ( \
(1 << DM_EVENT_MOUNT) | \
......@@ -144,156 +157,34 @@
(1 << DM_EVENT_DESTROY) )
extern int
xfs_dm_get_fsys_vector(
bhv_desc_t *bdp,
caddr_t vecrq);
extern int
xfs_dm_send_data_event(
dm_eventtype_t event,
bhv_desc_t *bdp,
xfs_off_t offset,
size_t length,
int flags,
vrwlock_t *locktype);
extern int
xfs_dm_send_mmap_event(
struct vm_area_struct *vma,
unsigned int wantflag);
#else /* CONFIG_XFS_DMAPI */
/*
* Flags needed to build with dmapi disabled.
*/
typedef enum {
DM_EVENT_INVALID = -1,
DM_EVENT_CANCEL = 0, /* not supported */
DM_EVENT_MOUNT = 1,
DM_EVENT_PREUNMOUNT = 2,
DM_EVENT_UNMOUNT = 3,
DM_EVENT_DEBUT = 4, /* not supported */
DM_EVENT_CREATE = 5,
DM_EVENT_CLOSE = 6, /* not supported */
DM_EVENT_POSTCREATE = 7,
DM_EVENT_REMOVE = 8,
DM_EVENT_POSTREMOVE = 9,
DM_EVENT_RENAME = 10,
DM_EVENT_POSTRENAME = 11,
DM_EVENT_LINK = 12,
DM_EVENT_POSTLINK = 13,
DM_EVENT_SYMLINK = 14,
DM_EVENT_POSTSYMLINK = 15,
DM_EVENT_READ = 16,
DM_EVENT_WRITE = 17,
DM_EVENT_TRUNCATE = 18,
DM_EVENT_ATTRIBUTE = 19,
DM_EVENT_DESTROY = 20,
DM_EVENT_NOSPACE = 21,
DM_EVENT_USER = 22,
DM_EVENT_MAX = 23
} dm_eventtype_t;
typedef enum {
DM_RIGHT_NULL,
DM_RIGHT_SHARED,
DM_RIGHT_EXCL
} dm_right_t;
/*
* Defines for determining if an event message should be sent.
*/
#define DM_EVENT_ENABLED(vfsp, ip, event) 0
#define DM_EVENT_ENABLED_IO(vfsp, io, event) 0
/*
* Stubbed out DMAPI delay macros.
*/
#define FILP_DELAY_FLAG(filp) 0
#define AT_DELAY_FLAG(f) 0
/*
* Events supported by the XFS filesystem.
*/
#define DM_XFS_VALID_FS_EVENTS 0
#define DM_XFS_VALID_FILE_EVENTS 0
#define DM_XFS_VALID_DIRECTORY_EVENTS 0
#define DM_XFS_SUPPORTED_EVENTS 0
/*
* Dummy definitions used for the flags field on dm_send_*_event().
* Definitions used for the flags field on dm_send_*_event().
*/
#define DM_FLAGS_NDELAY 0x001 /* return EAGAIN after dm_pending() */
#define DM_FLAGS_UNWANTED 0x002 /* event not in fsys dm_eventset_t */
/*
* Stubs for XFS DMAPI utility routines.
* Macros to turn caller specified delay/block flags into
* dm_send_xxxx_event flag DM_FLAGS_NDELAY.
*/
/*
 * Stub for builds without CONFIG_XFS_DMAPI: data events cannot be
 * sent, so fail with ENOSYS (positive errno; callers negate it).
 */
static __inline int
xfs_dm_send_data_event(
dm_eventtype_t event,
bhv_desc_t *bdp,
xfs_off_t offset,
size_t length,
int flags,
vrwlock_t *locktype)
{
return ENOSYS;
}
/*
 * Stub for builds without CONFIG_XFS_DMAPI: mmap events are silently
 * accepted -- returning 0 lets the mmap proceed.
 */
static __inline int
xfs_dm_send_mmap_event(
struct vm_area_struct *vma,
unsigned int wantflag)
{
return 0;
}
#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
DM_FLAGS_NDELAY : 0)
#define AT_DELAY_FLAG(f) ((f&ATTR_NONBLOCK) ? DM_FLAGS_NDELAY : 0)
/*
* Stubs for routines needed for the X/Open version of DMAPI.
* Macros to turn caller specified delay/block flags into
* dm_send_xxxx_event flag DM_FLAGS_NDELAY.
*/
/*
 * X/Open DMAPI stub (no CONFIG_XFS_DMAPI): destroy events cannot be
 * delivered, so report ENOSYS.
 */
static __inline int
dm_send_destroy_event(
bhv_desc_t *bdp,
dm_right_t vp_right)
{
return ENOSYS;
}
#define FILP_DELAY_FLAG(filp) ((filp->f_flags&(O_NDELAY|O_NONBLOCK)) ? \
DM_FLAGS_NDELAY : 0)
/*
 * X/Open DMAPI stub (no CONFIG_XFS_DMAPI): namespace events (create,
 * remove, rename, link, ...) cannot be delivered, so report ENOSYS.
 */
static __inline int
dm_send_namesp_event(
dm_eventtype_t event,
bhv_desc_t *bdp1,
dm_right_t vp1_right,
bhv_desc_t *bdp2,
dm_right_t vp2_right,
char *name1,
char *name2,
mode_t mode,
int retcode,
int flags)
{
return ENOSYS;
}
extern struct bhv_vfsops xfs_dmops;
/*
 * X/Open DMAPI stub (no CONFIG_XFS_DMAPI): unmount events are dropped
 * silently -- void return, nothing to report to the caller.
 */
static __inline void
dm_send_unmount_event(
vfs_t *vfsp,
vnode_t *vp,
dm_right_t vfsp_right,
mode_t mode,
int retcode,
int flags)
{
}
extern void xfs_dm_init(void);
extern void xfs_dm_exit(void);
#endif /* CONFIG_XFS_DMAPI */
#endif /* __XFS_DMAPI_H__ */
......@@ -29,103 +29,14 @@
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
#include <xfs.h>
#define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */
/*
 * Behavior-chain mount-option parser for the DMAPI layer.
 *
 * Scans the comma-separated option string for "dmapi"/"xdsm", records
 * XFSMNT_DMAPI, blanks out the options it consumed, then hands the
 * remaining string to the next behavior via PVFS_PARSEARGS.  Returns
 * 0 on success or a positive errno-style error.
 */
STATIC int
xfs_dm_parseargs(
struct bhv_desc *bhv,
char *options,
struct xfs_mount_args *args,
int update)
{
size_t length;
char *local_options = options;
char *this_char;
int error;
/* strsep() NUL-terminates each token in place as it walks. */
while ((this_char = strsep(&local_options, ",")) != NULL) {
length = strlen(this_char);
/* Include the separator strsep replaced, if there is one. */
if (local_options)
length++;
if (!strcmp(this_char, MNTOPT_DMAPI)) {
args->flags |= XFSMNT_DMAPI;
} else if (!strcmp(this_char, MNTOPT_XDSM)) {
args->flags |= XFSMNT_DMAPI;
} else {
/* Not our option: restore the ',' so lower layers see it. */
if (local_options)
*(local_options-1) = ',';
continue;
}
/* Our option: overwrite it with commas so lower layers skip it. */
while (length--)
*this_char++ = ',';
}
PVFS_PARSEARGS(BHV_NEXT(bhv), options, args, update, error);
/* A DMAPI mount must supply the mount point path. */
if (!error && (args->flags & XFSMNT_DMAPI) && (*args->mtpt == '\0'))
error = EINVAL;
/* Fresh mount without dmapi: remove this behavior from the chain. */
if (!error && !update && !(args->flags & XFSMNT_DMAPI))
bhv_remove_vfsops(bhvtovfs(bhv), VFS_POSITION_DM);
return error;
}
/*
 * Emit the "dmapi" mount option into the seq_file options display
 * when the filesystem was mounted with DMI enabled, then let the next
 * behavior in the chain append its own options.
 */
STATIC int
xfs_dm_showargs(
	struct bhv_desc		*bhv,
	struct seq_file		*m)
{
	struct vfs		*vfs = bhvtovfs(bhv);
	int			err;

	if (vfs->vfs_flag & VFS_DMI)
		seq_puts(m, "," MNTOPT_DMAPI);

	PVFS_SHOWARGS(BHV_NEXT(bhv), m, err);
	return err;
}
/*
 * Behavior-chain mount hook for the DMAPI layer.
 *
 * Lets the rest of the stack mount first (PVFS_MOUNT); on success, if
 * the user asked for DMAPI, flags the vfs with VFS_DMI and sends the
 * DMAPI mount event against the XFS behavior of the root vnode.
 * Returns 0 or the first error encountered.
 */
STATIC int
xfs_dm_mount(
struct bhv_desc *bhv,
struct xfs_mount_args *args,
struct cred *cr)
{
struct bhv_desc *rootbdp;
struct vnode *rootvp;
struct vfs *vfsp;
int error = 0;
PVFS_MOUNT(BHV_NEXT(bhv), args, cr, error);
if (error)
return error;
if (args->flags & XFSMNT_DMAPI) {
vfsp = bhvtovfs(bhv);
VFS_ROOT(vfsp, &rootvp, error);
if (!error) {
vfsp->vfs_flag |= VFS_DMI;
/* Locate the XFS behavior on the root vnode's chain. */
rootbdp = vn_bhv_lookup_unlocked(
VN_BHV_HEAD(rootvp), &xfs_vnodeops);
/* Reference taken by VFS_ROOT is no longer needed. */
VN_RELE(rootvp);
error = dm_send_mount_event(vfsp, DM_RIGHT_NULL, NULL,
DM_RIGHT_NULL, rootbdp, DM_RIGHT_NULL,
args->mtpt, args->fsname);
}
}
return error;
}
vfsops_t xfs_dmops_xfs = {
BHV_IDENTITY_INIT(VFS_BHV_DM, VFS_POSITION_DM),
.vfs_mount = xfs_dm_mount,
.vfs_parseargs = xfs_dm_parseargs,
.vfs_showargs = xfs_dm_showargs,
.vfs_dmapiops = xfs_dm_get_fsys_vector,
#ifndef CONFIG_XFS_DMAPI
/*
 * DMAPI operations vector used when CONFIG_XFS_DMAPI is not built in:
 * each send hook degrades to a generic stub -- fs_nosys/fs_noerr/
 * fs_noval presumably fail with ENOSYS, succeed, and do nothing
 * respectively; confirm against their definitions.
 */
xfs_dmops_t xfs_dmcore_xfs = {
.xfs_send_data = (xfs_send_data_t)fs_nosys,
.xfs_send_mmap = (xfs_send_mmap_t)fs_noerr,
.xfs_send_destroy = (xfs_send_destroy_t)fs_nosys,
.xfs_send_namesp = (xfs_send_namesp_t)fs_nosys,
.xfs_send_unmount = (xfs_send_unmount_t)fs_noval,
};
......@@ -591,9 +591,7 @@ xfs_ireclaim(xfs_inode_t *ip)
* Release dquots (and their references) if any. An inode may escape
* xfs_inactive and get here via vn_alloc->vn_reclaim path.
*/
if (ip->i_udquot || ip->i_gdquot) {
xfs_qm_dqdettach_inode(ip);
}
XFS_QM_DQDETACH(ip->i_mount, ip);
/*
* Pull our behavior descriptor from the vnode chain.
......
......@@ -136,16 +136,15 @@ xlog_bwrite(
/*
* check log record header for recovery
*/
static void
xlog_header_check_dump(xfs_mount_t *mp, xlog_rec_header_t *head)
{
int b;
printk("xlog_header_check_dump:\n SB : uuid = ");
printk("%s: SB : uuid = ", __FUNCTION__);
for (b=0;b<16;b++) printk("%02x",((unsigned char *)&mp->m_sb.sb_uuid)[b]);
printk(", fmt = %d\n",XLOG_FMT);
printk(" log: uuid = ");
printk(" log : uuid = ");
for (b=0;b<16;b++) printk("%02x",((unsigned char *)&head->h_fs_uuid)[b]);
printk(", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT));
}
......@@ -1813,7 +1812,6 @@ xlog_recover_do_reg_buffer(xfs_mount_t *mp,
*/
error = 0;
if (buf_f->blf_flags & (XFS_BLI_UDQUOT_BUF|XFS_BLI_GDQUOT_BUF)) {
/* OK, if this returns ENOSYS */
error = xfs_qm_dqcheck((xfs_disk_dquot_t *)
item->ri_buf[i].i_addr,
-1, 0, XFS_QMOPT_DOWARN,
......@@ -1832,6 +1830,120 @@ xlog_recover_do_reg_buffer(xfs_mount_t *mp,
ASSERT(i == item->ri_total);
} /* xlog_recover_do_reg_buffer */
/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * ddq:   on-disk dquot to check
 * id:    expected dquot ID, or -1 to skip the ID match check
 * type:  dquot type stamped in when repairing (used only then)
 * flags: XFS_QMOPT_DOWARN logs each problem via cmn_err();
 *        XFS_QMOPT_DQREPAIR re-initializes a bad dquot in place
 * str:   caller tag prefixed to every warning message
 *
 * Returns the number of problems found; 0 means the dquot looked
 * sane (or was re-initialized when DQREPAIR was requested).
 */
int
xfs_qm_dqcheck(
	xfs_disk_dquot_t *ddq,
	xfs_dqid_t	 id,
	uint		 type,	  /* used only when IO_dorepair is true */
	uint		 flags,
	char		 *str)
{
	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
	int		 errs = 0;

	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks got
	 *    used for user data. This is because we take the path of regular
	 *    file deletion; however, the size field of quotainodes is never
	 *    updated, so all the tricks that we play in itruncate_finish
	 *    don't quite matter.
	 *
	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
	 *    But the allocation will be replayed so we'll end up with an
	 *    uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	 */
	if (INT_GET(ddq->d_magic, ARCH_CONVERT) != XFS_DQUOT_MAGIC) {
		if (flags & XFS_QMOPT_DOWARN)
			cmn_err(CE_ALERT,
			"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
			str, id,
			INT_GET(ddq->d_magic, ARCH_CONVERT), XFS_DQUOT_MAGIC);
		errs++;
	}
	if (INT_GET(ddq->d_version, ARCH_CONVERT) != XFS_DQUOT_VERSION) {
		if (flags & XFS_QMOPT_DOWARN)
			/* bug fix: report d_version here, not d_magic */
			cmn_err(CE_ALERT,
			"%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
			str, id,
			INT_GET(ddq->d_version, ARCH_CONVERT),
			XFS_DQUOT_VERSION);
		errs++;
	}
	if (INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_USER &&
	    INT_GET(ddq->d_flags, ARCH_CONVERT) != XFS_DQ_GROUP) {
		if (flags & XFS_QMOPT_DOWARN)
			cmn_err(CE_ALERT,
			"%s : XFS dquot ID 0x%x, unknown flags 0x%x",
			str, id, INT_GET(ddq->d_flags, ARCH_CONVERT));
		errs++;
	}
	if (id != -1 && id != INT_GET(ddq->d_id, ARCH_CONVERT)) {
		if (flags & XFS_QMOPT_DOWARN)
			/*
			 * NOTE(review): ddq is a pointer printed with 0x%x;
			 * should arguably be %p -- confirm cmn_err format
			 * handling before changing.
			 */
			cmn_err(CE_ALERT,
			"%s : ondisk-dquot 0x%x, ID mismatch: "
			"0x%x expected, found id 0x%x",
			str, ddq, id, INT_GET(ddq->d_id, ARCH_CONVERT));
		errs++;
	}

	if (! errs) {
		/*
		 * Once a soft limit is exceeded the matching timer must be
		 * running (the default dquot, id 0, is exempt).
		 */
		if (INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT) &&
		    INT_GET(ddq->d_bcount, ARCH_CONVERT) >=
				INT_GET(ddq->d_blk_softlimit, ARCH_CONVERT)) {
			if (INT_ISZERO(ddq->d_btimer, ARCH_CONVERT) &&
			    !INT_ISZERO(ddq->d_id, ARCH_CONVERT)) {
				if (flags & XFS_QMOPT_DOWARN)
					cmn_err(CE_ALERT,
					"%s : Dquot ID 0x%x (0x%x) "
					"BLK TIMER NOT STARTED",
					str, (int)
					INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
				errs++;
			}
		}
		if (INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT) &&
		    INT_GET(ddq->d_icount, ARCH_CONVERT) >=
				INT_GET(ddq->d_ino_softlimit, ARCH_CONVERT)) {
			if (INT_ISZERO(ddq->d_itimer, ARCH_CONVERT) &&
			    !INT_ISZERO(ddq->d_id, ARCH_CONVERT)) {
				if (flags & XFS_QMOPT_DOWARN)
					cmn_err(CE_ALERT,
					"%s : Dquot ID 0x%x (0x%x) "
					"INODE TIMER NOT STARTED",
					str, (int)
					INT_GET(ddq->d_id, ARCH_CONVERT), ddq);
				errs++;
			}
		}
	}

	if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
		return errs;

	if (flags & XFS_QMOPT_DOWARN)
		cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id);

	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	ASSERT(flags & XFS_QMOPT_DQREPAIR);
	memset(d, 0, sizeof(xfs_dqblk_t));
	INT_SET(d->dd_diskdq.d_magic, ARCH_CONVERT, XFS_DQUOT_MAGIC);
	INT_SET(d->dd_diskdq.d_version, ARCH_CONVERT, XFS_DQUOT_VERSION);
	INT_SET(d->dd_diskdq.d_id, ARCH_CONVERT, id);
	INT_SET(d->dd_diskdq.d_flags, ARCH_CONVERT, type);

	return errs;
}
/*
* Perform a dquot buffer recovery.
......@@ -2335,8 +2447,6 @@ xlog_recover_do_dquot_trans(xlog_t *log,
dq_f->qlf_id,
0, XFS_QMOPT_DOWARN,
"xlog_recover_do_dquot_trans (log copy)"))) {
if (error == ENOSYS)
return (0);
return XFS_ERROR(EIO);
}
ASSERT(dq_f->qlf_len == 1);
......@@ -2923,8 +3033,6 @@ xlog_recover_process_iunlinks(xlog_t *log)
/*
* Prevent any DMAPI event from being sent while in this function.
* Not a problem for xfs since the file system isn't mounted
* yet. It is a problem for cxfs recovery.
*/
mp_dmevmask = mp->m_dmevmask;
mp->m_dmevmask = 0;
......@@ -2982,10 +3090,7 @@ xlog_recover_process_iunlinks(xlog_t *log)
* Prevent any DMAPI event from
* being sent when the
* reference on the inode is
* dropped. Not a problem for
* xfs since the file system
* isn't mounted yet. It is a
* problem for cxfs recovery.
* dropped.
*/
ip->i_d.di_dmevmask = 0;
......
......@@ -32,7 +32,6 @@
#include <xfs.h>
STATIC void xfs_mount_reset_sbqflags(xfs_mount_t *);
STATIC void xfs_mount_log_sbunit(xfs_mount_t *, __int64_t);
STATIC int xfs_uuid_mount(xfs_mount_t *);
STATIC void xfs_uuid_unmount(xfs_mount_t *mp);
......@@ -154,13 +153,11 @@ xfs_mount_free(
spinlock_destroy(&mp->m_sb_lock);
mutex_destroy(&mp->m_ilock);
freesema(&mp->m_growlock);
if (mp->m_quotainfo)
XFS_QM_DONE(mp);
if (mp->m_fsname != NULL) {
if (mp->m_fsname != NULL)
kmem_free(mp->m_fsname, mp->m_fsname_len);
}
if (mp->m_quotainfo != NULL) {
xfs_qm_unmount_quotadestroy(mp);
}
if (remove_bhv) {
struct vfs *vfsp = XFS_MTOVFS(mp);
......@@ -606,10 +603,8 @@ xfs_mountfs(
vmap_t vmap;
xfs_daddr_t d;
__uint64_t ret64;
uint quotaflags, quotaondisk;
uint uquotaondisk = 0, gquotaondisk = 0;
boolean_t needquotamount;
__int64_t update_flags;
uint quotamount, quotaflags;
int agno, noio;
int uuid_mounted = 0;
int error = 0;
......@@ -946,54 +941,22 @@ xfs_mountfs(
ASSERT(rip != NULL);
rvp = XFS_ITOV(rip);
VMAP(rvp, vmap);
if (unlikely((rip->i_d.di_mode & IFMT) != IFDIR)) {
cmn_err(CE_WARN, "XFS: corrupted root inode");
VMAP(rvp, vmap);
prdev("Root inode %llu is not a directory",
mp->m_dev, (unsigned long long)rip->i_ino);
xfs_iunlock(rip, XFS_ILOCK_EXCL);
VN_RELE(rvp);
vn_purge(rvp, &vmap);
XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
mp);
error = XFS_ERROR(EFSCORRUPTED);
goto error3;
goto error4;
}
mp->m_rootip = rip; /* save it */
xfs_iunlock(rip, XFS_ILOCK_EXCL);
quotaondisk = XFS_SB_VERSION_HASQUOTA(&mp->m_sb) &&
mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT);
if (quotaondisk) {
uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT;
gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT;
}
/*
* If the device itself is read-only, we can't allow
* the user to change the state of quota on the mount -
* this would generate a transaction on the ro device,
* which would lead to an I/O error and shutdown
*/
if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
(!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) ||
(gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
(!gquotaondisk && XFS_IS_GQUOTA_ON(mp))) &&
xfs_dev_is_read_only(mp, "changing quota state")) {
cmn_err(CE_WARN,
"XFS: please mount with%s%s%s.",
(!quotaondisk ? "out quota" : ""),
(uquotaondisk ? " usrquota" : ""),
(gquotaondisk ? " grpquota" : ""));
VN_RELE(rvp);
vn_remove(rvp);
error = XFS_ERROR(EPERM);
goto error3;
}
/*
* Initialize realtime inode pointers in the mount structure
*/
......@@ -1002,10 +965,7 @@ xfs_mountfs(
* Free up the root inode.
*/
cmn_err(CE_WARN, "XFS: failed to read RT inodes");
VMAP(rvp, vmap);
VN_RELE(rvp);
vn_purge(rvp, &vmap);
goto error3;
goto error4;
}
/*
......@@ -1015,41 +975,11 @@ xfs_mountfs(
if (update_flags && !(vfsp->vfs_flag & VFS_RDONLY))
xfs_mount_log_sbunit(mp, update_flags);
quotaflags = 0;
needquotamount = B_FALSE;
/*
* Figure out if we'll need to do a quotacheck.
*/
if (XFS_IS_QUOTA_ON(mp) || quotaondisk) {
/*
* Call mount_quotas at this point only if we won't have to do
* a quotacheck.
*/
if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
/*
* If the xfs quota code isn't installed,
* we have to reset the quotachk'd bit.
* If an error occurred, qm_mount_quotas code
* has already disabled quotas. So, just finish
* mounting, and get on with the boring life
* without disk quotas.
*/
if (xfs_qm_mount_quotas(mp))
xfs_mount_reset_sbqflags(mp);
} else {
/*
* Clear the quota flags, but remember them. This
* is so that the quota code doesn't get invoked
* before we're ready. This can happen when an
* inode goes inactive and wants to free blocks,
* or via xfs_log_mount_finish.
* Initialise the XFS quota management subsystem for this mount
*/
quotaflags = mp->m_qflags;
mp->m_qflags = 0;
needquotamount = B_TRUE;
}
}
if ((error = XFS_QM_INIT(mp, &quotamount, &quotaflags)))
goto error4;
/*
* Finish recovering the file system. This part needed to be
......@@ -1059,30 +989,23 @@ xfs_mountfs(
error = xfs_log_mount_finish(mp, mfsi_flags);
if (error) {
cmn_err(CE_WARN, "XFS: log mount finish failed");
goto error3;
}
if (needquotamount) {
ASSERT(mp->m_qflags == 0);
mp->m_qflags = quotaflags;
if (xfs_qm_mount_quotas(mp))
xfs_mount_reset_sbqflags(mp);
goto error4;
}
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
if (! (XFS_IS_QUOTA_ON(mp)))
xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas not turned on");
else
xfs_fs_cmn_err(CE_NOTE, mp, "Disk quotas turned on");
#endif
#ifdef QUOTADEBUG
if (XFS_IS_QUOTA_ON(mp) && xfs_qm_internalqcheck(mp))
cmn_err(CE_WARN, "XFS: mount internalqcheck failed");
#endif
/*
* Complete the quota initialisation, post-log-replay component.
*/
if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags)))
goto error4;
return (0);
return 0;
error4:
/*
* Free up the root inode.
*/
VN_RELE(rvp);
vn_purge(rvp, &vmap);
error3:
xfs_log_unmount_dealloc(mp);
error2:
......@@ -1112,25 +1035,14 @@ int
xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
{
struct vfs *vfsp = XFS_MTOVFS(mp);
int ndquots;
#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
int64_t fsid;
#endif
xfs_iflush_all(mp, XFS_FLUSH_ALL);
/*
* Purge the dquot cache.
* None of the dquots should really be busy at this point.
*/
if (mp->m_quotainfo) {
while ((ndquots = xfs_qm_dqpurge_all(mp,
XFS_QMOPT_UQUOTA|
XFS_QMOPT_GQUOTA|
XFS_QMOPT_UMOUNTING))) {
delay(ndquots * 10);
}
}
XFS_QM_DQPURGEALL(mp,
XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING);
/*
* Flush out the log synchronously so that we know for sure
......@@ -1645,47 +1557,6 @@ xfs_uuid_unmount(xfs_mount_t *mp)
mutex_unlock(&xfs_uuidtabmon);
}
/*
* When xfsquotas isn't installed and the superblock had quotas, we need to
* clear the quotaflags from superblock.
*/
STATIC void
xfs_mount_reset_sbqflags(
xfs_mount_t *mp)
{
xfs_trans_t *tp;
unsigned long s;
mp->m_qflags = 0;
/*
* It is OK to look at sb_qflags here in mount path,
* without SB_LOCK.
*/
if (mp->m_sb.sb_qflags == 0)
return;
s = XFS_SB_LOCK(mp);
mp->m_sb.sb_qflags = 0;
XFS_SB_UNLOCK(mp, s);
/*
* if the fs is readonly, let the incore superblock run
* with quotas off but don't flush the update out to disk
*/
if (XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY)
return;
#ifdef QUOTADEBUG
xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes");
#endif
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
XFS_DEFAULT_LOG_COUNT)) {
xfs_trans_cancel(tp, 0);
return;
}
xfs_mod_sb(tp, XFS_SB_QFLAGS);
(void)xfs_trans_commit(tp, 0, NULL);
}
/*
* Used to log changes to the superblock unit and width fields which could
* be altered by the mount options. Only the first superblock is updated.
......
......@@ -75,7 +75,6 @@ struct xfs_ihash;
struct xfs_chash;
struct xfs_inode;
struct xfs_perag;
struct xfs_quotainfo;
struct xfs_iocore;
struct xfs_bmbt_irec;
struct xfs_bmap_free;
......@@ -87,12 +86,117 @@ struct xfs_bmap_free;
#define AIL_LOCK(mp,s) s=mutex_spinlock(&(mp)->m_ail_lock)
#define AIL_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_ail_lock, s)
/*
* Prototypes and functions for I/O core modularization.
* Prototypes and functions for the Data Migration subsystem.
*/
typedef int (*xfs_send_data_t)(int, struct bhv_desc *,
xfs_off_t, size_t, int, vrwlock_t *);
typedef int (*xfs_send_mmap_t)(struct vm_area_struct *, uint);
typedef int (*xfs_send_destroy_t)(struct bhv_desc *, dm_right_t);
typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct bhv_desc *,
dm_right_t, struct bhv_desc *, dm_right_t,
char *, char *, mode_t, int, int);
typedef void (*xfs_send_unmount_t)(struct vfs *, struct vnode *,
dm_right_t, mode_t, int, int);
typedef struct xfs_dmops {
xfs_send_data_t xfs_send_data;
xfs_send_mmap_t xfs_send_mmap;
xfs_send_destroy_t xfs_send_destroy;
xfs_send_namesp_t xfs_send_namesp;
xfs_send_unmount_t xfs_send_unmount;
} xfs_dmops_t;
#define XFS_SEND_DATA(mp, ev,bdp,off,len,fl,lock) \
(*(mp)->m_dm_ops.xfs_send_data)(ev,bdp,off,len,fl,lock)
#define XFS_SEND_MMAP(mp, vma,fl) \
(*(mp)->m_dm_ops.xfs_send_mmap)(vma,fl)
#define XFS_SEND_DESTROY(mp, bdp,right) \
(*(mp)->m_dm_ops.xfs_send_destroy)(bdp,right)
#define XFS_SEND_NAMESP(mp, ev,b1,r1,b2,r2,n1,n2,mode,rval,fl) \
(*(mp)->m_dm_ops.xfs_send_namesp)(ev,b1,r1,b2,r2,n1,n2,mode,rval,fl)
#define XFS_SEND_UNMOUNT(mp, vfsp,vp,right,mode,rval,fl) \
(*(mp)->m_dm_ops.xfs_send_unmount)(vfsp,vp,right,mode,rval,fl)
/*
* Prototypes and functions for the Quota Management subsystem.
*/
struct flid;
struct buf;
struct xfs_dquot;
struct xfs_dqtrxops;
struct xfs_quotainfo;
typedef int (*xfs_qminit_t)(struct xfs_mount *, uint *, uint *);
typedef int (*xfs_qmmount_t)(struct xfs_mount *, uint, uint);
typedef int (*xfs_qmunmount_t)(struct xfs_mount *);
typedef void (*xfs_qmdone_t)(struct xfs_mount *);
typedef void (*xfs_dqrele_t)(struct xfs_dquot *);
typedef int (*xfs_dqattach_t)(struct xfs_inode *, uint);
typedef void (*xfs_dqdetach_t)(struct xfs_inode *);
typedef int (*xfs_dqpurgeall_t)(struct xfs_mount *, uint);
typedef int (*xfs_dqvopalloc_t)(struct xfs_mount *,
struct xfs_inode *, uid_t, gid_t, uint,
struct xfs_dquot **, struct xfs_dquot **);
typedef void (*xfs_dqvopcreate_t)(struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot *, struct xfs_dquot *);
typedef int (*xfs_dqvoprename_t)(struct xfs_inode **);
typedef struct xfs_dquot * (*xfs_dqvopchown_t)(
struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot **, struct xfs_dquot *);
typedef int (*xfs_dqvopchownresv_t)(struct xfs_trans *, struct xfs_inode *,
struct xfs_dquot *, struct xfs_dquot *, uint);
typedef struct xfs_qmops {
xfs_qminit_t xfs_qminit;
xfs_qmdone_t xfs_qmdone;
xfs_qmmount_t xfs_qmmount;
xfs_qmunmount_t xfs_qmunmount;
xfs_dqrele_t xfs_dqrele;
xfs_dqattach_t xfs_dqattach;
xfs_dqdetach_t xfs_dqdetach;
xfs_dqpurgeall_t xfs_dqpurgeall;
xfs_dqvopalloc_t xfs_dqvopalloc;
xfs_dqvopcreate_t xfs_dqvopcreate;
xfs_dqvoprename_t xfs_dqvoprename;
xfs_dqvopchown_t xfs_dqvopchown;
xfs_dqvopchownresv_t xfs_dqvopchownresv;
struct xfs_dqtrxops *xfs_dqtrxops;
} xfs_qmops_t;
#define XFS_QM_INIT(mp, mnt, fl) \
(*(mp)->m_qm_ops.xfs_qminit)(mp, mnt, fl)
#define XFS_QM_MOUNT(mp, mnt, fl) \
(*(mp)->m_qm_ops.xfs_qmmount)(mp, mnt, fl)
#define XFS_QM_UNMOUNT(mp) \
(*(mp)->m_qm_ops.xfs_qmunmount)(mp)
#define XFS_QM_DONE(mp) \
(*(mp)->m_qm_ops.xfs_qmdone)(mp)
#define XFS_QM_DQRELE(mp, dq) \
(*(mp)->m_qm_ops.xfs_dqrele)(dq)
#define XFS_QM_DQATTACH(mp, ip, fl) \
(*(mp)->m_qm_ops.xfs_dqattach)(ip, fl)
#define XFS_QM_DQDETACH(mp, ip) \
(*(mp)->m_qm_ops.xfs_dqdetach)(ip)
#define XFS_QM_DQPURGEALL(mp, fl) \
(*(mp)->m_qm_ops.xfs_dqpurgeall)(mp, fl)
#define XFS_QM_DQVOPALLOC(mp, ip, uid, gid, fl, dq1, dq2) \
(*(mp)->m_qm_ops.xfs_dqvopalloc)(mp, ip, uid, gid, fl, dq1, dq2)
#define XFS_QM_DQVOPCREATE(mp, tp, ip, dq1, dq2) \
(*(mp)->m_qm_ops.xfs_dqvopcreate)(tp, ip, dq1, dq2)
#define XFS_QM_DQVOPRENAME(mp, ip) \
(*(mp)->m_qm_ops.xfs_dqvoprename)(ip)
#define XFS_QM_DQVOPCHOWN(mp, tp, ip, dqp, dq) \
(*(mp)->m_qm_ops.xfs_dqvopchown)(tp, ip, dqp, dq)
#define XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, dq1, dq2, fl) \
(*(mp)->m_qm_ops.xfs_dqvopchownresv)(tp, ip, dq1, dq2, fl)
/*
* Prototypes and functions for I/O core modularization.
*/
typedef int (*xfs_ioinit_t)(struct vfs *,
struct xfs_mount_args *, int);
......@@ -137,52 +241,38 @@ typedef struct xfs_ioops {
xfs_iodone_t xfs_iodone;
} xfs_ioops_t;
#define XFS_IOINIT(vfsp, args, flags) \
(*(mp)->m_io_ops.xfs_ioinit)(vfsp, args, flags)
#define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist) \
(*(mp)->m_io_ops.xfs_bmapi_func) \
(trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist)
#define XFS_BMAP_EOF(mp, io, endoff, whichfork, eof) \
(*(mp)->m_io_ops.xfs_bmap_eof_func) \
((io)->io_obj, endoff, whichfork, eof)
#define XFS_IOMAP_WRITE_DIRECT(mp, io, offset, count, flags, mval, nmap, found)\
(*(mp)->m_io_ops.xfs_iomap_write_direct) \
((io)->io_obj, offset, count, flags, mval, nmap, found)
#define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \
(*(mp)->m_io_ops.xfs_iomap_write_delay) \
((io)->io_obj, offset, count, flags, mval, nmap)
#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, mval, nmap) \
(*(mp)->m_io_ops.xfs_iomap_write_allocate) \
((io)->io_obj, mval, nmap)
#define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \
(*(mp)->m_io_ops.xfs_iomap_write_unwritten) \
((io)->io_obj, offset, count)
#define XFS_LCK_MAP_SHARED(mp, io) \
(*(mp)->m_io_ops.xfs_lck_map_shared)((io)->io_obj)
#define XFS_ILOCK(mp, io, mode) \
(*(mp)->m_io_ops.xfs_ilock)((io)->io_obj, mode)
#define XFS_ILOCK_NOWAIT(mp, io, mode) \
(*(mp)->m_io_ops.xfs_ilock_nowait)((io)->io_obj, mode)
#define XFS_IUNLOCK(mp, io, mode) \
(*(mp)->m_io_ops.xfs_unlock)((io)->io_obj, mode)
#define XFS_ILOCK_DEMOTE(mp, io, mode) \
(*(mp)->m_io_ops.xfs_ilock_demote)((io)->io_obj, mode)
#define XFS_SIZE(mp, io) \
(*(mp)->m_io_ops.xfs_size_func)((io)->io_obj)
#define XFS_IODONE(vfsp) \
(*(mp)->m_io_ops.xfs_iodone)(vfsp)
......@@ -284,13 +374,9 @@ typedef struct xfs_mount {
int m_chsize; /* size of next field */
struct xfs_chash *m_chash; /* fs private inode per-cluster
* hash table */
struct xfs_dmops m_dm_ops; /* vector of DMI ops */
struct xfs_qmops m_qm_ops; /* vector of XQM ops */
struct xfs_ioops m_io_ops; /* vector of I/O ops */
struct xfs_expinfo *m_expinfo; /* info to export to other
cells. */
uint64_t m_shadow_pinmask;
/* which bits matter in rpc
log item pin masks */
uint m_cxfstype; /* mounted shared, etc. */
lock_t m_freeze_lock; /* Lock for m_frozen */
uint m_frozen; /* FS frozen for shutdown or
* snapshot */
......@@ -482,11 +568,10 @@ extern void xfs_check_frozen(xfs_mount_t *, bhv_desc_t *, int);
extern struct vfsops xfs_vfsops;
extern struct vnodeops xfs_vnodeops;
extern struct xfs_dmops xfs_dmcore_xfs;
extern struct xfs_qmops xfs_qmcore_xfs;
extern struct xfs_ioops xfs_iocore_xfs;
extern struct vfsops xfs_qmops_xfs;
extern struct vfsops xfs_dmops_xfs;
extern int xfs_init(void);
extern void xfs_cleanup(void);
......
......@@ -29,154 +29,32 @@
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
#include <xfs.h>
#define MNTOPT_QUOTA "quota" /* disk quotas (user) */
#define MNTOPT_NOQUOTA "noquota" /* no quotas */
#define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */
#define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */
#define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */
#define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */
#define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */
#define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
#define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */
STATIC int
xfs_qm_parseargs(
struct bhv_desc *bhv,
char *options,
struct xfs_mount_args *args,
int update)
{
size_t length;
char *local_options = options;
char *this_char;
int error;
int referenced = update;
while ((this_char = strsep(&local_options, ",")) != NULL) {
length = strlen(this_char);
if (local_options)
length++;
if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA);
args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA);
referenced = update;
} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
!strcmp(this_char, MNTOPT_UQUOTA) ||
!strcmp(this_char, MNTOPT_USRQUOTA)) {
args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF;
referenced = 1;
} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
!strcmp(this_char, MNTOPT_UQUOTANOENF)) {
args->flags |= XFSMNT_UQUOTA;
args->flags &= ~XFSMNT_UQUOTAENF;
referenced = 1;
} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
!strcmp(this_char, MNTOPT_GRPQUOTA)) {
args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF;
referenced = 1;
} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
args->flags |= XFSMNT_GQUOTA;
args->flags &= ~XFSMNT_GQUOTAENF;
referenced = 1;
} else {
if (local_options)
*(local_options-1) = ',';
continue;
}
while (length--)
*this_char++ = ',';
}
PVFS_PARSEARGS(BHV_NEXT(bhv), options, args, update, error);
if (!error && !referenced)
bhv_remove_vfsops(bhvtovfs(bhv), VFS_POSITION_QM);
return error;
}
STATIC int
xfs_qm_showargs(
struct bhv_desc *bhv,
struct seq_file *m)
{
struct vfs *vfsp = bhvtovfs(bhv);
struct xfs_mount *mp = XFS_VFSTOM(vfsp);
int error;
if (mp->m_qflags & XFS_UQUOTA_ACCT) {
(mp->m_qflags & XFS_UQUOTA_ENFD) ?
seq_puts(m, "," MNTOPT_USRQUOTA) :
seq_puts(m, "," MNTOPT_UQUOTANOENF);
}
if (mp->m_qflags & XFS_GQUOTA_ACCT) {
(mp->m_qflags & XFS_GQUOTA_ENFD) ?
seq_puts(m, "," MNTOPT_GRPQUOTA) :
seq_puts(m, "," MNTOPT_GQUOTANOENF);
}
if (!(mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT)))
seq_puts(m, "," MNTOPT_NOQUOTA);
PVFS_SHOWARGS(BHV_NEXT(bhv), m, error);
return error;
}
STATIC int
xfs_qm_mount(
struct bhv_desc *bhv,
struct xfs_mount_args *args,
struct cred *cr)
{
struct vfs *vfsp = bhvtovfs(bhv);
struct xfs_mount *mp = XFS_VFSTOM(vfsp);
int error;
if (args->flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA))
xfs_qm_mount_quotainit(mp, args->flags);
PVFS_MOUNT(BHV_NEXT(bhv), args, cr, error);
return error;
}
STATIC int
xfs_qm_syncall(
struct bhv_desc *bhv,
int flags,
cred_t *credp)
#ifndef CONFIG_XFS_QUOTA
STATIC struct xfs_dquot *
xfs_dqvopchown_default(
struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_dquot **dqp,
struct xfs_dquot *dq)
{
struct vfs *vfsp = bhvtovfs(bhv);
struct xfs_mount *mp = XFS_VFSTOM(vfsp);
int error;
/*
* Get the Quota Manager to flush the dquots.
*/
if (XFS_IS_QUOTA_ON(mp)) {
if ((error = xfs_qm_sync(mp, flags))) {
/*
* If we got an IO error, we will be shutting down.
* So, there's nothing more for us to do here.
*/
ASSERT(error != EIO || XFS_FORCED_SHUTDOWN(mp));
if (XFS_FORCED_SHUTDOWN(mp)) {
return XFS_ERROR(error);
}
}
}
PVFS_SYNC(BHV_NEXT(bhv), flags, credp, error);
return error;
return NULL;
}
vfsops_t xfs_qmops_xfs = {
BHV_IDENTITY_INIT(VFS_BHV_QM, VFS_POSITION_QM),
.vfs_parseargs = xfs_qm_parseargs,
.vfs_showargs = xfs_qm_showargs,
.vfs_mount = xfs_qm_mount,
.vfs_sync = xfs_qm_syncall,
.vfs_quotactl = xfs_qm_quotactl,
xfs_qmops_t xfs_qmcore_xfs = {
.xfs_qminit = (xfs_qminit_t) fs_noerr,
.xfs_qmdone = (xfs_qmdone_t) fs_noerr,
.xfs_qmmount = (xfs_qmmount_t) fs_noerr,
.xfs_qmunmount = (xfs_qmunmount_t) fs_noerr,
.xfs_dqrele = (xfs_dqrele_t) fs_noerr,
.xfs_dqattach = (xfs_dqattach_t) fs_noerr,
.xfs_dqdetach = (xfs_dqdetach_t) fs_noerr,
.xfs_dqpurgeall = (xfs_dqpurgeall_t) fs_noerr,
.xfs_dqvopalloc = (xfs_dqvopalloc_t) fs_noerr,
.xfs_dqvopcreate = (xfs_dqvopcreate_t) fs_noerr,
.xfs_dqvoprename = (xfs_dqvoprename_t) fs_noerr,
.xfs_dqvopchown = xfs_dqvopchown_default,
.xfs_dqvopchownresv = (xfs_dqvopchownresv_t) fs_noerr,
};
#endif /* CONFIG_XFS_QUOTA */
This diff is collapsed.
......@@ -261,11 +261,12 @@ xfs_rename(
src_dp = XFS_BHVTOI(src_dir_bdp);
target_dp = XFS_BHVTOI(target_dir_bdp);
mp = src_dp->i_mount;
if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) ||
DM_EVENT_ENABLED(target_dir_vp->v_vfsp,
target_dp, DM_EVENT_RENAME)) {
error = dm_send_namesp_event(DM_EVENT_RENAME,
error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME,
src_dir_bdp, DM_RIGHT_NULL,
target_dir_bdp, DM_RIGHT_NULL,
src_name, target_name,
......@@ -323,7 +324,6 @@ xfs_rename(
xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
XFS_BMAP_INIT(&free_list, &first_block);
mp = src_dp->i_mount;
tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
spaceres = XFS_RENAME_SPACE_RES(mp, target_namelen);
......@@ -343,13 +343,11 @@ xfs_rename(
/*
* Attach the dquots to the inodes
*/
if (XFS_IS_QUOTA_ON(mp)) {
if ((error = xfs_qm_vop_rename_dqattach(inodes))) {
if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) {
xfs_trans_cancel(tp, cancel_flags);
rename_which_error_return = __LINE__;
goto rele_return;
}
}
/*
* Reacquire the inode locks we dropped above.
......@@ -625,7 +623,7 @@ xfs_rename(
if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_POSTRENAME) ||
DM_EVENT_ENABLED(target_dir_vp->v_vfsp,
target_dp, DM_EVENT_POSTRENAME)) {
(void) dm_send_namesp_event(DM_EVENT_POSTRENAME,
(void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME,
src_dir_bdp, DM_RIGHT_NULL,
target_dir_bdp, DM_RIGHT_NULL,
src_name, target_name,
......
......@@ -173,11 +173,7 @@ xfs_trans_dup(
ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
tp->t_rtx_res = tp->t_rtx_res_used;
/*
* dup the dquot stuff too.
*/
if (tp->t_dqinfo)
xfs_trans_dup_dqinfo(tp, ntp);
XFS_TRANS_DUP_DQINFO(tp->t_mountp, tp, ntp);
atomic_inc(&tp->t_mountp->m_active_trans);
return ntp;
......@@ -703,9 +699,7 @@ xfs_trans_commit(
* means is that we have some (non-persistent) quota
* reservations that need to be unreserved.
*/
if (tp->t_dqinfo && (tp->t_flags & XFS_TRANS_DQ_DIRTY)) {
xfs_trans_unreserve_and_mod_dquots(tp);
}
XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
if (tp->t_ticket) {
commit_lsn = xfs_log_done(mp, tp->t_ticket,
NULL, log_flags);
......@@ -733,9 +727,7 @@ xfs_trans_commit(
if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
xfs_trans_apply_sb_deltas(tp);
}
if (tp->t_flags & XFS_TRANS_DQ_DIRTY) {
xfs_trans_apply_dquot_deltas(tp);
}
XFS_TRANS_APPLY_DQUOT_DELTAS(mp, tp);
/*
* Ask each log item how many log_vector entries it will
......@@ -955,9 +947,7 @@ xfs_trans_uncommit(
}
xfs_trans_unreserve_and_mod_sb(tp);
if (tp->t_dqinfo && (tp->t_flags & XFS_TRANS_DQ_DIRTY)) {
xfs_trans_unreserve_and_mod_dquots(tp);
}
XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);
xfs_trans_free_items(tp, flags);
xfs_trans_free_busy(tp);
......@@ -1079,9 +1069,7 @@ xfs_trans_cancel(
}
#endif
xfs_trans_unreserve_and_mod_sb(tp);
if (tp->t_dqinfo && (tp->t_flags & XFS_TRANS_DQ_DIRTY))
xfs_trans_unreserve_and_mod_dquots(tp);
XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);
if (tp->t_ticket) {
if (flags & XFS_TRANS_RELEASE_LOG_RES) {
......@@ -1110,8 +1098,7 @@ xfs_trans_free(
xfs_trans_t *tp)
{
atomic_dec(&tp->t_mountp->m_active_trans);
if (tp->t_dqinfo)
xfs_trans_free_dqinfo(tp);
XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
kmem_zone_free(xfs_trans_zone, tp);
}
......
......@@ -232,7 +232,7 @@ xfs_dir_ialloc(
xfs_buf_relse(ialloc_context);
if (dqinfo) {
tp->t_dqinfo = dqinfo;
xfs_trans_free_dqinfo(tp);
XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
}
*tpp = ntp;
*ipp = NULL;
......@@ -254,7 +254,7 @@ xfs_dir_ialloc(
*ipp = NULL;
return code;
}
xfs_trans_bjoin (tp, ialloc_context);
xfs_trans_bjoin(tp, ialloc_context);
/*
* Call ialloc again. Since we've locked out all
......
......@@ -68,7 +68,6 @@ xfs_init(void)
spinlock_init(&xfs_dabuf_global_lock, "xfsda");
#endif
mutex_init(&xfs_uuidtabmon, MUTEX_DEFAULT, "xfs_uuidtab");
mutex_init(&xfs_Gqm_lock, MUTEX_DEFAULT, "xfs_qmlock");
/*
* Initialize all of the zone allocators we use.
......@@ -175,8 +174,6 @@ xfs_cleanup(void)
kmem_cache_destroy(xfs_ifork_zone);
kmem_cache_destroy(xfs_ili_zone);
kmem_cache_destroy(xfs_chashlist_zone);
_XQM_ZONE_DESTROY(qm_dqzone);
_XQM_ZONE_DESTROY(qm_dqtrxzone);
_ACL_ZONE_DESTROY(xfs_acl_zone);
#if (defined(DEBUG) || defined(CONFIG_XFS_VNODE_TRACING))
ktrace_uninit();
......@@ -389,6 +386,7 @@ xfs_mount(
cred_t *credp)
{
struct vfs *vfsp = bhvtovfs(bhvp);
struct bhv_desc *p;
struct xfs_mount *mp = XFS_BHVTOM(bhvp);
struct block_device *ddev, *logdev, *rtdev;
int ronly = (vfsp->vfs_flag & VFS_RDONLY);
......@@ -421,28 +419,43 @@ xfs_mount(
}
}
mp->m_io_ops = xfs_iocore_xfs;
/*
* Setup xfs_mount function vectors from available behaviors
*/
p = vfs_bhv_lookup(vfsp, VFS_POSITION_DM);
mp->m_dm_ops = p ? *(xfs_dmops_t *) vfs_bhv_custom(p) : xfs_dmcore_xfs;
p = vfs_bhv_lookup(vfsp, VFS_POSITION_QM);
mp->m_qm_ops = p ? *(xfs_qmops_t *) vfs_bhv_custom(p) : xfs_qmcore_xfs;
p = vfs_bhv_lookup(vfsp, VFS_POSITION_IO);
mp->m_io_ops = p ? *(xfs_ioops_t *) vfs_bhv_custom(p) : xfs_iocore_xfs;
/*
* Setup xfs_mount buffer target pointers
*/
mp->m_ddev_targp = xfs_alloc_buftarg(ddev);
if (rtdev)
mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev);
mp->m_logdev_targp = (logdev && logdev != ddev) ?
xfs_alloc_buftarg(logdev) : mp->m_ddev_targp;
/*
* Setup flags based on mount(2) options and then the superblock
*/
error = xfs_start_flags(args, mp, ronly);
if (error)
goto error;
error = xfs_readsb(mp);
if (error)
goto error;
error = xfs_finish_flags(args, mp, ronly);
if (error) {
xfs_freesb(mp);
goto error;
}
/*
* Setup xfs_mount buffer target pointers based on superblock
*/
xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
mp->m_sb.sb_sectsize);
if (logdev && logdev != ddev) {
......@@ -531,16 +544,15 @@ xfs_unmount(
int flags,
cred_t *credp)
{
xfs_mount_t *mp;
xfs_inode_t *rip;
vnode_t *rvp = 0;
struct vfs *vfsp = bhvtovfs(bdp);
xfs_mount_t *mp = XFS_BHVTOM(bdp);
xfs_inode_t *rip;
vnode_t *rvp;
int unmount_event_wanted = 0;
int unmount_event_flags = 0;
int xfs_unmountfs_needed = 0;
int error;
mp = XFS_BHVTOM(bdp);
rip = mp->m_rootip;
rvp = XFS_ITOV(rip);
......@@ -548,7 +560,7 @@ xfs_unmount(
bhv_desc_t *rbdp;
rbdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(rvp), &xfs_vnodeops);
error = dm_send_namesp_event(DM_EVENT_PREUNMOUNT,
error = XFS_SEND_NAMESP(mp, DM_EVENT_PREUNMOUNT,
rbdp, DM_RIGHT_NULL, rbdp, DM_RIGHT_NULL,
NULL, NULL, 0, 0,
(mp->m_dmevmask & (1<<DM_EVENT_PREUNMOUNT))?
......@@ -601,9 +613,9 @@ xfs_unmount(
*/
if (unmount_event_wanted) {
/* Note: mp structure must still exist for
* dm_send_unmount_event() call.
* XFS_SEND_UNMOUNT() call.
*/
dm_send_unmount_event(vfsp, error == 0 ? rvp : NULL,
XFS_SEND_UNMOUNT(mp, vfsp, error == 0 ? rvp : NULL,
DM_RIGHT_NULL, 0, error, unmount_event_flags);
}
if (xfs_unmountfs_needed) {
......@@ -679,7 +691,7 @@ xfs_unmount_flush(
* Release dquot that rootinode, rbmino and rsumino might be holding,
* flush and purge the quota inodes.
*/
error = xfs_qm_unmount_quotas(mp);
error = XFS_QM_UNMOUNT(mp);
if (error == EFSCORRUPTED)
goto fscorrupt_out2;
......
This diff is collapsed.
......@@ -31,8 +31,8 @@
*/
#include <xfs.h>
#include <xfs_quota_priv.h>
#include <xfs_log_recover.h>
#include "quota/xfs_qm.h"
#include "pagebuf/page_buf_internal.h"
#include <linux/ctype.h>
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment