Commit 4a0a9d51 authored by Richard Russon

Merge ssh://linux-ntfs@bkbits.net/ntfs-2.6
into flatcap.org:/home/flatcap/backup/bk/ntfs-2.6

parents 27df0cf8 2499715e
@@ -372,6 +372,38 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 			len, offset);
 }
 
+struct bio_map_data {
+	struct bio_vec *iovecs;
+	void __user *userptr;
+};
+
+static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio)
+{
+	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
+	bio->bi_private = bmd;
+}
+
+static void bio_free_map_data(struct bio_map_data *bmd)
+{
+	kfree(bmd->iovecs);
+	kfree(bmd);
+}
+
+static struct bio_map_data *bio_alloc_map_data(int nr_segs)
+{
+	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), GFP_KERNEL);
+
+	if (!bmd)
+		return NULL;
+
+	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, GFP_KERNEL);
+	if (bmd->iovecs)
+		return bmd;
+
+	kfree(bmd);
+	return NULL;
+}
+
 /**
  * bio_uncopy_user - finish previously mapped bio
  * @bio: bio being terminated
@@ -381,20 +413,22 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
  */
 int bio_uncopy_user(struct bio *bio)
 {
+	struct bio_map_data *bmd = bio->bi_private;
+	const int read = bio_data_dir(bio) == READ;
 	struct bio_vec *bvec;
 	int i, ret = 0;
-	char *uaddr = bio->bi_private;
 
 	__bio_for_each_segment(bvec, bio, i, 0) {
 		char *addr = page_address(bvec->bv_page);
+		unsigned int len = bmd->iovecs[i].bv_len;
 
-		if (bio_data_dir(bio) == READ && !ret &&
-		    copy_to_user(uaddr, addr, bvec->bv_len))
+		if (read && !ret && copy_to_user(bmd->userptr, addr, len))
 			ret = -EFAULT;
 
 		__free_page(bvec->bv_page);
-		uaddr += bvec->bv_len;
+		bmd->userptr += len;
 	}
 
+	bio_free_map_data(bmd);
 	bio_put(bio);
 	return ret;
 }
@@ -415,14 +449,25 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 {
 	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	unsigned long start = uaddr >> PAGE_SHIFT;
+	struct bio_map_data *bmd;
 	struct bio_vec *bvec;
 	struct page *page;
 	struct bio *bio;
 	int i, ret;
 
+	bmd = bio_alloc_map_data(end - start);
+	if (!bmd)
+		return ERR_PTR(-ENOMEM);
+
+	bmd->userptr = (void __user *) uaddr;
+
 	bio = bio_alloc(GFP_KERNEL, end - start);
-	if (!bio)
+	if (!bio) {
+		bio_free_map_data(bmd);
 		return ERR_PTR(-ENOMEM);
+	}
+
+	bio->bi_rw |= (!write_to_vm << BIO_RW);
 
 	ret = 0;
 	while (len) {
@@ -445,13 +490,15 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 		len -= bytes;
 	}
 
+	if (ret)
+		goto cleanup;
+
 	/*
 	 * success
 	 */
-	if (!ret) {
 	if (!write_to_vm) {
 		unsigned long p = uaddr;
-		bio->bi_rw |= (1 << BIO_RW);
 
 		/*
 		 * for a write, copy in data to kernel pages
 		 */
@@ -465,13 +512,8 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 		}
 	}
 
-	bio->bi_private = (void *) uaddr;
+	bio_set_map_data(bmd, bio);
 	return bio;
-	}
-
-	/*
-	 * cleanup
-	 */
 cleanup:
 	bio_for_each_segment(bvec, bio, i)
 		__free_page(bvec->bv_page);
......
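The bio_map_data pairing above appears to exist because bi_io_vec is not stable while a request is in flight: the block layer may adjust bv_len values, so bio_uncopy_user() needs a private copy of the original segment lengths (and the user pointer, previously smuggled raw through bi_private) to copy READ data back correctly. A minimal caller-side sketch, assuming a hypothetical helper that bounces a user buffer through these two functions (submission and completion handling elided):

    /* Hypothetical sketch: bounce a user buffer through bio_copy_user().
     * A real caller sets bi_end_io, submits the bio, and waits for it
     * to complete before calling bio_uncopy_user(). */
    static int bounce_user_buffer(request_queue_t *q, unsigned long uaddr,
                                  unsigned int len, int write_to_vm)
    {
            struct bio *bio = bio_copy_user(q, uaddr, len, write_to_vm);

            if (IS_ERR(bio))
                    return PTR_ERR(bio);

            /* ... submit bio and wait for completion ... */

            return bio_uncopy_user(bio);    /* copies back, frees pages */
    }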
@@ -550,6 +550,7 @@ xfs_map_unwritten(
 STATIC void
 xfs_submit_page(
 	struct page		*page,
+	struct writeback_control *wbc,
 	struct buffer_head	*bh_arr[],
 	int			cnt)
 {
@@ -573,8 +574,10 @@ xfs_submit_page(
 		for (i = 0; i < cnt; i++)
 			submit_bh(WRITE, bh_arr[i]);
-	} else
+	} else {
 		end_page_writeback(page);
+		wbc->pages_skipped++;	/* We didn't write this page */
+	}
 }
 
 /*
@@ -652,7 +655,7 @@ xfs_convert_page(
 	if (startio) {
 		wbc->nr_to_write--;
-		xfs_submit_page(page, bh_arr, index);
+		xfs_submit_page(page, wbc, bh_arr, index);
 	} else {
 		unlock_page(page);
 	}
@@ -864,7 +867,7 @@ xfs_page_state_convert(
 		SetPageUptodate(page);
 
 	if (startio)
-		xfs_submit_page(page, bh_arr, cnt);
+		xfs_submit_page(page, wbc, bh_arr, cnt);
 
 	if (iomp) {
 		tlast = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
......
@@ -409,6 +409,8 @@ linvfs_file_mmap(
 	vma->vm_ops = &linvfs_file_vm_ops;
 
 	VOP_SETATTR(vp, &va, XFS_AT_UPDATIME, NULL, error);
+	if (!error)
+		vn_revalidate(vp);	/* update Linux inode flags */
 	return 0;
 }
......
@@ -63,6 +63,7 @@ xfs_param_t xfs_params = {
 	.inherit_noatim	= {	0,	1,	1	},
 	.xfs_buf_timer	= {	100/2,	1*100,	30*100	},
 	.xfs_buf_age	= {	1*100,	15*100,	7200*100},
+	.inherit_nosym	= {	0,	0,	1	},
 };
 
 /*
......
@@ -174,8 +174,9 @@ linvfs_mknod(
 	 */
 	teardown.d_inode = ip = LINVFS_GET_IP(vp);
 	teardown.d_name = dentry->d_name;
-	make_bad_inode(ip);
+	remove_inode_hash(ip);
+	vn_mark_bad(vp);
 	if (S_ISDIR(mode))
 		VOP_RMDIR(dvp, &teardown, NULL, err2);
 	else
@@ -225,26 +226,21 @@ linvfs_lookup(
 	struct dentry	*dentry,
 	struct nameidata *nd)
 {
-	struct inode *ip = NULL;
-	vnode_t *vp, *cvp = NULL;
+	struct vnode *vp = LINVFS_GET_VP(dir), *cvp;
 	int		error;
 
 	if (dentry->d_name.len >= MAXNAMELEN)
 		return ERR_PTR(-ENAMETOOLONG);
 
-	vp = LINVFS_GET_VP(dir);
 	VOP_LOOKUP(vp, dentry, &cvp, 0, NULL, NULL, error);
-	if (!error) {
-		ASSERT(cvp);
-		ip = LINVFS_GET_IP(cvp);
-		if (!ip) {
-			VN_RELE(cvp);
-			return ERR_PTR(-EACCES);
-		}
-	}
-	if (error && (error != ENOENT))
-		return ERR_PTR(-error);
-	return d_splice_alias(ip, dentry);
+	if (error) {
+		if (unlikely(error != ENOENT))
+			return ERR_PTR(-error);
+		d_add(dentry, NULL);
+		return NULL;
+	}
+
+	return d_splice_alias(LINVFS_GET_IP(cvp), dentry);
 }
 
 STATIC int
@@ -304,7 +300,7 @@ linvfs_symlink(
 {
 	struct inode	*ip;
 	vattr_t		va;
-	vnode_t		*dvp;	/* directory containing name to remove */
+	vnode_t		*dvp;	/* directory containing name of symlink */
 	vnode_t		*cvp;	/* used to lookup symlink to put in dentry */
 	int		error;
......
@@ -85,6 +85,7 @@
 #include <linux/vfs.h>
 #include <linux/seq_file.h>
 #include <linux/init.h>
+#include <linux/list.h>
 #include <linux/proc_fs.h>
 #include <linux/version.h>
@@ -140,6 +141,7 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
 #define xfs_inherit_noatime	xfs_params.inherit_noatim.val
 #define xfs_buf_timer_centisecs	xfs_params.xfs_buf_timer.val
 #define xfs_buf_age_centisecs	xfs_params.xfs_buf_age.val
+#define xfs_inherit_nosymlinks	xfs_params.inherit_nosym.val
 
 #define current_cpu()		smp_processor_id()
 #define current_pid()		(current->pid)
......
@@ -296,11 +296,6 @@ xfs_read(
 		return -EIO;
 	}
 
-	/* OK so we are holding the I/O lock for the duration
-	 * of the submission, then what happens if the I/O
-	 * does not really happen here, but is scheduled
-	 * later?
-	 */
 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
 	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
@@ -321,6 +316,7 @@ xfs_read(
 	ret = __generic_file_aio_read(iocb, iovp, segs, offset);
 	if (ret == -EIOCBQUEUED)
 		ret = wait_on_sync_kiocb(iocb);
+
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	if (ret > 0)
@@ -380,12 +376,17 @@ xfs_sendfile(
 		}
 	}
 	xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
-		   (void*)(unsigned long)target, count, *offset, ioflags);
+		   (void *)(unsigned long)target, count, *offset, ioflags);
 	ret = generic_file_sendfile(filp, offset, count, actor, target);
 
 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
-	XFS_STATS_ADD(xs_read_bytes, ret);
-	xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
+
+	if (ret > 0)
+		XFS_STATS_ADD(xs_read_bytes, ret);
+
+	if (likely(!(ioflags & IO_INVIS)))
+		xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
 
 	return ret;
 }
......
@@ -141,7 +141,7 @@ xfs_set_inodeops(
 	vnode_t		*vp = LINVFS_GET_VP(inode);
 
 	if (vp->v_type == VNON) {
-		make_bad_inode(inode);
+		vn_mark_bad(vp);
 	} else if (S_ISREG(inode->i_mode)) {
 		inode->i_op = &linvfs_file_inode_operations;
 		inode->i_fop = &linvfs_file_operations;
@@ -223,42 +223,21 @@ xfs_initialize_vnode(
 		bhv_insert(VN_BHV_HEAD(vp), inode_bhv);
 	}
 
-	vp->v_type = IFTOVT(ip->i_d.di_mode);
-
-	/* Have we been called during the new inode create process,
-	 * in which case we are too early to fill in the Linux inode.
+	/*
+	 * We need to set the ops vectors, and unlock the inode, but if
+	 * we have been called during the new inode create process, it is
+	 * too early to fill in the Linux inode.  We will get called a
+	 * second time once the inode is properly set up, and then we can
+	 * finish our work.
 	 */
-	if (vp->v_type == VNON)
-		return;
-
-	xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
-
-	/* For new inodes we need to set the ops vectors,
-	 * and unlock the inode.
-	 */
-	if (unlock && (inode->i_state & I_NEW)) {
+	if (ip->i_d.di_mode != 0 && unlock && (inode->i_state & I_NEW)) {
+		vp->v_type = IFTOVT(ip->i_d.di_mode);
+		xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip);
 		xfs_set_inodeops(inode);
 		unlock_new_inode(inode);
 	}
 }
 
-void
-xfs_flush_inode(
-	xfs_inode_t	*ip)
-{
-	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
-
-	filemap_flush(inode->i_mapping);
-}
-
-void
-xfs_flush_device(
-	xfs_inode_t	*ip)
-{
-	sync_blockdev(XFS_ITOV(ip)->v_vfsp->vfs_super->s_bdev);
-	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
-}
-
 int
 xfs_blkdev_get(
 	xfs_mount_t		*mp,
@@ -312,7 +291,6 @@ xfs_inode_shake(
 {
 	int	pages;
 
 	pages = kmem_zone_shrink(linvfs_inode_zone);
-
 	pages += kmem_zone_shrink(xfs_inode_zone);
 	return pages;
@@ -337,7 +315,6 @@ init_inodecache( void )
 	linvfs_inode_zone = kmem_cache_create("linvfs_icache",
 				sizeof(vnode_t), 0, SLAB_RECLAIM_ACCOUNT,
 				init_once, NULL);
-
 	if (linvfs_inode_zone == NULL)
 		return -ENOMEM;
 	return 0;
@@ -391,36 +368,146 @@ linvfs_clear_inode(
 }
 
+/*
+ * Enqueue a work item to be picked up by the vfs xfssyncd thread.
+ * Doing this has two advantages:
+ * - It saves on stack space, which is tight in certain situations
+ * - It can be used (with care) as a mechanism to avoid deadlocks.
+ * Flushing while allocating in a full filesystem requires both.
+ */
+STATIC void
+xfs_syncd_queue_work(
+	struct vfs	*vfs,
+	void		*data,
+	void		(*syncer)(vfs_t *, void *))
+{
+	vfs_sync_work_t	*work;
+
+	work = kmem_alloc(sizeof(struct vfs_sync_work), KM_SLEEP);
+	INIT_LIST_HEAD(&work->w_list);
+	work->w_syncer = syncer;
+	work->w_data = data;
+	work->w_vfs = vfs;
+	spin_lock(&vfs->vfs_sync_lock);
+	list_add_tail(&work->w_list, &vfs->vfs_sync_list);
+	spin_unlock(&vfs->vfs_sync_lock);
+	wake_up_process(vfs->vfs_sync_task);
+}
+
+/*
+ * Flush delayed allocate data, attempting to free up reserved space
+ * from existing allocations.  At this point a new allocation attempt
+ * has failed with ENOSPC and we are in the process of scratching our
+ * heads, looking about for more room...
+ */
+STATIC void
+xfs_flush_inode_work(
+	vfs_t		*vfs,
+	void		*inode)
+{
+	filemap_flush(((struct inode *)inode)->i_mapping);
+	iput((struct inode *)inode);
+}
+
+void
+xfs_flush_inode(
+	xfs_inode_t	*ip)
+{
+	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
+	struct vfs	*vfs = XFS_MTOVFS(ip->i_mount);
+
+	igrab(inode);
+	xfs_syncd_queue_work(vfs, inode, xfs_flush_inode_work);
+	delay(HZ/2);
+}
+
+/*
+ * This is the "bigger hammer" version of xfs_flush_inode_work...
+ * (IOW, "If at first you don't succeed, use a Bigger Hammer").
+ */
+STATIC void
+xfs_flush_device_work(
+	vfs_t		*vfs,
+	void		*inode)
+{
+	sync_blockdev(vfs->vfs_super->s_bdev);
+	iput((struct inode *)inode);
+}
+
+void
+xfs_flush_device(
+	xfs_inode_t	*ip)
+{
+	struct inode	*inode = LINVFS_GET_IP(XFS_ITOV(ip));
+	struct vfs	*vfs = XFS_MTOVFS(ip->i_mount);
+
+	igrab(inode);
+	xfs_syncd_queue_work(vfs, inode, xfs_flush_device_work);
+	delay(HZ/2);
+	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
+}
+
 #define SYNCD_FLAGS	(SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR)
 
+STATIC void
+vfs_sync_worker(
+	vfs_t		*vfsp,
+	void		*unused)
+{
+	int		error;
+
+	if (!(vfsp->vfs_flag & VFS_RDONLY))
+		VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
+	vfsp->vfs_sync_seq++;
+	wmb();
+	wake_up(&vfsp->vfs_wait_single_sync_task);
+}
+
 STATIC int
 xfssyncd(
 	void			*arg)
 {
+	long			timeleft;
 	vfs_t			*vfsp = (vfs_t *) arg;
-	int			error;
+	struct list_head	tmp;
+	struct vfs_sync_work	*work, *n;
 
 	daemonize("xfssyncd");
 
+	vfsp->vfs_sync_work.w_vfs = vfsp;
+	vfsp->vfs_sync_work.w_syncer = vfs_sync_worker;
 	vfsp->vfs_sync_task = current;
 	wmb();
 	wake_up(&vfsp->vfs_wait_sync_task);
 
+	INIT_LIST_HEAD(&tmp);
+	timeleft = (xfs_syncd_centisecs * HZ) / 100;
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout((xfs_syncd_centisecs * HZ) / 100);
+		timeleft = schedule_timeout(timeleft);
 		/* swsusp */
 		if (current->flags & PF_FREEZE)
 			refrigerator(PF_FREEZE);
 		if (vfsp->vfs_flag & VFS_UMOUNT)
 			break;
-		if (vfsp->vfs_flag & VFS_RDONLY)
-			continue;
-		VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
 
-		vfsp->vfs_sync_seq++;
-		wmb();
-		wake_up(&vfsp->vfs_wait_single_sync_task);
+		spin_lock(&vfsp->vfs_sync_lock);
+		if (!timeleft) {
+			timeleft = (xfs_syncd_centisecs * HZ) / 100;
+			INIT_LIST_HEAD(&vfsp->vfs_sync_work.w_list);
+			list_add_tail(&vfsp->vfs_sync_work.w_list,
+					&vfsp->vfs_sync_list);
+		}
+		list_for_each_entry_safe(work, n, &vfsp->vfs_sync_list, w_list)
+			list_move(&work->w_list, &tmp);
+		spin_unlock(&vfsp->vfs_sync_lock);
+
+		list_for_each_entry_safe(work, n, &tmp, w_list) {
+			(*work->w_syncer)(vfsp, work->w_data);
+			list_del(&work->w_list);
+			if (work == &vfsp->vfs_sync_work)
+				continue;
+			kmem_free(work, sizeof(struct vfs_sync_work));
+		}
 	}
 
 	vfsp->vfs_sync_task = NULL;
@@ -570,7 +657,6 @@ linvfs_get_parent(
 	int			error;
 	vnode_t			*vp, *cvp;
 	struct dentry		*parent;
-	struct inode		*ip = NULL;
 	struct dentry		dotdot;
 
 	dotdot.d_name.name = "..";
@@ -580,21 +666,13 @@ linvfs_get_parent(
 	cvp = NULL;
 	vp = LINVFS_GET_VP(child->d_inode);
 	VOP_LOOKUP(vp, &dotdot, &cvp, 0, NULL, NULL, error);
-
-	if (!error) {
-		ASSERT(cvp);
-		ip = LINVFS_GET_IP(cvp);
-		if (!ip) {
-			VN_RELE(cvp);
-			return ERR_PTR(-EACCES);
-		}
-	}
-	if (error)
+	if (unlikely(error))
 		return ERR_PTR(-error);
 
-	parent = d_alloc_anon(ip);
-	if (!parent) {
+	parent = d_alloc_anon(LINVFS_GET_IP(cvp));
+	if (unlikely(!parent)) {
 		VN_RELE(cvp);
-		parent = ERR_PTR(-ENOMEM);
+		return ERR_PTR(-ENOMEM);
 	}
 	return parent;
 }
......
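In the xfssyncd rework above, the periodic VFS_SYNC becomes just another work item: when schedule_timeout() runs down to zero the thread re-queues the embedded vfsp->vfs_sync_work (pointing at vfs_sync_worker()), while callers such as xfs_flush_inode() queue ad-hoc items through xfs_syncd_queue_work() to run on xfssyncd's shallow stack and outside their own locking context. A hedged sketch of a further deferred operation riding the same mechanism (the callback is hypothetical, and since xfs_syncd_queue_work() is STATIC the pattern only applies within the same source file):

    /* Hypothetical deferred operation: runs later in xfssyncd context.
     * Any reference taken before queueing must be dropped here, as the
     * igrab()/iput() pairing in xfs_flush_inode_work() does. */
    STATIC void
    xfs_example_work(
            vfs_t           *vfs,
            void            *data)
    {
            /* ... stack-heavy or deadlock-prone work ... */
    }

            /* caller side: */
            xfs_syncd_queue_work(vfs, data, xfs_example_work);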
@@ -129,6 +129,11 @@ STATIC ctl_table xfs_table[] = {
 	&sysctl_intvec, NULL,
 	&xfs_params.xfs_buf_age.min, &xfs_params.xfs_buf_age.max},
 
+	{XFS_INHERIT_NOSYM, "inherit_nosymlinks", &xfs_params.inherit_nosym.val,
+	sizeof(int), 0644, NULL, &proc_dointvec_minmax,
+	&sysctl_intvec, NULL,
+	&xfs_params.inherit_nosym.min, &xfs_params.inherit_nosym.max},
+
 	/* please keep this the last entry */
 #ifdef CONFIG_PROC_FS
 	{XFS_STATS_CLEAR, "stats_clear", &xfs_params.stats_clear.val,
......
@@ -59,6 +59,7 @@ typedef struct xfs_param {
 	xfs_sysctl_val_t inherit_noatim;/* Inherit the "noatime" inode flag. */
 	xfs_sysctl_val_t xfs_buf_timer;	/* Interval between xfsbufd wakeups. */
 	xfs_sysctl_val_t xfs_buf_age;	/* Metadata buffer age before flush. */
+	xfs_sysctl_val_t inherit_nosym;	/* Inherit the "nosymlinks" flag. */
 } xfs_param_t;
 
 /*
@@ -95,6 +96,7 @@ enum {
 	XFS_BUF_TIMER = 16,
 	XFS_BUF_AGE = 17,
 	/* XFS_IO_BYPASS = 18 */
+	XFS_INHERIT_NOSYM = 19,
 };
 
 extern xfs_param_t xfs_params;
......
@@ -249,6 +249,8 @@ vfs_allocate( void )
 	vfsp = kmem_zalloc(sizeof(vfs_t), KM_SLEEP);
 	bhv_head_init(VFS_BHVHEAD(vfsp), "vfs");
 
+	INIT_LIST_HEAD(&vfsp->vfs_sync_list);
+	vfsp->vfs_sync_lock = SPIN_LOCK_UNLOCKED;
 	init_waitqueue_head(&vfsp->vfs_wait_sync_task);
 	init_waitqueue_head(&vfsp->vfs_wait_single_sync_task);
 	return vfsp;
......
@@ -36,6 +36,7 @@
 #include "xfs_fs.h"
 
 struct fid;
+struct vfs;
 struct cred;
 struct vnode;
 struct kstatfs;
@@ -45,14 +46,24 @@ struct xfs_mount_args;
 
 typedef struct kstatfs	xfs_statfs_t;
 
+typedef struct vfs_sync_work {
+	struct list_head	w_list;
+	struct vfs		*w_vfs;
+	void			*w_data;	/* syncer routine argument */
+	void			(*w_syncer)(struct vfs *, void *);
+} vfs_sync_work_t;
+
 typedef struct vfs {
 	u_int			vfs_flag;	/* flags */
 	xfs_fsid_t		vfs_fsid;	/* file system ID */
 	xfs_fsid_t		*vfs_altfsid;	/* An ID fixed for life of FS */
 	bhv_head_t		vfs_bh;		/* head of vfs behavior chain */
-	struct super_block	*vfs_super;	/* Linux superblock structure */
-	struct task_struct	*vfs_sync_task;	/* xfssyncd process */
-	int			vfs_sync_seq;	/* xfssyncd generation number */
+	struct super_block	*vfs_super;	/* generic superblock pointer */
+	struct task_struct	*vfs_sync_task;	/* generalised sync thread */
+	vfs_sync_work_t		vfs_sync_work;	/* work item for VFS_SYNC */
+	struct list_head	vfs_sync_list;	/* sync thread work item list */
+	spinlock_t		vfs_sync_lock;	/* work item list lock */
+	int			vfs_sync_seq;	/* sync thread generation no. */
 	wait_queue_head_t	vfs_wait_single_sync_task;
 	wait_queue_head_t	vfs_wait_sync_task;
 } vfs_t;
......
@@ -594,6 +594,19 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
 #define VN_ATIMESET(vp, tvp)	(LINVFS_GET_IP(vp)->i_atime = *(tvp))
 #define VN_CTIMESET(vp, tvp)	(LINVFS_GET_IP(vp)->i_ctime = *(tvp))
 
+/*
+ * Dealing with bad inodes
+ */
+static inline void vn_mark_bad(struct vnode *vp)
+{
+	make_bad_inode(LINVFS_GET_IP(vp));
+}
+
+static inline int VN_BAD(struct vnode *vp)
+{
+	return is_bad_inode(LINVFS_GET_IP(vp));
+}
+
 /*
  * Some useful predicates.
  */
......
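vn_mark_bad() and VN_BAD() are thin wrappers over make_bad_inode()/is_bad_inode() on the Linux inode behind the vnode: one place to flag an inode whose setup failed, one cheap predicate to skip it later. Typical use, mirroring the xfs_reclaim() hunk further below:

    /* Bad inode: nothing to flush or truncate, get out early. */
    if (VN_BAD(vp)) {
            xfs_ireclaim(ip);
            return 0;
    }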
@@ -121,23 +121,15 @@ xfs_attr_fetch(xfs_inode_t *ip, char *name, int namelen,
 	xfs_da_args_t	args;
 	int		error;
 
-	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
-		return(EIO);
-
 	if ((XFS_IFORK_Q(ip) == 0) ||
 	    (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
 	     ip->i_d.di_anextents == 0))
 		return(ENOATTR);
 
-	if (!(flags & ATTR_KERNACCESS)) {
-		xfs_ilock(ip, XFS_ILOCK_SHARED);
-		if (!(flags & ATTR_SECURE) &&
-		     ((error = xfs_iaccess(ip, S_IRUSR, cred)))) {
-			xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	if (!(flags & (ATTR_KERNACCESS|ATTR_SECURE))) {
+		if ((error = xfs_iaccess(ip, S_IRUSR, cred)))
 			return(XFS_ERROR(error));
-		}
 	}
 
 	/*
 	 * Fill in the arg structure for this request.
@@ -167,9 +159,6 @@ xfs_attr_fetch(xfs_inode_t *ip, char *name, int namelen,
 		error = xfs_attr_node_get(&args);
 	}
 
-	if (!(flags & ATTR_KERNACCESS))
-		xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
 	/*
 	 * Return the number of bytes in the value to the caller.
 	 */
@@ -185,7 +174,7 @@ xfs_attr_get(bhv_desc_t *bdp, char *name, char *value, int *valuelenp,
 	int flags, struct cred *cred)
 {
 	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
-	int		namelen;
+	int		error, namelen;
 
 	XFS_STATS_INC(xs_attr_get);
 
@@ -195,7 +184,13 @@ xfs_attr_get(bhv_desc_t *bdp, char *name, char *value, int *valuelenp,
 	if (namelen >= MAXNAMELEN)
 		return(EFAULT);		/* match IRIX behaviour */
 
-	return xfs_attr_fetch(ip, name, namelen, value, valuelenp, flags, cred);
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+		return(EIO);
+
+	xfs_ilock(ip, XFS_ILOCK_SHARED);
+	error = xfs_attr_fetch(ip, name, namelen, value, valuelenp, flags, cred);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+	return(error);
 }
 
 /*ARGSUSED*/
@@ -718,16 +713,15 @@ xfs_attr_inactive(xfs_inode_t *dp)
 	mp = dp->i_mount;
 	ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
 
-	/* XXXsup - why on earth are we taking ILOCK_EXCL here??? */
-	xfs_ilock(dp, XFS_ILOCK_EXCL);
+	xfs_ilock(dp, XFS_ILOCK_SHARED);
 	if ((XFS_IFORK_Q(dp) == 0) ||
 	    (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
 	    (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
 	     dp->i_d.di_anextents == 0)) {
-		xfs_iunlock(dp, XFS_ILOCK_EXCL);
+		xfs_iunlock(dp, XFS_ILOCK_SHARED);
 		return(0);
 	}
-	xfs_iunlock(dp, XFS_ILOCK_EXCL);
+	xfs_iunlock(dp, XFS_ILOCK_SHARED);
 
 	/*
 	 * Start our first transaction of the day.
......
@@ -457,6 +457,8 @@ xfs_dinode_t *xfs_buf_to_dinode(struct xfs_buf *bp);
 #define XFS_DIFLAG_NOATIME_BIT   6	/* do not update atime */
 #define XFS_DIFLAG_NODUMP_BIT    7	/* do not dump */
 #define XFS_DIFLAG_RTINHERIT_BIT 8	/* create with realtime bit set */
+#define XFS_DIFLAG_PROJINHERIT_BIT  9	/* create with parents projid */
+#define XFS_DIFLAG_NOSYMLINKS_BIT  10	/* disallow symlink creation */
 #define XFS_DIFLAG_REALTIME      (1 << XFS_DIFLAG_REALTIME_BIT)
 #define XFS_DIFLAG_PREALLOC      (1 << XFS_DIFLAG_PREALLOC_BIT)
 #define XFS_DIFLAG_NEWRTBM       (1 << XFS_DIFLAG_NEWRTBM_BIT)
@@ -466,10 +468,13 @@ xfs_dinode_t *xfs_buf_to_dinode(struct xfs_buf *bp);
 #define XFS_DIFLAG_NOATIME       (1 << XFS_DIFLAG_NOATIME_BIT)
 #define XFS_DIFLAG_NODUMP        (1 << XFS_DIFLAG_NODUMP_BIT)
 #define XFS_DIFLAG_RTINHERIT     (1 << XFS_DIFLAG_RTINHERIT_BIT)
+#define XFS_DIFLAG_PROJINHERIT   (1 << XFS_DIFLAG_PROJINHERIT_BIT)
+#define XFS_DIFLAG_NOSYMLINKS    (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
 
 #define XFS_DIFLAG_ANY \
 	(XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
 	 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
-	 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT)
+	 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
+	 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS)
 
 #endif	/* __XFS_DINODE_H__ */
@@ -77,6 +77,8 @@ struct fsxattr {
 #define XFS_XFLAG_NOATIME	0x00000040	/* do not update access time */
 #define XFS_XFLAG_NODUMP	0x00000080	/* do not include in backups */
 #define XFS_XFLAG_RTINHERIT	0x00000100	/* create with rt bit set */
+#define XFS_XFLAG_PROJINHERIT	0x00000200	/* create with parents projid */
+#define XFS_XFLAG_NOSYMLINKS	0x00000400	/* disallow symlink creation */
 #define XFS_XFLAG_HASATTR	0x80000000	/* no DIFLAG for this */
 
 /*
......
@@ -457,7 +457,7 @@ xfs_iget(
 		error = xfs_iget_core(vp, mp, tp, ino,
 				lock_flags, ipp, bno);
 		if (error) {
-			make_bad_inode(inode);
+			vn_mark_bad(vp);
 			if (inode->i_state & I_NEW)
 				unlock_new_inode(inode);
 			iput(inode);
@@ -576,11 +576,8 @@ xfs_iput_new(xfs_inode_t *ip,
 
 	vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);
 
-	/* We shouldn't get here without this being true, but just in case */
-	if (inode->i_state & I_NEW) {
-		make_bad_inode(inode);
+	if (inode->i_state & I_NEW)
 		unlock_new_inode(inode);
-	}
 	if (lock_flags)
 		xfs_iunlock(ip, lock_flags);
 	VN_RELE(vp);
......
@@ -881,6 +881,10 @@ xfs_dic2xflags(
 			flags |= XFS_XFLAG_NODUMP;
 		if (di_flags & XFS_DIFLAG_RTINHERIT)
 			flags |= XFS_XFLAG_RTINHERIT;
+		if (di_flags & XFS_DIFLAG_PROJINHERIT)
+			flags |= XFS_XFLAG_PROJINHERIT;
+		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
+			flags |= XFS_XFLAG_NOSYMLINKS;
 	}
 
 	return flags;
 }
@@ -1257,6 +1261,9 @@ xfs_ialloc(
 			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
 			    xfs_inherit_sync)
 				ip->i_d.di_flags |= XFS_DIFLAG_SYNC;
+			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
+			    xfs_inherit_nosymlinks)
+				ip->i_d.di_flags |= XFS_DIFLAG_NOSYMLINKS;
 		}
 		/* FALLTHROUGH */
 	case S_IFLNK:
......
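Once xfs_dic2xflags() maps XFS_DIFLAG_NOSYMLINKS to XFS_XFLAG_NOSYMLINKS, the flag is reachable through the existing fsxattr ioctls. A userspace sketch, assuming a kernel with this change and the fsxattr definitions from xfs_fs.h (error handling abbreviated):

    #include <sys/ioctl.h>
    #include <xfs/xfs_fs.h>     /* struct fsxattr, XFS_IOC_FS[GS]ETXATTR */

    /* Set "nosymlinks" on a directory fd; with the inherit_nosymlinks
     * sysctl enabled, xfs_ialloc() then propagates the flag to new
     * children created beneath it. */
    int set_nosymlinks(int fd)
    {
            struct fsxattr fsx;

            if (ioctl(fd, XFS_IOC_FSGETXATTR, &fsx) < 0)
                    return -1;
            fsx.fsx_xflags |= XFS_XFLAG_NOSYMLINKS;
            return ioctl(fd, XFS_IOC_FSSETXATTR, &fsx);
    }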
@@ -1966,7 +1966,8 @@ xfs_growfs_rt(
 	/*
 	 * Calculate new parameters.  These are the final values to be reached.
 	 */
-	nrextents = do_div(nrblocks, in->extsize);
+	nrextents = nrblocks;
+	do_div(nrextents, in->extsize);
 	nrbmblocks = roundup_64(nrextents, NBBY * sbp->sb_blocksize);
 	nrextslog = xfs_highbit32(nrextents);
 	nrsumlevels = nrextslog + 1;
@@ -2021,7 +2022,8 @@ xfs_growfs_rt(
 			XFS_RTMIN(nrblocks,
 				  nsbp->sb_rbmblocks * NBBY *
 				  nsbp->sb_blocksize * nsbp->sb_rextsize);
-		nsbp->sb_rextents = do_div(nsbp->sb_rblocks, nsbp->sb_rextsize);
+		nsbp->sb_rextents = nsbp->sb_rblocks;
+		do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
 		nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents);
 		nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1;
 		nrsumsize =
......
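Both hunks above fix the same misuse of do_div(): the kernel macro divides its 64-bit first argument in place and returns the 32-bit remainder, so assigning its return value stored the remainder where the quotient was wanted, and clobbered the dividend as well. A userspace sketch of the semantics, with a stand-in macro mirroring do_div() (GCC statement-expression extension assumed):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in with do_div()'s semantics: divide n in place,
     * return the remainder. */
    #define do_div(n, base) ({                              \
            uint32_t __rem = (uint32_t)((n) % (base));      \
            (n) /= (base);                                  \
            __rem;                                          \
    })

    int main(void)
    {
            uint64_t nrblocks = 1000, extsize = 16, nrextents;

            /* Buggy pattern: nrextents = do_div(nrblocks, extsize)
             * would assign the remainder (8) and leave nrblocks == 62. */

            /* Fixed pattern from the hunks above: copy, then divide. */
            nrextents = nrblocks;
            do_div(nrextents, extsize);
            printf("%llu extents\n", (unsigned long long)nrextents); /* 62 */
            return 0;
    }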
@@ -1055,6 +1055,11 @@ xfs_sync_inodes(
 			continue;
 		}
 
+		if (VN_BAD(vp)) {
+			ip = ip->i_mnext;
+			continue;
+		}
+
 		if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) {
 			XFS_MOUNT_IUNLOCK(mp);
 			kmem_free(ipointer, sizeof(xfs_iptr_t));
@@ -1582,31 +1587,35 @@ xfs_vget(
 	vnode_t		**vpp,
 	fid_t		*fidp)
 {
-	xfs_fid_t	*xfid;
+	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
+	xfs_fid_t	*xfid = (struct xfs_fid *)fidp;
 	xfs_inode_t	*ip;
 	int		error;
 	xfs_ino_t	ino;
 	unsigned int	igen;
-	xfs_mount_t	*mp;
 
-	xfid = (struct xfs_fid *)fidp;
-	if (xfid->xfs_fid_len == sizeof(*xfid) - sizeof(xfid->xfs_fid_len)) {
-		ino = xfid->xfs_fid_ino;
-		igen = xfid->xfs_fid_gen;
-	} else {
-		/*
-		 * Invalid.  Since handles can be created in user space
-		 * and passed in via gethandle(), this is not cause for
-		 * a panic.
-		 */
-		return XFS_ERROR(EINVAL);
-	}
-	mp = XFS_BHVTOM(bdp);
+	/*
+	 * Invalid.  Since handles can be created in user space and passed in
+	 * via gethandle(), this is not cause for a panic.
+	 */
+	if (xfid->xfs_fid_len != sizeof(*xfid) - sizeof(xfid->xfs_fid_len))
+		return XFS_ERROR(EINVAL);
+
+	ino = xfid->xfs_fid_ino;
+	igen = xfid->xfs_fid_gen;
+
+	/*
+	 * NFS can sometimes send requests for ino 0.  Fail them gracefully.
+	 */
+	if (ino == 0)
+		return XFS_ERROR(ESTALE);
+
 	error = xfs_iget(mp, NULL, ino, XFS_ILOCK_SHARED, &ip, 0);
 	if (error) {
 		*vpp = NULL;
 		return error;
 	}
+
 	if (ip == NULL) {
 		*vpp = NULL;
 		return XFS_ERROR(EIO);
......
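With the reordered checks in xfs_vget() above, a handle is rejected before any lookup work: a length mismatch is EINVAL, and inode 0 (which NFS has been seen to request) is ESTALE instead of reaching xfs_iget(). For illustration, a well-formed fid under the new checks would be filled in roughly like this (field layout assumed from the xfs_fid definition of this era; ino and gen are placeholders):

    xfs_fid_t	fid;

    fid.xfs_fid_len = sizeof(fid) - sizeof(fid.xfs_fid_len);
    fid.xfs_fid_pad = 0;
    fid.xfs_fid_ino = ino;	/* must be non-zero now */
    fid.xfs_fid_gen = gen;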
@@ -826,29 +826,34 @@ xfs_setattr(
 				mp->m_sb.sb_blocklog;
 		}
 		if (mask & XFS_AT_XFLAGS) {
+			uint	di_flags;
+
 			/* can't set PREALLOC this way, just preserve it */
-			ip->i_d.di_flags =
-				(ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
-			if (vap->va_xflags & XFS_XFLAG_REALTIME &&
-			    (ip->i_d.di_mode & S_IFMT) == S_IFREG) {
-				ip->i_d.di_flags |= XFS_DIFLAG_REALTIME;
-				ip->i_iocore.io_flags |= XFS_IOCORE_RT;
-			} else {
-				ip->i_iocore.io_flags &= ~XFS_IOCORE_RT;
-			}
+			di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
 			if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
-				ip->i_d.di_flags |= XFS_DIFLAG_IMMUTABLE;
+				di_flags |= XFS_DIFLAG_IMMUTABLE;
 			if (vap->va_xflags & XFS_XFLAG_APPEND)
-				ip->i_d.di_flags |= XFS_DIFLAG_APPEND;
+				di_flags |= XFS_DIFLAG_APPEND;
 			if (vap->va_xflags & XFS_XFLAG_SYNC)
-				ip->i_d.di_flags |= XFS_DIFLAG_SYNC;
+				di_flags |= XFS_DIFLAG_SYNC;
 			if (vap->va_xflags & XFS_XFLAG_NOATIME)
-				ip->i_d.di_flags |= XFS_DIFLAG_NOATIME;
+				di_flags |= XFS_DIFLAG_NOATIME;
 			if (vap->va_xflags & XFS_XFLAG_NODUMP)
-				ip->i_d.di_flags |= XFS_DIFLAG_NODUMP;
-			if ((vap->va_xflags & XFS_XFLAG_RTINHERIT) &&
-			    (ip->i_d.di_mode & S_IFMT) == S_IFDIR)
-				ip->i_d.di_flags |= XFS_DIFLAG_RTINHERIT;
+				di_flags |= XFS_DIFLAG_NODUMP;
+			if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
+				if (vap->va_xflags & XFS_XFLAG_REALTIME) {
+					ip->i_iocore.io_flags |= XFS_IOCORE_RT;
+					di_flags |= XFS_DIFLAG_REALTIME;
+				}
+				if (vap->va_xflags & XFS_XFLAG_RTINHERIT)
+					di_flags |= XFS_DIFLAG_RTINHERIT;
+				if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS)
+					di_flags |= XFS_DIFLAG_NOSYMLINKS;
+			} else {
+				if (!(vap->va_xflags & XFS_XFLAG_REALTIME))
+					ip->i_iocore.io_flags &= ~XFS_IOCORE_RT;
+			}
+			ip->i_d.di_flags = di_flags;
 		}
 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 		timeflags |= XFS_ICHGTIME_CHG;
@@ -1606,7 +1611,7 @@ xfs_inactive(
 	 * If the inode is already free, then there can be nothing
 	 * to clean up here.
 	 */
-	if (ip->i_d.di_mode == 0) {
+	if (ip->i_d.di_mode == 0 || VN_BAD(vp)) {
 		ASSERT(ip->i_df.if_real_bytes == 0);
 		ASSERT(ip->i_df.if_broot_bytes == 0);
 		return VN_INACTIVE_CACHE;
@@ -3389,6 +3394,14 @@ xfs_symlink(
 	xfs_ilock(dp, XFS_ILOCK_EXCL);
 
+	/*
+	 * Check whether the directory allows new symlinks or not.
+	 */
+	if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
+		error = XFS_ERROR(EPERM);
+		goto error_return;
+	}
+
 	/*
 	 * Reserve disk quota : blocks and inode.
 	 */
@@ -3795,11 +3808,17 @@ xfs_reclaim(
 	vnode_t		*vp;
 
 	vp = BHV_TO_VNODE(bdp);
-	ip = XFS_BHVTOI(bdp);
 
 	vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
 
 	ASSERT(!VN_MAPPED(vp));
+	ip = XFS_BHVTOI(bdp);
+
+	/* bad inode, get out here ASAP */
+	if (VN_BAD(vp)) {
+		xfs_ireclaim(ip);
+		return 0;
+	}
 
 	if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
 		if (ip->i_d.di_size > 0) {
@@ -3877,8 +3896,12 @@ xfs_finish_reclaim(
 	int		sync_mode)
 {
 	xfs_ihash_t	*ih = ip->i_hash;
+	vnode_t		*vp = XFS_ITOV_NULL(ip);
 	int		error;
 
+	if (vp && VN_BAD(vp))
+		return 0;
+
 	/* The hash lock here protects a thread in xfs_iget_core from
 	 * racing with us on linking the inode back with a vnode.
 	 * Once we have the XFS_IRECLAIM flag set it will not touch
@@ -3886,8 +3909,7 @@ xfs_finish_reclaim(
 	 */
 	write_lock(&ih->ih_lock);
 	if ((ip->i_flags & XFS_IRECLAIM) ||
-	    (!(ip->i_flags & XFS_IRECLAIMABLE) &&
-	      (XFS_ITOV_NULL(ip) == NULL))) {
+	    (!(ip->i_flags & XFS_IRECLAIMABLE) && vp == NULL)) {
 		write_unlock(&ih->ih_lock);
 		if (locked) {
 			xfs_ifunlock(ip);
@@ -3954,15 +3976,13 @@ int
 xfs_finish_reclaim_all(xfs_mount_t *mp, int noblock)
 {
 	int		purged;
-	struct list_head	*curr, *next;
-	xfs_inode_t	*ip;
+	xfs_inode_t	*ip, *n;
 	int		done = 0;
 
 	while (!done) {
 		purged = 0;
 		XFS_MOUNT_ILOCK(mp);
-		list_for_each_safe(curr, next, &mp->m_del_inodes) {
-			ip = list_entry(curr, xfs_inode_t, i_reclaim);
+		list_for_each_entry_safe(ip, n, &mp->m_del_inodes, i_reclaim) {
 			if (noblock) {
 				if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0)
 					continue;
......
 /*
- * Copyright (c) 1995-2001 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 1995-2001,2004 Silicon Graphics, Inc.  All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2.1 of the GNU Lesser General Public License
  * as published by the Free Software Foundation.
  *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
  *
- * Further, this software is distributed without any warranty that it is
- * free of the rightful claim of any third person regarding infringement
- * or the like.  Any license provided herein, whether implied or
- * otherwise, applies only to this software file.  Patent licenses, if
- * any, provided herein do not apply to combinations of this program with
- * other software, or any other product whatsoever.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, write the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
- * USA.
- *
- * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
- * Mountain View, CA 94043, or:
- *
- * http://www.sgi.com
- *
- * For further information regarding this notice, see:
- *
- * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
+ * USA
+ *
+ * Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane,
+ * Mountain View, CA 94043, USA, or: http://www.sgi.com
  */
 #ifndef _LINUX_DQBLK_XFS_H
 #define _LINUX_DQBLK_XFS_H
@@ -40,12 +28,12 @@
  */
 
 #define XQM_CMD(x)	(('X'<<8)+(x))	/* note: forms first QCMD argument */
-#define Q_XQUOTAON	XQM_CMD(0x1)	/* enable accounting/enforcement */
-#define Q_XQUOTAOFF	XQM_CMD(0x2)	/* disable accounting/enforcement */
-#define Q_XGETQUOTA	XQM_CMD(0x3)	/* get disk limits and usage */
-#define Q_XSETQLIM	XQM_CMD(0x4)	/* set disk limits */
-#define Q_XGETQSTAT	XQM_CMD(0x5)	/* get quota subsystem status */
-#define Q_XQUOTARM	XQM_CMD(0x6)	/* free disk space used by dquots */
+#define Q_XQUOTAON	XQM_CMD(1)	/* enable accounting/enforcement */
+#define Q_XQUOTAOFF	XQM_CMD(2)	/* disable accounting/enforcement */
+#define Q_XGETQUOTA	XQM_CMD(3)	/* get disk limits and usage */
+#define Q_XSETQLIM	XQM_CMD(4)	/* set disk limits */
+#define Q_XGETQSTAT	XQM_CMD(5)	/* get quota subsystem status */
+#define Q_XQUOTARM	XQM_CMD(6)	/* free disk space used by dquots */
 
 /*
  * fs_disk_quota structure:
@@ -104,6 +92,19 @@ typedef struct fs_disk_quota {
 #define FS_DQ_RTBTIMER	(1<<8)
 #define FS_DQ_TIMER_MASK	(FS_DQ_BTIMER | FS_DQ_ITIMER | FS_DQ_RTBTIMER)
 
+/*
+ * Warning counts are set in both super user's dquot and others. For others,
+ * warnings are set/cleared by the administrators (or automatically by going
+ * below the soft limit).  Superusers warning values set the warning limits
+ * for the rest.  In case these values are zero, the DQ_{F,B}WARNLIMIT values
+ * defined below are used.
+ * These values also apply only to the d_fieldmask field for Q_XSETQLIM.
+ */
+#define FS_DQ_BWARNS	(1<<9)
+#define FS_DQ_IWARNS	(1<<10)
+#define FS_DQ_RTBWARNS	(1<<11)
+#define FS_DQ_WARNS_MASK	(FS_DQ_BWARNS | FS_DQ_IWARNS | FS_DQ_RTBWARNS)
+
 /*
  * Various flags related to quotactl(2).  Only relevant to XFS filesystems.
  */
@@ -111,9 +112,11 @@ typedef struct fs_disk_quota {
 #define XFS_QUOTA_UDQ_ENFD	(1<<1)	/* user quota limits enforcement */
 #define XFS_QUOTA_GDQ_ACCT	(1<<2)	/* group quota accounting */
 #define XFS_QUOTA_GDQ_ENFD	(1<<3)	/* group quota limits enforcement */
+#define XFS_QUOTA_PDQ_ACCT	(1<<4)	/* project quota accounting */
+#define XFS_QUOTA_PDQ_ENFD	(1<<5)	/* project quota limits enforcement */
 
 #define XFS_USER_QUOTA		(1<<0)	/* user quota type */
-#define XFS_PROJ_QUOTA		(1<<1)	/* (IRIX) project quota type */
+#define XFS_PROJ_QUOTA		(1<<1)	/* project quota type */
 #define XFS_GROUP_QUOTA		(1<<2)	/* group quota type */
 
 /*
......
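The new FS_DQ_*WARNS bits extend the Q_XSETQLIM field mask so administrators can set the warning counts (d_bwarns and friends) in fs_disk_quota. A hedged userspace sketch, assuming this header is the one installed as <linux/dqblk_xfs.h> and the standard quotactl(2) entry point:

    #include <sys/types.h>
    #include <sys/quota.h>
    #include <string.h>
    #include <linux/dqblk_xfs.h>

    /* Set a user's block-warning count; Q_XSETQLIM applies only the
     * fields named in d_fieldmask. */
    int set_bwarns(const char *dev, int uid, int warns)
    {
            struct fs_disk_quota d;

            memset(&d, 0, sizeof(d));
            d.d_version = FS_DQUOT_VERSION;
            d.d_flags = XFS_USER_QUOTA;
            d.d_id = uid;
            d.d_bwarns = warns;
            d.d_fieldmask = FS_DQ_BWARNS;
            return quotactl(QCMD(Q_XSETQLIM, USRQUOTA), dev, uid,
                            (caddr_t)&d);
    }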