Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
linux
Commits
4a0a9d51
Commit
4a0a9d51
authored
Sep 29, 2004
by
Richard Russon
Browse files
Options
Browse Files
Download
Plain Diff
Merge
ssh://linux-ntfs@bkbits.net/ntfs-2.6
into flatcap.org:/home/flatcap/backup/bk/ntfs-2.6
parents
27df0cf8
2499715e
Changes
22
Show whitespace changes
Inline
Side-by-side
Showing
22 changed files
with
404 additions
and
207 deletions
+404
-207
fs/bio.c
fs/bio.c
+68
-26
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_aops.c
+6
-3
fs/xfs/linux-2.6/xfs_file.c
fs/xfs/linux-2.6/xfs_file.c
+2
-0
fs/xfs/linux-2.6/xfs_globals.c
fs/xfs/linux-2.6/xfs_globals.c
+1
-0
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_iops.c
+12
-16
fs/xfs/linux-2.6/xfs_linux.h
fs/xfs/linux-2.6/xfs_linux.h
+2
-0
fs/xfs/linux-2.6/xfs_lrw.c
fs/xfs/linux-2.6/xfs_lrw.c
+9
-8
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_super.c
+133
-55
fs/xfs/linux-2.6/xfs_sysctl.c
fs/xfs/linux-2.6/xfs_sysctl.c
+5
-0
fs/xfs/linux-2.6/xfs_sysctl.h
fs/xfs/linux-2.6/xfs_sysctl.h
+2
-0
fs/xfs/linux-2.6/xfs_vfs.c
fs/xfs/linux-2.6/xfs_vfs.c
+2
-0
fs/xfs/linux-2.6/xfs_vfs.h
fs/xfs/linux-2.6/xfs_vfs.h
+14
-3
fs/xfs/linux-2.6/xfs_vnode.h
fs/xfs/linux-2.6/xfs_vnode.h
+13
-0
fs/xfs/xfs_attr.c
fs/xfs/xfs_attr.c
+13
-19
fs/xfs/xfs_dinode.h
fs/xfs/xfs_dinode.h
+6
-1
fs/xfs/xfs_fs.h
fs/xfs/xfs_fs.h
+2
-0
fs/xfs/xfs_iget.c
fs/xfs/xfs_iget.c
+2
-5
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.c
+7
-0
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_rtalloc.c
+4
-2
fs/xfs/xfs_vfsops.c
fs/xfs/xfs_vfsops.c
+23
-14
fs/xfs/xfs_vnodeops.c
fs/xfs/xfs_vnodeops.c
+45
-25
include/linux/dqblk_xfs.h
include/linux/dqblk_xfs.h
+33
-30
No files found.
fs/bio.c
View file @
4a0a9d51
...
...
@@ -372,6 +372,38 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
len
,
offset
);
}
struct
bio_map_data
{
struct
bio_vec
*
iovecs
;
void
__user
*
userptr
;
};
static
void
bio_set_map_data
(
struct
bio_map_data
*
bmd
,
struct
bio
*
bio
)
{
memcpy
(
bmd
->
iovecs
,
bio
->
bi_io_vec
,
sizeof
(
struct
bio_vec
)
*
bio
->
bi_vcnt
);
bio
->
bi_private
=
bmd
;
}
static
void
bio_free_map_data
(
struct
bio_map_data
*
bmd
)
{
kfree
(
bmd
->
iovecs
);
kfree
(
bmd
);
}
static
struct
bio_map_data
*
bio_alloc_map_data
(
int
nr_segs
)
{
struct
bio_map_data
*
bmd
=
kmalloc
(
sizeof
(
*
bmd
),
GFP_KERNEL
);
if
(
!
bmd
)
return
NULL
;
bmd
->
iovecs
=
kmalloc
(
sizeof
(
struct
bio_vec
)
*
nr_segs
,
GFP_KERNEL
);
if
(
bmd
)
return
bmd
;
kfree
(
bmd
);
return
NULL
;
}
/**
* bio_uncopy_user - finish previously mapped bio
* @bio: bio being terminated
...
...
@@ -381,20 +413,22 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
*/
int
bio_uncopy_user
(
struct
bio
*
bio
)
{
struct
bio_map_data
*
bmd
=
bio
->
bi_private
;
const
int
read
=
bio_data_dir
(
bio
)
==
READ
;
struct
bio_vec
*
bvec
;
int
i
,
ret
=
0
;
char
*
uaddr
=
bio
->
bi_private
;
__bio_for_each_segment
(
bvec
,
bio
,
i
,
0
)
{
char
*
addr
=
page_address
(
bvec
->
bv_page
);
if
(
bio_data_dir
(
bio
)
==
READ
&&
!
ret
&&
copy_to_user
(
uaddr
,
addr
,
bvec
->
bv_len
))
unsigned
int
len
=
bmd
->
iovecs
[
i
].
bv_len
;
if
(
read
&&
!
ret
&&
copy_to_user
(
bmd
->
userptr
,
addr
,
len
))
ret
=
-
EFAULT
;
__free_page
(
bvec
->
bv_page
);
uaddr
+=
bvec
->
bv_
len
;
bmd
->
userptr
+=
len
;
}
bio_free_map_data
(
bmd
);
bio_put
(
bio
);
return
ret
;
}
...
...
@@ -415,14 +449,25 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
{
unsigned
long
end
=
(
uaddr
+
len
+
PAGE_SIZE
-
1
)
>>
PAGE_SHIFT
;
unsigned
long
start
=
uaddr
>>
PAGE_SHIFT
;
struct
bio_map_data
*
bmd
;
struct
bio_vec
*
bvec
;
struct
page
*
page
;
struct
bio
*
bio
;
int
i
,
ret
;
bmd
=
bio_alloc_map_data
(
end
-
start
);
if
(
!
bmd
)
return
ERR_PTR
(
-
ENOMEM
);
bmd
->
userptr
=
(
void
__user
*
)
uaddr
;
bio
=
bio_alloc
(
GFP_KERNEL
,
end
-
start
);
if
(
!
bio
)
if
(
!
bio
)
{
bio_free_map_data
(
bmd
);
return
ERR_PTR
(
-
ENOMEM
);
}
bio
->
bi_rw
|=
(
!
write_to_vm
<<
BIO_RW
);
ret
=
0
;
while
(
len
)
{
...
...
@@ -445,13 +490,15 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
len
-=
bytes
;
}
if
(
ret
)
goto
cleanup
;
/*
* success
*/
if
(
!
ret
)
{
if
(
!
write_to_vm
)
{
unsigned
long
p
=
uaddr
;
bio
->
bi_rw
|=
(
1
<<
BIO_RW
);
/*
* for a write, copy in data to kernel pages
*/
...
...
@@ -465,13 +512,8 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
}
}
bio
->
bi_private
=
(
void
*
)
uaddr
;
bio_set_map_data
(
bmd
,
bio
)
;
return
bio
;
}
/*
* cleanup
*/
cleanup:
bio_for_each_segment
(
bvec
,
bio
,
i
)
__free_page
(
bvec
->
bv_page
);
...
...
fs/xfs/linux-2.6/xfs_aops.c
View file @
4a0a9d51
...
...
@@ -550,6 +550,7 @@ xfs_map_unwritten(
STATIC
void
xfs_submit_page
(
struct
page
*
page
,
struct
writeback_control
*
wbc
,
struct
buffer_head
*
bh_arr
[],
int
cnt
)
{
...
...
@@ -573,8 +574,10 @@ xfs_submit_page(
for
(
i
=
0
;
i
<
cnt
;
i
++
)
submit_bh
(
WRITE
,
bh_arr
[
i
]);
}
else
}
else
{
end_page_writeback
(
page
);
wbc
->
pages_skipped
++
;
/* We didn't write this page */
}
}
/*
...
...
@@ -652,7 +655,7 @@ xfs_convert_page(
if
(
startio
)
{
wbc
->
nr_to_write
--
;
xfs_submit_page
(
page
,
bh_arr
,
index
);
xfs_submit_page
(
page
,
wbc
,
bh_arr
,
index
);
}
else
{
unlock_page
(
page
);
}
...
...
@@ -864,7 +867,7 @@ xfs_page_state_convert(
SetPageUptodate
(
page
);
if
(
startio
)
xfs_submit_page
(
page
,
bh_arr
,
cnt
);
xfs_submit_page
(
page
,
wbc
,
bh_arr
,
cnt
);
if
(
iomp
)
{
tlast
=
(
iomp
->
iomap_offset
+
iomp
->
iomap_bsize
-
1
)
>>
...
...
fs/xfs/linux-2.6/xfs_file.c
View file @
4a0a9d51
...
...
@@ -409,6 +409,8 @@ linvfs_file_mmap(
vma
->
vm_ops
=
&
linvfs_file_vm_ops
;
VOP_SETATTR
(
vp
,
&
va
,
XFS_AT_UPDATIME
,
NULL
,
error
);
if
(
!
error
)
vn_revalidate
(
vp
);
/* update Linux inode flags */
return
0
;
}
...
...
fs/xfs/linux-2.6/xfs_globals.c
View file @
4a0a9d51
...
...
@@ -63,6 +63,7 @@ xfs_param_t xfs_params = {
.
inherit_noatim
=
{
0
,
1
,
1
},
.
xfs_buf_timer
=
{
100
/
2
,
1
*
100
,
30
*
100
},
.
xfs_buf_age
=
{
1
*
100
,
15
*
100
,
7200
*
100
},
.
inherit_nosym
=
{
0
,
0
,
1
},
};
/*
...
...
fs/xfs/linux-2.6/xfs_iops.c
View file @
4a0a9d51
...
...
@@ -174,8 +174,9 @@ linvfs_mknod(
*/
teardown
.
d_inode
=
ip
=
LINVFS_GET_IP
(
vp
);
teardown
.
d_name
=
dentry
->
d_name
;
remove_inode_hash
(
ip
);
make_bad_inode
(
ip
);
vn_mark_bad
(
vp
);
if
(
S_ISDIR
(
mode
))
VOP_RMDIR
(
dvp
,
&
teardown
,
NULL
,
err2
);
else
...
...
@@ -225,26 +226,21 @@ linvfs_lookup(
struct
dentry
*
dentry
,
struct
nameidata
*
nd
)
{
struct
inode
*
ip
=
NULL
;
vnode_t
*
vp
,
*
cvp
=
NULL
;
struct
vnode
*
vp
=
LINVFS_GET_VP
(
dir
),
*
cvp
;
int
error
;
if
(
dentry
->
d_name
.
len
>=
MAXNAMELEN
)
return
ERR_PTR
(
-
ENAMETOOLONG
);
vp
=
LINVFS_GET_VP
(
dir
);
VOP_LOOKUP
(
vp
,
dentry
,
&
cvp
,
0
,
NULL
,
NULL
,
error
);
if
(
!
error
)
{
ASSERT
(
cvp
);
ip
=
LINVFS_GET_IP
(
cvp
);
if
(
!
ip
)
{
VN_RELE
(
cvp
);
return
ERR_PTR
(
-
EACCES
);
}
}
if
(
error
&&
(
error
!=
ENOENT
))
if
(
error
)
{
if
(
unlikely
(
error
!=
ENOENT
))
return
ERR_PTR
(
-
error
);
return
d_splice_alias
(
ip
,
dentry
);
d_add
(
dentry
,
NULL
);
return
NULL
;
}
return
d_splice_alias
(
LINVFS_GET_IP
(
cvp
),
dentry
);
}
STATIC
int
...
...
@@ -304,7 +300,7 @@ linvfs_symlink(
{
struct
inode
*
ip
;
vattr_t
va
;
vnode_t
*
dvp
;
/* directory containing name
to remove
*/
vnode_t
*
dvp
;
/* directory containing name
of symlink
*/
vnode_t
*
cvp
;
/* used to lookup symlink to put in dentry */
int
error
;
...
...
fs/xfs/linux-2.6/xfs_linux.h
View file @
4a0a9d51
...
...
@@ -85,6 +85,7 @@
#include <linux/vfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/version.h>
...
...
@@ -140,6 +141,7 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
#define xfs_inherit_noatime xfs_params.inherit_noatim.val
#define xfs_buf_timer_centisecs xfs_params.xfs_buf_timer.val
#define xfs_buf_age_centisecs xfs_params.xfs_buf_age.val
#define xfs_inherit_nosymlinks xfs_params.inherit_nosym.val
#define current_cpu() smp_processor_id()
#define current_pid() (current->pid)
...
...
fs/xfs/linux-2.6/xfs_lrw.c
View file @
4a0a9d51
...
...
@@ -296,11 +296,6 @@ xfs_read(
return
-
EIO
;
}
/* OK so we are holding the I/O lock for the duration
* of the submission, then what happens if the I/O
* does not really happen here, but is scheduled
* later?
*/
xfs_ilock
(
ip
,
XFS_IOLOCK_SHARED
);
if
(
DM_EVENT_ENABLED
(
vp
->
v_vfsp
,
ip
,
DM_EVENT_READ
)
&&
...
...
@@ -321,6 +316,7 @@ xfs_read(
ret
=
__generic_file_aio_read
(
iocb
,
iovp
,
segs
,
offset
);
if
(
ret
==
-
EIOCBQUEUED
)
ret
=
wait_on_sync_kiocb
(
iocb
);
xfs_iunlock
(
ip
,
XFS_IOLOCK_SHARED
);
if
(
ret
>
0
)
...
...
@@ -380,12 +376,17 @@ xfs_sendfile(
}
}
xfs_rw_enter_trace
(
XFS_SENDFILE_ENTER
,
&
ip
->
i_iocore
,
(
void
*
)(
unsigned
long
)
target
,
count
,
*
offset
,
ioflags
);
(
void
*
)(
unsigned
long
)
target
,
count
,
*
offset
,
ioflags
);
ret
=
generic_file_sendfile
(
filp
,
offset
,
count
,
actor
,
target
);
xfs_iunlock
(
ip
,
XFS_IOLOCK_SHARED
);
if
(
ret
>
0
)
XFS_STATS_ADD
(
xs_read_bytes
,
ret
);
if
(
likely
(
!
(
ioflags
&
IO_INVIS
)))
xfs_ichgtime
(
ip
,
XFS_ICHGTIME_ACC
);
return
ret
;
}
...
...
fs/xfs/linux-2.6/xfs_super.c
View file @
4a0a9d51
...
...
@@ -141,7 +141,7 @@ xfs_set_inodeops(
vnode_t
*
vp
=
LINVFS_GET_VP
(
inode
);
if
(
vp
->
v_type
==
VNON
)
{
make_bad_inode
(
inode
);
vn_mark_bad
(
vp
);
}
else
if
(
S_ISREG
(
inode
->
i_mode
))
{
inode
->
i_op
=
&
linvfs_file_inode_operations
;
inode
->
i_fop
=
&
linvfs_file_operations
;
...
...
@@ -223,42 +223,21 @@ xfs_initialize_vnode(
bhv_insert
(
VN_BHV_HEAD
(
vp
),
inode_bhv
);
}
vp
->
v_type
=
IFTOVT
(
ip
->
i_d
.
di_mode
);
/* Have we been called during the new inode create process,
* in which case we are too early to fill in the Linux inode.
/*
* We need to set the ops vectors, and unlock the inode, but if
* we have been called during the new inode create process, it is
* too early to fill in the Linux inode. We will get called a
* second time once the inode is properly set up, and then we can
* finish our work.
*/
if
(
vp
->
v_type
==
VNON
)
return
;
if
(
ip
->
i_d
.
di_mode
!=
0
&&
unlock
&&
(
inode
->
i_state
&
I_NEW
))
{
vp
->
v_type
=
IFTOVT
(
ip
->
i_d
.
di_mode
);
xfs_revalidate_inode
(
XFS_BHVTOM
(
bdp
),
vp
,
ip
);
/* For new inodes we need to set the ops vectors,
* and unlock the inode.
*/
if
(
unlock
&&
(
inode
->
i_state
&
I_NEW
))
{
xfs_set_inodeops
(
inode
);
unlock_new_inode
(
inode
);
}
}
void
xfs_flush_inode
(
xfs_inode_t
*
ip
)
{
struct
inode
*
inode
=
LINVFS_GET_IP
(
XFS_ITOV
(
ip
));
filemap_flush
(
inode
->
i_mapping
);
}
void
xfs_flush_device
(
xfs_inode_t
*
ip
)
{
sync_blockdev
(
XFS_ITOV
(
ip
)
->
v_vfsp
->
vfs_super
->
s_bdev
);
xfs_log_force
(
ip
->
i_mount
,
(
xfs_lsn_t
)
0
,
XFS_LOG_FORCE
|
XFS_LOG_SYNC
);
}
int
xfs_blkdev_get
(
xfs_mount_t
*
mp
,
...
...
@@ -312,7 +291,6 @@ xfs_inode_shake(
{
int
pages
;
pages
=
kmem_zone_shrink
(
linvfs_inode_zone
);
pages
+=
kmem_zone_shrink
(
xfs_inode_zone
);
return
pages
;
...
...
@@ -337,7 +315,6 @@ init_inodecache( void )
linvfs_inode_zone
=
kmem_cache_create
(
"linvfs_icache"
,
sizeof
(
vnode_t
),
0
,
SLAB_RECLAIM_ACCOUNT
,
init_once
,
NULL
);
if
(
linvfs_inode_zone
==
NULL
)
return
-
ENOMEM
;
return
0
;
...
...
@@ -391,36 +368,146 @@ linvfs_clear_inode(
}
/*
* Enqueue a work item to be picked up by the vfs xfssyncd thread.
* Doing this has two advantages:
* - It saves on stack space, which is tight in certain situations
* - It can be used (with care) as a mechanism to avoid deadlocks.
* Flushing while allocating in a full filesystem requires both.
*/
STATIC
void
xfs_syncd_queue_work
(
struct
vfs
*
vfs
,
void
*
data
,
void
(
*
syncer
)(
vfs_t
*
,
void
*
))
{
vfs_sync_work_t
*
work
;
work
=
kmem_alloc
(
sizeof
(
struct
vfs_sync_work
),
KM_SLEEP
);
INIT_LIST_HEAD
(
&
work
->
w_list
);
work
->
w_syncer
=
syncer
;
work
->
w_data
=
data
;
work
->
w_vfs
=
vfs
;
spin_lock
(
&
vfs
->
vfs_sync_lock
);
list_add_tail
(
&
work
->
w_list
,
&
vfs
->
vfs_sync_list
);
spin_unlock
(
&
vfs
->
vfs_sync_lock
);
wake_up_process
(
vfs
->
vfs_sync_task
);
}
/*
* Flush delayed allocate data, attempting to free up reserved space
* from existing allocations. At this point a new allocation attempt
* has failed with ENOSPC and we are in the process of scratching our
* heads, looking about for more room...
*/
STATIC
void
xfs_flush_inode_work
(
vfs_t
*
vfs
,
void
*
inode
)
{
filemap_flush
(((
struct
inode
*
)
inode
)
->
i_mapping
);
iput
((
struct
inode
*
)
inode
);
}
void
xfs_flush_inode
(
xfs_inode_t
*
ip
)
{
struct
inode
*
inode
=
LINVFS_GET_IP
(
XFS_ITOV
(
ip
));
struct
vfs
*
vfs
=
XFS_MTOVFS
(
ip
->
i_mount
);
igrab
(
inode
);
xfs_syncd_queue_work
(
vfs
,
inode
,
xfs_flush_inode_work
);
delay
(
HZ
/
2
);
}
/*
* This is the "bigger hammer" version of xfs_flush_inode_work...
* (IOW, "If at first you don't succeed, use a Bigger Hammer").
*/
STATIC
void
xfs_flush_device_work
(
vfs_t
*
vfs
,
void
*
inode
)
{
sync_blockdev
(
vfs
->
vfs_super
->
s_bdev
);
iput
((
struct
inode
*
)
inode
);
}
void
xfs_flush_device
(
xfs_inode_t
*
ip
)
{
struct
inode
*
inode
=
LINVFS_GET_IP
(
XFS_ITOV
(
ip
));
struct
vfs
*
vfs
=
XFS_MTOVFS
(
ip
->
i_mount
);
igrab
(
inode
);
xfs_syncd_queue_work
(
vfs
,
inode
,
xfs_flush_device_work
);
delay
(
HZ
/
2
);
xfs_log_force
(
ip
->
i_mount
,
(
xfs_lsn_t
)
0
,
XFS_LOG_FORCE
|
XFS_LOG_SYNC
);
}
#define SYNCD_FLAGS (SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR)
STATIC
void
vfs_sync_worker
(
vfs_t
*
vfsp
,
void
*
unused
)
{
int
error
;
if
(
!
(
vfsp
->
vfs_flag
&
VFS_RDONLY
))
VFS_SYNC
(
vfsp
,
SYNCD_FLAGS
,
NULL
,
error
);
vfsp
->
vfs_sync_seq
++
;
wmb
();
wake_up
(
&
vfsp
->
vfs_wait_single_sync_task
);
}
STATIC
int
xfssyncd
(
void
*
arg
)
{
long
timeleft
;
vfs_t
*
vfsp
=
(
vfs_t
*
)
arg
;
int
error
;
struct
list_head
tmp
;
struct
vfs_sync_work
*
work
,
*
n
;
daemonize
(
"xfssyncd"
);
vfsp
->
vfs_sync_work
.
w_vfs
=
vfsp
;
vfsp
->
vfs_sync_work
.
w_syncer
=
vfs_sync_worker
;
vfsp
->
vfs_sync_task
=
current
;
wmb
();
wake_up
(
&
vfsp
->
vfs_wait_sync_task
);
INIT_LIST_HEAD
(
&
tmp
);
timeleft
=
(
xfs_syncd_centisecs
*
HZ
)
/
100
;
for
(;;)
{
set_current_state
(
TASK_INTERRUPTIBLE
);
schedule_timeout
((
xfs_syncd_centisecs
*
HZ
)
/
100
);
timeleft
=
schedule_timeout
(
timeleft
);
/* swsusp */
if
(
current
->
flags
&
PF_FREEZE
)
refrigerator
(
PF_FREEZE
);
if
(
vfsp
->
vfs_flag
&
VFS_UMOUNT
)
break
;
if
(
vfsp
->
vfs_flag
&
VFS_RDONLY
)
continue
;
VFS_SYNC
(
vfsp
,
SYNCD_FLAGS
,
NULL
,
error
);
vfsp
->
vfs_sync_seq
++
;
wmb
();
wake_up
(
&
vfsp
->
vfs_wait_single_sync_task
);
spin_lock
(
&
vfsp
->
vfs_sync_lock
);
if
(
!
timeleft
)
{
timeleft
=
(
xfs_syncd_centisecs
*
HZ
)
/
100
;
INIT_LIST_HEAD
(
&
vfsp
->
vfs_sync_work
.
w_list
);
list_add_tail
(
&
vfsp
->
vfs_sync_work
.
w_list
,
&
vfsp
->
vfs_sync_list
);
}
list_for_each_entry_safe
(
work
,
n
,
&
vfsp
->
vfs_sync_list
,
w_list
)
list_move
(
&
work
->
w_list
,
&
tmp
);
spin_unlock
(
&
vfsp
->
vfs_sync_lock
);
list_for_each_entry_safe
(
work
,
n
,
&
tmp
,
w_list
)
{
(
*
work
->
w_syncer
)(
vfsp
,
work
->
w_data
);
list_del
(
&
work
->
w_list
);
if
(
work
==
&
vfsp
->
vfs_sync_work
)
continue
;
kmem_free
(
work
,
sizeof
(
struct
vfs_sync_work
));
}
}
vfsp
->
vfs_sync_task
=
NULL
;
...
...
@@ -570,7 +657,6 @@ linvfs_get_parent(
int
error
;
vnode_t
*
vp
,
*
cvp
;
struct
dentry
*
parent
;
struct
inode
*
ip
=
NULL
;
struct
dentry
dotdot
;
dotdot
.
d_name
.
name
=
".."
;
...
...
@@ -580,21 +666,13 @@ linvfs_get_parent(
cvp
=
NULL
;
vp
=
LINVFS_GET_VP
(
child
->
d_inode
);
VOP_LOOKUP
(
vp
,
&
dotdot
,
&
cvp
,
0
,
NULL
,
NULL
,
error
);
if
(
!
error
)
{
ASSERT
(
cvp
);
ip
=
LINVFS_GET_IP
(
cvp
);
if
(
!
ip
)
{
VN_RELE
(
cvp
);
return
ERR_PTR
(
-
EACCES
);
}
}
if
(
error
)
if
(
unlikely
(
error
))
return
ERR_PTR
(
-
error
);
parent
=
d_alloc_anon
(
ip
);
if
(
!
parent
)
{
parent
=
d_alloc_anon
(
LINVFS_GET_IP
(
cvp
));
if
(
unlikely
(
!
parent
))
{
VN_RELE
(
cvp
);
parent
=
ERR_PTR
(
-
ENOMEM
);
return
ERR_PTR
(
-
ENOMEM
);
}
return
parent
;
}
...
...
fs/xfs/linux-2.6/xfs_sysctl.c
View file @
4a0a9d51
...
...
@@ -129,6 +129,11 @@ STATIC ctl_table xfs_table[] = {
&
sysctl_intvec
,
NULL
,
&
xfs_params
.
xfs_buf_age
.
min
,
&
xfs_params
.
xfs_buf_age
.
max
},
{
XFS_INHERIT_NOSYM
,
"inherit_nosymlinks"
,
&
xfs_params
.
inherit_nosym
.
val
,
sizeof
(
int
),
0644
,
NULL
,
&
proc_dointvec_minmax
,
&
sysctl_intvec
,
NULL
,
&
xfs_params
.
inherit_nosym
.
min
,
&
xfs_params
.
inherit_nosym
.
max
},
/* please keep this the last entry */
#ifdef CONFIG_PROC_FS
{
XFS_STATS_CLEAR
,
"stats_clear"
,
&
xfs_params
.
stats_clear
.
val
,
...
...
fs/xfs/linux-2.6/xfs_sysctl.h
View file @
4a0a9d51
...
...
@@ -59,6 +59,7 @@ typedef struct xfs_param {
xfs_sysctl_val_t
inherit_noatim
;
/* Inherit the "noatime" inode flag. */
xfs_sysctl_val_t
xfs_buf_timer
;
/* Interval between xfsbufd wakeups. */
xfs_sysctl_val_t
xfs_buf_age
;
/* Metadata buffer age before flush. */
xfs_sysctl_val_t
inherit_nosym
;
/* Inherit the "nosymlinks" flag. */
}
xfs_param_t
;
/*
...
...
@@ -95,6 +96,7 @@ enum {
XFS_BUF_TIMER
=
16
,
XFS_BUF_AGE
=
17
,
/* XFS_IO_BYPASS = 18 */
XFS_INHERIT_NOSYM
=
19
,
};
extern
xfs_param_t
xfs_params
;
...
...
fs/xfs/linux-2.6/xfs_vfs.c
View file @
4a0a9d51
...
...
@@ -249,6 +249,8 @@ vfs_allocate( void )
vfsp
=
kmem_zalloc
(
sizeof
(
vfs_t
),
KM_SLEEP
);
bhv_head_init
(
VFS_BHVHEAD
(
vfsp
),
"vfs"
);
INIT_LIST_HEAD
(
&
vfsp
->
vfs_sync_list
);
vfsp
->
vfs_sync_lock
=
SPIN_LOCK_UNLOCKED
;
init_waitqueue_head
(
&
vfsp
->
vfs_wait_sync_task
);
init_waitqueue_head
(
&
vfsp
->
vfs_wait_single_sync_task
);
return
vfsp
;
...
...
fs/xfs/linux-2.6/xfs_vfs.h
View file @
4a0a9d51
...
...
@@ -36,6 +36,7 @@
#include "xfs_fs.h"
struct
fid
;
struct
vfs
;
struct
cred
;
struct
vnode
;
struct
kstatfs
;
...
...
@@ -45,14 +46,24 @@ struct xfs_mount_args;
typedef
struct
kstatfs
xfs_statfs_t
;
typedef
struct
vfs_sync_work
{
struct
list_head
w_list
;
struct
vfs
*
w_vfs
;
void
*
w_data
;
/* syncer routine argument */
void
(
*
w_syncer
)(
struct
vfs
*
,
void
*
);
}
vfs_sync_work_t
;
typedef
struct
vfs
{
u_int
vfs_flag
;
/* flags */
xfs_fsid_t
vfs_fsid
;
/* file system ID */
xfs_fsid_t
*
vfs_altfsid
;
/* An ID fixed for life of FS */
bhv_head_t
vfs_bh
;
/* head of vfs behavior chain */
struct
super_block
*
vfs_super
;
/* Linux superblock structure */
struct
task_struct
*
vfs_sync_task
;
/* xfssyncd process */
int
vfs_sync_seq
;
/* xfssyncd generation number */
struct
super_block
*
vfs_super
;
/* generic superblock pointer */
struct
task_struct
*
vfs_sync_task
;
/* generalised sync thread */
vfs_sync_work_t
vfs_sync_work
;
/* work item for VFS_SYNC */
struct
list_head
vfs_sync_list
;
/* sync thread work item list */
spinlock_t
vfs_sync_lock
;
/* work item list lock */
int
vfs_sync_seq
;
/* sync thread generation no. */
wait_queue_head_t
vfs_wait_single_sync_task
;
wait_queue_head_t
vfs_wait_sync_task
;
}
vfs_t
;
...
...
fs/xfs/linux-2.6/xfs_vnode.h
View file @
4a0a9d51
...
...
@@ -594,6 +594,19 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
#define VN_ATIMESET(vp, tvp) (LINVFS_GET_IP(vp)->i_atime = *(tvp))
#define VN_CTIMESET(vp, tvp) (LINVFS_GET_IP(vp)->i_ctime = *(tvp))
/*
* Dealing with bad inodes
*/
static
inline
void
vn_mark_bad
(
struct
vnode
*
vp
)
{
make_bad_inode
(
LINVFS_GET_IP
(
vp
));
}
static
inline
int
VN_BAD
(
struct
vnode
*
vp
)
{
return
is_bad_inode
(
LINVFS_GET_IP
(
vp
));
}
/*
* Some useful predicates.
*/
...
...
fs/xfs/xfs_attr.c
View file @
4a0a9d51
...
...
@@ -121,23 +121,15 @@ xfs_attr_fetch(xfs_inode_t *ip, char *name, int namelen,
xfs_da_args_t
args
;
int
error
;
if
(
XFS_FORCED_SHUTDOWN
(
ip
->
i_mount
))
return
(
EIO
);
if
((
XFS_IFORK_Q
(
ip
)
==
0
)
||
(
ip
->
i_d
.
di_aformat
==
XFS_DINODE_FMT_EXTENTS
&&
ip
->
i_d
.
di_anextents
==
0
))
return
(
ENOATTR
);
if
(
!
(
flags
&
ATTR_KERNACCESS
))
{
xfs_ilock
(
ip
,
XFS_ILOCK_SHARED
);
if
(
!
(
flags
&
ATTR_SECURE
)
&&
((
error
=
xfs_iaccess
(
ip
,
S_IRUSR
,
cred
))))
{
xfs_iunlock
(
ip
,
XFS_ILOCK_SHARED
);
if
(
!
(
flags
&
(
ATTR_KERNACCESS
|
ATTR_SECURE
)))
{
if
((
error
=
xfs_iaccess
(
ip
,
S_IRUSR
,
cred
)))
return
(
XFS_ERROR
(
error
));
}
}
/*
* Fill in the arg structure for this request.
...
...
@@ -167,9 +159,6 @@ xfs_attr_fetch(xfs_inode_t *ip, char *name, int namelen,
error
=
xfs_attr_node_get
(
&
args
);
}
if
(
!
(
flags
&
ATTR_KERNACCESS
))
xfs_iunlock
(
ip
,
XFS_ILOCK_SHARED
);
/*
* Return the number of bytes in the value to the caller.
*/
...
...
@@ -185,7 +174,7 @@ xfs_attr_get(bhv_desc_t *bdp, char *name, char *value, int *valuelenp,
int
flags
,
struct
cred
*
cred
)
{
xfs_inode_t
*
ip
=
XFS_BHVTOI
(
bdp
);
int
namelen
;
int
error
,
namelen
;
XFS_STATS_INC
(
xs_attr_get
);
...
...
@@ -195,7 +184,13 @@ xfs_attr_get(bhv_desc_t *bdp, char *name, char *value, int *valuelenp,
if
(
namelen
>=
MAXNAMELEN
)
return
(
EFAULT
);
/* match IRIX behaviour */
return
xfs_attr_fetch
(
ip
,
name
,
namelen
,
value
,
valuelenp
,
flags
,
cred
);
if
(
XFS_FORCED_SHUTDOWN
(
ip
->
i_mount
))
return
(
EIO
);
xfs_ilock
(
ip
,
XFS_ILOCK_SHARED
);
error
=
xfs_attr_fetch
(
ip
,
name
,
namelen
,
value
,
valuelenp
,
flags
,
cred
);
xfs_iunlock
(
ip
,
XFS_ILOCK_SHARED
);
return
(
error
);
}
/*ARGSUSED*/
...
...
@@ -718,16 +713,15 @@ xfs_attr_inactive(xfs_inode_t *dp)
mp
=
dp
->
i_mount
;
ASSERT
(
!
XFS_NOT_DQATTACHED
(
mp
,
dp
));
/* XXXsup - why on earth are we taking ILOCK_EXCL here??? */
xfs_ilock
(
dp
,
XFS_ILOCK_EXCL
);
xfs_ilock
(
dp
,
XFS_ILOCK_SHARED
);
if
((
XFS_IFORK_Q
(
dp
)
==
0
)
||
(
dp
->
i_d
.
di_aformat
==
XFS_DINODE_FMT_LOCAL
)
||
(
dp
->
i_d
.
di_aformat
==
XFS_DINODE_FMT_EXTENTS
&&
dp
->
i_d
.
di_anextents
==
0
))
{
xfs_iunlock
(
dp
,
XFS_ILOCK_
EXCL
);
xfs_iunlock
(
dp
,
XFS_ILOCK_
SHARED
);
return
(
0
);
}
xfs_iunlock
(
dp
,
XFS_ILOCK_
EXCL
);
xfs_iunlock
(
dp
,
XFS_ILOCK_
SHARED
);
/*
* Start our first transaction of the day.
...
...
fs/xfs/xfs_dinode.h
View file @
4a0a9d51
...
...
@@ -457,6 +457,8 @@ xfs_dinode_t *xfs_buf_to_dinode(struct xfs_buf *bp);
#define XFS_DIFLAG_NOATIME_BIT 6
/* do not update atime */
#define XFS_DIFLAG_NODUMP_BIT 7
/* do not dump */
#define XFS_DIFLAG_RTINHERIT_BIT 8
/* create with realtime bit set */
#define XFS_DIFLAG_PROJINHERIT_BIT 9
/* create with parents projid */
#define XFS_DIFLAG_NOSYMLINKS_BIT 10
/* disallow symlink creation */
#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
...
...
@@ -466,10 +468,13 @@ xfs_dinode_t *xfs_buf_to_dinode(struct xfs_buf *bp);
#define XFS_DIFLAG_NOATIME (1 << XFS_DIFLAG_NOATIME_BIT)
#define XFS_DIFLAG_NODUMP (1 << XFS_DIFLAG_NODUMP_BIT)
#define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT)
#define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT)
#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
#define XFS_DIFLAG_ANY \
(XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT)
XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS)
#endif
/* __XFS_DINODE_H__ */
fs/xfs/xfs_fs.h
View file @
4a0a9d51
...
...
@@ -77,6 +77,8 @@ struct fsxattr {
#define XFS_XFLAG_NOATIME 0x00000040
/* do not update access time */
#define XFS_XFLAG_NODUMP 0x00000080
/* do not include in backups */
#define XFS_XFLAG_RTINHERIT 0x00000100
/* create with rt bit set */
#define XFS_XFLAG_PROJINHERIT 0x00000200
/* create with parents projid */
#define XFS_XFLAG_NOSYMLINKS 0x00000400
/* disallow symlink creation */
#define XFS_XFLAG_HASATTR 0x80000000
/* no DIFLAG for this */
/*
...
...
fs/xfs/xfs_iget.c
View file @
4a0a9d51
...
...
@@ -457,7 +457,7 @@ xfs_iget(
error
=
xfs_iget_core
(
vp
,
mp
,
tp
,
ino
,
lock_flags
,
ipp
,
bno
);
if
(
error
)
{
make_bad_inode
(
inode
);
vn_mark_bad
(
vp
);
if
(
inode
->
i_state
&
I_NEW
)
unlock_new_inode
(
inode
);
iput
(
inode
);
...
...
@@ -576,11 +576,8 @@ xfs_iput_new(xfs_inode_t *ip,
vn_trace_entry
(
vp
,
"xfs_iput_new"
,
(
inst_t
*
)
__return_address
);
/* We shouldn't get here without this being true, but just in case */
if
(
inode
->
i_state
&
I_NEW
)
{
make_bad_inode
(
inode
);
if
(
inode
->
i_state
&
I_NEW
)
unlock_new_inode
(
inode
);
}
if
(
lock_flags
)
xfs_iunlock
(
ip
,
lock_flags
);
VN_RELE
(
vp
);
...
...
fs/xfs/xfs_inode.c
View file @
4a0a9d51
...
...
@@ -881,6 +881,10 @@ xfs_dic2xflags(
flags
|=
XFS_XFLAG_NODUMP
;
if
(
di_flags
&
XFS_DIFLAG_RTINHERIT
)
flags
|=
XFS_XFLAG_RTINHERIT
;
if
(
di_flags
&
XFS_DIFLAG_PROJINHERIT
)
flags
|=
XFS_XFLAG_PROJINHERIT
;
if
(
di_flags
&
XFS_DIFLAG_NOSYMLINKS
)
flags
|=
XFS_XFLAG_NOSYMLINKS
;
}
return
flags
;
}
...
...
@@ -1257,6 +1261,9 @@ xfs_ialloc(
if
((
pip
->
i_d
.
di_flags
&
XFS_DIFLAG_SYNC
)
&&
xfs_inherit_sync
)
ip
->
i_d
.
di_flags
|=
XFS_DIFLAG_SYNC
;
if
((
pip
->
i_d
.
di_flags
&
XFS_DIFLAG_NOSYMLINKS
)
&&
xfs_inherit_nosymlinks
)
ip
->
i_d
.
di_flags
|=
XFS_DIFLAG_NOSYMLINKS
;
}
/* FALLTHROUGH */
case
S_IFLNK
:
...
...
fs/xfs/xfs_rtalloc.c
View file @
4a0a9d51
...
...
@@ -1966,7 +1966,8 @@ xfs_growfs_rt(
/*
* Calculate new parameters. These are the final values to be reached.
*/
nrextents
=
do_div
(
nrblocks
,
in
->
extsize
);
nrextents
=
nrblocks
;
do_div
(
nrextents
,
in
->
extsize
);
nrbmblocks
=
roundup_64
(
nrextents
,
NBBY
*
sbp
->
sb_blocksize
);
nrextslog
=
xfs_highbit32
(
nrextents
);
nrsumlevels
=
nrextslog
+
1
;
...
...
@@ -2021,7 +2022,8 @@ xfs_growfs_rt(
XFS_RTMIN
(
nrblocks
,
nsbp
->
sb_rbmblocks
*
NBBY
*
nsbp
->
sb_blocksize
*
nsbp
->
sb_rextsize
);
nsbp
->
sb_rextents
=
do_div
(
nsbp
->
sb_rblocks
,
nsbp
->
sb_rextsize
);
nsbp
->
sb_rextents
=
nsbp
->
sb_rblocks
;
do_div
(
nsbp
->
sb_rextents
,
nsbp
->
sb_rextsize
);
nsbp
->
sb_rextslog
=
xfs_highbit32
(
nsbp
->
sb_rextents
);
nrsumlevels
=
nmp
->
m_rsumlevels
=
nsbp
->
sb_rextslog
+
1
;
nrsumsize
=
...
...
fs/xfs/xfs_vfsops.c
View file @
4a0a9d51
...
...
@@ -1055,6 +1055,11 @@ xfs_sync_inodes(
continue
;
}
if
(
VN_BAD
(
vp
))
{
ip
=
ip
->
i_mnext
;
continue
;
}
if
(
XFS_FORCED_SHUTDOWN
(
mp
)
&&
!
(
flags
&
SYNC_CLOSE
))
{
XFS_MOUNT_IUNLOCK
(
mp
);
kmem_free
(
ipointer
,
sizeof
(
xfs_iptr_t
));
...
...
@@ -1582,31 +1587,35 @@ xfs_vget(
vnode_t
**
vpp
,
fid_t
*
fidp
)
{
xfs_fid_t
*
xfid
;
xfs_mount_t
*
mp
=
XFS_BHVTOM
(
bdp
);
xfs_fid_t
*
xfid
=
(
struct
xfs_fid
*
)
fidp
;
xfs_inode_t
*
ip
;
int
error
;
xfs_ino_t
ino
;
unsigned
int
igen
;
xfs_mount_t
*
mp
;
xfid
=
(
struct
xfs_fid
*
)
fidp
;
if
(
xfid
->
xfs_fid_len
==
sizeof
(
*
xfid
)
-
sizeof
(
xfid
->
xfs_fid_len
))
{
/*
* Invalid. Since handles can be created in user space and passed in
* via gethandle(), this is not cause for a panic.
*/
if
(
xfid
->
xfs_fid_len
!=
sizeof
(
*
xfid
)
-
sizeof
(
xfid
->
xfs_fid_len
))
return
XFS_ERROR
(
EINVAL
);
ino
=
xfid
->
xfs_fid_ino
;
igen
=
xfid
->
xfs_fid_gen
;
}
else
{
/*
* Invalid. Since handles can be created in user space
* and passed in via gethandle(), this is not cause for
* a panic.
* NFS can sometimes send requests for ino 0. Fail them gracefully.
*/
return
XFS_ERROR
(
EINVAL
);
}
mp
=
XFS_BHVTOM
(
bdp
);
if
(
ino
==
0
)
return
XFS_ERROR
(
ESTALE
);
error
=
xfs_iget
(
mp
,
NULL
,
ino
,
XFS_ILOCK_SHARED
,
&
ip
,
0
);
if
(
error
)
{
*
vpp
=
NULL
;
return
error
;
}
if
(
ip
==
NULL
)
{
*
vpp
=
NULL
;
return
XFS_ERROR
(
EIO
);
...
...
fs/xfs/xfs_vnodeops.c
View file @
4a0a9d51
...
...
@@ -826,29 +826,34 @@ xfs_setattr(
mp
->
m_sb
.
sb_blocklog
;
}
if
(
mask
&
XFS_AT_XFLAGS
)
{
uint
di_flags
;
/* can't set PREALLOC this way, just preserve it */
ip
->
i_d
.
di_flags
=
(
ip
->
i_d
.
di_flags
&
XFS_DIFLAG_PREALLOC
);
if
(
vap
->
va_xflags
&
XFS_XFLAG_REALTIME
&&
(
ip
->
i_d
.
di_mode
&
S_IFMT
)
==
S_IFREG
)
{
ip
->
i_d
.
di_flags
|=
XFS_DIFLAG_REALTIME
;
ip
->
i_iocore
.
io_flags
|=
XFS_IOCORE_RT
;
}
else
{
ip
->
i_iocore
.
io_flags
&=
~
XFS_IOCORE_RT
;
}
di_flags
=
(
ip
->
i_d
.
di_flags
&
XFS_DIFLAG_PREALLOC
);
if
(
vap
->
va_xflags
&
XFS_XFLAG_IMMUTABLE
)
ip
->
i_d
.
di_flags
|=
XFS_DIFLAG_IMMUTABLE
;
di_flags
|=
XFS_DIFLAG_IMMUTABLE
;
if
(
vap
->
va_xflags
&
XFS_XFLAG_APPEND
)
ip
->
i_d
.
di_flags
|=
XFS_DIFLAG_APPEND
;
di_flags
|=
XFS_DIFLAG_APPEND
;
if
(
vap
->
va_xflags
&
XFS_XFLAG_SYNC
)
ip
->
i_d
.
di_flags
|=
XFS_DIFLAG_SYNC
;
di_flags
|=
XFS_DIFLAG_SYNC
;
if
(
vap
->
va_xflags
&
XFS_XFLAG_NOATIME
)
ip
->
i_d
.
di_flags
|=
XFS_DIFLAG_NOATIME
;
di_flags
|=
XFS_DIFLAG_NOATIME
;
if
(
vap
->
va_xflags
&
XFS_XFLAG_NODUMP
)
ip
->
i_d
.
di_flags
|=
XFS_DIFLAG_NODUMP
;
if
((
vap
->
va_xflags
&
XFS_XFLAG_RTINHERIT
)
&&
(
ip
->
i_d
.
di_mode
&
S_IFMT
)
==
S_IFDIR
)
ip
->
i_d
.
di_flags
|=
XFS_DIFLAG_RTINHERIT
;
di_flags
|=
XFS_DIFLAG_NODUMP
;
if
((
ip
->
i_d
.
di_mode
&
S_IFMT
)
==
S_IFDIR
)
{
if
(
vap
->
va_xflags
&
XFS_XFLAG_REALTIME
)
{
ip
->
i_iocore
.
io_flags
|=
XFS_IOCORE_RT
;
di_flags
|=
XFS_DIFLAG_REALTIME
;
}
if
(
vap
->
va_xflags
&
XFS_XFLAG_RTINHERIT
)
di_flags
|=
XFS_DIFLAG_RTINHERIT
;
if
(
vap
->
va_xflags
&
XFS_XFLAG_NOSYMLINKS
)
di_flags
|=
XFS_DIFLAG_NOSYMLINKS
;
}
else
{
if
(
!
(
vap
->
va_xflags
&
XFS_XFLAG_REALTIME
))
ip
->
i_iocore
.
io_flags
&=
~
XFS_IOCORE_RT
;
}
ip
->
i_d
.
di_flags
=
di_flags
;
}
xfs_trans_log_inode
(
tp
,
ip
,
XFS_ILOG_CORE
);
timeflags
|=
XFS_ICHGTIME_CHG
;
...
...
@@ -1606,7 +1611,7 @@ xfs_inactive(
* If the inode is already free, then there can be nothing
* to clean up here.
*/
if
(
ip
->
i_d
.
di_mode
==
0
)
{
if
(
ip
->
i_d
.
di_mode
==
0
||
VN_BAD
(
vp
)
)
{
ASSERT
(
ip
->
i_df
.
if_real_bytes
==
0
);
ASSERT
(
ip
->
i_df
.
if_broot_bytes
==
0
);
return
VN_INACTIVE_CACHE
;
...
...
@@ -3389,6 +3394,14 @@ xfs_symlink(
xfs_ilock
(
dp
,
XFS_ILOCK_EXCL
);
/*
* Check whether the directory allows new symlinks or not.
*/
if
(
dp
->
i_d
.
di_flags
&
XFS_DIFLAG_NOSYMLINKS
)
{
error
=
XFS_ERROR
(
EPERM
);
goto
error_return
;
}
/*
* Reserve disk quota : blocks and inode.
*/
...
...
@@ -3795,11 +3808,17 @@ xfs_reclaim(
vnode_t
*
vp
;
vp
=
BHV_TO_VNODE
(
bdp
);
ip
=
XFS_BHVTOI
(
bdp
);
vn_trace_entry
(
vp
,
__FUNCTION__
,
(
inst_t
*
)
__return_address
);
ASSERT
(
!
VN_MAPPED
(
vp
));
ip
=
XFS_BHVTOI
(
bdp
);
/* bad inode, get out here ASAP */
if
(
VN_BAD
(
vp
))
{
xfs_ireclaim
(
ip
);
return
0
;
}
if
((
ip
->
i_d
.
di_mode
&
S_IFMT
)
==
S_IFREG
)
{
if
(
ip
->
i_d
.
di_size
>
0
)
{
...
...
@@ -3877,8 +3896,12 @@ xfs_finish_reclaim(
int
sync_mode
)
{
xfs_ihash_t
*
ih
=
ip
->
i_hash
;
vnode_t
*
vp
=
XFS_ITOV_NULL
(
ip
);
int
error
;
if
(
vp
&&
VN_BAD
(
vp
))
return
0
;
/* The hash lock here protects a thread in xfs_iget_core from
* racing with us on linking the inode back with a vnode.
* Once we have the XFS_IRECLAIM flag set it will not touch
...
...
@@ -3886,8 +3909,7 @@ xfs_finish_reclaim(
*/
write_lock
(
&
ih
->
ih_lock
);
if
((
ip
->
i_flags
&
XFS_IRECLAIM
)
||
(
!
(
ip
->
i_flags
&
XFS_IRECLAIMABLE
)
&&
(
XFS_ITOV_NULL
(
ip
)
==
NULL
)))
{
(
!
(
ip
->
i_flags
&
XFS_IRECLAIMABLE
)
&&
vp
==
NULL
))
{
write_unlock
(
&
ih
->
ih_lock
);
if
(
locked
)
{
xfs_ifunlock
(
ip
);
...
...
@@ -3954,15 +3976,13 @@ int
xfs_finish_reclaim_all
(
xfs_mount_t
*
mp
,
int
noblock
)
{
int
purged
;
struct
list_head
*
curr
,
*
next
;
xfs_inode_t
*
ip
;
xfs_inode_t
*
ip
,
*
n
;
int
done
=
0
;
while
(
!
done
)
{
purged
=
0
;
XFS_MOUNT_ILOCK
(
mp
);
list_for_each_safe
(
curr
,
next
,
&
mp
->
m_del_inodes
)
{
ip
=
list_entry
(
curr
,
xfs_inode_t
,
i_reclaim
);
list_for_each_entry_safe
(
ip
,
n
,
&
mp
->
m_del_inodes
,
i_reclaim
)
{
if
(
noblock
)
{
if
(
xfs_ilock_nowait
(
ip
,
XFS_ILOCK_EXCL
)
==
0
)
continue
;
...
...
include/linux/dqblk_xfs.h
View file @
4a0a9d51
/*
* Copyright (c) 1995-2001 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 1995-2001
,2004
Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2.1 of the GNU Lesser General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307,
* USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
* Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane,
* Mountain View, CA 94043, USA, or: http://www.sgi.com
*/
#ifndef _LINUX_DQBLK_XFS_H
#define _LINUX_DQBLK_XFS_H
...
...
@@ -40,12 +28,12 @@
*/
#define XQM_CMD(x) (('X'<<8)+(x))
/* note: forms first QCMD argument */
#define Q_XQUOTAON XQM_CMD(
0x
1)
/* enable accounting/enforcement */
#define Q_XQUOTAOFF XQM_CMD(
0x
2)
/* disable accounting/enforcement */
#define Q_XGETQUOTA XQM_CMD(
0x
3)
/* get disk limits and usage */
#define Q_XSETQLIM XQM_CMD(
0x
4)
/* set disk limits */
#define Q_XGETQSTAT XQM_CMD(
0x
5)
/* get quota subsystem status */
#define Q_XQUOTARM XQM_CMD(
0x
6)
/* free disk space used by dquots */
#define Q_XQUOTAON XQM_CMD(1)
/* enable accounting/enforcement */
#define Q_XQUOTAOFF XQM_CMD(2)
/* disable accounting/enforcement */
#define Q_XGETQUOTA XQM_CMD(3)
/* get disk limits and usage */
#define Q_XSETQLIM XQM_CMD(4)
/* set disk limits */
#define Q_XGETQSTAT XQM_CMD(5)
/* get quota subsystem status */
#define Q_XQUOTARM XQM_CMD(6)
/* free disk space used by dquots */
/*
* fs_disk_quota structure:
...
...
@@ -104,6 +92,19 @@ typedef struct fs_disk_quota {
#define FS_DQ_RTBTIMER (1<<8)
#define FS_DQ_TIMER_MASK (FS_DQ_BTIMER | FS_DQ_ITIMER | FS_DQ_RTBTIMER)
/*
* Warning counts are set in both the superuser's dquot and in others'. For others,
* warnings are set/cleared by the administrators (or automatically by going
* below the soft limit). The superuser's warning values set the warning limits
* for everyone else. In case these values are zero, the DQ_{F,B}WARNLIMIT values
* defined below are used.
* These values also apply only to the d_fieldmask field for Q_XSETQLIM.
*/
#define FS_DQ_BWARNS (1<<9)
#define FS_DQ_IWARNS (1<<10)
#define FS_DQ_RTBWARNS (1<<11)
#define FS_DQ_WARNS_MASK (FS_DQ_BWARNS | FS_DQ_IWARNS | FS_DQ_RTBWARNS)
/*
* Various flags related to quotactl(2). Only relevant to XFS filesystems.
*/
...
...
@@ -111,9 +112,11 @@ typedef struct fs_disk_quota {
#define XFS_QUOTA_UDQ_ENFD (1<<1)
/* user quota limits enforcement */
#define XFS_QUOTA_GDQ_ACCT (1<<2)
/* group quota accounting */
#define XFS_QUOTA_GDQ_ENFD (1<<3)
/* group quota limits enforcement */
#define XFS_QUOTA_PDQ_ACCT (1<<4)
/* project quota accounting */
#define XFS_QUOTA_PDQ_ENFD (1<<5)
/* project quota limits enforcement */
#define XFS_USER_QUOTA (1<<0)
/* user quota type */
#define XFS_PROJ_QUOTA (1<<1)
/*
(IRIX)
project quota type */
#define XFS_PROJ_QUOTA (1<<1)
/* project quota type */
#define XFS_GROUP_QUOTA (1<<2)
/* group quota type */
/*
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment