Commit aa641e8a authored Oct 02, 2003 by Stephen Lord

Merge ssh://lord@kernel.bkbits.net/xfs-2.6
into penguin.americas.sgi.com:/src/lord/bitkeeper/xfs-2.6

parents 7d7bb19c c910b5d0

Showing 14 changed files with 144 additions and 113 deletions.
fs/xfs/linux/xfs_aops.c              +4   -1
fs/xfs/linux/xfs_lrw.c               +3   -3
fs/xfs/linux/xfs_stats.c             +22  -7
fs/xfs/linux/xfs_stats.h             +8   -4
fs/xfs/linux/xfs_sysctl.c            +11  -5
fs/xfs/linux/xfs_vnode.c             +1   -1
fs/xfs/pagebuf/page_buf.c            +45  -47
fs/xfs/pagebuf/page_buf.h            +0   -1
fs/xfs/pagebuf/page_buf_internal.h   +5   -2
fs/xfs/xfs_bmap.c                    +1   -1
fs/xfs/xfs_log_recover.c             +1   -1
fs/xfs/xfs_mount.h                   +3   -3
fs/xfs/xfs_types.h                   +1   -1
fs/xfs/xfs_vnodeops.c                +39  -36
fs/xfs/linux/xfs_aops.c

@@ -461,7 +461,8 @@ map_unwritten(
         struct page             *page;

         tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-        tloff = min(tlast, start_page->index + pb->pb_page_count - 1);
+        tloff = (mp->pbm_offset + mp->pbm_bsize) >> PAGE_CACHE_SHIFT;
+        tloff = min(tlast, tloff);

         for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
                 page = probe_unwritten_page(mapping, tindex, mp, pb,
                                         PAGE_CACHE_SIZE, &bs, bbits);

@@ -1041,6 +1042,8 @@ count_page_state(
         do {
                 if (buffer_uptodate(bh) && !buffer_mapped(bh))
                         (*unmapped) = 1;
+                else if (buffer_unwritten(bh) && !buffer_delay(bh))
+                        clear_buffer_unwritten(bh);
                 else if (buffer_unwritten(bh))
                         (*unwritten) = 1;
                 else if (buffer_delay(bh))
fs/xfs/linux/xfs_lrw.c

@@ -218,7 +218,7 @@ xfs_read(
         int                     error;
         vrwlock_t               locktype = VRWLOCK_READ;

-                error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offset, size,
+                error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp), *offset, size,
                                       FILP_DELAY_FLAG(file), &locktype);
                 if (error) {
                         xfs_iunlock(ip, XFS_IOLOCK_SHARED);

@@ -278,7 +278,7 @@ xfs_sendfile(
         vrwlock_t               locktype = VRWLOCK_READ;
         int                     error;

-                error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offset, count,
+                error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp), *offset, count,
                                       FILP_DELAY_FLAG(filp), &locktype);
                 if (error) {
                         xfs_iunlock(ip, XFS_IOLOCK_SHARED);

@@ -612,7 +612,7 @@ xfs_write(
                 loff_t          savedsize = *offset;

                 xfs_iunlock(xip, XFS_ILOCK_EXCL);
-                error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, bdp,
+                error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
                                       *offset, size,
                                       FILP_DELAY_FLAG(file), &locktype);
                 if (error) {
fs/xfs/linux/xfs_stats.c

@@ -33,7 +33,7 @@
 #include "xfs.h"
 #include <linux/proc_fs.h>

-struct xfsstats xfsstats;
+DEFINE_PER_CPU(struct xfsstats, xfsstats);

 STATIC int
 xfs_read_xfsstats(

@@ -44,7 +44,11 @@ xfs_read_xfsstats(
         int             *eof,
         void            *data)
 {
-        int             i, j, len;
+        int             c, i, j, len, val;
+        __uint64_t      xs_xstrat_bytes = 0;
+        __uint64_t      xs_write_bytes = 0;
+        __uint64_t      xs_read_bytes = 0;
+
         static struct xstats_entry {
                 char    *desc;
                 int     endpoint;

@@ -65,21 +69,32 @@ xfs_read_xfsstats(
                 { "vnodes",             XFSSTAT_END_VNODE_OPS   },
         };

+        /* Loop over all stats groups */
         for (i=j=len = 0; i < sizeof(xstats)/sizeof(struct xstats_entry); i++) {
                 len += sprintf(buffer + len, xstats[i].desc);
                 /* inner loop does each group */
                 while (j < xstats[i].endpoint) {
-                        len += sprintf(buffer + len, " %u",
-                                        *(((__u32*)&xfsstats) + j));
+                        val = 0;
+                        /* sum over all cpus */
+                        for (c = 0; c < NR_CPUS; c++) {
+                                if (!cpu_possible(c)) continue;
+                                val += *(((__u32*)&per_cpu(xfsstats, c) + j));
+                        }
+                        len += sprintf(buffer + len, " %u", val);
                         j++;
                 }
                 buffer[len++] = '\n';
         }
         /* extra precision counters */
+        for (i = 0; i < NR_CPUS; i++) {
+                if (!cpu_possible(i)) continue;
+                xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
+                xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
+                xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
+        }
+
         len += sprintf(buffer + len, "xpc %Lu %Lu %Lu\n",
-                        xfsstats.xs_xstrat_bytes,
-                        xfsstats.xs_write_bytes,
-                        xfsstats.xs_read_bytes);
+                        xs_xstrat_bytes, xs_write_bytes, xs_read_bytes);
         len += sprintf(buffer + len, "debug %u\n",
 #if defined(XFSDEBUG)
                 1);
fs/xfs/linux/xfs_stats.h

@@ -35,6 +35,8 @@
 #if defined(CONFIG_PROC_FS) && !defined(XFS_STATS_OFF)

+#include <linux/percpu.h>
+
 /*
  * XFS global statistics
  */

@@ -126,11 +128,13 @@ struct xfsstats {
         __uint64_t              xs_read_bytes;
 };

-extern struct xfsstats xfsstats;
+DECLARE_PER_CPU(struct xfsstats, xfsstats);

-# define XFS_STATS_INC(count)          ( xfsstats.count++ )
-# define XFS_STATS_DEC(count)          ( xfsstats.count-- )
-# define XFS_STATS_ADD(count, inc)     ( xfsstats.count += (inc) )
+/* We don't disable preempt, not too worried about poking the
+ * wrong cpu's stat for now */
+#define XFS_STATS_INC(count)           (__get_cpu_var(xfsstats).count++)
+#define XFS_STATS_DEC(count)           (__get_cpu_var(xfsstats).count--)
+#define XFS_STATS_ADD(count, inc)      (__get_cpu_var(xfsstats).count += (inc))

 extern void xfs_init_procfs(void);
 extern void xfs_cleanup_procfs(void);
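The statistics rework in the hunks above follows the usual per-CPU counter pattern: each CPU updates its own copy of the struct through __get_cpu_var() with no lock taken (the new comment explicitly accepts that a preempted task may occasionally bump another CPU's copy), and any reader sums the copies across all possible CPUs. A minimal user-space sketch of the same idea follows; NCPUS, the stats array, STATS_INC() and report() are made-up stand-ins for NR_CPUS, DEFINE_PER_CPU(), XFS_STATS_INC() and the /proc read handler, not XFS code.

#include <stdio.h>

#define NCPUS 4                               /* stand-in for NR_CPUS */

struct stats {
        unsigned int xs_reads;
        unsigned int xs_writes;
};

/* one private copy per CPU, like DEFINE_PER_CPU(struct xfsstats, xfsstats) */
static struct stats stats[NCPUS];

/* like XFS_STATS_INC(): bump one CPU's copy, no lock taken */
#define STATS_INC(cpu, field)   (stats[cpu].field++)

/* like xfs_read_xfsstats(): fold every per-CPU copy into one total */
static struct stats report(void)
{
        struct stats sum = { 0, 0 };
        int c;

        for (c = 0; c < NCPUS; c++) {   /* the kernel loop also skips !cpu_possible(c) */
                sum.xs_reads  += stats[c].xs_reads;
                sum.xs_writes += stats[c].xs_writes;
        }
        return sum;
}

int main(void)
{
        struct stats s;

        STATS_INC(0, xs_reads);
        STATS_INC(2, xs_reads);
        STATS_INC(1, xs_writes);

        s = report();
        printf("reads %u writes %u\n", s.xs_reads, s.xs_writes);
        return 0;
}

The win is that hot-path increments never contend on a shared cache line; the cost, visible in xfs_read_xfsstats() above and in the sysctl clear handler below, is that reading or resetting the statistics has to walk every CPU's copy.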
fs/xfs/linux/xfs_sysctl.c

@@ -48,17 +48,23 @@ xfs_stats_clear_proc_handler(
         void            *buffer,
         size_t          *lenp)
 {
-        int             ret, *valp = ctl->data;
+        int             c, ret, *valp = ctl->data;
         __uint32_t      vn_active;

         ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);

         if (!ret && write && *valp) {
                 printk("XFS Clearing xfsstats\n");
-                /* save vn_active, it's a universal truth! */
-                vn_active = xfsstats.vn_active;
-                memset(&xfsstats, 0, sizeof(xfsstats));
-                xfsstats.vn_active = vn_active;
+                for (c = 0; c < NR_CPUS; c++) {
+                        if (!cpu_possible(c)) continue;
+                        preempt_disable();
+                        /* save vn_active, it's a universal truth! */
+                        vn_active = per_cpu(xfsstats, c).vn_active;
+                        memset(&per_cpu(xfsstats, c), 0, sizeof(struct xfsstats));
+                        per_cpu(xfsstats, c).vn_active = vn_active;
+                        preempt_enable();
+                }
+
                 xfs_stats_clear = 0;
         }
fs/xfs/linux/xfs_vnode.c

@@ -200,7 +200,7 @@ vn_revalidate(
         vn_trace_entry(vp, "vn_revalidate", (inst_t *)__return_address);
         ASSERT(vp->v_fbhv != NULL);

-        va.va_mask = XFS_AT_STAT|XFS_AT_GENCOUNT;
+        va.va_mask = XFS_AT_STAT|XFS_AT_XFLAGS;
         VOP_GETATTR(vp, &va, 0, NULL, error);
         if (!error) {
                 inode = LINVFS_GET_IP(vp);
fs/xfs/pagebuf/page_buf.c

@@ -141,7 +141,7 @@ pagebuf_param_t pb_params = {
  * Pagebuf statistics variables
  */
-struct pbstats pbstats;
+DEFINE_PER_CPU(struct pbstats, pbstats);

 /*
  * Pagebuf allocation / freeing.

@@ -293,7 +293,7 @@ _pagebuf_initialize(
         atomic_set(&pb->pb_pin_count, 0);
         init_waitqueue_head(&pb->pb_waiters);

-        PB_STATS_INC(pbstats.pb_create);
+        PB_STATS_INC(pb_create);
         PB_TRACE(pb, PB_TRACE_REC(get), target);
 }

@@ -485,7 +485,7 @@ _pagebuf_lookup_pages(
                 page = find_or_create_page(aspace, index, gfp_mask);
                 if (!page) {
                         if (--retry_count > 0) {
-                                PB_STATS_INC(pbstats.pb_page_retries);
+                                PB_STATS_INC(pb_page_retries);
                                 pagebuf_daemon_wakeup(1);
                                 current->state = TASK_UNINTERRUPTIBLE;
                                 schedule_timeout(10);

@@ -495,7 +495,7 @@ _pagebuf_lookup_pages(
                                 all_mapped = 0;
                                 continue;
                         }
-                        PB_STATS_INC(pbstats.pb_page_found);
+                        PB_STATS_INC(pb_page_found);
                         mark_page_accessed(page);
                         pb->pb_pages[pi] = page;
                 } else {

@@ -645,7 +645,7 @@ _pagebuf_find( /* find buffer for block */
                 h->pb_count++;
                 list_add(&new_pb->pb_hash_list, &h->pb_hash);
         } else {
-                PB_STATS_INC(pbstats.pb_miss_locked);
+                PB_STATS_INC(pb_miss_locked);
         }

         spin_unlock(&h->pb_hash_lock);

@@ -665,7 +665,7 @@ _pagebuf_find( /* find buffer for block */
                         /* wait for buffer ownership */
                         PB_TRACE(pb, PB_TRACE_REC(get_lk), 0);
                         pagebuf_lock(pb);
-                        PB_STATS_INC(pbstats.pb_get_locked_waited);
+                        PB_STATS_INC(pb_get_locked_waited);
                 } else {
                         /* We asked for a trylock and failed, no need
                          * to look at file offset and length here, we

@@ -675,7 +675,7 @@ _pagebuf_find( /* find buffer for block */
                          */
                         pagebuf_rele(pb);
-                        PB_STATS_INC(pbstats.pb_busy_locked);
+                        PB_STATS_INC(pb_busy_locked);
                         return (NULL);
                 }
         } else {

@@ -691,7 +691,7 @@ _pagebuf_find( /* find buffer for block */
                           _PBF_ADDR_ALLOCATED | \
                           _PBF_MEM_ALLOCATED;
         PB_TRACE(pb, PB_TRACE_REC(got_lk), 0);
-        PB_STATS_INC(pbstats.pb_get_locked);
+        PB_STATS_INC(pb_get_locked);
         return (pb);
 }

@@ -747,7 +747,7 @@ pagebuf_get( /* allocate a buffer */
                 return (NULL);
         }

-        PB_STATS_INC(pbstats.pb_get);
+        PB_STATS_INC(pb_get);

         /* fill in any missing pages */
         error = _pagebuf_lookup_pages(pb, pb->pb_target->pbr_mapping, flags);

@@ -766,7 +766,7 @@ pagebuf_get( /* allocate a buffer */
         if (flags & PBF_READ) {
                 if (PBF_NOT_DONE(pb)) {
                         PB_TRACE(pb, PB_TRACE_REC(get_read), flags);
-                        PB_STATS_INC(pbstats.pb_get_read);
+                        PB_STATS_INC(pb_get_read);
                         pagebuf_iostart(pb, flags);
                 } else if (flags & PBF_ASYNC) {
                         /*

@@ -1677,6 +1677,9 @@ pagebuf_daemon(
                                 break;
                         }

+                        pb->pb_flags &= ~PBF_DELWRI;
+                        pb->pb_flags |= PBF_WRITE;
+
                         list_del(&pb->pb_list);
                         list_add(&pb->pb_list, &tmp);

@@ -1688,8 +1691,6 @@ pagebuf_daemon(
                 while (!list_empty(&tmp)) {
                         pb = list_entry(tmp.next, page_buf_t, pb_list);
                         list_del_init(&pb->pb_list);
-                        pb->pb_flags &= ~PBF_DELWRI;
-                        pb->pb_flags |= PBF_WRITE;
                         pagebuf_iostrategy(pb);
                 }

@@ -1720,6 +1721,7 @@ pagebuf_delwri_flush(
         int                     flush_cnt = 0;

+        pagebuf_runall_queues(pagebuf_dataio_workqueue);
         pagebuf_runall_queues(pagebuf_logio_workqueue);

         spin_lock(&pbd_delwrite_lock);
         INIT_LIST_HEAD(&tmp);

@@ -1742,47 +1744,32 @@ pagebuf_delwri_flush(
                         continue;
                 }

-                if (flags & PBDF_TRYLOCK) {
-                        if (!pagebuf_cond_lock(pb)) {
-                                pincount++;
-                                continue;
-                        }
-                }
-
-                list_del_init(&pb->pb_list);
-                if (flags & PBDF_WAIT) {
-                        list_add(&pb->pb_list, &tmp);
-                        pb->pb_flags &= ~PBF_ASYNC;
-                }
-
-                spin_unlock(&pbd_delwrite_lock);
-
-                if ((flags & PBDF_TRYLOCK) == 0) {
-                        pagebuf_lock(pb);
-                }
-
                 pb->pb_flags &= ~PBF_DELWRI;
                 pb->pb_flags |= PBF_WRITE;
+                list_move(&pb->pb_list, &tmp);
+        }
+
+        /* ok found all the items that can be worked on
+         * drop the lock and process the private list */
+        spin_unlock(&pbd_delwrite_lock);
+
+        list_for_each_safe(curr, next, &tmp) {
+                pb = list_entry(curr, page_buf_t, pb_list);
+
+                if (flags & PBDF_WAIT)
+                        pb->pb_flags &= ~PBF_ASYNC;
+                else
+                        list_del_init(curr);
+
+                pagebuf_lock(pb);

                 pagebuf_iostrategy(pb);
                 if (++flush_cnt > 32) {
                         blk_run_queues();
                         flush_cnt = 0;
                 }
-
-                spin_lock(&pbd_delwrite_lock);
         }
-
-        spin_unlock(&pbd_delwrite_lock);
         blk_run_queues();

-        if (pinptr)
-                *pinptr = pincount;
-
         if ((flags & PBDF_WAIT) == 0)
                 return;

         while (!list_empty(&tmp)) {
                 pb = list_entry(tmp.next, page_buf_t, pb_list);

@@ -1792,6 +1779,9 @@ pagebuf_delwri_flush(
                 pagebuf_unlock(pb);
                 pagebuf_rele(pb);
         }
+
+        if (pinptr)
+                *pinptr = pincount;
 }

 STATIC int

@@ -1846,14 +1836,18 @@ pb_stats_clear_handler(
         void            *buffer,
         size_t          *lenp)
 {
-        int             ret;
+        int             c, ret;
         int             *valp = ctl->data;

         ret = proc_doulongvec_minmax(ctl, write, filp, buffer, lenp);

         if (!ret && write && *valp) {
                 printk("XFS Clearing pbstats\n");
-                memset(&pbstats, 0, sizeof(pbstats));
+                for (c = 0; c < NR_CPUS; c++) {
+                        if (!cpu_possible(c)) continue;
+                        memset(&per_cpu(pbstats, c), 0,
+                               sizeof(struct pbstats));
+                }
                 pb_params.stats_clear.val = 0;
         }

@@ -1907,13 +1901,17 @@ pagebuf_readstats(
         int             *eof,
         void            *data)
 {
-        int             i, len;
+        int             c, i, len, val;

         len = 0;
         len += sprintf(buffer + len, "pagebuf");
-        for (i = 0; i < sizeof(pbstats) / sizeof(u_int32_t); i++) {
-                len += sprintf(buffer + len, " %u",
-                                *(((u_int32_t*)&pbstats) + i));
+        for (i = 0; i < sizeof(struct pbstats) / sizeof(u_int32_t); i++) {
+                val = 0;
+                for (c = 0; c < NR_CPUS; c++) {
+                        if (!cpu_possible(c)) continue;
+                        val += *(((u_int32_t*)&per_cpu(pbstats, c) + i));
+                }
+                len += sprintf(buffer + len, " %u", val);
         }
         buffer[len++] = '\n';
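Besides the per-CPU pbstats conversion, the pagebuf_delwri_flush() hunk changes the locking pattern: instead of dropping and re-taking pbd_delwrite_lock around every pagebuf_iostrategy() call, the reworked code appears to move all eligible buffers onto a private list while the lock is held once, then walks that private list for the actual I/O with the lock released. A small pthread sketch of that "drain under lock, work off-list" pattern follows; queue, queue_lock, queue_add() and drain_and_flush() are hypothetical names for illustration, not pagebuf code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
        int id;
        struct item *next;
};

static struct item *queue;                       /* shared delayed-write queue */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_add(int id)
{
        struct item *it = malloc(sizeof(*it));

        it->id = id;
        pthread_mutex_lock(&queue_lock);
        it->next = queue;
        queue = it;
        pthread_mutex_unlock(&queue_lock);
}

/* like the reworked pagebuf_delwri_flush(): take the lock once, steal the
 * whole list onto a private head, then do the slow work with no lock held */
static void drain_and_flush(void)
{
        struct item *tmp, *it;

        pthread_mutex_lock(&queue_lock);
        tmp = queue;
        queue = NULL;
        pthread_mutex_unlock(&queue_lock);

        while ((it = tmp) != NULL) {             /* unlocked: the I/O can sleep */
                tmp = it->next;
                printf("flushing buffer %d\n", it->id);
                free(it);
        }
}

int main(void)
{
        queue_add(1);
        queue_add(2);
        queue_add(3);
        drain_and_flush();
        return 0;
}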
fs/xfs/pagebuf/page_buf.h

@@ -368,7 +368,6 @@ extern int pagebuf_ispin( /* check if buffer is pinned */
 /* Delayed Write Buffer Routines */

 #define PBDF_WAIT    0x01
-#define PBDF_TRYLOCK 0x02
 extern void pagebuf_delwri_flush(pb_target_t *, unsigned long,
fs/xfs/pagebuf/page_buf_internal.h

@@ -37,6 +37,7 @@
 #ifndef __PAGE_BUF_PRIVATE_H__
 #define __PAGE_BUF_PRIVATE_H__

+#include <linux/percpu.h>
 #include "page_buf.h"

 #define _PAGE_BUF_INTERNAL_

@@ -120,9 +121,11 @@ struct pbstats {
         u_int32_t       pb_get_read;
 };

-extern struct pbstats pbstats;
+DECLARE_PER_CPU(struct pbstats, pbstats);

-#define PB_STATS_INC(count)    ( count ++ )
+/* We don't disable preempt, not too worried about poking the
+ * wrong cpu's stat for now */
+#define PB_STATS_INC(count)    (__get_cpu_var(pbstats).count++)

 #ifndef STATIC
 # define STATIC static
fs/xfs/xfs_bmap.c

@@ -5553,7 +5553,7 @@ xfs_getbmap(
             && DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)
             && whichfork == XFS_DATA_FORK) {
-                error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, 0, 0, 0, NULL);
+                error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, 0, 0, 0, NULL);
                 if (error)
                         return XFS_ERROR(error);
         }
fs/xfs/xfs_log_recover.c

@@ -3938,7 +3938,7 @@ xlog_recover_finish(
                 xlog_recover_check_summary(log);

                 cmn_err(CE_NOTE,
-                        "Ending XFS recovery on filesystem: %s (dev: %d/%d)",
+                        "Ending XFS recovery on filesystem: %s (dev: %s)",
                         log->l_mp->m_fsname, XFS_BUFTARG_NAME(log->l_targ));
                 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
         } else {
fs/xfs/xfs_mount.h

@@ -91,7 +91,7 @@ struct xfs_bmap_free;
  * Prototypes and functions for the Data Migration subsystem.
  */

-typedef int     (*xfs_send_data_t)(int, struct bhv_desc *,
+typedef int     (*xfs_send_data_t)(int, struct vnode *,
                         xfs_off_t, size_t, int, vrwlock_t *);
 typedef int     (*xfs_send_mmap_t)(struct vm_area_struct *, uint);
 typedef int     (*xfs_send_destroy_t)(struct vnode *, dm_right_t);

@@ -109,8 +109,8 @@ typedef struct xfs_dmops {
         xfs_send_unmount_t      xfs_send_unmount;
 } xfs_dmops_t;

-#define XFS_SEND_DATA(mp, ev,bdp,off,len,fl,lock) \
-        (*(mp)->m_dm_ops.xfs_send_data)(ev,bdp,off,len,fl,lock)
+#define XFS_SEND_DATA(mp, ev,vp,off,len,fl,lock) \
+        (*(mp)->m_dm_ops.xfs_send_data)(ev,vp,off,len,fl,lock)
 #define XFS_SEND_MMAP(mp, vma,fl) \
         (*(mp)->m_dm_ops.xfs_send_mmap)(vma,fl)
 #define XFS_SEND_DESTROY(mp, vp,right) \
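This hunk narrows the DMAPI send-data hook to take a struct vnode * rather than a behavior descriptor, which is why every XFS_SEND_DATA() caller in this merge switches from bdp / XFS_ITOBHV(ip) to BHV_TO_VNODE(bdp), vp or XFS_ITOV(ip). The macro itself is just an indirect call through a per-mount table of function pointers; a compact, self-contained sketch of that shape follows, with made-up names (vnode, dmops, mount, SEND_DATA, noop_send_data) rather than the real XFS types.

#include <stdio.h>
#include <stddef.h>

struct vnode {
        const char *name;
};

/* per-mount table of DMAPI callbacks, loosely modelled on xfs_dmops_t */
struct dmops {
        int (*send_data)(int event, struct vnode *vp,
                         long long off, size_t len);
};

struct mount {
        struct dmops dm_ops;
};

/* like XFS_SEND_DATA(): expand to an indirect call through the mount's table */
#define SEND_DATA(mp, ev, vp, off, len) \
        ((mp)->dm_ops.send_data((ev), (vp), (off), (len)))

static int noop_send_data(int event, struct vnode *vp,
                          long long off, size_t len)
{
        printf("event %d on %s: off=%lld len=%zu\n", event, vp->name, off, len);
        return 0;                       /* 0 means no error, caller continues */
}

int main(void)
{
        struct vnode v = { "testfile" };
        struct mount m = { { noop_send_data } };

        return SEND_DATA(&m, 1 /* a DM_EVENT_READ-like event id */, &v, 0, 4096);
}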
fs/xfs/xfs_types.h

@@ -83,7 +83,7 @@ typedef __uint64_t __psunsigned_t;
  * XFS_BIG_INUMS needs the VFS inode number to be 64 bits, as well
  * as requiring XFS_BIG_BLKNOS to be set.
  */
-#if defined(CONFIG_LBD) || (defined(HAVE_SECTOR_T) && (BITS_PER_LONG == 64))
+#if defined(CONFIG_LBD) || (BITS_PER_LONG == 64)
 # define XFS_BIG_BLKNOS 1
 # if BITS_PER_LONG == 64
 #  define XFS_BIG_INUMS 1
fs/xfs/xfs_vnodeops.c

@@ -144,11 +144,9 @@ xfs_getattr(
         xfs_ilock(ip, XFS_ILOCK_SHARED);

         vap->va_size = ip->i_d.di_size;
-        if (vap->va_mask == XFS_AT_SIZE) {
-                if (!(flags & ATTR_LAZY))
-                        xfs_iunlock(ip, XFS_ILOCK_SHARED);
-                return 0;
-        }
+        if (vap->va_mask == XFS_AT_SIZE)
+                goto all_done;
+
         vap->va_nblocks = XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks +
                                             ip->i_delayed_blks);
         vap->va_nodeid = ip->i_ino;

@@ -162,11 +160,8 @@ xfs_getattr(
          */
         if ((vap->va_mask &
              ~(XFS_AT_SIZE|XFS_AT_FSID|XFS_AT_NODEID|
-               XFS_AT_NLINK|XFS_AT_BLKSIZE)) == 0) {
-                if (!(flags & ATTR_LAZY))
-                        xfs_iunlock(ip, XFS_ILOCK_SHARED);
-                return 0;
-        }
+               XFS_AT_NLINK|XFS_AT_BLKSIZE)) == 0)
+                goto all_done;

         /*
          * Copy from in-core inode.

@@ -250,37 +245,44 @@ xfs_getattr(
          */
         if ((vap->va_mask &
              (XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
-              XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0) {
-                if (!(flags & ATTR_LAZY))
-                        xfs_iunlock(ip, XFS_ILOCK_SHARED);
-                return 0;
-        }
+              XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
+                goto all_done;

         /*
          * convert di_flags to xflags
          */
-        vap->va_xflags =
-                ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
-                        XFS_XFLAG_REALTIME : 0) |
-                ((ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) ?
-                        XFS_XFLAG_PREALLOC : 0) |
-                ((ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE) ?
-                        XFS_XFLAG_IMMUTABLE : 0) |
-                ((ip->i_d.di_flags & XFS_DIFLAG_APPEND) ?
-                        XFS_XFLAG_APPEND : 0) |
-                ((ip->i_d.di_flags & XFS_DIFLAG_SYNC) ?
-                        XFS_XFLAG_SYNC : 0) |
-                ((ip->i_d.di_flags & XFS_DIFLAG_NOATIME) ?
-                        XFS_XFLAG_NOATIME : 0) |
-                ((ip->i_d.di_flags & XFS_DIFLAG_NODUMP) ?
-                        XFS_XFLAG_NODUMP: 0) |
-                (XFS_IFORK_Q(ip) ?
-                        XFS_XFLAG_HASATTR : 0);
+        vap->va_xflags = 0;
+        if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)
+                vap->va_xflags |= XFS_XFLAG_REALTIME;
+        if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC)
+                vap->va_xflags |= XFS_XFLAG_PREALLOC;
+        if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
+                vap->va_xflags |= XFS_XFLAG_IMMUTABLE;
+        if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
+                vap->va_xflags |= XFS_XFLAG_APPEND;
+        if (ip->i_d.di_flags & XFS_DIFLAG_SYNC)
+                vap->va_xflags |= XFS_XFLAG_SYNC;
+        if (ip->i_d.di_flags & XFS_DIFLAG_NOATIME)
+                vap->va_xflags |= XFS_XFLAG_NOATIME;
+        if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
+                vap->va_xflags |= XFS_XFLAG_NODUMP;
+        if (XFS_IFORK_Q(ip))
+                vap->va_xflags |= XFS_XFLAG_HASATTR;
+
+        /*
+         * Exit for inode revalidate.  See if any of the rest of
+         * the fields to be filled in are needed.
+         */
+        if ((vap->va_mask &
+             (XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
+              XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
+                goto all_done;
+
         vap->va_extsize = ip->i_d.di_extsize << mp->m_sb.sb_blocklog;
         vap->va_nextents =
                 (ip->i_df.if_flags & XFS_IFEXTENTS) ?
                         ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) :
                         ip->i_d.di_nextents;
-        if (ip->i_afp != NULL)
+        if (ip->i_afp)
                 vap->va_anextents =
                         (ip->i_afp->if_flags & XFS_IFEXTENTS) ?
                         ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) :

@@ -290,6 +292,7 @@ xfs_getattr(
         vap->va_gencount = ip->i_d.di_gen;
         vap->va_vcode = 0L;

+all_done:
         if (!(flags & ATTR_LAZY))
                 xfs_iunlock(ip, XFS_ILOCK_SHARED);
         return 0;

@@ -414,7 +417,7 @@ xfs_setattr(
         } else {
                 if (DM_EVENT_ENABLED (vp->v_vfsp, ip, DM_EVENT_TRUNCATE) &&
                     !(flags & ATTR_DMI)) {
-                        code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, bdp,
+                        code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, vp,
                                 vap->va_size, 0, AT_DELAY_FLAG(flags), NULL);
                         if (code) {
                                 lock_flags = 0;

@@ -4162,7 +4165,7 @@ xfs_alloc_file_space(
                 end_dmi_offset = offset + len;
                 if (end_dmi_offset > ip->i_d.di_size)
                         end_dmi_offset = ip->i_d.di_size;
-                error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOBHV(ip),
+                error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOV(ip),
                                       offset, end_dmi_offset - offset,
                                       0, NULL);
                 if (error)

@@ -4409,7 +4412,7 @@ xfs_free_file_space(
             DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) {
                 if (end_dmi_offset > ip->i_d.di_size)
                         end_dmi_offset = ip->i_d.di_size;
-                error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOBHV(ip),
+                error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOV(ip),
                                       offset, end_dmi_offset - offset,
                                       AT_DELAY_FLAG(attr_flags), NULL);
                 if (error)
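The xfs_getattr() hunks above also collapse three copies of the "maybe unlock, return 0" block into a single all_done: label at the end of the function, the common kernel idiom of one exit path per lock. A minimal sketch of that pattern follows; getattr(), ilock() and iunlock() are placeholder names for illustration, not the XFS routines.

#include <stdio.h>

static int locked;

static void ilock(void)   { locked = 1; }
static void iunlock(void) { locked = 0; }

/* like the reworked xfs_getattr(): every early exit jumps to one label
 * that undoes the lock, instead of repeating the unlock-and-return block */
static int getattr(int mask, int lazy)
{
        int ret = 0;

        ilock();

        if (mask == 0)                  /* caller asked for nothing */
                goto all_done;

        printf("filling attributes for mask 0x%x\n", mask);

all_done:
        if (!lazy)
                iunlock();
        return ret;
}

int main(void)
{
        getattr(0x1, 0);
        getattr(0, 0);
        printf("still locked? %d\n", locked);
        return 0;
}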