Commit 2c4d3eb6 authored Dec 14, 2002 by Christoph Hellwig
Merge hera.kernel.org:/home/torvalds/BK/linux-2.5
into hera.kernel.org:/home/hch/BK/xfs/linux-2.5
Parents: bda0e956 8c88cd21
Showing 17 changed files with 241 additions and 309 deletions (+241, -309).
fs/block_dev.c                       +80    -0
fs/super.c                           +13   -46
fs/xfs/linux/xfs_linux.h              +0    -1
fs/xfs/linux/xfs_lrw.c                +9   -17
fs/xfs/linux/xfs_lrw.h                +3    -0
fs/xfs/linux/xfs_super.c              +6   -27
fs/xfs/linux/xfs_super.h              +3    -1
fs/xfs/pagebuf/page_buf.c            +64  -139
fs/xfs/pagebuf/page_buf.h             +8   -10
fs/xfs/pagebuf/page_buf_internal.h    +3   -23
fs/xfs/pagebuf/page_buf_locking.c     +4    -4
fs/xfs/xfs_mount.h                    +0    -5
fs/xfs/xfs_vfsops.c                  +17   -13
fs/xfs/xfs_vnodeops.c                 +1    -0
fs/xfs/xfsidbg.c                     +22   -22
include/linux/fs.h                    +6    -1
kernel/ksyms.c                        +2    -0
fs/block_dev.c

@@ -22,6 +22,7 @@
 #include <linux/mpage.h>
 #include <linux/mount.h>
 #include <linux/uio.h>
+#include <linux/namei.h>
 #include <asm/uaccess.h>

@@ -796,3 +797,82 @@ const char *__bdevname(dev_t dev)
        sprintf(buffer, "%s(%d,%d)", name, MAJOR(dev), MINOR(dev));
        return buffer;
 }
+
+/**
+ * open_bdev_excl  -  open a block device by name and set it up for use
+ *
+ * @path:   special file representing the block device
+ * @flags:  %MS_RDONLY for opening read-only
+ * @kind:   usage (same as the 4th paramter to blkdev_get)
+ * @holder: owner for exclusion
+ *
+ * Open the blockdevice described by the special file at @path, claim it
+ * for the @holder and properly set it up for @kind usage.
+ */
+struct block_device *open_bdev_excl(const char *path, int flags,
+                                    int kind, void *holder)
+{
+        struct inode *inode;
+        struct block_device *bdev;
+        struct nameidata nd;
+        mode_t mode = FMODE_READ;
+        int error = 0;
+
+        if (!path || !*path)
+                return ERR_PTR(-EINVAL);
+
+        error = path_lookup(path, LOOKUP_FOLLOW, &nd);
+        if (error)
+                return ERR_PTR(error);
+
+        inode = nd.dentry->d_inode;
+        error = -ENOTBLK;
+        if (!S_ISBLK(inode->i_mode))
+                goto path_release;
+        error = -EACCES;
+        if (nd.mnt->mnt_flags & MNT_NODEV)
+                goto path_release;
+        error = bd_acquire(inode);
+        if (error)
+                goto path_release;
+        bdev = inode->i_bdev;
+
+        /* Done with lookups */
+        path_release(&nd);
+
+        if (!(flags & MS_RDONLY))
+                mode |= FMODE_WRITE;
+        error = blkdev_get(bdev, mode, 0, kind);
+        if (error)
+                return ERR_PTR(error);
+        error = -EACCES;
+        if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
+                goto blkdev_put;
+        error = bd_claim(bdev, holder);
+        if (error)
+                goto blkdev_put;
+
+        return bdev;
+
+blkdev_put:
+        blkdev_put(bdev, BDEV_FS);
+        return ERR_PTR(error);
+
+path_release:
+        path_release(&nd);
+        return ERR_PTR(error);
+}
+
+/**
+ * close_bdev_excl  -  release a blockdevice openen by open_bdev_excl()
+ *
+ * @bdev:   blockdevice to close
+ * @kind:   usage (same as the 4th paramter to blkdev_get)
+ *
+ * This is the counterpart to open_bdev_excl().
+ */
+void close_bdev_excl(struct block_device *bdev, int kind)
+{
+        bd_release(bdev);
+        blkdev_put(bdev, kind);
+}
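
The kernel-doc above defines the calling convention for the new exclusive-open pair. A minimal usage sketch follows (hypothetical caller, not part of this commit; BDEV_FS is the same "kind" value get_sb_bdev() passes below):

/*
 * Hypothetical illustration only: claim a block device exclusively,
 * do some work against it, then release it.  The holder token is
 * whatever object should own the exclusion (an fs_type, a mount, ...).
 */
static int example_use_device(const char *path, void *holder)
{
        struct block_device *bdev;

        bdev = open_bdev_excl(path, 0, BDEV_FS, holder); /* 0 => read-write */
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        /* ... submit I/O against bdev ... */

        close_bdev_excl(bdev, BDEV_FS);
        return 0;
}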
fs/super.c

@@ -505,55 +505,25 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
        int flags, char *dev_name, void *data,
        int (*fill_super)(struct super_block *, void *, int))
 {
-       struct inode *inode;
        struct block_device *bdev;
-       struct super_block * s;
-       struct nameidata nd;
+       struct super_block *s;
        int error = 0;
-       mode_t mode = FMODE_READ; /* we always need it ;-) */
 
-       /* What device it is? */
-       if (!dev_name || !*dev_name)
-               return ERR_PTR(-EINVAL);
-       error = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
-       if (error)
-               return ERR_PTR(error);
-       inode = nd.dentry->d_inode;
-       error = -ENOTBLK;
-       if (!S_ISBLK(inode->i_mode))
-               goto out;
-       error = -EACCES;
-       if (nd.mnt->mnt_flags & MNT_NODEV)
-               goto out;
-       error = bd_acquire(inode);
-       if (error)
-               goto out;
-       bdev = inode->i_bdev;
-
-       /* Done with lookups, semaphore down */
-       if (!(flags & MS_RDONLY))
-               mode |= FMODE_WRITE;
-       error = blkdev_get(bdev, mode, 0, BDEV_FS);
-       if (error)
-               goto out;
-       error = -EACCES;
-       if (!(flags & MS_RDONLY) && bdev_read_only(bdev))
-               goto out1;
-       error = bd_claim(bdev, fs_type);
-       if (error)
-               goto out1;
+       bdev = open_bdev_excl(dev_name, flags, BDEV_FS, fs_type);
+       if (IS_ERR(bdev))
+               return (struct super_block *)bdev;
 
        s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
-       if (IS_ERR(s)) {
-               bd_release(bdev);
-               blkdev_put(bdev, BDEV_FS);
-       } else if (s->s_root) {
+       if (IS_ERR(s))
+               goto out;
+
+       if (s->s_root) {
                if ((flags ^ s->s_flags) & MS_RDONLY) {
                        up_write(&s->s_umount);
                        deactivate_super(s);
                        s = ERR_PTR(-EBUSY);
                }
-               bd_release(bdev);
-               blkdev_put(bdev, BDEV_FS);
+               goto out;
        } else {
                s->s_flags = flags;
                strncpy(s->s_id, bdevname(bdev), sizeof(s->s_id));

@@ -567,14 +537,12 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
                } else
                        s->s_flags |= MS_ACTIVE;
        }
-       path_release(&nd);
        return s;
 
-out1:
-       blkdev_put(bdev, BDEV_FS);
 out:
-       path_release(&nd);
-       return ERR_PTR(error);
+       close_bdev_excl(bdev, BDEV_FS);
+       return s;
 }
 
 void kill_block_super(struct super_block *sb)

@@ -582,8 +550,7 @@ void kill_block_super(struct super_block *sb)
        struct block_device *bdev = sb->s_bdev;
 
        generic_shutdown_super(sb);
        set_blocksize(bdev, sb->s_old_blocksize);
-       bd_release(bdev);
-       blkdev_put(bdev, BDEV_FS);
+       close_bdev_excl(bdev, BDEV_FS);
 }
 
 struct super_block *get_sb_nodev(struct file_system_type *fs_type,
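
For orientation, get_sb_bdev() is the generic helper that block-device filesystems reach through their file_system_type. A sketch of that wiring under 2.5-era interfaces (hypothetical filesystem, not from this commit) shows where the fill_super callback and kill_block_super() fit:

/*
 * Hypothetical illustration only: a block-device filesystem routing its
 * mount through get_sb_bdev(), which now claims the device with
 * open_bdev_excl() before calling fill_super, and through
 * kill_block_super(), which releases it with close_bdev_excl().
 */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
        /* read the on-disk superblock from sb->s_bdev, set sb->s_op, ... */
        return 0;
}

static struct super_block *examplefs_get_sb(struct file_system_type *fs_type,
        int flags, char *dev_name, void *data)
{
        return get_sb_bdev(fs_type, flags, dev_name, data, examplefs_fill_super);
}

static struct file_system_type examplefs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "examplefs",
        .get_sb         = examplefs_get_sb,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
};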
fs/xfs/linux/xfs_linux.h

@@ -42,7 +42,6 @@
 #include <linux/sched.h>
 #include <linux/bitops.h>
 #include <linux/major.h>
-#include <linux/root_dev.h>
 #include <asm/page.h>
 #include <asm/div64.h>
fs/xfs/linux/xfs_lrw.c

@@ -215,54 +215,46 @@ xfs_sendfile(
        void                    *target,
        cred_t                  *credp)
 {
-       size_t                  size = 0;
        ssize_t                 ret;
        xfs_fsize_t             n;
        xfs_inode_t             *ip;
-       xfs_mount_t             *mp;
        vnode_t                 *vp;
+       int                     invisible = (filp->f_mode & FINVIS);
 
        ip = XFS_BHVTOI(bdp);
        vp = BHV_TO_VNODE(bdp);
-       mp = ip->i_mount;
        vn_trace_entry(vp, "xfs_sendfile", (inst_t *)__return_address);
 
        XFS_STATS_INC(xfsstats.xs_read_calls);
 
        n = XFS_MAX_FILE_OFFSET - *offp;
-       if ((n <= 0) || (size == 0))
+       if ((n <= 0) || (count == 0))
                return 0;
 
-       if (n < size)
-               size = n;
+       if (n < count)
+               count = n;
 
-       if (XFS_FORCED_SHUTDOWN(mp)) {
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;
-       }
 
        xfs_ilock(ip, XFS_IOLOCK_SHARED);
 
        if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
-           !(filp->f_mode & FINVIS)) {
+           !invisible) {
                vrwlock_t locktype = VRWLOCK_READ;
                int error;
 
                error = xfs_dm_send_data_event(DM_EVENT_READ, bdp, *offp,
-                               size, FILP_DELAY_FLAG(filp), &locktype);
+                               count, FILP_DELAY_FLAG(filp), &locktype);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return -error;
                }
        }
        ret = generic_file_sendfile(filp, offp, count, actor, target);
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
        XFS_STATS_ADD(xfsstats.xs_read_bytes, ret);
-       if (!(filp->f_mode & FINVIS))
+       if (!invisible)
                xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
        return ret;
 }
fs/xfs/linux/xfs_lrw.h

@@ -58,6 +58,9 @@ extern ssize_t xfs_read (struct bhv_desc *, struct file *,
 extern ssize_t xfs_write (struct bhv_desc *, struct file *,
                                const struct iovec *, unsigned long,
                                loff_t *, struct cred *);
+extern ssize_t xfs_sendfile (struct bhv_desc *, struct file *,
+                               loff_t *, size_t, read_actor_t,
+                               void *, struct cred *);
 
 extern int xfs_dev_is_read_only (struct xfs_mount *, char *);
fs/xfs/linux/xfs_super.c

@@ -468,27 +468,18 @@ xfs_initialize_vnode(
 int
 xfs_blkdev_get(
+       xfs_mount_t             *mp,
        const char              *name,
        struct block_device     **bdevp)
 {
-       struct nameidata        nd;
-       int                     error;
+       int                     error = 0;
 
-       error = path_lookup(name, LOOKUP_FOLLOW, &nd);
-       if (error) {
+       *bdevp = open_bdev_excl(name, 0, BDEV_FS, mp);
+       if (IS_ERR(*bdevp)) {
+               error = PTR_ERR(*bdevp);
                printk("XFS: Invalid device [%s], error=%d\n", name, error);
-               return -error;
        }
 
-       /* I think we actually want bd_acquire here..  --hch */
-       *bdevp = bdget(kdev_t_to_nr(nd.dentry->d_inode->i_rdev));
-       if (*bdevp) {
-               error = blkdev_get(*bdevp, FMODE_READ|FMODE_WRITE, 0, BDEV_FS);
-       } else {
-               error = -ENOMEM;
-       }
-
-       path_release(&nd);
        return -error;
 }

@@ -497,7 +488,7 @@ xfs_blkdev_put(
        struct block_device     *bdev)
 {
        if (bdev)
-               blkdev_put(bdev, BDEV_FS);
+               close_bdev_excl(bdev, BDEV_FS);
 }
 
 void

@@ -761,17 +752,6 @@ linvfs_clear_inode(
        }
 }
 
-STATIC void
-linvfs_put_inode(
-       struct inode            *ip)
-{
-       vnode_t                 *vp = LINVFS_GET_VP(ip);
-       int                     error;
-
-       if (vp && vp->v_fbhv && (atomic_read(&ip->i_count) == 1))
-               VOP_RELEASE(vp, error);
-}
-
 STATIC void
 linvfs_put_super(
        struct super_block      *sb)

@@ -989,7 +969,6 @@ STATIC struct super_operations linvfs_sops = {
        .alloc_inode            = linvfs_alloc_inode,
        .destroy_inode          = linvfs_destroy_inode,
        .write_inode            = linvfs_write_inode,
-       .put_inode              = linvfs_put_inode,
        .clear_inode            = linvfs_clear_inode,
        .put_super              = linvfs_put_super,
        .write_super            = linvfs_write_super,
fs/xfs/linux/xfs_super.h

@@ -78,12 +78,14 @@
 #define LINVFS_SET_VFS(s, vfsp) \
        ((s)->s_fs_info = vfsp)
 
+struct xfs_mount;
 struct pb_target;
 struct block_device;
 
 extern void xfs_initialize_vnode (bhv_desc_t *, vnode_t *, bhv_desc_t *, int);
 
-extern int  xfs_blkdev_get (const char *, struct block_device **);
+extern int  xfs_blkdev_get (struct xfs_mount *, const char *,
+                            struct block_device **);
 extern void xfs_blkdev_put (struct block_device *);
 
 extern struct pb_target *xfs_alloc_buftarg (struct block_device *);
fs/xfs/pagebuf/page_buf.c

@@ -104,7 +104,7 @@ pb_trace_func(
        pb_trace.buf[j].event = event;
        pb_trace.buf[j].flags = pb->pb_flags;
        pb_trace.buf[j].hold = pb->pb_hold.counter;
-       pb_trace.buf[j].lock_value = PBP(pb)->pb_sema.count.counter;
+       pb_trace.buf[j].lock_value = pb->pb_sema.count.counter;
        pb_trace.buf[j].task = (void *)current;
        pb_trace.buf[j].misc = misc;
        pb_trace.buf[j].ra = ra;

@@ -118,7 +118,6 @@ pb_trace_func(
  */
 
 STATIC kmem_cache_t *pagebuf_cache;
-STATIC pagebuf_daemon_t *pb_daemon;
 STATIC void pagebuf_daemon_wakeup(int);
 STATIC struct workqueue_struct *pagebuf_workqueue;

@@ -272,12 +271,12 @@ _pagebuf_initialize(
         */
        flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
 
-       memset(pb, 0, sizeof(page_buf_private_t));
+       memset(pb, 0, sizeof(page_buf_t));
        atomic_set(&pb->pb_hold, 1);
        init_MUTEX_LOCKED(&pb->pb_iodonesema);
        INIT_LIST_HEAD(&pb->pb_list);
        INIT_LIST_HEAD(&pb->pb_hash_list);
-       init_MUTEX_LOCKED(&PBP(pb)->pb_sema); /* held, no waiters */
+       init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
        PB_SET_OWNER(pb);
        pb->pb_target = target;
        pb->pb_file_offset = range_base;

@@ -289,8 +288,8 @@ _pagebuf_initialize(
        pb->pb_buffer_length = pb->pb_count_desired = range_length;
        pb->pb_flags = flags | PBF_NONE;
        pb->pb_bn = PAGE_BUF_DADDR_NULL;
-       atomic_set(&PBP(pb)->pb_pin_count, 0);
-       init_waitqueue_head(&PBP(pb)->pb_waiters);
+       atomic_set(&pb->pb_pin_count, 0);
+       init_waitqueue_head(&pb->pb_waiters);
 
        PB_STATS_INC(pbstats.pb_create);
        PB_TRACE(pb, PB_TRACE_REC(get), target);

@@ -657,7 +656,7 @@ _pagebuf_find( /* find buffer for block */
                 * if this does not work then we need to drop the
                 * spinlock and do a hard attempt on the semaphore.
                 */
-               not_locked = down_trylock(&PBP(pb)->pb_sema);
+               not_locked = down_trylock(&pb->pb_sema);
                if (not_locked) {
                        if (!(flags & PBF_TRYLOCK)) {
                                /* wait for buffer ownership */

@@ -787,28 +786,6 @@ pagebuf_get( /* allocate a buffer */
        return (pb);
 }
 
-/*
- * Create a pagebuf and populate it with pages from the address
- * space of the passed in inode.
- */
-page_buf_t *
-pagebuf_lookup(
-       struct pb_target        *target,
-       struct inode            *inode,
-       loff_t                  ioff,
-       size_t                  isize,
-       page_buf_flags_t        flags)
-{
-       page_buf_t              *pb = NULL;
-
-       flags |= _PBF_PRIVATE_BH;
-       pb = pagebuf_allocate(flags);
-       if (pb) {
-               _pagebuf_initialize(pb, target, ioff, isize, flags);
-       }
-       return pb;
-}
-
 /*
  * If we are not low on memory then do the readahead in a deadlock
  * safe manner.

@@ -906,7 +883,7 @@ pagebuf_associate_memory(
        pb->pb_locked = 0;
        pb->pb_count_desired = pb->pb_buffer_length = len;
-       pb->pb_flags |= PBF_MAPPED | _PBF_PRIVATE_BH;
+       pb->pb_flags |= PBF_MAPPED;
 
        return 0;
 }

@@ -952,7 +929,7 @@ pagebuf_get_no_daddr(
        /* otherwise pagebuf_free just ignores it */
        pb->pb_flags |= _PBF_MEM_ALLOCATED;
        PB_CLEAR_OWNER(pb);
-       up(&PBP(pb)->pb_sema);  /* Return unlocked pagebuf */
+       up(&pb->pb_sema);       /* Return unlocked pagebuf */
 
        PB_TRACE(pb, PB_TRACE_REC(no_daddr), rmem);

@@ -1070,8 +1047,8 @@ void
 pagebuf_pin(
        page_buf_t              *pb)
 {
-       atomic_inc(&PBP(pb)->pb_pin_count);
-       PB_TRACE(pb, PB_TRACE_REC(pin), PBP(pb)->pb_pin_count.counter);
+       atomic_inc(&pb->pb_pin_count);
+       PB_TRACE(pb, PB_TRACE_REC(pin), pb->pb_pin_count.counter);
 }
 
 /*

@@ -1085,17 +1062,17 @@ void
 pagebuf_unpin(
        page_buf_t              *pb)
 {
-       if (atomic_dec_and_test(&PBP(pb)->pb_pin_count)) {
-               wake_up_all(&PBP(pb)->pb_waiters);
+       if (atomic_dec_and_test(&pb->pb_pin_count)) {
+               wake_up_all(&pb->pb_waiters);
        }
-       PB_TRACE(pb, PB_TRACE_REC(unpin), PBP(pb)->pb_pin_count.counter);
+       PB_TRACE(pb, PB_TRACE_REC(unpin), pb->pb_pin_count.counter);
 }
 
 int
 pagebuf_ispin(
        page_buf_t              *pb)
 {
-       return atomic_read(&PBP(pb)->pb_pin_count);
+       return atomic_read(&pb->pb_pin_count);
 }
 
 /*

@@ -1111,19 +1088,19 @@ _pagebuf_wait_unpin(
 {
        DECLARE_WAITQUEUE       (wait, current);
 
-       if (atomic_read(&PBP(pb)->pb_pin_count) == 0)
+       if (atomic_read(&pb->pb_pin_count) == 0)
                return;
 
-       add_wait_queue(&PBP(pb)->pb_waiters, &wait);
+       add_wait_queue(&pb->pb_waiters, &wait);
        for (;;) {
                current->state = TASK_UNINTERRUPTIBLE;
-               if (atomic_read(&PBP(pb)->pb_pin_count) == 0) {
+               if (atomic_read(&pb->pb_pin_count) == 0) {
                        break;
                }
                pagebuf_run_queues(pb);
                schedule();
        }
-       remove_wait_queue(&PBP(pb)->pb_waiters, &wait);
+       remove_wait_queue(&pb->pb_waiters, &wait);
        current->state = TASK_RUNNING;
 }

@@ -1502,44 +1479,6 @@ pagebuf_offset(
        return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));
 }
 
-/*
- *     pagebuf_segment
- *
- *     pagebuf_segment is used to retrieve the various contiguous
- *     segments of a buffer.  The variable addressed by the
- *     loff_t * should be initialized to 0, and successive
- *     calls will update to point to the segment following the one
- *     returned.
- */
-STATIC void
-pagebuf_segment(
-       page_buf_t              *pb,            /* buffer to examine */
-       loff_t                  *boff_p,        /* offset in buffer of next */
-                                               /* next segment (updated) */
-       struct page             **spage_p,      /* page (updated) */
-                                               /* (NULL if not in page array) */
-       size_t                  *soff_p,        /* offset in page (updated) */
-       size_t                  *ssize_p)       /* segment length (updated) */
-{
-       loff_t                  kpboff;         /* offset in pagebuf */
-       int                     kpi;            /* page index in pagebuf */
-       size_t                  slen;           /* segment length */
-
-       kpboff = *boff_p;
-
-       kpi = page_buf_btoct(kpboff + pb->pb_offset);
-
-       *spage_p = pb->pb_pages[kpi];
-
-       *soff_p = page_buf_poff(kpboff + pb->pb_offset);
-       slen = PAGE_CACHE_SIZE - *soff_p;
-       if (slen > (pb->pb_count_desired - kpboff))
-               slen = (pb->pb_count_desired - kpboff);
-       *ssize_p = slen;
-
-       *boff_p = *boff_p + slen;
-}
-
 /*
  *     pagebuf_iomove
  *

@@ -1548,21 +1487,21 @@ pagebuf_segment(
 void
 pagebuf_iomove(
        page_buf_t              *pb,    /* buffer to process            */
-       off_t                   boff,   /* starting buffer offset       */
+       size_t                  boff,   /* starting buffer offset       */
        size_t                  bsize,  /* length to copy               */
        caddr_t                 data,   /* data address                 */
        page_buf_rw_t           mode)   /* read/write flag              */
 {
-       loff_t                  cboff;
-       size_t                  cpoff;
-       size_t                  csize;
+       size_t                  bend, cpoff, csize;
        struct page             *page;
 
-       cboff = boff;
-       boff += bsize; /* last */
-
-       while (cboff < boff) {
-               pagebuf_segment(pb, &cboff, &page, &cpoff, &csize);
+       bend = boff + bsize;
+       while (boff < bend) {
+               page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
+               cpoff = page_buf_poff(boff + pb->pb_offset);
+               csize = min_t(size_t,
+                             PAGE_CACHE_SIZE - cpoff, pb->pb_count_desired - boff);
+
                ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
 
                switch (mode) {

@@ -1576,6 +1515,7 @@ pagebuf_iomove(
                        memcpy(page_address(page) + cpoff, data, csize);
                }
 
+               boff += csize;
                data += csize;
        }
 }

@@ -1584,13 +1524,17 @@ pagebuf_iomove(
  *     Pagebuf delayed write buffer handling
  */
 
+STATIC int pbd_active = 1;
+STATIC LIST_HEAD(pbd_delwrite_queue);
+STATIC spinlock_t pbd_delwrite_lock = SPIN_LOCK_UNLOCKED;
+
 void
 pagebuf_delwri_queue(
        page_buf_t              *pb,
        int                     unlock)
 {
        PB_TRACE(pb, PB_TRACE_REC(delwri_q), unlock);
-       spin_lock(&pb_daemon->pb_delwrite_lock);
+       spin_lock(&pbd_delwrite_lock);
 
        /* If already in the queue, dequeue and place at tail */
        if (!list_empty(&pb->pb_list)) {
                if (unlock) {

@@ -1599,9 +1543,9 @@ pagebuf_delwri_queue(
                        list_del(&pb->pb_list);
        }
 
-       list_add_tail(&pb->pb_list, &pb_daemon->pb_delwrite_l);
-       PBP(pb)->pb_flushtime = jiffies + pb_params.p_un.age_buffer;
-       spin_unlock(&pb_daemon->pb_delwrite_lock);
+       list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
+       pb->pb_flushtime = jiffies + pb_params.p_un.age_buffer;
+       spin_unlock(&pbd_delwrite_lock);
 
        if (unlock && (pb->pb_flags & _PBF_LOCKABLE)) {
                pagebuf_unlock(pb);

@@ -1613,10 +1557,10 @@ pagebuf_delwri_dequeue(
        page_buf_t              *pb)
 {
        PB_TRACE(pb, PB_TRACE_REC(delwri_uq), 0);
-       spin_lock(&pb_daemon->pb_delwrite_lock);
+       spin_lock(&pbd_delwrite_lock);
        list_del_init(&pb->pb_list);
        pb->pb_flags &= ~PBF_DELWRI;
-       spin_unlock(&pb_daemon->pb_delwrite_lock);
+       spin_unlock(&pbd_delwrite_lock);
 }
 
 /* Defines for pagebuf daemon */
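
The delayed-write machinery above now keys off file-scope statics (pbd_delwrite_queue, pbd_delwrite_lock, pbd_active) rather than fields of a kmalloc'd pb_daemon structure. The queue/dequeue pattern in isolation, as a hedged sketch with hypothetical names (not part of the patch):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

/* Hypothetical stand-alone version of the delwri queue handling above. */
static LIST_HEAD(example_delwri_queue);
static spinlock_t example_delwri_lock = SPIN_LOCK_UNLOCKED;

struct example_buf {
        struct list_head        b_list;
        unsigned long           b_flushtime;    /* earliest flush time */
};

static void example_delwri_queue_buf(struct example_buf *bp, unsigned long age)
{
        spin_lock(&example_delwri_lock);
        if (!list_empty(&bp->b_list))           /* already queued: requeue at tail */
                list_del(&bp->b_list);
        list_add_tail(&bp->b_list, &example_delwri_queue);
        bp->b_flushtime = jiffies + age;
        spin_unlock(&example_delwri_lock);
}

static void example_delwri_dequeue_buf(struct example_buf *bp)
{
        spin_lock(&example_delwri_lock);
        list_del_init(&bp->b_list);
        spin_unlock(&example_delwri_lock);
}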
@@ -1659,7 +1603,7 @@ pagebuf_daemon(
        INIT_LIST_HEAD(&tmp);
        do {
-               if (pb_daemon->active == 1) {
+               if (pbd_active == 1) {
                        del_timer(&pb_daemon_timer);
                        pb_daemon_timer.expires = jiffies +
                                        pb_params.p_un.flush_interval;

@@ -1667,14 +1611,14 @@ pagebuf_daemon(
                        interruptible_sleep_on(&pbd_waitq);
                }
 
-               if (pb_daemon->active == 0) {
+               if (pbd_active == 0) {
                        del_timer(&pb_daemon_timer);
                }
 
-               spin_lock(&pb_daemon->pb_delwrite_lock);
+               spin_lock(&pbd_delwrite_lock);
 
                count = 0;
-               list_for_each_safe(curr, next, &pb_daemon->pb_delwrite_l) {
+               list_for_each_safe(curr, next, &pbd_delwrite_queue) {
                        pb = list_entry(curr, page_buf_t, pb_list);
 
                        PB_TRACE(pb, PB_TRACE_REC(walkq1), pagebuf_ispin(pb));

@@ -1683,8 +1627,8 @@ pagebuf_daemon(
                            (((pb->pb_flags & _PBF_LOCKABLE) == 0) ||
                              !pagebuf_cond_lock(pb))) {
 
                                if (!force_flush &&
-                                   time_before(jiffies, PBP(pb)->pb_flushtime)) {
+                                   time_before(jiffies, pb->pb_flushtime)) {
                                        pagebuf_unlock(pb);
                                        break;
                                }

@@ -1696,7 +1640,7 @@ pagebuf_daemon(
                        }
                }
 
-               spin_unlock(&pb_daemon->pb_delwrite_lock);
+               spin_unlock(&pbd_delwrite_lock);
                while (!list_empty(&tmp)) {
                        pb = list_entry(tmp.next, page_buf_t, pb_list);
                        list_del_init(&pb->pb_list);

@@ -1712,9 +1656,9 @@ pagebuf_daemon(
                pagebuf_run_queues(NULL);
                force_flush = 0;
-       } while (pb_daemon->active == 1);
+       } while (pbd_active == 1);
 
-       pb_daemon->active = -1;
+       pbd_active = -1;
        wake_up_interruptible(&pbd_waitq);
 
        return 0;

@@ -1730,10 +1674,10 @@ pagebuf_delwri_flush(
        struct list_head        *curr, *next, tmp;
        int                     pincount = 0;
 
-       spin_lock(&pb_daemon->pb_delwrite_lock);
+       spin_lock(&pbd_delwrite_lock);
        INIT_LIST_HEAD(&tmp);
 
-       list_for_each_safe(curr, next, &pb_daemon->pb_delwrite_l) {
+       list_for_each_safe(curr, next, &pbd_delwrite_queue) {
                pb = list_entry(curr, page_buf_t, pb_list);
 
        /*

@@ -1764,7 +1708,7 @@ pagebuf_delwri_flush(
                        pb->pb_flags &= ~PBF_ASYNC;
                }
 
-               spin_unlock(&pb_daemon->pb_delwrite_lock);
+               spin_unlock(&pbd_delwrite_lock);
 
                if ((flags & PBDF_TRYLOCK) == 0) {
                        pagebuf_lock(pb);

@@ -1775,10 +1719,10 @@ pagebuf_delwri_flush(
                __pagebuf_iorequest(pb);
 
-               spin_lock(&pb_daemon->pb_delwrite_lock);
+               spin_lock(&pbd_delwrite_lock);
        }
 
-       spin_unlock(&pb_daemon->pb_delwrite_lock);
+       spin_unlock(&pbd_delwrite_lock);
 
        pagebuf_run_queues(NULL);

@@ -1802,26 +1746,17 @@ pagebuf_delwri_flush(
 STATIC int
 pagebuf_daemon_start(void)
 {
-       if (!pb_daemon) {
-               pb_daemon = (pagebuf_daemon_t *)
-                               kmalloc(sizeof(pagebuf_daemon_t), GFP_KERNEL);
-               if (!pb_daemon) {
-                       return -1; /* error */
-               }
+       int             rval;
 
-               pb_daemon->active = 1;
-               pb_daemon->pb_delwrite_lock = SPIN_LOCK_UNLOCKED;
-
-               INIT_LIST_HEAD(&pb_daemon->pb_delwrite_l);
+       pagebuf_workqueue = create_workqueue("pagebuf");
+       if (!pagebuf_workqueue)
+               return -ENOMEM;
 
-               kernel_thread(pagebuf_daemon, (void *)pb_daemon,
-                               CLONE_FS|CLONE_FILES|CLONE_VM);
+       rval = kernel_thread(pagebuf_daemon, NULL, CLONE_FS|CLONE_FILES);
+       if (rval < 0)
+               destroy_workqueue(pagebuf_workqueue);
 
-               pagebuf_workqueue = create_workqueue("pagebuf");
-               if (!pagebuf_workqueue)
-                       return -1;
-       }
-       return 0;
+       return rval;
 }
 
 /*

@@ -1832,19 +1767,10 @@ pagebuf_daemon_start(void)
 STATIC void
 pagebuf_daemon_stop(void)
 {
-       if (pb_daemon) {
-               destroy_workqueue(pagebuf_workqueue);
-
-               pb_daemon->active = 0;
-
-               wake_up_interruptible(&pbd_waitq);
-               while (pb_daemon->active == 0) {
-                       interruptible_sleep_on(&pbd_waitq);
-               }
-
-               kfree(pb_daemon);
-               pb_daemon = NULL;
-       }
+       pbd_active = 0;
+
+       wake_up_interruptible(&pbd_waitq);
+       wait_event_interruptible(pbd_waitq, pbd_active);
+
+       destroy_workqueue(pagebuf_workqueue);
 }
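
The start/stop rework above replaces the open-coded sleep loop with wait_event_interruptible() on the pbd_active flag. The handshake, reduced to a stand-alone hedged sketch (hypothetical names, not part of the patch):

#include <linux/wait.h>
#include <linux/sched.h>

/* Hypothetical reduction of the daemon start/stop handshake used above. */
static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static int example_active = 1;

static int example_daemon(void *arg)
{
        do {
                /* ... flush queued work ... */
                interruptible_sleep_on(&example_waitq);
        } while (example_active == 1);

        example_active = -1;                    /* report "I have exited" */
        wake_up_interruptible(&example_waitq);
        return 0;
}

static void example_daemon_stop(void)
{
        example_active = 0;                     /* ask the daemon to exit */
        wake_up_interruptible(&example_waitq);
        /* wait until the daemon sets example_active back to non-zero (-1) */
        wait_event_interruptible(example_waitq, example_active);
}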
@@ -1965,8 +1891,7 @@ pagebuf_init(void)
                        "fs/pagebuf/stat", 0, 0, pagebuf_readstats, NULL);
 #endif
 
-       pagebuf_cache = kmem_cache_create("page_buf_t",
-                       sizeof(page_buf_private_t), 0,
+       pagebuf_cache = kmem_cache_create("page_buf_t", sizeof(page_buf_t), 0,
                        SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (pagebuf_cache == NULL) {
                printk("pagebuf: couldn't init pagebuf cache\n");
fs/xfs/pagebuf/page_buf.h

@@ -109,7 +109,6 @@ typedef enum page_buf_flags_e { /* pb_flags values */
        /* flags used only internally */
        _PBF_LOCKABLE = (1 << 19),      /* page_buf_t may be locked     */
-       _PBF_PRIVATE_BH = (1 << 20),    /* do not use public buffer heads */
        _PBF_ALL_PAGES_MAPPED = (1 << 21),
                                        /* all pages in rage are mapped */
        _PBF_ADDR_ALLOCATED = (1 << 22),

@@ -195,6 +194,10 @@ typedef int (*page_buf_bdstrat_t)(struct page_buf_s *);
 #define PB_PAGES       4
 
 typedef struct page_buf_s {
+       struct semaphore        pb_sema;        /* semaphore for lockables */
+       unsigned long           pb_flushtime;   /* time to flush pagebuf */
+       atomic_t                pb_pin_count;   /* pin count            */
+       wait_queue_head_t       pb_waiters;     /* unpin waiters        */
        struct list_head        pb_list;
        page_buf_flags_t        pb_flags;       /* status flags */
        struct list_head        pb_hash_list;

@@ -221,6 +224,9 @@ typedef struct page_buf_s {
        unsigned char           pb_hash_index;  /* hash table index     */
        struct page             **pb_pages;     /* array of page pointers */
        struct page             *pb_page_array[PB_PAGES]; /* inline pages */
+#ifdef PAGEBUF_LOCK_TRACKING
+       int                     pb_last_holder;
+#endif
 } page_buf_t;

@@ -244,14 +250,6 @@ extern page_buf_t *pagebuf_get( /* allocate a buffer */
                page_buf_flags_t);      /* PBF_LOCK, PBF_READ,  */
                                        /* PBF_ASYNC            */
 
-extern page_buf_t *pagebuf_lookup(
-               struct pb_target *,
-               struct inode *,
-               loff_t,                 /* starting offset of range */
-               size_t,                 /* length of range      */
-               page_buf_flags_t);      /* PBF_READ, PBF_WRITE, */
-                                       /* PBF_FORCEIO, _PBF_LOCKABLE */
-
 extern page_buf_t *pagebuf_get_empty(  /* allocate pagebuf struct with */
                                        /*  no memory or disk address   */
                struct pb_target *);    /* mount point "fake" inode     */

@@ -340,7 +338,7 @@ extern caddr_t pagebuf_offset(page_buf_t *, off_t);
 extern void pagebuf_iomove(            /* move data in/out of pagebuf  */
                page_buf_t *,           /* buffer to manipulate         */
-               off_t,                  /* starting buffer offset       */
+               size_t,                 /* starting buffer offset       */
                size_t,                 /* length in buffer             */
                caddr_t,                /* data pointer                 */
                page_buf_rw_t);         /* direction                    */
fs/xfs/pagebuf/page_buf_internal.h

@@ -48,24 +48,10 @@
 #define page_has_buffers(page) ((page)->buffers)
 #endif
 
-typedef struct page_buf_private_s {
-       page_buf_t              pb_common;      /* public part of structure */
-       struct semaphore        pb_sema;        /* semaphore for lockables */
-       unsigned long           pb_flushtime;   /* time to flush pagebuf */
-       atomic_t                pb_pin_count;   /* pin count            */
-       wait_queue_head_t       pb_waiters;     /* unpin waiters        */
-#ifdef PAGEBUF_LOCK_TRACKING
-       int                     pb_last_holder;
-#endif
-} page_buf_private_t;
-
-#define PBC(pb)        (&((pb)->pb_common))
-#define PBP(pb)        ((page_buf_private_t *) (pb))
-
 #ifdef PAGEBUF_LOCK_TRACKING
-#define PB_SET_OWNER(pb)       (PBP(pb)->pb_last_holder = current->pid)
-#define PB_CLEAR_OWNER(pb)     (PBP(pb)->pb_last_holder = -1)
-#define PB_GET_OWNER(pb)       (PBP(pb)->pb_last_holder)
+#define PB_SET_OWNER(pb)       (pb->pb_last_holder = current->pid)
+#define PB_CLEAR_OWNER(pb)     (pb->pb_last_holder = -1)
+#define PB_GET_OWNER(pb)       (pb->pb_last_holder)
 #else
 #define PB_SET_OWNER(pb)
 #define PB_CLEAR_OWNER(pb)

@@ -95,12 +81,6 @@ struct pagebuf_trace_buf {
 #define PB_TRACE_BUFSIZE       1024
 #define CIRC_INC(i)    (((i) + 1) & (PB_TRACE_BUFSIZE - 1))
 
-typedef struct pagebuf_daemon {
-       int                     active;
-       spinlock_t              pb_delwrite_lock;
-       struct list_head        pb_delwrite_l;
-} pagebuf_daemon_t;
-
 /*
  * Tunable pagebuf parameters
  */
fs/xfs/pagebuf/page_buf_locking.c

@@ -75,7 +75,7 @@ pagebuf_cond_lock( /* lock buffer, if not locked */
        ASSERT(pb->pb_flags & _PBF_LOCKABLE);
 
-       locked = down_trylock(&PBP(pb)->pb_sema) == 0;
+       locked = down_trylock(&pb->pb_sema) == 0;
        if (locked) {
                PB_SET_OWNER(pb);
        }

@@ -95,7 +95,7 @@ pagebuf_lock_value(
        page_buf_t              *pb)
 {
        ASSERT(pb->pb_flags & _PBF_LOCKABLE);
-       return(atomic_read(&PBP(pb)->pb_sema.count));
+       return(atomic_read(&pb->pb_sema.count));
 }
 
 /*

@@ -114,7 +114,7 @@ pagebuf_lock(
        PB_TRACE(pb, PB_TRACE_REC(lock), 0);
 
        pagebuf_run_queues(pb);
-       down(&PBP(pb)->pb_sema);
+       down(&pb->pb_sema);
        PB_SET_OWNER(pb);
        PB_TRACE(pb, PB_TRACE_REC(locked), 0);
        return 0;

@@ -133,6 +133,6 @@ pagebuf_unlock( /* unlock buffer */
 {
        ASSERT(pb->pb_flags & _PBF_LOCKABLE);
        PB_CLEAR_OWNER(pb);
-       up(&PBP(pb)->pb_sema);
+       up(&pb->pb_sema);
        PB_TRACE(pb, PB_TRACE_REC(unlock), 0);
 }
fs/xfs/xfs_mount.h

@@ -428,11 +428,6 @@ int xfs_syncsub(xfs_mount_t *, int, int, int *);
 void           xfs_initialize_perag(xfs_mount_t *, int);
 void           xfs_xlatesb(void *, struct xfs_sb *, int, xfs_arch_t,
                        __int64_t);
 
-int            xfs_blkdev_get(const char *, struct block_device **);
-void           xfs_blkdev_put(struct block_device *);
-struct xfs_buftarg *xfs_alloc_buftarg(struct block_device *);
-void           xfs_free_buftarg(struct xfs_buftarg *);
-
 /*
  * Flags for freeze operations.
  */
fs/xfs/xfs_vfsops.c

@@ -381,8 +381,9 @@ xfs_finish_flags(
  * (2) logical volume with data and log subvolumes.
  * (3) logical volume with data, log, and realtime subvolumes.
  *
- * The Linux VFS took care of finding and opening the data volume for
- * us. We have to handle the other two (if present) here.
+ * We only have to handle opening the log and realtime volumes here if
+ * they are present.  The data subvolume has already been opened by
+ * get_sb_bdev() and is stored in vfsp->vfs_super->s_bdev.
  */
 STATIC int
 xfs_mount(

@@ -398,19 +399,24 @@ xfs_mount(
        ddev = vfsp->vfs_super->s_bdev;
        logdev = rtdev = NULL;
 
+       /*
+        * Allocate VFS private data (xfs mount structure).
+        */
+       mp = xfs_mount_init();
+
        /*
         * Open real time and log devices - order is important.
         */
        if (args->logname[0]) {
-               error = xfs_blkdev_get(args->logname, &logdev);
+               error = xfs_blkdev_get(mp, args->logname, &logdev);
                if (error)
-                       return error;
+                       goto free_mp;
        }
        if (args->rtname[0]) {
-               error = xfs_blkdev_get(args->rtname, &rtdev);
+               error = xfs_blkdev_get(mp, args->rtname, &rtdev);
                if (error) {
                        xfs_blkdev_put(logdev);
-                       return error;
+                       goto free_mp;
                }
 
                if (rtdev == ddev || rtdev == logdev) {

@@ -418,15 +424,11 @@ xfs_mount(
                        "XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
                        xfs_blkdev_put(logdev);
                        xfs_blkdev_put(rtdev);
-                       return EINVAL;
+                       error = EINVAL;
+                       goto free_mp;
                }
        }
 
-       /*
-        * Allocate VFS private data (xfs mount structure).
-        */
-       mp = xfs_mount_init();
-
        vfs_insertbhv(vfsp, &mp->m_bhv, &xfs_vfsops, mp);
 
        mp->m_ddev_targp = xfs_alloc_buftarg(ddev);

@@ -459,7 +461,7 @@ xfs_mount(
                xfs_size_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize, ss);
        }
        if (rtdev)
-               xfs_size_buftarg(mp->m_logdev_targp, mp->m_sb.sb_blocksize,
+               xfs_size_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_blocksize,
                                mp->m_sb.sb_blocksize);
 
        error = xfs_mountfs(vfsp, mp, ddev->bd_dev, flags);

@@ -476,6 +478,8 @@ xfs_mount(
                xfs_binval(mp->m_rtdev_targp);
        }
        xfs_unmountfs_close(mp, NULL);
+
+free_mp:
        xfs_mount_free(mp, 1);
        return error;
 }
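
The xfs_mount() changes above allocate the mount structure before opening the subvolumes and funnel every early failure through a single free_mp label. A generic sketch of that goto-unwind idiom (hypothetical code, not XFS):

#include <linux/slab.h>
#include <linux/errno.h>

/*
 * Hypothetical illustration of the single-exit unwind pattern: allocate
 * the long-lived object first, then route every later failure through
 * one label that frees it (as xfs_mount() now does with free_mp).
 */
static int example_mount_like_setup(int open_log_fails, int open_rt_fails)
{
        void *mp;
        int error = 0;

        mp = kmalloc(128, GFP_KERNEL);          /* like xfs_mount_init() */
        if (!mp)
                return -ENOMEM;

        if (open_log_fails) {                   /* like the logname open failing */
                error = -EINVAL;
                goto free_mp;
        }
        if (open_rt_fails) {                    /* like the rtname open failing */
                error = -EINVAL;
                goto free_mp;
        }

        /* ... success path keeps mp and continues mounting ... */
        return 0;

free_mp:
        kfree(mp);                              /* like xfs_mount_free() */
        return error;
}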
fs/xfs/xfs_vnodeops.c

@@ -4870,6 +4870,7 @@ vnodeops_t xfs_vnodeops = {
        BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS),
        .vop_open               = xfs_open,
        .vop_read               = xfs_read,
+       .vop_sendfile           = xfs_sendfile,
        .vop_write              = xfs_write,
        .vop_ioctl              = xfs_ioctl,
        .vop_getattr            = xfs_getattr,
fs/xfs/xfsidbg.c

@@ -1793,7 +1793,7 @@ kdbm_pb_flags(int argc, const char **argv, const char **envp, struct pt_regs *re
 static int
 kdbm_pb(int argc, const char **argv, const char **envp, struct pt_regs *regs)
 {
-       page_buf_private_t bp;
+       page_buf_t bp;
        unsigned long addr;
        long    offset = 0;
        int nextarg;

@@ -1808,43 +1808,43 @@ kdbm_pb(int argc, const char **argv, const char **envp, struct pt_regs *regs)
                return diag;
 
        kdb_printf("page_buf_t at 0x%lx\n", addr);
-       kdb_printf("  pb_flags %s\n", pb_flags(bp.pb_common.pb_flags));
+       kdb_printf("  pb_flags %s\n", pb_flags(bp.pb_flags));
        kdb_printf("  pb_target 0x%p pb_hold %d pb_next 0x%p pb_prev 0x%p\n",
-                  bp.pb_common.pb_target, bp.pb_common.pb_hold.counter,
-                  bp.pb_common.pb_list.next, bp.pb_common.pb_list.prev);
+                  bp.pb_target, bp.pb_hold.counter,
+                  bp.pb_list.next, bp.pb_list.prev);
        kdb_printf("  pb_hash_index %d pb_hash_next 0x%p pb_hash_prev 0x%p\n",
-                  bp.pb_common.pb_hash_index, bp.pb_common.pb_hash_list.next,
-                  bp.pb_common.pb_hash_list.prev);
+                  bp.pb_hash_index, bp.pb_hash_list.next,
+                  bp.pb_hash_list.prev);
        kdb_printf("  pb_file_offset 0x%llx pb_buffer_length 0x%llx pb_addr 0x%p\n",
-                  (unsigned long long) bp.pb_common.pb_file_offset,
-                  (unsigned long long) bp.pb_common.pb_buffer_length,
-                  bp.pb_common.pb_addr);
+                  (unsigned long long) bp.pb_file_offset,
+                  (unsigned long long) bp.pb_buffer_length,
+                  bp.pb_addr);
        kdb_printf("  pb_bn 0x%Lx pb_count_desired 0x%lx\n",
-                  bp.pb_common.pb_bn,
-                  (unsigned long) bp.pb_common.pb_count_desired);
+                  bp.pb_bn,
+                  (unsigned long) bp.pb_count_desired);
        kdb_printf("  pb_io_remaining %d pb_error %u\n",
-                  bp.pb_common.pb_io_remaining.counter,
-                  bp.pb_common.pb_error);
+                  bp.pb_io_remaining.counter,
+                  bp.pb_error);
        kdb_printf("  pb_page_count %u pb_offset 0x%x pb_pages 0x%p\n",
-                  bp.pb_common.pb_page_count, bp.pb_common.pb_offset,
-                  bp.pb_common.pb_pages);
+                  bp.pb_page_count, bp.pb_offset,
+                  bp.pb_pages);
 #ifdef PAGEBUF_LOCK_TRACKING
        kdb_printf("  pb_iodonesema (%d,%d) pb_sema (%d,%d) pincount (%d) last holder %d\n",
-                  bp.pb_common.pb_iodonesema.count.counter,
-                  bp.pb_common.pb_iodonesema.sleepers,
+                  bp.pb_iodonesema.count.counter,
+                  bp.pb_iodonesema.sleepers,
                   bp.pb_sema.count.counter, bp.pb_sema.sleepers,
                   bp.pb_pin_count.counter, bp.pb_last_holder);
 #else
        kdb_printf("  pb_iodonesema (%d,%d) pb_sema (%d,%d) pincount (%d)\n",
-                  bp.pb_common.pb_iodonesema.count.counter,
-                  bp.pb_common.pb_iodonesema.sleepers,
+                  bp.pb_iodonesema.count.counter,
+                  bp.pb_iodonesema.sleepers,
                   bp.pb_sema.count.counter, bp.pb_sema.sleepers,
                   bp.pb_pin_count.counter);
 #endif
-       if (bp.pb_common.pb_fspriv || bp.pb_common.pb_fspriv2) {
+       if (bp.pb_fspriv || bp.pb_fspriv2) {
                kdb_printf("pb_fspriv 0x%p pb_fspriv2 0x%p\n",
-                          bp.pb_common.pb_fspriv, bp.pb_common.pb_fspriv2);
+                          bp.pb_fspriv, bp.pb_fspriv2);
        }
 
        return 0;
include/linux/fs.h

@@ -1098,15 +1098,20 @@ extern int bd_claim(struct block_device *, void *);
 extern void bd_release(struct block_device *);
 extern void blk_run_queues(void);
 
-/* fs/devices.c */
+/* fs/char_dev.c */
 extern int register_chrdev(unsigned int, const char *, struct file_operations *);
 extern int unregister_chrdev(unsigned int, const char *);
 extern int chrdev_open(struct inode *, struct file *);
 
+/* fs/block_dev.c */
 extern const char *__bdevname(dev_t);
 extern inline const char *bdevname(struct block_device *bdev)
 {
        return __bdevname(bdev->bd_dev);
 }
+extern struct block_device *open_bdev_excl(const char *, int, int, void *);
+extern void close_bdev_excl(struct block_device *, int);
+
 extern const char *cdevname(kdev_t);
 extern const char *kdevname(kdev_t);
 extern void init_special_inode(struct inode *, umode_t, dev_t);
kernel/ksyms.c

@@ -203,6 +203,8 @@ EXPORT_SYMBOL(bdget);
 EXPORT_SYMBOL(bdput);
 EXPORT_SYMBOL(bd_claim);
 EXPORT_SYMBOL(bd_release);
+EXPORT_SYMBOL(open_bdev_excl);
+EXPORT_SYMBOL(close_bdev_excl);
 EXPORT_SYMBOL(__brelse);
 EXPORT_SYMBOL(__bforget);
 EXPORT_SYMBOL(ll_rw_block);