Commit 20d7d526

Merge, authored May 18, 2003 by Christoph Hellwig
Parents: 7196d9df ef2ead69

Showing 29 changed files with 1777 additions and 1361 deletions (+1777 -1361)
fs/xfs/Makefile                 +0     -1
fs/xfs/linux/xfs_globals.c      +1     -1
fs/xfs/linux/xfs_iops.c         +0     -11
fs/xfs/linux/xfs_lrw.c          +7     -13
fs/xfs/linux/xfs_super.c        +88    -21
fs/xfs/linux/xfs_super.h        +2     -1
fs/xfs/linux/xfs_syncd.c        +86    -0
fs/xfs/linux/xfs_sysctl.c       +6     -2
fs/xfs/linux/xfs_sysctl.h       +2     -0
fs/xfs/linux/xfs_vfs.c          +1     -0
fs/xfs/linux/xfs_vfs.h          +5     -1
fs/xfs/linux/xfs_vnode.h        +1     -2
fs/xfs/pagebuf/page_buf.c       +9     -18
fs/xfs/quota/xfs_qm.c           +17    -11
fs/xfs/support/kmem.h           +101   -14
fs/xfs/xfs_ag.h                 +2     -3
fs/xfs/xfs_buf.h                +5     -6
fs/xfs/xfs_iget.c               +11    -1
fs/xfs/xfs_inode.c              +15    -0
fs/xfs/xfs_inode.h              +2     -1
fs/xfs/xfs_log.c                +30    -33
fs/xfs/xfs_log.h                +1     -2
fs/xfs/xfs_log_priv.h           +21    -14
fs/xfs/xfs_log_recover.c        +1235  -1082
fs/xfs/xfs_mount.c              +21    -19
fs/xfs/xfs_mount.h              +3     -2
fs/xfs/xfs_vfsops.c             +70    -66
fs/xfs/xfs_vnodeops.c           +31    -33
fs/xfs/xfsidbg.c                +4     -3
fs/xfs/Makefile
@@ -132,7 +132,6 @@ xfs-y += $(addprefix linux/, \
 # Objects in support/
 xfs-y += $(addprefix support/, \
 	debug.o \
-	kmem.o \
 	ktrace.o \
 	move.o \
 	mrlock.o \
fs/xfs/linux/xfs_globals.c
@@ -48,7 +48,7 @@ unsigned long xfs_physmem;
  * Tunable XFS parameters.  xfs_params is required even when CONFIG_SYSCTL=n,
  * other XFS code uses these values.
  */
-xfs_param_t xfs_params = { 0, 1, 0, 0, 0, 3 };
+xfs_param_t xfs_params = { 0, 1, 0, 0, 0, 3, 30 * HZ };

 /*
  * Global system credential structure.
fs/xfs/linux/xfs_iops.c
@@ -152,8 +152,6 @@ linvfs_mknod(
 		ip->i_rdev = to_kdev_t(rdev);
 		validate_fields(dir);
 		d_instantiate(dentry, ip);
-		mark_inode_dirty_sync(ip);
-		mark_inode_dirty_sync(dir);
 	}

 	if (!error && have_default_acl) {

@@ -240,7 +238,6 @@ linvfs_link(
 		VN_HOLD(vp);
 		validate_fields(ip);
 		d_instantiate(dentry, ip);
-		mark_inode_dirty_sync(ip);
 	}
 	return -error;
 }

@@ -261,8 +258,6 @@ linvfs_unlink(
 	if (!error) {
 		validate_fields(dir);	/* For size only */
 		validate_fields(inode);
-		mark_inode_dirty_sync(inode);
-		mark_inode_dirty_sync(dir);
 	}

 	return -error;

@@ -296,8 +291,6 @@ linvfs_symlink(
 		d_instantiate(dentry, ip);
 		validate_fields(dir);
 		validate_fields(ip);	/* size needs update */
-		mark_inode_dirty_sync(ip);
-		mark_inode_dirty_sync(dir);
 	}
 	return -error;
 }

@@ -315,8 +308,6 @@ linvfs_rmdir(
 	if (!error) {
 		validate_fields(inode);
 		validate_fields(dir);
-		mark_inode_dirty_sync(inode);
-		mark_inode_dirty_sync(dir);
 	}
 	return -error;
 }

@@ -346,7 +337,6 @@ linvfs_rename(
 	validate_fields(odir);
 	if (ndir != odir)
 		validate_fields(ndir);
-	mark_inode_dirty(ndir);
 	return 0;
 }

@@ -520,7 +510,6 @@ linvfs_setattr(
 	if (!error) {
 		vn_revalidate(vp);
-		mark_inode_dirty_sync(inode);
 	}
 	return error;
 }
fs/xfs/linux/xfs_lrw.c
@@ -887,29 +887,23 @@ xfsbdstrat(
 	return (xfs_bioerror_relse(bp));
 }

-void
-XFS_bflush(
-	xfs_buftarg_t		*target)
-{
-	pagebuf_delwri_flush(target, PBDF_WAIT, NULL);
-}
-
 /*
- * If the underlying (log or data) device is readonly, there are some
+ * If the underlying (data/log/rt) device is readonly, there are some
  * operations that cannot proceed.
  */
 int
-xfs_dev_is_read_only(xfs_mount_t *mp, char *message)
+xfs_dev_is_read_only(
+	xfs_mount_t		*mp,
+	char			*message)
 {
-	if (bdev_read_only(mp->m_ddev_targp->pbr_bdev) ||
-	    bdev_read_only(mp->m_logdev_targp->pbr_bdev) ||
-	    (mp->m_rtdev_targp &&
-	     bdev_read_only(mp->m_rtdev_targp->pbr_bdev))) {
+	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
+	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
+	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
 		cmn_err(CE_NOTE,
 			"XFS: %s required on read-only device.", message);
 		cmn_err(CE_NOTE,
 			"XFS: write access unavailable, cannot proceed.");
 		return EROFS;
 	}
 	return 0;
 }
fs/xfs/linux/xfs_super.c
@@ -217,13 +217,27 @@ xfs_blkdev_put(
 }

 void
-xfs_free_buftarg(
+xfs_flush_buftarg(
 	xfs_buftarg_t		*btp)
 {
 	pagebuf_delwri_flush(btp, PBDF_WAIT, NULL);
+}
+
+void
+xfs_free_buftarg(
+	xfs_buftarg_t		*btp)
+{
+	xfs_flush_buftarg(btp);
 	kmem_free(btp, sizeof(*btp));
 }

+int
+xfs_readonly_buftarg(
+	xfs_buftarg_t		*btp)
+{
+	return bdev_read_only(btp->pbr_bdev);
+}
+
 void
 xfs_relse_buftarg(
 	xfs_buftarg_t		*btp)

@@ -331,9 +345,10 @@ destroy_inodecache( void )
 }

 /*
- * We do not actually write the inode here, just mark the
- * super block dirty so that sync_supers calls us and
- * forces the flush.
+ * Attempt to flush the inode, this will actually fail
+ * if the inode is pinned, but we dirty the inode again
+ * at the point when it is unpinned after a log write,
+ * since this is when the inode itself becomes flushable.
  */
 STATIC void
 linvfs_write_inode(

@@ -348,8 +363,6 @@ linvfs_write_inode(
 		if (sync)
 			flags |= FLUSH_SYNC;
 		VOP_IFLUSH(vp, flags, error);
-		if (error == EAGAIN)
-			inode->i_sb->s_dirt = 1;
 	}
 }

@@ -369,6 +382,61 @@ linvfs_clear_inode(
 	}
 }

+#define SYNCD_FLAGS	(SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR)
+
+STATIC int
+syncd(void *arg)
+{
+	vfs_t			*vfsp = (vfs_t *) arg;
+	int			error;
+
+	daemonize("xfs_syncd");
+
+	vfsp->vfs_sync_task = current;
+	wmb();
+	wake_up(&vfsp->vfs_wait_sync_task);
+
+	for (;;) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(xfs_params.sync_interval);
+		if (vfsp->vfs_flag & VFS_UMOUNT)
+			break;
+		if (vfsp->vfs_flag & VFS_RDONLY)
+			continue;
+		VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
+	}
+
+	vfsp->vfs_sync_task = NULL;
+	wmb();
+	wake_up(&vfsp->vfs_wait_sync_task);
+
+	return 0;
+}
+
+STATIC int
+linvfs_start_syncd(vfs_t *vfsp)
+{
+	int	pid;
+
+	pid = kernel_thread(syncd, (void *) vfsp,
+			    CLONE_VM | CLONE_FS | CLONE_FILES);
+	if (pid < 0)
+		return pid;
+	wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
+	return 0;
+}
+
+STATIC void
+linvfs_stop_syncd(vfs_t *vfsp)
+{
+	vfsp->vfs_flag |= VFS_UMOUNT;
+	wmb();
+
+	wake_up_process(vfsp->vfs_sync_task);
+	wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
+}
+
 STATIC void
 linvfs_put_super(
 	struct super_block	*sb)

@@ -376,8 +444,9 @@ linvfs_put_super(
 	vfs_t		*vfsp = LINVFS_GET_VFS(sb);
 	int		error;

+	linvfs_stop_syncd(vfsp);
 	VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error);
-	if (error == 0)
+	if (!error)
 		VFS_UNMOUNT(vfsp, 0, NULL, error);
 	if (error) {
 		printk("XFS unmount got error %d\n", error);

@@ -395,10 +464,13 @@ linvfs_write_super(
 	vfs_t		*vfsp = LINVFS_GET_VFS(sb);
 	int		error;

-	sb->s_dirt = 0;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb->s_flags & MS_RDONLY) {
+		sb->s_dirt = 0; /* paranoia */
 		return;
-	VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error);
+	}
+	/* Push the log and superblock a little */
+	VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error);
+	sb->s_dirt = 0;
 }

 STATIC int

@@ -424,12 +496,8 @@ linvfs_remount(
 	int		error;

 	VFS_PARSEARGS(vfsp, options, args, 1, error);
-	if (error)
-		goto out;
-	VFS_MNTUPDATE(vfsp, flags, args, error);
-out:
+	if (!error)
+		VFS_MNTUPDATE(vfsp, flags, args, error);
 	kmem_free(args, sizeof(*args));
 	return error;
 }

@@ -438,11 +506,10 @@ STATIC void
 linvfs_freeze_fs(
 	struct super_block	*sb)
 {
-	vfs_t			*vfsp;
+	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
 	vnode_t			*vp;
 	int			error;

-	vfsp = LINVFS_GET_VFS(sb);
 	if (sb->s_flags & MS_RDONLY)
 		return;
 	VFS_ROOT(vfsp, &vp, error);

@@ -454,11 +521,10 @@ STATIC void
 linvfs_unfreeze_fs(
 	struct super_block	*sb)
 {
-	vfs_t			*vfsp;
+	vfs_t			*vfsp = LINVFS_GET_VFS(sb);
 	vnode_t			*vp;
 	int			error;

-	vfsp = LINVFS_GET_VFS(sb);
 	VFS_ROOT(vfsp, &vp, error);
 	VOP_IOCTL(vp, LINVFS_GET_IP(vp), NULL, XFS_IOC_THAW, 0, error);
 	VN_RELE(vp);

@@ -652,7 +718,8 @@ linvfs_fill_super(
 		goto fail_vnrele;
 	if (is_bad_inode(sb->s_root->d_inode))
 		goto fail_vnrele;
-
+	if (linvfs_start_syncd(vfsp))
+		goto fail_vnrele;
 	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);

 	kmem_free(args, sizeof(*args));
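The start/stop handshake above uses vfs_sync_task itself as the liveness flag: the daemon publishes itself before entering its loop, and clears the pointer on the way out, with the waiter blocking on vfs_wait_sync_task each time. The sketch below is a userspace pthread analogue of that handshake only, not the kernel code; every name in it is invented for illustration, and a mutex/condvar pair stands in for wmb()/wait_event().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_sync_task = PTHREAD_COND_INITIALIZER;
static pthread_t      *sync_task;	/* analogue of vfsp->vfs_sync_task */
static bool            umount;		/* analogue of VFS_UMOUNT */

static void *syncd(void *arg)
{
	static pthread_t self;

	self = pthread_self();
	pthread_mutex_lock(&lock);
	sync_task = &self;		/* publish, like vfs_sync_task = current */
	pthread_cond_broadcast(&wait_sync_task);
	while (!umount) {
		/* the kernel daemon sleeps in schedule_timeout() instead */
		struct timespec ts = { time(NULL) + 1, 0 };
		pthread_cond_timedwait(&wait_sync_task, &lock, &ts);
		if (!umount)
			puts("sync pass");	/* stands in for VFS_SYNC() */
	}
	sync_task = NULL;		/* announce exit */
	pthread_cond_broadcast(&wait_sync_task);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, syncd, NULL);
	pthread_mutex_lock(&lock);
	while (!sync_task)		/* linvfs_start_syncd()'s wait_event() */
		pthread_cond_wait(&wait_sync_task, &lock);
	umount = true;			/* linvfs_stop_syncd() */
	pthread_cond_broadcast(&wait_sync_task);
	while (sync_task)		/* wait for the daemon to clear itself */
		pthread_cond_wait(&wait_sync_task, &lock);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}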
fs/xfs/linux/xfs_super.h
@@ -101,7 +101,8 @@ extern void xfs_blkdev_put(struct block_device *);
 extern struct pb_target *xfs_alloc_buftarg(struct block_device *);
 extern void xfs_relse_buftarg(struct pb_target *);
 extern void xfs_free_buftarg(struct pb_target *);
+extern void xfs_flush_buftarg(struct pb_target *);
+extern int xfs_readonly_buftarg(struct pb_target *);
 extern void xfs_setsize_buftarg(struct pb_target *, unsigned int, unsigned int);
 extern unsigned int xfs_getsize_buftarg(struct pb_target *);
fs/xfs/linux/xfs_syncd.c (new file, mode 100644)
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include <xfs.h>

#define SYNCD_FLAGS	(SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR)

int
syncd(void *arg)
{
	vfs_t			*vfsp = (vfs_t *) arg;
	int			error;

	daemonize("xfs_syncd");

	vfsp->vfs_sync_task = current;
	wmb();
	wake_up(&vfsp->vfs_wait_sync_task);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(xfs_params.sync_interval);
		if (vfsp->vfs_flag & VFS_UMOUNT)
			break;
		if (vfsp->vfs_flag & VFS_RDONLY)
			continue;
		VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
	}

	vfsp->vfs_sync_task = NULL;
	wmb();
	wake_up(&vfsp->vfs_wait_sync_task);

	return 0;
}

int
linvfs_start_syncd(vfs_t *vfsp)
{
	int	pid;

	pid = kernel_thread(syncd, (void *) vfsp,
			    CLONE_VM | CLONE_FS | CLONE_FILES);
	if (pid < 0)
		return pid;
	wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
	return 0;
}

void
linvfs_stop_syncd(vfs_t *vfsp)
{
	vfsp->vfs_flag |= VFS_UMOUNT;
	wmb();

	wake_up_process(vfsp->vfs_sync_task);
	wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
}
fs/xfs/linux/xfs_sysctl.c
@@ -36,8 +36,8 @@
 #include <linux/proc_fs.h>

-STATIC ulong xfs_min[XFS_PARAM] = { 0, 0, 0, 0, 0, 0 };
-STATIC ulong xfs_max[XFS_PARAM] = { 1, 1, 1, 1, 127, 3 };
+STATIC ulong xfs_min[XFS_PARAM] = { 0, 0, 0, 0, 0, 0, HZ };
+STATIC ulong xfs_max[XFS_PARAM] = { 1, 1, 1, 1, 127, 3, HZ * 60 };

 static struct ctl_table_header *xfs_table_header;

@@ -92,6 +92,10 @@ STATIC ctl_table xfs_table[] = {
 	sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax,
 	&sysctl_intvec, NULL, &xfs_min[5], &xfs_max[5]},

+	{XFS_SYNC_INTERVAL, "sync_interval", &xfs_params.sync_interval,
+	sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax,
+	&sysctl_intvec, NULL, &xfs_min[6], &xfs_max[6]},
+
 	{0}
 };
fs/xfs/linux/xfs_sysctl.h
@@ -49,6 +49,7 @@ typedef struct xfs_param {
 	ulong	symlink_mode;	/* Symlink creat mode affected by umask. */
 	ulong	panic_mask;	/* bitmask to specify panics on errors.  */
 	ulong	error_level;	/* Degree of reporting for internal probs*/
+	ulong	sync_interval;	/* time between sync calls		  */
 } xfs_param_t;

/*
@@ -73,6 +74,7 @@ enum {
 	XFS_SYMLINK_MODE = 4,
 	XFS_PANIC_MASK = 5,
 	XFS_ERRLEVEL = 6,
+	XFS_SYNC_INTERVAL = 7,
 };

 extern xfs_param_t	xfs_params;
fs/xfs/linux/xfs_vfs.c
@@ -238,6 +238,7 @@ vfs_allocate( void )
 	vfsp = kmem_zalloc(sizeof(vfs_t), KM_SLEEP);
 	bhv_head_init(VFS_BHVHEAD(vfsp), "vfs");
+	init_waitqueue_head(&vfsp->vfs_wait_sync_task);
 	return vfsp;
 }
fs/xfs/linux/xfs_vfs.h
@@ -48,6 +48,8 @@ typedef struct vfs {
 	fsid_t		*vfs_altfsid;	/* An ID fixed for life of FS */
 	bhv_head_t	vfs_bh;		/* head of vfs behavior chain */
 	struct super_block *vfs_super;	/* Linux superblock structure */
+	struct task_struct *vfs_sync_task;
+	wait_queue_head_t vfs_wait_sync_task;
 } vfs_t;

 #define vfs_fbhv	vfs_bh.bh_first	/* 1st on vfs behavior chain */

@@ -78,7 +80,8 @@ typedef enum {
 #define VFS_RDONLY	0x0001	/* read-only vfs */
 #define VFS_GRPID	0x0002	/* group-ID assigned from directory */
 #define VFS_DMI		0x0004	/* filesystem has the DMI enabled */
-#define VFS_END		0x0004	/* max flag */
+#define VFS_UMOUNT	0x0008	/* unmount in progress */
+#define VFS_END		0x0008	/* max flag */

 #define SYNC_ATTR	0x0001	/* sync attributes */
 #define SYNC_CLOSE	0x0002	/* close file system down */

@@ -87,6 +90,7 @@ typedef enum {
 #define SYNC_FSDATA	0x0020	/* flush fs data (e.g. superblocks) */
+#define SYNC_BDFLUSH	0x0010	/* BDFLUSH is calling -- don't block */

 typedef int	(*vfs_mount_t)(bhv_desc_t *, struct xfs_mount_args *, struct cred *);
fs/xfs/linux/xfs_vnode.h
@@ -562,8 +562,7 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
 	(!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->i_mmap_shared))))
 #define VN_CACHED(vp)	(LINVFS_GET_IP(vp)->i_mapping->nrpages)
 #define VN_DIRTY(vp)	(!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->dirty_pages)))
-#define VMODIFY(vp)	{ VN_FLAGSET(vp, VMODIFIED); \
-			mark_inode_dirty(LINVFS_GET_IP(vp)); }
+#define VMODIFY(vp)	VN_FLAGSET(vp, VMODIFIED)
 #define VUNMODIFY(vp)	VN_FLAGCLR(vp, VMODIFIED)
fs/xfs/pagebuf/page_buf.c
@@ -68,7 +68,7 @@
 #define BN_ALIGN_MASK	((1 << (PAGE_CACHE_SHIFT - BBSHIFT)) - 1)

 #ifndef GFP_READAHEAD
-#define GFP_READAHEAD	__GFP_NOWARN
+#define GFP_READAHEAD	(__GFP_NOWARN|__GFP_NORETRY)
 #endif

@@ -76,11 +76,11 @@
  */
 #ifdef PAGEBUF_TRACE
-static	spinlock_t	pb_trace_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t	pb_trace_lock = SPIN_LOCK_UNLOCKED;
 struct pagebuf_trace_buf pb_trace;
 EXPORT_SYMBOL(pb_trace);
 EXPORT_SYMBOL(pb_trace_func);
-#define CIRC_INC(i)	(((i) + 1) & (PB_TRACE_BUFSIZE - 1))
+#define CIRC_INC(i)    (((i) + 1) & (PB_TRACE_BUFSIZE - 1))

 void
 pb_trace_func(

@@ -181,7 +181,7 @@ _bhash(
 	 * dev_t is 16 bits, loff_t is always 64 bits
 	 */
 	base ^= dev;
-	for (bit = hval = 0; base != 0 && bit < sizeof(base) * 8; bit += NBITS) {
+	for (bit = hval = 0; base && bit < sizeof(base) * 8; bit += NBITS) {
 		hval ^= (int)base & (NHASH-1);
 		base >>= NBITS;
 	}

@@ -189,18 +189,18 @@ _bhash(
 }

 /*
- * Mapping of multi-page buffers into contingous virtual space
+ * Mapping of multi-page buffers into contiguous virtual space
  */
 STATIC void *pagebuf_mapout_locked(page_buf_t *);

-STATIC	spinlock_t	as_lock = SPIN_LOCK_UNLOCKED;
 typedef struct a_list {
-	void	*vm_addr;
+	void		*vm_addr;
 	struct a_list	*next;
 } a_list_t;

-STATIC	a_list_t	*as_free_head;
-STATIC	int		as_list_len;
+STATIC a_list_t	*as_free_head;
+STATIC int		as_list_len;
+STATIC spinlock_t	as_lock = SPIN_LOCK_UNLOCKED;

@@ -1897,13 +1897,6 @@ pagebuf_readstats(
 }
 #endif	/* CONFIG_PROC_FS */

-STATIC void
-pagebuf_shaker(void)
-{
-	pagebuf_daemon_wakeup(1);
-}
-
 /*
  *	Initialization and Termination
  */

@@ -1943,7 +1936,6 @@ pagebuf_init(void)
 #endif
 	pagebuf_daemon_start();
-	kmem_shake_register(pagebuf_shaker);
 	return 0;
 }

@@ -1959,7 +1951,6 @@ pagebuf_terminate(void)
 	pagebuf_daemon_stop();
 	kmem_cache_destroy(pagebuf_cache);
-	kmem_shake_deregister(pagebuf_shaker);
 	unregister_sysctl_table(pagebuf_table_header);
 #ifdef CONFIG_PROC_FS
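The _bhash() change above only tightens the loop condition (base != 0 becomes base); the XOR-folding itself is untouched. A minimal userspace check of that folding loop, assuming NBITS == 5 and NHASH == 32 as stand-in values (the real constants are defined elsewhere in page_buf.c):

#include <stdio.h>

#define NBITS 5
#define NHASH 32

/* fold a wide value into a hash index NBITS at a time, as _bhash() does */
static int bhash_fold(unsigned long long base)
{
	int bit, hval;

	for (bit = hval = 0; base && bit < (int)sizeof(base) * 8; bit += NBITS) {
		hval ^= (int)base & (NHASH - 1);
		base >>= NBITS;
	}
	return hval;
}

int main(void)
{
	printf("hash of 0x12345678 -> %d\n", bhash_fold(0x12345678ULL));
	return 0;
}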
fs/xfs/quota/xfs_qm.c
@@ -88,7 +88,7 @@ STATIC void xfs_qm_list_destroy(xfs_dqlist_t *);
 STATIC int	xfs_qm_quotacheck(xfs_mount_t *);

 STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
-STATIC void	xfs_qm_shake(void);
+STATIC int	xfs_qm_shake(int, unsigned int);

 #ifdef DEBUG
 extern mutex_t	qcheck_lock;

@@ -112,6 +112,8 @@ extern mutex_t qcheck_lock;
 #define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
 #endif

+struct shrinker *xfs_qm_shrinker;
+
 /*
  * Initialize the XQM structure.
  * Note that there is not one quota manager per file system.

@@ -161,7 +163,7 @@ xfs_Gqm_init(void)
 	} else
 		xqm->qm_dqzone = qm_dqzone;

-	kmem_shake_register(xfs_qm_shake);
+	xfs_qm_shrinker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);

 	/*
 	 * The t_dqinfo portion of transactions.

@@ -193,7 +195,8 @@ xfs_qm_destroy(
 	ASSERT(xqm != NULL);
 	ASSERT(xqm->qm_nrefs == 0);
-	kmem_shake_deregister(xfs_qm_shake);
+
+	remove_shrinker(xfs_qm_shrinker);
 	hsize = xqm->qm_dqhashmask + 1;
 	for (i = 0; i < hsize; i++) {
 		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));

@@ -2088,7 +2091,7 @@ xfs_qm_shake_freelist(
 			xfs_dqunlock(dqp);
 			xfs_qm_freelist_unlock(xfs_Gqm);
 			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-				return (nreclaimed != howmany);
+				goto out;
 			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
 			goto tryagain;
 		}

@@ -2163,7 +2166,7 @@ xfs_qm_shake_freelist(
 			XFS_DQ_HASH_UNLOCK(hash);
 			xfs_qm_freelist_unlock(xfs_Gqm);
 			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-				return (nreclaimed != howmany);
+				goto out;
 			goto tryagain;
 		}
 		xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");

@@ -2188,7 +2191,8 @@ xfs_qm_shake_freelist(
 		dqp = nextdqp;
 	}
 	xfs_qm_freelist_unlock(xfs_Gqm);
-	return (nreclaimed != howmany);
+out:
+	return nreclaimed;
 }

@@ -2197,13 +2201,15 @@ xfs_qm_shake_freelist(
  * running low.
  */
 /* ARGSUSED */
-STATIC void
-xfs_qm_shake(void)
+STATIC int
+xfs_qm_shake(int nr_to_scan, unsigned int gfp_mask)
 {
 	int	ndqused, nfree, n;

+	if (!(gfp_mask & __GFP_WAIT))
+		return 0;
 	if (! xfs_Gqm)
-		return;
+		return 0;

 	nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
 	/* incore dquots in all f/s's */

@@ -2212,12 +2218,12 @@ xfs_qm_shake(void)
 	ASSERT(ndqused >= 0);

 	if (nfree <= ndqused && nfree < ndquot)
-		return;
+		return 0;

 	ndqused *= xfs_Gqm->qm_dqfree_ratio;	/* target # of free dquots */
 	n = nfree - ndqused - ndquot;		/* # over target */

-	(void) xfs_qm_shake_freelist(MAX(nfree, n));
+	return xfs_qm_shake_freelist(MAX(nfree, n));
 }
fs/xfs/support/kmem.h
@@ -32,31 +32,118 @@
 #ifndef __XFS_SUPPORT_KMEM_H__
 #define __XFS_SUPPORT_KMEM_H__

+#include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>

 /*
- * memory management routines
+ * Cutoff point to use vmalloc instead of kmalloc.
  */
+#define MAX_SLAB_SIZE	0x10000
+
+/*
+ * XFS uses slightly different names for these due to the
+ * IRIX heritage.
+ */
+#define kmem_zone	kmem_cache_s
+#define kmem_zone_t	kmem_cache_t
+
 #define KM_SLEEP	0x0001
 #define KM_NOSLEEP	0x0002
 #define KM_NOFS		0x0004

-#define kmem_zone	kmem_cache_s
-#define kmem_zone_t	kmem_cache_t
-
-extern kmem_zone_t  *kmem_zone_init(int, char *);
-extern void	    *kmem_zone_zalloc(kmem_zone_t *, int);
-extern void	    *kmem_zone_alloc(kmem_zone_t *, int);
-extern void	    kmem_zone_free(kmem_zone_t *, void *);
+/*
+ * XXX get rid of the unconditional  __GFP_NOFAIL by adding
+ * a KM_FAIL flag and using it where we're allowed to fail.
+ */
+static __inline unsigned int
+flag_convert(int flags)
+{
+#if DEBUG
+	if (unlikely(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS))) {
+		printk(KERN_WARNING
+		    "XFS: memory allocation with wrong flags (%x)\n", flags);
+		BUG();
+	}
+#endif
+
+	if (flags & KM_NOSLEEP)
+		return GFP_ATOMIC;
+	/* If we're in a transaction, FS activity is not ok */
+	else if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS))
+		return GFP_NOFS | __GFP_NOFAIL;
+	return GFP_KERNEL | __GFP_NOFAIL;
+}
+
+static __inline void *
+kmem_alloc(size_t size, int flags)
+{
+	if (unlikely(MAX_SLAB_SIZE < size))
+		/* Avoid doing filesystem sensitive stuff to get this */
+		return __vmalloc(size, flag_convert(flags), PAGE_KERNEL);
+	return kmalloc(size, flag_convert(flags));
+}
+
+static __inline void *
+kmem_zalloc(size_t size, int flags)
+{
+	void	*ptr = kmem_alloc(size, flags);
+	if (likely(ptr != NULL))
+		memset(ptr, 0, size);
+	return ptr;
+}
+
+static __inline void
+kmem_free(void *ptr, size_t size)
+{
+	if (unlikely((unsigned long)ptr < VMALLOC_START ||
+		     (unsigned long)ptr >= VMALLOC_END))
+		kfree(ptr);
+	else
+		vfree(ptr);
+}
+
+static __inline void *
+kmem_realloc(void *ptr, size_t newsize, size_t oldsize, int flags)
+{
+	void	*new = kmem_alloc(newsize, flags);
+
+	if (likely(ptr != NULL)) {
+		if (likely(new != NULL))
+			memcpy(new, ptr, min(oldsize, newsize));
+		kmem_free(ptr, oldsize);
+	}
+
+	return new;
+}
+
+static __inline kmem_zone_t *
+kmem_zone_init(int size, char *zone_name)
+{
+	return kmem_cache_create(zone_name, size, 0, 0, NULL, NULL);
+}

-extern void	    *kmem_alloc(size_t, int);
-extern void	    *kmem_realloc(void *, size_t, size_t, int);
-extern void	    *kmem_zalloc(size_t, int);
-extern void	    kmem_free(void *, size_t);
+static __inline void *
+kmem_zone_alloc(kmem_zone_t *zone, int flags)
+{
+	return kmem_cache_alloc(zone, flag_convert(flags));
+}

-typedef void	    (*kmem_shake_func_t)(void);
+static __inline void *
+kmem_zone_zalloc(kmem_zone_t *zone, int flags)
+{
+	void	*ptr = kmem_zone_alloc(zone, flags);
+	if (likely(ptr != NULL))
+		memset(ptr, 0, kmem_cache_size(zone));
+	return ptr;
+}

-extern void	    kmem_shake_register(kmem_shake_func_t);
-extern void	    kmem_shake_deregister(kmem_shake_func_t);
+static __inline void
+kmem_zone_free(kmem_zone_t *zone, void *ptr)
+{
+	kmem_cache_free(zone, ptr);
+}

 #endif /* __XFS_SUPPORT_KMEM_H__ */
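The inlined kmem_alloc() above picks its allocator purely by size: anything over MAX_SLAB_SIZE (64 KiB) goes through __vmalloc(), smaller requests through kmalloc(), and kmem_free() reverses the decision by checking whether the pointer lies in the vmalloc address range. A userspace sketch of just that cutoff decision, with malloc() standing in for both kernel allocators (the helper names here are invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_SLAB_SIZE 0x10000	/* 64 KiB, as in kmem.h above */

static void *small_alloc(size_t size) { return malloc(size); }	/* ~kmalloc */
static void *big_alloc(size_t size)   { return malloc(size); }	/* ~__vmalloc */

static void *kmem_alloc_demo(size_t size)
{
	if (size > MAX_SLAB_SIZE)
		return big_alloc(size);	/* avoid oversized slab allocations */
	return small_alloc(size);
}

static void *kmem_zalloc_demo(size_t size)
{
	void *ptr = kmem_alloc_demo(size);

	if (ptr)			/* zero only on success, as kmem_zalloc does */
		memset(ptr, 0, size);
	return ptr;
}

int main(void)
{
	void *a = kmem_zalloc_demo(512);	/* small path */
	void *b = kmem_zalloc_demo(1 << 20);	/* large path */

	printf("512 B: %s, 1 MiB: %s\n",
	       a ? "small path" : "fail", b ? "big path" : "fail");
	free(a);
	free(b);
	return 0;
}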
fs/xfs/xfs_ag.h
 /*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as

@@ -185,9 +185,8 @@ xfs_agblock_t xfs_agfl_block(struct xfs_mount *mp);
 #endif
 #define	XFS_AGFL_SIZE(mp)	((mp)->m_sb.sb_sectsize / sizeof(xfs_agblock_t))

-/* -- nathans TODO ... use of BBSIZE here - should be sector size -- */
 typedef struct xfs_agfl {
-	xfs_agblock_t	agfl_bno[BBSIZE/sizeof(xfs_agblock_t)];
+	xfs_agblock_t	agfl_bno[1];	/* actually XFS_AGFL_SIZE(mp) */
 } xfs_agfl_t;
fs/xfs/xfs_buf.h
 /*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as

@@ -283,7 +283,6 @@ static inline int XFS_bwrite(page_buf_t *pb)
 	return error;
 }
-
 #define XFS_bdwrite(pb) \
	 pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)

@@ -307,15 +306,15 @@ static inline int xfs_bdwrite(void *mp, page_buf_t *bp)
  * of its metadata.
  */
-extern void XFS_bflush(xfs_buftarg_t *);
-#define xfs_binval(buftarg)	XFS_bflush(buftarg)
+#define xfs_binval(buftarg)	xfs_flush_buftarg(buftarg)
+#define XFS_bflush(buftarg)	xfs_flush_buftarg(buftarg)

 #define xfs_incore_relse(buftarg,delwri_only,wait)	\
	 xfs_relse_buftarg(buftarg)

 #define xfs_baread(target, rablkno, ralen)		\
-	pagebuf_readahead((target), (rablkno), \
-			(ralen), PBF_DONT_BLOCK)
+	pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK)

 #define XFS_getrbuf(sleep,mp)	\
	 pagebuf_get_empty((mp)->m_ddev_targp)
fs/xfs/xfs_iget.c
@@ -252,6 +252,11 @@ xfs_iget_core(
 		if (newnode) {
 			xfs_iocore_inode_reinit(ip);
 		}
+
+		XFS_MOUNT_ILOCK(mp);
+		list_del_init(&ip->i_reclaim);
+		XFS_MOUNT_IUNLOCK(mp);
+
 		vn_trace_exit(vp, "xfs_iget.found", (inst_t *)__return_address);
 		goto return_ip;

@@ -467,8 +472,10 @@ xfs_iget(
 	}

 	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
-	if (bdp == NULL)
+	if (bdp == NULL) {
+		XFS_STATS_INC(xfsstats.xs_ig_dup);
 		goto inode_allocate;
+	}
 	ip = XFS_BHVTOI(bdp);
 	if (lock_flags != 0)
 		xfs_ilock(ip, lock_flags);

@@ -720,6 +727,9 @@ xfs_iextract(
 		}
 	}

+	/* Deal with the deleted inodes list */
+	list_del_init(&ip->i_reclaim);
+
 	mp->m_ireclaims++;
 	XFS_MOUNT_IUNLOCK(mp);
 }
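Both xfs_iget_core() and xfs_iextract() above unlink the inode from the new i_reclaim list with list_del_init(). That pattern is safe to repeat because list_del_init() leaves the entry self-linked, so a second unlink is a no-op. A userspace sketch of why that holds, with a minimal list implementation modeled on the kernel's list.h semantics (names mirror the kernel API but this is a demo, not the kernel code):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	/* insert right after head */
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);		/* leave the entry self-linked */
}

static int list_empty(struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head reclaim, inode;

	INIT_LIST_HEAD(&reclaim);
	INIT_LIST_HEAD(&inode);
	list_add(&inode, &reclaim);
	list_del_init(&inode);		/* first unlink does the work */
	list_del_init(&inode);		/* second unlink is harmless */
	printf("reclaim list empty: %d\n", list_empty(&reclaim));
	return 0;
}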
fs/xfs/xfs_inode.c
@@ -656,7 +656,9 @@ xfs_iformat_extents(
 	int		nex;
 	int		real_size;
 	int		size;
+#if ARCH_CONVERT != ARCH_NOCONVERT
 	int		i;
+#endif

 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	nex = XFS_DFORK_NEXTENTS_ARCH(dip, whichfork, ARCH_CONVERT);

@@ -976,6 +978,8 @@ xfs_iread(
 			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
 	}

+	INIT_LIST_HEAD(&ip->i_reclaim);
+
 	/*
 	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,

@@ -2625,6 +2629,15 @@ xfs_iunpin(
 	ASSERT(atomic_read(&ip->i_pincount) > 0);

 	if (atomic_dec_and_test(&ip->i_pincount)) {
+		vnode_t	*vp = XFS_ITOV_NULL(ip);
+
+		/* make sync come back and flush this inode */
+		if (vp) {
+			struct inode	*inode = LINVFS_GET_IP(vp);
+
+			mark_inode_dirty_sync(inode);
+		}
+
 		wake_up(&ip->i_ipin_wait);
 	}
 }

@@ -3640,6 +3653,8 @@ xfs_ichgtime(xfs_inode_t *ip,
 	 */
 	SYNCHRONIZE();
 	ip->i_update_core = 1;
+	if (!(inode->i_state & I_LOCK))
+		mark_inode_dirty(inode);
 }

 #ifdef XFS_ILOCK_TRACE
fs/xfs/xfs_inode.h
@@ -243,6 +243,7 @@ typedef struct xfs_inode {
 	struct xfs_inode	*i_mprev;	/* ptr to prev inode */
 	struct xfs_inode	**i_prevp;	/* ptr to prev i_next */
 	struct xfs_mount	*i_mount;	/* fs mount struct ptr */
+	struct list_head	i_reclaim;	/* reclaim list */
 	struct bhv_desc		i_bhv_desc;	/* inode behavior descriptor*/
 	struct xfs_dquot	*i_udquot;	/* user dquot */
 	struct xfs_dquot	*i_gdquot;	/* group dquot */

@@ -477,7 +478,7 @@ void xfs_iunlock_map_shared(xfs_inode_t *, uint);
 void		xfs_ifunlock(xfs_inode_t *);
 void		xfs_ireclaim(xfs_inode_t *);
 int		xfs_finish_reclaim(xfs_inode_t *, int, int);
-int		xfs_finish_reclaim_all(struct xfs_mount *);
+int		xfs_finish_reclaim_all(struct xfs_mount *, int);

 /*
  * xfs_inode.c prototypes.
fs/xfs/xfs_log.c
 /*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as

@@ -413,19 +413,6 @@ xfs_log_release_iclog(xfs_mount_t *mp,
 	return 0;
 }

-/*
- * Initialize log manager data.  This routine is intended to be called when
- * a system boots up.  It is not a per filesystem initialization.
- *
- * As you can see, we currently do nothing.
- */
-int
-xfs_log_init(void)
-{
-	return( 0 );
-}
-
 /*
  * 1. Reserve an amount of on-disk log space and return a ticket corresponding
  *    to the reservation.

@@ -497,8 +484,6 @@ xfs_log_mount(xfs_mount_t *mp,
 	      xfs_daddr_t	blk_offset,
 	      int		num_bblks)
 {
-	xlog_t	*log;
-
 	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
 		cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname);
 	else {

@@ -508,7 +493,7 @@ xfs_log_mount(xfs_mount_t *mp,
 		ASSERT(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY);
 	}

-	mp->m_log = log = xlog_alloc_log(mp, log_dev, blk_offset, num_bblks);
+	mp->m_log = xlog_alloc_log(mp, log_dev, blk_offset, num_bblks);

 #if defined(DEBUG) || defined(XLOG_NOLOG)
 	if (!xlog_debug) {

@@ -528,19 +513,19 @@ xfs_log_mount(xfs_mount_t *mp,
 		if (readonly)
 			vfsp->vfs_flag &= ~VFS_RDONLY;

-		error = xlog_recover(log, readonly);
+		error = xlog_recover(mp->m_log, readonly);

 		if (readonly)
 			vfsp->vfs_flag |= VFS_RDONLY;
 		if (error) {
 			cmn_err(CE_WARN, "XFS: log mount/recovery failed");
-			xlog_unalloc_log(log);
+			xlog_unalloc_log(mp->m_log);
 			return error;
 		}
 	}

 	/* Normal transactions can now occur */
-	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
+	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

 	/* End mounting message in xfs_log_mount_finish */
 	return 0;

@@ -1191,28 +1176,39 @@ xlog_alloc_log(xfs_mount_t *mp,
 	int			i;
 	int			iclogsize;

-	log = (void *)kmem_zalloc(sizeof(xlog_t), KM_SLEEP);
+	log = (xlog_t *)kmem_zalloc(sizeof(xlog_t), KM_SLEEP);

 	log->l_mp	   = mp;
 	log->l_dev	   = log_dev;
 	log->l_logsize     = BBTOB(num_bblks);
 	log->l_logBBstart  = blk_offset;
 	log->l_logBBsize   = num_bblks;
 	log->l_roundoff	   = 0;
 	log->l_covered_state = XLOG_STATE_COVER_IDLE;
 	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;

 	log->l_prev_block  = -1;
 	ASSIGN_ANY_LSN(log->l_tail_lsn, 1, 0, ARCH_NOCONVERT);
-	/* log->l_tail_lsn
-	   = 0x100000000LL; cycle = 1; current block = 0 */
+	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
 	log->l_last_sync_lsn = log->l_tail_lsn;
 	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
 	log->l_curr_block  = 0;	    /* filled in by xlog_recover */
 	log->l_grant_reserve_bytes = 0;
 	log->l_grant_reserve_cycle = 1;
 	log->l_grant_write_bytes = 0;
 	log->l_grant_write_cycle = 1;
 	log->l_quotaoffs_flag = 0;  /* XFS_LI_QUOTAOFF logitems */

+	if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
+		if (mp->m_sb.sb_logsunit <= 1) {
+			log->l_stripemask = 1;
+		} else {
+			log->l_stripemask = 1 <<
+				xfs_highbit32(mp->m_sb.sb_logsunit >> BBSHIFT);
+		}
+	}
+	if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb)) {
+		log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
+		ASSERT(log->l_sectbb_log <= mp->m_sectbb_log);
+		ASSERT(XFS_SB_VERSION_HASLOGV2(&mp->m_sb));
+		ASSERT(mp->m_sb.sb_logsectlog >= BBSHIFT);
+	}
+	log->l_sectbb_mask = (1 << log->l_sectbb_log) - 1;
+
 	xlog_get_iclog_buffer_size(mp, log);

@@ -2811,10 +2807,9 @@ xlog_state_switch_iclogs(xlog_t *log,
 	/* Round up to next log-sunit */
 	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
-		if (log->l_curr_block & (log->l_mp->m_lstripemask - 1)) {
-			roundup = log->l_mp->m_lstripemask -
-				(log->l_curr_block &
-				 (log->l_mp->m_lstripemask - 1));
+		if (log->l_curr_block & (log->l_stripemask - 1)) {
+			roundup = log->l_stripemask -
+				(log->l_curr_block & (log->l_stripemask - 1));
 		} else {
 			roundup = 0;
 		}

@@ -3293,15 +3288,17 @@ xlog_verify_disk_cycle_no(xlog_t *log,
 {
 	xfs_buf_t	*bp;
 	uint		cycle_no;
+	xfs_caddr_t	ptr;
 	xfs_daddr_t	i;

 	if (BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT) < 10) {
 		cycle_no = CYCLE_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT);
-		bp = xlog_get_bp(1, log->l_mp);
+		bp = xlog_get_bp(log, 1);
 		ASSERT(bp);
 		for (i = 0; i < BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT); i++) {
 			xlog_bread(log, i, 1, bp);
-			if (GET_CYCLE(XFS_BUF_PTR(bp), ARCH_CONVERT) != cycle_no)
+			ptr = xlog_align(log, i, 1, bp);
+			if (GET_CYCLE(ptr, ARCH_CONVERT) != cycle_no)
 				xlog_warn("XFS: xlog_verify_disk_cycle_no: bad cycle no");
 		}
 		xlog_put_bp(bp);
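The new l_stripemask above is a power of two derived from the superblock's log stripe unit, and xlog_state_switch_iclogs() uses it to round the current block up to the next stripe boundary with plain mask arithmetic. A userspace check of that arithmetic, assuming a 32 KiB stripe unit as sample input; highbit() is a stand-in for xfs_highbit32():

#include <stdio.h>

#define BBSHIFT 9			/* 512-byte basic blocks */

static int highbit(unsigned int v)	/* index of highest set bit */
{
	int i = -1;

	while (v) {
		v >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned int logsunit = 32 * 1024;	/* sb_logsunit, in bytes */
	int stripemask = 1 << highbit(logsunit >> BBSHIFT);
	int curr_block = 70, roundup = 0;

	/* same rounding as xlog_state_switch_iclogs() */
	if (curr_block & (stripemask - 1))
		roundup = stripemask - (curr_block & (stripemask - 1));
	printf("stripemask %d, roundup %d\n", stripemask, roundup);
	/* prints: stripemask 64, roundup 58 */
	return 0;
}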
fs/xfs/xfs_log.h
 /*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as

@@ -153,7 +153,6 @@ xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
 int	  xfs_log_force(struct xfs_mount *mp,
			xfs_lsn_t	lsn,
			uint		flags);
-int	  xfs_log_init(void);
 int	  xfs_log_mount(struct xfs_mount	*mp,
			dev_t			log_dev,
			xfs_daddr_t		start_block,
fs/xfs/xfs_log_priv.h
 /*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as

@@ -73,6 +73,9 @@ int xlog_btolrbb(int b);
 #define XLOG_HEADER_SIZE	512

 #define XLOG_REC_SHIFT(log) \
	 BTOBB(1 << (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? \
	  XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
+#define XLOG_TOTAL_REC_SHIFT(log) \
+	BTOBB(XLOG_MAX_ICLOGS << (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? \
+	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))

@@ -202,9 +205,9 @@ void xlog_grant_add_space(struct log *log, int bytes, int type);
 #define LOG_LOCK(log)		mutex_spinlock(&(log)->l_icloglock)
 #define LOG_UNLOCK(log, s)	mutex_spinunlock(&(log)->l_icloglock, s)

-#define xlog_panic(s)	{cmn_err(CE_PANIC, s); }
-#define xlog_exit(s)	{cmn_err(CE_PANIC, s); }
-#define xlog_warn(s)	{cmn_err(CE_WARN, s); }
+#define xlog_panic(args...)	cmn_err(CE_PANIC, ## args)
+#define xlog_exit(args...)	cmn_err(CE_PANIC, ## args)
+#define xlog_warn(args...)	cmn_err(CE_WARN, ## args)

 /*
  * In core log state

@@ -403,6 +406,7 @@ typedef struct xlog_rec_ext_header {
 	uint	xh_cycle;	/* write cycle of log			: 4 */
 	uint	xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];	/*	: 256 */
 } xlog_rec_ext_header_t;
+
 #ifdef __KERNEL__
 /*
  * - A log record header is 512 bytes.  There is plenty of room to grow the

@@ -441,12 +445,10 @@ typedef struct xlog_iclog_fields {
 	char			*ic_datap;	/* pointer to iclog data */
 } xlog_iclog_fields_t;

-typedef struct xlog_in_core2 {
-	union {
-		xlog_rec_header_t	hic_header;
-		xlog_rec_ext_header_t	hic_xheader;
-		char			hic_sector[XLOG_HEADER_SIZE];
-	} ic_h;
+typedef union xlog_in_core2 {
+	xlog_rec_header_t	hic_header;
+	xlog_rec_ext_header_t	hic_xheader;
+	char			hic_sector[XLOG_HEADER_SIZE];
 } xlog_in_core_2_t;

 typedef struct xlog_in_core {

@@ -473,7 +475,7 @@ typedef struct xlog_in_core {
 #define ic_bwritecnt	hic_fields.ic_bwritecnt
 #define ic_state	hic_fields.ic_state
 #define ic_datap	hic_fields.ic_datap
-#define ic_header	hic_data->ic_h.hic_header
+#define ic_header	hic_data->hic_header

 /*
  * The reservation head lsn is not made up of a cycle number and block number.

@@ -530,8 +532,11 @@ typedef struct log {
 	uint			l_flags;
 	uint			l_quotaoffs_flag; /* XFS_DQ_*, if QUOTAOFFs found */
 	struct xfs_buf_cancel	**l_buf_cancel_table;
+	int			l_stripemask;	/* log stripe mask */
 	int			l_iclog_hsize;	/* size of iclog header */
 	int			l_iclog_heads;	/* number of iclog header sectors */
+	uint			l_sectbb_log;	/* log2 of sector size in bbs */
+	uint			l_sectbb_mask;	/* sector size in bbs alignment mask */
 } xlog_t;

@@ -546,11 +551,13 @@ extern int xlog_print_find_oldest(xlog_t *log, xfs_daddr_t *last_blk);
 extern int	 xlog_recover(xlog_t *log, int readonly);
 extern int	 xlog_recover_finish(xlog_t *log, int mfsi_flags);
 extern void	 xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog);
-extern struct xfs_buf *xlog_get_bp(int, xfs_mount_t *);
-extern void	 xlog_put_bp(struct xfs_buf *);
-extern int	 xlog_bread(xlog_t *, xfs_daddr_t blkno, int bblks,
-			struct xfs_buf *bp);
 extern void	 xlog_recover_process_iunlinks(xlog_t *log);

+extern struct xfs_buf *xlog_get_bp(xlog_t *, int);
+extern void	 xlog_put_bp(struct xfs_buf *);
+extern int	 xlog_bread(xlog_t *, xfs_daddr_t, int, struct xfs_buf *);
+extern xfs_caddr_t xlog_align(xlog_t *, xfs_daddr_t, int, struct xfs_buf *);
+
 #define XLOG_TRACE_GRAB_FLUSH  1
 #define XLOG_TRACE_REL_FLUSH   2
 #define XLOG_TRACE_SLEEP_FLUSH 3
fs/xfs/xfs_log_recover.c
 /*
- * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as

@@ -65,53 +65,68 @@
 #include "xfs_quota.h"
 #include "xfs_rw.h"

-STATIC int	xlog_find_zeroed(struct log *log, xfs_daddr_t *blk_no);
-STATIC int	xlog_clear_stale_blocks(xlog_t *log, xfs_lsn_t tail_lsn);
+STATIC int	xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
+STATIC int	xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
 STATIC void	xlog_recover_insert_item_backq(xlog_recover_item_t **q,
					        xlog_recover_item_t *item);
 #if defined(DEBUG)
-STATIC void	xlog_recover_check_summary(xlog_t *log);
-STATIC void	xlog_recover_check_ail(xfs_mount_t *mp, xfs_log_item_t *lip,
-				       int gen);
+STATIC void	xlog_recover_check_summary(xlog_t *);
+STATIC void	xlog_recover_check_ail(xfs_mount_t *, xfs_log_item_t *, int);
 #else
 #define	xlog_recover_check_summary(log)
 #define	xlog_recover_check_ail(mp, lip, gen)
-#endif	/* DEBUG */
+#endif

+/*
+ * Sector aligned buffer routines for buffer create/read/write/access
+ */
+
+#define XLOG_SECTOR_ROUNDUP_BBCOUNT(log, bbs)	\
+	( ((log)->l_sectbb_mask && (bbs & (log)->l_sectbb_mask)) ? \
+	((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
+#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno)	((bno) & ~(log)->l_sectbb_mask)

 xfs_buf_t *
-xlog_get_bp(int num_bblks, xfs_mount_t *mp)
+xlog_get_bp(
+	xlog_t		*log,
+	int		num_bblks)
 {
-	xfs_buf_t   *bp;
-
 	ASSERT(num_bblks > 0);
-	bp = XFS_ngetrbuf(BBTOB(num_bblks), mp);
-	return bp;
-}	/* xlog_get_bp */

+	if (log->l_sectbb_log) {
+		if (num_bblks > 1)
+			num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
+		num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks);
+	}
+	return XFS_ngetrbuf(BBTOB(num_bblks), log->l_mp);
+}

 void
-xlog_put_bp(xfs_buf_t *bp)
+xlog_put_bp(
+	xfs_buf_t	*bp)
 {
	 XFS_nfreerbuf(bp);
-}	/* xlog_put_bp */
+}

 /*
  * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
  */
 int
-xlog_bread(xlog_t	*log,
-	   xfs_daddr_t	blk_no,
-	   int		nbblks,
-	   xfs_buf_t	*bp)
+xlog_bread(
+	xlog_t		*log,
+	xfs_daddr_t	blk_no,
+	int		nbblks,
+	xfs_buf_t	*bp)
 {
-	int   error;
+	int		error;
+
+	if (log->l_sectbb_log) {
+		blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
+		nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
+	}

	 ASSERT(log);
	 ASSERT(nbblks > 0);
	 ASSERT(BBTOB(nbblks) <= XFS_BUF_SIZE(bp));
	 ASSERT(bp);
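A userspace check of the two sector-rounding macros introduced above, assuming l_sectbb_mask == 7 (eight basic blocks per sector, i.e. l_sectbb_log == 3); the constant is a stand-in for the per-log field:

#include <stdio.h>

#define SECTBB_MASK 7	/* stand-in for (log)->l_sectbb_mask */

#define ROUNDUP_BBCOUNT(bbs) \
	((SECTBB_MASK && ((bbs) & SECTBB_MASK)) ? \
	(((bbs) + SECTBB_MASK + 1) & ~SECTBB_MASK) : (bbs))
#define ROUNDDOWN_BLKNO(bno)	((bno) & ~SECTBB_MASK)

int main(void)
{
	/* 13 blocks round up to 16; block 13 rounds down to 8 */
	printf("count 13 -> %d\n", ROUNDUP_BBCOUNT(13));
	printf("blkno 13 -> %d\n", ROUNDDOWN_BLKNO(13));
	/* already-aligned values pass through unchanged */
	printf("count 16 -> %d\n", ROUNDUP_BBCOUNT(16));
	return 0;
}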
@@ -123,14 +138,11 @@ xlog_bread(xlog_t *log,
XFS_BUF_SET_TARGET
(
bp
,
log
->
l_mp
->
m_logdev_targp
);
xfsbdstrat
(
log
->
l_mp
,
bp
);
if
((
error
=
xfs_iowait
(
bp
)))
{
if
((
error
=
xfs_iowait
(
bp
)))
xfs_ioerror_alert
(
"xlog_bread"
,
log
->
l_mp
,
bp
,
XFS_BUF_ADDR
(
bp
));
return
(
error
);
}
return
error
;
}
/* xlog_bread */
}
/*
* Write out the buffer at the given block for the given number of blocks.
...
...
@@ -139,12 +151,17 @@ xlog_bread(xlog_t *log,
*/
int
xlog_bwrite
(
xlog_t
*
log
,
in
t
blk_no
,
int
nbblks
,
xlog_t
*
log
,
xfs_daddr_
t
blk_no
,
int
nbblks
,
xfs_buf_t
*
bp
)
{
int
error
;
int
error
;
if
(
log
->
l_sectbb_log
)
{
blk_no
=
XLOG_SECTOR_ROUNDDOWN_BLKNO
(
log
,
blk_no
);
nbblks
=
XLOG_SECTOR_ROUNDUP_BBCOUNT
(
log
,
nbblks
);
}
ASSERT
(
nbblks
>
0
);
ASSERT
(
BBTOB
(
nbblks
)
<=
XFS_BUF_SIZE
(
bp
));
...
...
@@ -160,94 +177,109 @@ xlog_bwrite(
if
((
error
=
xfs_bwrite
(
log
->
l_mp
,
bp
)))
xfs_ioerror_alert
(
"xlog_bwrite"
,
log
->
l_mp
,
bp
,
XFS_BUF_ADDR
(
bp
));
return
error
;
}
return
(
error
);
}
/* xlog_bwrite */
xfs_caddr_t
xlog_align
(
xlog_t
*
log
,
xfs_daddr_t
blk_no
,
int
nbblks
,
xfs_buf_t
*
bp
)
{
xfs_caddr_t
ptr
;
if
(
!
log
->
l_sectbb_log
)
return
XFS_BUF_PTR
(
bp
);
ptr
=
XFS_BUF_PTR
(
bp
)
+
BBTOB
((
int
)
blk_no
&
log
->
l_sectbb_mask
);
ASSERT
(
XFS_BUF_SIZE
(
bp
)
>=
BBTOB
(
nbblks
+
(
blk_no
&
log
->
l_sectbb_mask
)));
return
ptr
;
}
#ifdef DEBUG
/*
*
check log record header for recovery
*
dump debug superblock and log record information
*/
static
void
xlog_header_check_dump
(
xfs_mount_t
*
mp
,
xlog_rec_header_t
*
head
)
STATIC
void
xlog_header_check_dump
(
xfs_mount_t
*
mp
,
xlog_rec_header_t
*
head
)
{
int
b
;
printk
(
"%s: SB : uuid = "
,
__FUNCTION__
);
for
(
b
=
0
;
b
<
16
;
b
++
)
printk
(
"%02x"
,((
unsigned
char
*
)
&
mp
->
m_sb
.
sb_uuid
)[
b
]);
printk
(
", fmt = %d
\n
"
,
XLOG_FMT
);
printk
(
" log : uuid = "
);
for
(
b
=
0
;
b
<
16
;
b
++
)
printk
(
"%02x"
,((
unsigned
char
*
)
&
head
->
h_fs_uuid
)[
b
]);
printk
(
", fmt = %d
\n
"
,
INT_GET
(
head
->
h_fmt
,
ARCH_CONVERT
));
int
b
;
printk
(
"%s: SB : uuid = "
,
__FUNCTION__
);
for
(
b
=
0
;
b
<
16
;
b
++
)
printk
(
"%02x"
,((
unsigned
char
*
)
&
mp
->
m_sb
.
sb_uuid
)[
b
]);
printk
(
", fmt = %d
\n
"
,
XLOG_FMT
);
printk
(
" log : uuid = "
);
for
(
b
=
0
;
b
<
16
;
b
++
)
printk
(
"%02x"
,((
unsigned
char
*
)
&
head
->
h_fs_uuid
)[
b
]);
printk
(
", fmt = %d
\n
"
,
INT_GET
(
head
->
h_fmt
,
ARCH_CONVERT
));
}
#else
#define xlog_header_check_dump(mp, head)
#endif
/*
* check log record header for recovery
*/
STATIC
int
xlog_header_check_recover
(
xfs_mount_t
*
mp
,
xlog_rec_header_t
*
head
)
xlog_header_check_recover
(
xfs_mount_t
*
mp
,
xlog_rec_header_t
*
head
)
{
ASSERT
(
INT_GET
(
head
->
h_magicno
,
ARCH_CONVERT
)
==
XLOG_HEADER_MAGIC_NUM
);
ASSERT
(
INT_GET
(
head
->
h_magicno
,
ARCH_CONVERT
)
==
XLOG_HEADER_MAGIC_NUM
);
/*
* IRIX doesn't write the h_fmt field and leaves it zeroed
* (XLOG_FMT_UNKNOWN). This stops us from trying to recover
* a dirty log created in IRIX.
*/
if
(
unlikely
(
INT_GET
(
head
->
h_fmt
,
ARCH_CONVERT
)
!=
XLOG_FMT
))
{
xlog_warn
(
"XFS: dirty log written in incompatible format - can't recover"
);
#ifdef DEBUG
xlog_header_check_dump
(
mp
,
head
);
#endif
XFS_ERROR_REPORT
(
"xlog_header_check_recover(1)"
,
XFS_ERRLEVEL_HIGH
,
mp
);
return
XFS_ERROR
(
EFSCORRUPTED
);
}
else
if
(
unlikely
(
!
uuid_equal
(
&
mp
->
m_sb
.
sb_uuid
,
&
head
->
h_fs_uuid
)))
{
xlog_warn
(
"XFS: dirty log entry has mismatched uuid - can't recover"
);
#ifdef DEBUG
xlog_header_check_dump
(
mp
,
head
);
#endif
XFS_ERROR_REPORT
(
"xlog_header_check_recover(2)"
,
XFS_ERRLEVEL_HIGH
,
mp
);
return
XFS_ERROR
(
EFSCORRUPTED
);
}
return
0
;
/*
* IRIX doesn't write the h_fmt field and leaves it zeroed
* (XLOG_FMT_UNKNOWN). This stops us from trying to recover
* a dirty log created in IRIX.
*/
if
(
unlikely
(
INT_GET
(
head
->
h_fmt
,
ARCH_CONVERT
)
!=
XLOG_FMT
))
{
xlog_warn
(
"XFS: dirty log written in incompatible format - can't recover"
);
xlog_header_check_dump
(
mp
,
head
);
XFS_ERROR_REPORT
(
"xlog_header_check_recover(1)"
,
XFS_ERRLEVEL_HIGH
,
mp
);
return
XFS_ERROR
(
EFSCORRUPTED
);
}
else
if
(
unlikely
(
!
uuid_equal
(
&
mp
->
m_sb
.
sb_uuid
,
&
head
->
h_fs_uuid
)))
{
xlog_warn
(
"XFS: dirty log entry has mismatched uuid - can't recover"
);
xlog_header_check_dump
(
mp
,
head
);
XFS_ERROR_REPORT
(
"xlog_header_check_recover(2)"
,
XFS_ERRLEVEL_HIGH
,
mp
);
return
XFS_ERROR
(
EFSCORRUPTED
);
}
return
0
;
}
/*
* read the head block of the log and check the header
*/
STATIC
int
xlog_header_check_mount
(
xfs_mount_t
*
mp
,
xlog_rec_header_t
*
head
)
xlog_header_check_mount
(
xfs_mount_t
*
mp
,
xlog_rec_header_t
*
head
)
{
ASSERT
(
INT_GET
(
head
->
h_magicno
,
ARCH_CONVERT
)
==
XLOG_HEADER_MAGIC_NUM
);
if
(
uuid_is_nil
(
&
head
->
h_fs_uuid
))
{
ASSERT
(
INT_GET
(
head
->
h_magicno
,
ARCH_CONVERT
)
==
XLOG_HEADER_MAGIC_NUM
);
/*
* IRIX doesn't write the h_fs_uuid or h_fmt fields. If
* h_fs_uuid is nil, we assume this log was last mounted
* by IRIX and continue.
*/
xlog_warn
(
"XFS: nil uuid in log - IRIX style log"
);
}
else
if
(
unlikely
(
!
uuid_equal
(
&
mp
->
m_sb
.
sb_uuid
,
&
head
->
h_fs_uuid
)))
{
xlog_warn
(
"XFS: log has mismatched uuid - can't recover"
);
#ifdef DEBUG
xlog_header_check_dump
(
mp
,
head
);
#endif
XFS_ERROR_REPORT
(
"xlog_header_check_mount"
,
XFS_ERRLEVEL_HIGH
,
mp
);
return
XFS_ERROR
(
EFSCORRUPTED
);
}
return
0
;
if
(
uuid_is_nil
(
&
head
->
h_fs_uuid
))
{
/*
* IRIX doesn't write the h_fs_uuid or h_fmt fields. If
* h_fs_uuid is nil, we assume this log was last mounted
* by IRIX and continue.
*/
xlog_warn
(
"XFS: nil uuid in log - IRIX style log"
);
}
else
if
(
unlikely
(
!
uuid_equal
(
&
mp
->
m_sb
.
sb_uuid
,
&
head
->
h_fs_uuid
)))
{
xlog_warn
(
"XFS: log has mismatched uuid - can't recover"
);
xlog_header_check_dump
(
mp
,
head
);
XFS_ERROR_REPORT
(
"xlog_header_check_mount"
,
XFS_ERRLEVEL_HIGH
,
mp
);
return
XFS_ERROR
(
EFSCORRUPTED
);
}
return
0
;
}
STATIC
void
...
...
@@ -255,6 +287,7 @@ xlog_recover_iodone(
struct
xfs_buf
*
bp
)
{
xfs_mount_t
*
mp
;
ASSERT
(
XFS_BUF_FSPRIVATE
(
bp
,
void
*
));
if
(
XFS_BUF_GETERROR
(
bp
))
{
...
...
@@ -279,12 +312,14 @@ xlog_recover_iodone(
* necessarily be perfect.
*/
int
xlog_find_cycle_start
(
xlog_t
*
log
,
xfs_buf_t
*
bp
,
xfs_daddr_t
first_blk
,
xfs_daddr_t
*
last_blk
,
uint
cycle
)
xlog_find_cycle_start
(
xlog_t
*
log
,
xfs_buf_t
*
bp
,
xfs_daddr_t
first_blk
,
xfs_daddr_t
*
last_blk
,
uint
cycle
)
{
xfs_caddr_t
offset
;
xfs_daddr_t
mid_blk
;
uint
mid_cycle
;
int
error
;
...
...
@@ -293,7 +328,8 @@ xlog_find_cycle_start(xlog_t *log,
while
(
mid_blk
!=
first_blk
&&
mid_blk
!=
*
last_blk
)
{
if
((
error
=
xlog_bread
(
log
,
mid_blk
,
1
,
bp
)))
return
error
;
mid_cycle
=
GET_CYCLE
(
XFS_BUF_PTR
(
bp
),
ARCH_CONVERT
);
offset
=
xlog_align
(
log
,
mid_blk
,
1
,
bp
);
mid_cycle
=
GET_CYCLE
(
offset
,
ARCH_CONVERT
);
if
(
mid_cycle
==
cycle
)
{
*
last_blk
=
mid_blk
;
/* last_half_cycle == mid_cycle */
...
...
@@ -307,8 +343,7 @@ xlog_find_cycle_start(xlog_t *log,
(
mid_blk
==
*
last_blk
&&
mid_blk
-
1
==
first_blk
));
return
0
;
}
/* xlog_find_cycle_start */
}
/*
* Check that the range of blocks does not contain the cycle number
...
...
@@ -320,27 +355,27 @@ xlog_find_cycle_start(xlog_t *log,
* Set blkno to -1 if we encounter no errors. This is an invalid block number
* since we don't ever expect logs to get this large.
*/
STATIC
int
xlog_find_verify_cycle
(
xlog_t
*
log
,
xfs_daddr_t
start_blk
,
int
nbblks
,
uint
stop_on_cycle_no
,
xfs_daddr_t
*
new_blk
)
xlog_find_verify_cycle
(
xlog_t
*
log
,
xfs_daddr_t
start_blk
,
int
nbblks
,
uint
stop_on_cycle_no
,
xfs_daddr_t
*
new_blk
)
{
xfs_daddr_t
i
,
j
;
uint
cycle
;
xfs_buf_t
*
bp
;
char
*
buf
=
NULL
;
int
error
=
0
;
xfs_daddr_t
bufblks
;
xfs_daddr_t
i
,
j
;
uint
cycle
;
xfs_buf_t
*
bp
;
xfs_daddr_t
bufblks
;
xfs_caddr_t
buf
=
NULL
;
int
error
=
0
;
bufblks
=
1
<<
ffs
(
nbblks
);
while
(
!
(
bp
=
xlog_get_bp
(
bufblks
,
log
->
l_mp
)))
{
while
(
!
(
bp
=
xlog_get_bp
(
log
,
bufblks
)))
{
/* can't get enough memory to do everything in one big buffer */
bufblks
>>=
1
;
if
(
!
bufblks
)
if
(
bufblks
<=
log
->
l_sectbb_log
)
return
ENOMEM
;
}
...
...
@@ -352,7 +387,7 @@ xlog_find_verify_cycle( xlog_t *log,
if
((
error
=
xlog_bread
(
log
,
i
,
bcount
,
bp
)))
goto
out
;
buf
=
XFS_BUF_PTR
(
bp
);
buf
=
xlog_align
(
log
,
i
,
bcount
,
bp
);
for
(
j
=
0
;
j
<
bcount
;
j
++
)
{
cycle
=
GET_CYCLE
(
buf
,
ARCH_CONVERT
);
if
(
cycle
==
stop_on_cycle_no
)
{
...
...
@@ -368,10 +403,8 @@ xlog_find_verify_cycle( xlog_t *log,
out:
xlog_put_bp
(
bp
);
return
error
;
}
/* xlog_find_verify_cycle */
}
/*
* Potentially backup over partial log record write.
...
...
@@ -385,98 +418,103 @@ xlog_find_verify_cycle( xlog_t *log,
* extra_bblks is the number of blocks potentially verified on a previous
* call to this routine.
*/
STATIC
int
xlog_find_verify_log_record
(
xlog_t
*
log
,
xfs_daddr_t
start_blk
,
xfs_daddr_t
*
last_blk
,
int
extra_bblks
)
xlog_find_verify_log_record
(
xlog_t
*
log
,
xfs_daddr_t
start_blk
,
xfs_daddr_t
*
last_blk
,
int
extra_bblks
)
{
xfs_daddr_t
i
;
xfs_buf_t
*
bp
;
char
*
buf
=
NULL
;
xlog_rec_header_t
*
head
=
NULL
;
int
error
=
0
;
int
smallmem
=
0
;
int
num_blks
=
*
last_blk
-
start_blk
;
int
xhdrs
;
ASSERT
(
start_blk
!=
0
||
*
last_blk
!=
start_blk
);
if
(
!
(
bp
=
xlog_get_bp
(
num_blks
,
log
->
l_mp
)))
{
if
(
!
(
bp
=
xlog_get_bp
(
1
,
log
->
l_mp
)))
return
ENOMEM
;
smallmem
=
1
;
buf
=
XFS_BUF_PTR
(
bp
);
}
else
{
if
((
error
=
xlog_bread
(
log
,
start_blk
,
num_blks
,
bp
)))
goto
out
;
buf
=
XFS_BUF_PTR
(
bp
)
+
((
num_blks
-
1
)
<<
BBSHIFT
);
}
for
(
i
=
(
*
last_blk
)
-
1
;
i
>=
0
;
i
--
)
{
if
(
i
<
start_blk
)
{
/* legal log record not found */
xlog_warn
(
"XFS: Log inconsistent (didn't find previous header)"
);
ASSERT
(
0
);
error
=
XFS_ERROR
(
EIO
);
goto
out
;
}
if
(
smallmem
&&
(
error
=
xlog_bread
(
log
,
i
,
1
,
bp
)))
goto
out
;
head
=
(
xlog_rec_header_t
*
)
buf
;
if
(
INT_GET
(
head
->
h_magicno
,
ARCH_CONVERT
)
==
XLOG_HEADER_MAGIC_NUM
)
break
;
if
(
!
smallmem
)
buf
-=
BBSIZE
;
}
/*
* We hit the beginning of the physical log & still no header. Return
* to caller. If caller can handle a return of -1, then this routine
* will be called again for the end of the physical log.
*/
if
(
i
==
-
1
)
{
error
=
-
1
;
goto
out
;
}
/* we have the final block of the good log (the first block
* of the log record _before_ the head. So we check the uuid.
*/
if
((
error
=
xlog_header_check_mount
(
log
->
l_mp
,
head
)))
goto
out
;
/*
* We may have found a log record header before we expected one.
* last_blk will be the 1st block # with a given cycle #. We may end
* up reading an entire log record. In this case, we don't want to
* reset last_blk. Only when last_blk points in the middle of a log
* record do we update last_blk.
*/
if
(
XFS_SB_VERSION_HASLOGV2
(
&
log
->
l_mp
->
m_sb
))
{
uint
h_size
=
INT_GET
(
head
->
h_size
,
ARCH_CONVERT
);
xhdrs
=
h_size
/
XLOG_HEADER_CYCLE_SIZE
;
if
(
h_size
%
XLOG_HEADER_CYCLE_SIZE
)
xhdrs
++
;
}
else
{
xhdrs
=
1
;
}
if
(
*
last_blk
-
i
+
extra_bblks
!=
BTOBB
(
INT_GET
(
head
->
h_len
,
ARCH_CONVERT
))
+
xhdrs
)
*
last_blk
=
i
;
xfs_daddr_t
i
;
xfs_buf_t
*
bp
;
xfs_caddr_t
offset
=
NULL
;
xlog_rec_header_t
*
head
=
NULL
;
int
error
=
0
;
int
smallmem
=
0
;
int
num_blks
=
*
last_blk
-
start_blk
;
int
xhdrs
;
out:
xlog_put_bp
(
bp
);
ASSERT
(
start_blk
!=
0
||
*
last_blk
!=
start_blk
);
return
error
;
}
/* xlog_find_verify_log_record */
if
(
!
(
bp
=
xlog_get_bp
(
log
,
num_blks
)))
{
if
(
!
(
bp
=
xlog_get_bp
(
log
,
1
)))
return
ENOMEM
;
smallmem
=
1
;
}
else
{
if
((
error
=
xlog_bread
(
log
,
start_blk
,
num_blks
,
bp
)))
goto
out
;
offset
=
xlog_align
(
log
,
start_blk
,
num_blks
,
bp
);
offset
+=
((
num_blks
-
1
)
<<
BBSHIFT
);
}
for
(
i
=
(
*
last_blk
)
-
1
;
i
>=
0
;
i
--
)
{
if
(
i
<
start_blk
)
{
/* legal log record not found */
xlog_warn
(
"XFS: Log inconsistent (didn't find previous header)"
);
ASSERT
(
0
);
error
=
XFS_ERROR
(
EIO
);
goto
out
;
}
if
(
smallmem
)
{
if
((
error
=
xlog_bread
(
log
,
i
,
1
,
bp
)))
goto
out
;
offset
=
xlog_align
(
log
,
i
,
1
,
bp
);
}
head
=
(
xlog_rec_header_t
*
)
offset
;
if
(
XLOG_HEADER_MAGIC_NUM
==
INT_GET
(
head
->
h_magicno
,
ARCH_CONVERT
))
break
;
if
(
!
smallmem
)
offset
-=
BBSIZE
;
}
/*
* We hit the beginning of the physical log & still no header. Return
* to caller. If caller can handle a return of -1, then this routine
* will be called again for the end of the physical log.
*/
if
(
i
==
-
1
)
{
error
=
-
1
;
goto
out
;
}
/*
* We have the final block of the good log (the first block
* of the log record _before_ the head. So we check the uuid.
*/
if
((
error
=
xlog_header_check_mount
(
log
->
l_mp
,
head
)))
goto
out
;
/*
* We may have found a log record header before we expected one.
* last_blk will be the 1st block # with a given cycle #. We may end
* up reading an entire log record. In this case, we don't want to
* reset last_blk. Only when last_blk points in the middle of a log
* record do we update last_blk.
*/
if
(
XFS_SB_VERSION_HASLOGV2
(
&
log
->
l_mp
->
m_sb
))
{
uint
h_size
=
INT_GET
(
head
->
h_size
,
ARCH_CONVERT
);
xhdrs
=
h_size
/
XLOG_HEADER_CYCLE_SIZE
;
if
(
h_size
%
XLOG_HEADER_CYCLE_SIZE
)
xhdrs
++
;
}
else
{
xhdrs
=
1
;
}
if
(
*
last_blk
-
i
+
extra_bblks
!=
BTOBB
(
INT_GET
(
head
->
h_len
,
ARCH_CONVERT
))
+
xhdrs
)
*
last_blk
=
i
;
out:
xlog_put_bp
(
bp
);
return
error
;
}
/*
* Head is defined to be the point of the log where the next log write
...
...
@@ -489,252 +527,257 @@ xlog_find_verify_log_record(xlog_t *log,
* last_blk contains the block number of the first block with a given
* cycle number.
*
* Also called from xfs_log_print.c
*
* Return: zero if normal, non-zero if error.
*/
int
xlog_find_head
(
xlog_t
*
log
,
xfs_daddr_t
*
return_head_blk
)
xlog_find_head
(
xlog_t
*
log
,
xfs_daddr_t
*
return_head_blk
)
{
xfs_buf_t
*
bp
;
xfs_daddr_t
new_blk
,
first_blk
,
start_blk
,
last_blk
,
head_blk
;
int
num_scan_bblks
;
uint
first_half_cycle
,
last_half_cycle
;
uint
stop_on_cycle
;
int
error
,
log_bbnum
=
log
->
l_logBBsize
;
/* Is the end of the log device zeroed? */
if
((
error
=
xlog_find_zeroed
(
log
,
&
first_blk
))
==
-
1
)
{
*
return_head_blk
=
first_blk
;
/* is the whole lot zeroed? */
if
(
!
first_blk
)
{
/* Linux XFS shouldn't generate totally zeroed logs -
* mkfs etc write a dummy unmount record to a fresh
* log so we can store the uuid in there
*/
xlog_warn
(
"XFS: totally zeroed log"
);
xfs_buf_t
*
bp
;
xfs_caddr_t
offset
;
xfs_daddr_t
new_blk
,
first_blk
,
start_blk
,
last_blk
,
head_blk
;
int
num_scan_bblks
;
uint
first_half_cycle
,
last_half_cycle
;
uint
stop_on_cycle
;
int
error
,
log_bbnum
=
log
->
l_logBBsize
;
/* Is the end of the log device zeroed? */
if
((
error
=
xlog_find_zeroed
(
log
,
&
first_blk
))
==
-
1
)
{
*
return_head_blk
=
first_blk
;
/* Is the whole lot zeroed? */
if
(
!
first_blk
)
{
/* Linux XFS shouldn't generate totally zeroed logs -
* mkfs etc write a dummy unmount record to a fresh
* log so we can store the uuid in there
*/
xlog_warn
(
"XFS: totally zeroed log"
);
}
return
0
;
}
else
if
(
error
)
{
xlog_warn
(
"XFS: empty log check failed"
);
return
error
;
}
return
0
;
}
else
if
(
error
)
{
xlog_warn
(
"XFS: empty log check failed"
);
return
error
;
}
	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if ((error = xlog_bread(log, 0, 1, bp)))
		goto bp_err;
	offset = xlog_align(log, 0, 1, bp);
	first_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	if ((error = xlog_bread(log, last_blk, 1, bp)))
		goto bp_err;
	offset = xlog_align(log, last_blk, 1, bp);
	last_half_cycle = GET_CYCLE(offset, ARCH_CONVERT);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an illegal block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ...
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * or
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log, start_blk,
						num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		start_blk = log_bbnum - num_scan_bblks + head_blk;
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks - head_blk >= 0);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto bad_blk;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					(int)head_blk, stop_on_cycle,
					&new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

 bad_blk:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			error = XFS_ERROR(EIO);
			goto bp_err;
		} else if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - num_scan_bblks + head_blk;
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum - start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			if ((error = xlog_find_verify_log_record(log,
							start_blk, &new_blk,
							(int)head_blk)) == -1) {
				error = XFS_ERROR(EIO);
				goto bp_err;
			} else if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xlog_warn("XFS: failed to find log head");
	return error;
}
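The head search above leans on one invariant: cycle numbers along the circular log are monotonically ordered except at the single wrap point. A minimal user-space sketch of the target condition (hypothetical helper, not kernel code; the real function binary-searches and then verifies backwards) is a scan for the first block whose cycle number drops back to the one seen in the last block:

#include <stdio.h>

/* Toy model: find the log head in an array of per-block cycle numbers.
 * The head is the first block whose cycle equals the cycle of the last
 * block; everything before it was already overwritten with cycle x+1.
 */
static int
toy_find_head(const unsigned int *cycles, int nblocks)
{
	unsigned int last_cycle = cycles[nblocks - 1];
	int i;

	for (i = 0; i < nblocks; i++)
		if (cycles[i] == last_cycle)
			return i;	/* first block of the old cycle */
	return 0;
}

int main(void)
{
	/* "x + 1 ... | x ..." with x = 4: the head belongs at block 3 */
	unsigned int cycles[] = { 5, 5, 5, 4, 4, 4, 4, 4 };

	printf("head at block %d\n",
		toy_find_head(cycles, sizeof(cycles) / sizeof(cycles[0])));
	return 0;
}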
/*
* Find the sync block number or the tail of the log.
...
...
@@ -753,13 +796,15 @@ xlog_find_head(xlog_t *log,
* available.
*/
int
xlog_find_tail(
	xlog_t			*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	int			readonly)
{
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	xfs_caddr_t		offset = NULL;
	xfs_buf_t		*bp;
	int			error, i, found;
	xfs_daddr_t		umount_data_blk;
...
...
@@ -775,13 +820,14 @@ xlog_find_tail(xlog_t *log,
	if ((error = xlog_find_head(log, head_blk)))
		return error;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if (*head_blk == 0) {				/* special case */
		if ((error = xlog_bread(log, 0, 1, bp)))
			goto bread_err;
		offset = xlog_align(log, 0, 1, bp);
		if (GET_CYCLE(offset, ARCH_CONVERT) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto exit;
...
...
@@ -795,8 +841,9 @@ xlog_find_tail(xlog_t *log,
	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
		if ((error = xlog_bread(log, i, 1, bp)))
			goto bread_err;
		offset = xlog_align(log, i, 1, bp);
		if (XLOG_HEADER_MAGIC_NUM ==
		    INT_GET(*(uint *)offset, ARCH_CONVERT)) {
			found = 1;
			break;
		}
...
...
@@ -811,8 +858,9 @@ xlog_find_tail(xlog_t *log,
		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
			if ((error = xlog_bread(log, i, 1, bp)))
				goto bread_err;
			offset = xlog_align(log, i, 1, bp);
			if (XLOG_HEADER_MAGIC_NUM ==
			    INT_GET(*(uint *)offset, ARCH_CONVERT)) {
				found = 2;
				break;
			}
...
...
@@ -825,7 +873,7 @@ xlog_find_tail(xlog_t *log,
	}

	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(rhead->h_tail_lsn, ARCH_CONVERT);
/*
...
...
@@ -885,7 +933,8 @@ xlog_find_tail(xlog_t *log,
		if ((error = xlog_bread(log, umount_data_blk, 1, bp))) {
			goto bread_err;
		}
		offset = xlog_align(log, umount_data_blk, 1, bp);
		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written
...
...
@@ -900,7 +949,6 @@ xlog_find_tail(xlog_t *log,
		}
	}

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
...
...
@@ -920,11 +968,9 @@ xlog_find_tail(xlog_t *log,
* But... if the -device- itself is readonly, just skip this.
* We can't recover this device anyway, so it won't matter.
*/
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) {
		error = xlog_clear_stale_blocks(log, tail_lsn);
	}

bread_err:
exit:
...
...
@@ -932,10 +978,8 @@ xlog_find_tail(xlog_t *log,
	if (error)
		xlog_warn("XFS: failed to locate log tail");
	return error;
}
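The tail search above walks backwards from the head looking for the first block that begins with the record-header magic number. A toy user-space version of that scan (hypothetical types; XLOG_HEADER_MAGIC_NUM really is the on-disk value 0xFEEDbabe):

#include <stdio.h>

#define XLOG_HEADER_MAGIC_NUM	0xFEEDbabe

/* Toy model of the backwards magic-number scan: given each block's
 * first word, find the last record header at or before the head block.
 */
static int
toy_find_last_header(const unsigned int *first_words, int head_blk)
{
	int i;

	for (i = head_blk - 1; i >= 0; i--)
		if (first_words[i] == XLOG_HEADER_MAGIC_NUM)
			return i;	/* block of the last record header */
	return -1;			/* would wrap and search the log end */
}

int main(void)
{
	unsigned int blocks[] = { XLOG_HEADER_MAGIC_NUM, 7, 7,
				  XLOG_HEADER_MAGIC_NUM, 9, 9 };

	printf("last header at block %d\n", toy_find_last_header(blocks, 6));
	return 0;
}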
/*
* Is the log zeroed at all?
...
...
@@ -954,22 +998,25 @@ xlog_find_tail(xlog_t *log,
* >0 => error has occurred
*/
int
xlog_find_zeroed(
	xlog_t		*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;
	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return ENOMEM;
	if ((error = xlog_bread(log, 0, 1, bp)))
		goto bp_err;
	offset = xlog_align(log, 0, 1, bp);
	first_cycle = GET_CYCLE(offset, ARCH_CONVERT);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
...
...
@@ -979,7 +1026,8 @@ xlog_find_zeroed(struct log *log,
	/* check partially zeroed log */
	if ((error = xlog_bread(log, log_bbnum-1, 1, bp)))
		goto bp_err;
	offset = xlog_align(log, log_bbnum-1, 1, bp);
	last_cycle = GET_CYCLE(offset, ARCH_CONVERT);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
...
...
@@ -1040,67 +1088,106 @@ xlog_find_zeroed(struct log *log,
	if (error)
		return error;
	return -1;
}
/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	xlog_t			*log,
	xfs_caddr_t		buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM);
	INT_SET(recp->h_cycle, ARCH_CONVERT, cycle);
	INT_SET(recp->h_version, ARCH_CONVERT,
			XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1);
	ASSIGN_ANY_LSN(recp->h_lsn, cycle, block, ARCH_CONVERT);
	ASSIGN_ANY_LSN(recp->h_tail_lsn, tail_cycle, tail_block, ARCH_CONVERT);
	INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
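A hedged sketch of how a caller could use a helper like the one above (user-space stand-in types, hypothetical names): one minimal record header is stamped per 512-byte basic block, which is exactly how the writer below walks its buffer in BBSIZE steps.

#include <string.h>

#define BBSIZE 512

struct toy_rec_header {			/* stand-in for xlog_rec_header_t */
	unsigned int	h_magicno;
	unsigned int	h_cycle;
	unsigned int	h_block;	/* LSN stand-in */
};

static void
toy_fill_headers(char *buf, int nblocks, unsigned int cycle, int first_block)
{
	int i;

	for (i = 0; i < nblocks; i++) {
		struct toy_rec_header *recp =
			(struct toy_rec_header *)(buf + i * BBSIZE);

		memset(buf + i * BBSIZE, 0, BBSIZE);
		recp->h_magicno = 0xFEEDbabe;
		recp->h_cycle = cycle;
		recp->h_block = first_block + i;
	}
}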
STATIC int
xlog_write_log_records(
	xlog_t		*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	xfs_caddr_t	offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;
	bufblks = 1 << ffs(blocks);
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks <= log->l_sectbb_log)
			return ENOMEM;
	}
	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, start_block);
	if (balign != start_block) {
		if ((error = xlog_bread(log, start_block, 1, bp))) {
			xlog_put_bp(bp);
			return error;
		}
		j = start_block - balign;
	}
	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, end_block);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = XFS_BUF_PTR(bp);
			balign = BBTOB(ealign - start_block);
			XFS_BUF_SET_PTR(bp, offset + balign, BBTOB(sectbb));
			if ((error = xlog_bread(log, ealign, sectbb, bp)))
				break;
			XFS_BUF_SET_PTR(bp, offset, bufblks);
		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}
	xlog_put_bp(bp);
	return error;
}
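The read-modify-write dance above exists because the log device may have sectors larger than a 512-byte basic block, so partial-sector writes must first read back the untouched part. A sketch of the rounding arithmetic, assuming a power-of-two sector size expressed in basic blocks (hypothetical stand-alone macros mirroring the intent of the kernel's XLOG_SECTOR_ROUNDDOWN_BLKNO / XLOG_SECTOR_ROUNDUP_BBCOUNT):

#include <stdio.h>

#define ROUNDDOWN_BLKNO(blkno, sectbb_log) \
	((blkno) & ~((1 << (sectbb_log)) - 1))
#define ROUNDUP_BBCOUNT(bbs, sectbb_log) \
	(((bbs) + (1 << (sectbb_log)) - 1) & ~((1 << (sectbb_log)) - 1))

int main(void)
{
	int sectbb_log = 3;	/* 8 basic blocks = 4k sectors */

	/* block 21 sits inside the sector starting at block 16 */
	printf("%d\n", ROUNDDOWN_BLKNO(21, sectbb_log));	/* 16 */
	/* writing 5 blocks still costs one whole 8-block sector */
	printf("%d\n", ROUNDUP_BBCOUNT(5, sectbb_log));		/* 8 */
	return 0;
}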
...
...
@@ -1244,10 +1331,11 @@ xlog_clear_stale_blocks(
 */
STATIC xlog_recover_t *
xlog_recover_find_tid(
	xlog_recover_t		*q,
	xlog_tid_t		tid)
{
	xlog_recover_t		*p = q;

	while (p != NULL) {
		if (p->r_log_tid == tid)
...
...
@@ -1255,42 +1343,43 @@ xlog_recover_find_tid(xlog_recover_t *q,
			p = p->r_next;
	}
	return p;
}
STATIC void
xlog_recover_put_hashq(
	xlog_recover_t		**q,
	xlog_recover_t		*trans)
{
	trans->r_next = *q;
	*q = trans;
}
STATIC void
xlog_recover_add_item(
	xlog_recover_item_t	**itemq)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), 0);
	xlog_recover_insert_item_backq(itemq, item);
}
STATIC int
xlog_recover_add_to_cont_trans(
	xlog_recover_t		*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	item = trans->r_itemq;
	if (item == 0) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len); /* d, s, l */
		return 0;
	}
...
...
@@ -1304,10 +1393,10 @@ xlog_recover_add_to_cont_trans(xlog_recover_t *trans,
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	return 0;
}
/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
...
...
@@ -1320,13 +1409,14 @@ xlog_recover_add_to_cont_trans(xlog_recover_t *trans,
* will appear in the current log item.
*/
STATIC int
xlog_recover_add_to_trans(
	xlog_recover_t		*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr;

	if (!len)
		return 0;
...
...
@@ -1339,7 +1429,7 @@ xlog_recover_add_to_trans(xlog_recover_t *trans,
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;
...
...
@@ -1362,29 +1452,29 @@ xlog_recover_add_to_trans(xlog_recover_t *trans,
	item->ri_buf[item->ri_cnt].i_len = len;
	item->ri_cnt++;
	return 0;
}
STATIC void
xlog_recover_new_tid(
	xlog_recover_t		**q,
	xlog_tid_t		tid,
	xfs_lsn_t		lsn)
{
	xlog_recover_t		*trans;

	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid   = tid;
	trans->r_lsn	   = lsn;
	xlog_recover_put_hashq(q, trans);
}
STATIC int
xlog_recover_unlink_tid(
	xlog_recover_t		**q,
	xlog_recover_t		*trans)
{
	xlog_recover_t		*tp;
	int			found = 0;

	ASSERT(trans != 0);
	if (trans == *q) {
...
...
@@ -1407,11 +1497,12 @@ xlog_recover_unlink_tid(xlog_recover_t **q,
			tp->r_next = tp->r_next->r_next;
	}
	return 0;
}
STATIC void
xlog_recover_insert_item_backq(
	xlog_recover_item_t	**q,
	xlog_recover_item_t	*item)
{
	if (*q == 0) {
		item->ri_prev = item->ri_next = item;
...
...
@@ -1422,55 +1513,53 @@ xlog_recover_insert_item_backq(xlog_recover_item_t **q,
		(*q)->ri_prev = item;
		item->ri_prev->ri_next = item;
	}
}

STATIC void
xlog_recover_insert_item_frontq(
	xlog_recover_item_t	**q,
	xlog_recover_item_t	*item)
{
	xlog_recover_insert_item_backq(q, item);
	*q = item;
}
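The recovery item queue is a circular doubly-linked list where the queue pointer marks the front; back-insert links the new node just before the front, and front-insert is the same link-in followed by retargeting the queue pointer. A minimal user-space model of that shape (hypothetical types):

#include <stddef.h>

struct toy_item {
	struct toy_item	*prev, *next;
	int		val;
};

static void
toy_insert_backq(struct toy_item **q, struct toy_item *item)
{
	if (*q == NULL) {
		item->prev = item->next = item;
		*q = item;
	} else {
		item->next = *q;
		item->prev = (*q)->prev;
		(*q)->prev->next = item;	/* old tail points at item */
		(*q)->prev = item;		/* item becomes the tail */
	}
}

static void
toy_insert_frontq(struct toy_item **q, struct toy_item *item)
{
	toy_insert_backq(q, item);
	*q = item;			/* new item becomes the queue head */
}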
STATIC int
xlog_recover_reorder_trans(
	xlog_t			*log,
	xlog_recover_t		*trans)
{
	xlog_recover_item_t	*first_item, *itemq, *itemq_next;

	first_item = itemq = trans->r_itemq;
	trans->r_itemq = NULL;
	do {
		itemq_next = itemq->ri_next;
		switch (ITEM_TYPE(itemq)) {
		case XFS_LI_BUF:
		case XFS_LI_6_1_BUF:
		case XFS_LI_5_3_BUF:
			xlog_recover_insert_item_frontq(&trans->r_itemq, itemq);
			break;
		case XFS_LI_INODE:
		case XFS_LI_6_1_INODE:
		case XFS_LI_5_3_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
			break;
		default:
			xlog_warn(
	"XFS: xlog_recover_reorder_trans: unrecognized type of log operation");
			ASSERT(0);
			return XFS_ERROR(EIO);
		}
		itemq = itemq_next;
	} while (first_item != itemq);
	return 0;
}
/*
* Build up the table of buf cancel records so that we don't replay
...
...
@@ -1485,17 +1574,18 @@ xlog_recover_reorder_trans(xlog_t *log,
* record during the second pass.
*/
STATIC void
xlog_recover_do_buffer_pass1(
	xlog_t			*log,
	xfs_buf_log_format_t	*buf_f)
{
	xfs_buf_cancel_t	*bcp;
	xfs_buf_cancel_t	*nextp;
	xfs_buf_cancel_t	*prevp;
	xfs_buf_cancel_t	**bucket;
	xfs_buf_log_format_v1_t	*obuf_f;
	xfs_daddr_t		blkno = 0;
	uint			len = 0;
	ushort			flags = 0;

	switch (buf_f->blf_type) {
	case XFS_LI_BUF:
...
...
@@ -1515,9 +1605,8 @@ xlog_recover_do_buffer_pass1(xlog_t *log,
	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(flags & XFS_BLI_CANCEL))
		return;
/*
* Insert an xfs_buf_cancel record into the hash table of
...
...
@@ -1531,8 +1620,8 @@ xlog_recover_do_buffer_pass1(xlog_t *log,
	 * the bucket.
	 */
	if (*bucket == NULL) {
		bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
						     KM_SLEEP);
		bcp->bc_blkno = blkno;
		bcp->bc_len = len;
		bcp->bc_refcount = 1;
...
...
@@ -1557,8 +1646,8 @@ xlog_recover_do_buffer_pass1(xlog_t *log,
		nextp = nextp->bc_next;
	}
	ASSERT(prevp != NULL);
	bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
					     KM_SLEEP);
	bcp->bc_blkno = blkno;
	bcp->bc_len = len;
	bcp->bc_refcount = 1;
...
...
@@ -1580,17 +1669,17 @@ xlog_recover_do_buffer_pass1(xlog_t *log,
* made at that point.
*/
STATIC int
xlog_recover_do_buffer_pass2(
	xlog_t			*log,
	xfs_buf_log_format_t	*buf_f)
{
	xfs_buf_cancel_t	*bcp;
	xfs_buf_cancel_t	*prevp;
	xfs_buf_cancel_t	**bucket;
	xfs_buf_log_format_v1_t	*obuf_f;
	xfs_daddr_t		blkno = 0;
	ushort			flags = 0;
	uint			len = 0;

	switch (buf_f->blf_type) {
	case XFS_LI_BUF:
...
...
@@ -1667,7 +1756,6 @@ xlog_recover_do_buffer_pass2(xlog_t *log,
	return 0;
}
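Pass 1 above collects the (block number, length) pairs of cancelled buffers into a small hash table so that pass 2 can refuse to replay them. A hedged user-space sketch of the lookup shape (hypothetical names and table size, not the kernel structures):

#include <stddef.h>

#define TOY_BC_TABLE_SIZE	64

struct toy_buf_cancel {
	long			bc_blkno;
	unsigned int		bc_len;
	int			bc_refcount;
	struct toy_buf_cancel	*bc_next;
};

static struct toy_buf_cancel *toy_table[TOY_BC_TABLE_SIZE];

/* Returns nonzero when a matching cancel record exists, i.e. the
 * buffer's log entry must be skipped rather than replayed.
 */
static int
toy_buffer_cancelled(long blkno, unsigned int len)
{
	struct toy_buf_cancel *bcp;

	bcp = toy_table[blkno % TOY_BC_TABLE_SIZE];
	for (; bcp != NULL; bcp = bcp->bc_next)
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return 1;
	return 0;
}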
/*
* Perform recovery for a buffer full of inodes. In these buffers,
* the only data which should be recovered is that which corresponds
...
...
@@ -1682,10 +1770,11 @@ xlog_recover_do_buffer_pass2(xlog_t *log,
* sent to xlog_recover_do_reg_buffer() below during recovery.
*/
STATIC int
xlog_recover_do_inode_buffer(
	xfs_mount_t		*mp,
	xlog_recover_item_t	*item,
	xfs_buf_t		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			item_index;
...
...
@@ -1698,8 +1787,8 @@ xlog_recover_do_inode_buffer(xfs_mount_t *mp,
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;
	xfs_buf_log_format_v1_t	*obuf_f;
	unsigned int		*data_map = NULL;
	unsigned int		map_size = 0;

	switch (buf_f->blf_type) {
	case XFS_LI_BUF:
...
...
@@ -1790,7 +1879,7 @@ xlog_recover_do_inode_buffer(xfs_mount_t *mp,
	}
	return 0;
}
/*
* Perform a 'normal' buffer recovery. Each logged region of the
...
...
@@ -1800,17 +1889,18 @@ xlog_recover_do_inode_buffer(xfs_mount_t *mp,
*/
/*ARGSUSED*/
STATIC void
xlog_recover_do_reg_buffer(
	xfs_mount_t		*mp,
	xlog_recover_item_t	*item,
	xfs_buf_t		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			bit;
	int			nbits;
	xfs_buf_log_format_v1_t	*obuf_f;
	unsigned int		*data_map = NULL;
	unsigned int		map_size = 0;
	int			error;

	switch (buf_f->blf_type) {
...
...
@@ -1860,7 +1950,7 @@ xlog_recover_do_reg_buffer(xfs_mount_t *mp,
	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);
}
/*
* Do some primitive error checking on ondisk dquot data structures.
...
...
@@ -1991,7 +2081,7 @@ xlog_recover_do_dquot_buffer(
	xfs_buf_t		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	uint			type;
/*
* Filesystems are required to send in quota flags at mount time.
...
...
@@ -2038,9 +2128,10 @@ xlog_recover_do_dquot_buffer(
* for more details on the implementation of the table of cancel records.
*/
STATIC int
xlog_recover_do_buffer_trans(
	xlog_t			*log,
	xlog_recover_item_t	*item,
	int			pass)
{
	xfs_buf_log_format_t	*buf_f;
	xfs_buf_log_format_v1_t	*obuf_f;
...
...
@@ -2075,19 +2166,19 @@ xlog_recover_do_buffer_trans(xlog_t *log,
		}
	}

	switch (buf_f->blf_type) {
	case XFS_LI_BUF:
		blkno = buf_f->blf_blkno;
		len = buf_f->blf_len;
		flags = buf_f->blf_flags;
		break;
	case XFS_LI_6_1_BUF:
	case XFS_LI_5_3_BUF:
		obuf_f = (xfs_buf_log_format_v1_t*)buf_f;
		blkno = obuf_f->blf_blkno;
		len = obuf_f->blf_len;
		flags = obuf_f->blf_flags;
		break;
	default:
		xfs_fs_cmn_err(CE_ALERT, log->l_mp,
			"xfs_log_recover: unknown buffer type 0x%x, dev 0x%x",
			buf_f->blf_type, log->l_dev);
...
...
@@ -2152,12 +2243,13 @@ xlog_recover_do_buffer_trans(xlog_t *log,
	}
	return (error);
}
STATIC int
xlog_recover_do_inode_trans(
	xlog_t			*log,
	xlog_recover_item_t	*item,
	int			pass)
{
	xfs_inode_log_format_t	*in_f;
	xfs_mount_t		*mp;
...
...
@@ -2377,7 +2469,6 @@ xlog_recover_do_inode_trans(xlog_t *log,
		}
	}

write_inode_buffer:
	if (ITEM_TYPE(item) == XFS_LI_INODE) {
		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL ||
...
...
@@ -2391,8 +2482,7 @@ xlog_recover_do_inode_trans(xlog_t *log,
	}
	return (error);
}
/*
* Recover QUOTAOFF records. We simply make a note of it in the xlog_t
...
...
@@ -2400,11 +2490,12 @@ xlog_recover_do_inode_trans(xlog_t *log,
* of that type.
*/
STATIC int
xlog_recover_do_quotaoff_trans(
	xlog_t			*log,
	xlog_recover_item_t	*item,
	int			pass)
{
	xfs_qoff_logformat_t	*qoff_f;
	if (pass == XLOG_RECOVER_PASS2) {
		return (0);
...
@@ -2425,14 +2516,14 @@ xlog_recover_do_quotaoff_trans(xlog_t *log,
return
(
0
);
}
/*
* Recover a dquot record
*/
STATIC
int
xlog_recover_do_dquot_trans
(
xlog_t
*
log
,
xlog_recover_item_t
*
item
,
int
pass
)
xlog_recover_do_dquot_trans
(
xlog_t
*
log
,
xlog_recover_item_t
*
item
,
int
pass
)
{
xfs_mount_t
*
mp
;
xfs_buf_t
*
bp
;
...
...
@@ -2516,7 +2607,7 @@ xlog_recover_do_dquot_trans(xlog_t *log,
	xfs_bdwrite(mp, bp);
	return (0);
}
/*
* This routine is called to create an in-core extent free intent
...
...
@@ -2526,10 +2617,11 @@ xlog_recover_do_dquot_trans(xlog_t *log,
* LSN.
*/
STATIC void
xlog_recover_do_efi_trans(
	xlog_t			*log,
	xlog_recover_item_t	*item,
	xfs_lsn_t		lsn,
	int			pass)
{
	xfs_mount_t		*mp;
	xfs_efi_log_item_t	*efip;
...
...
@@ -2558,7 +2650,7 @@ xlog_recover_do_efi_trans(xlog_t *log,
	 * xfs_trans_update_ail() drops the AIL lock.
	 */
	xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn, s);
}
/*
...
...
@@ -2570,13 +2662,14 @@ xlog_recover_do_efi_trans(xlog_t *log,
* AIL and free it.
*/
STATIC void
xlog_recover_do_efd_trans(
	xlog_t			*log,
	xlog_recover_item_t	*item,
	int			pass)
{
	xfs_mount_t		*mp;
	xfs_efd_log_format_t	*efd_formatp;
	xfs_efi_log_item_t	*efip = NULL;
	xfs_log_item_t		*lip;
	int			gen;
	int			nexts;
...
...
@@ -2629,9 +2722,9 @@ xlog_recover_do_efd_trans(xlog_t *log,
				((nexts - 1) * sizeof(xfs_extent_t)));
			} else {
				kmem_zone_free(xfs_efi_zone, efip);
			}
		}
	}
}
/*
* Perform the transaction
...
...
@@ -2640,12 +2733,13 @@ xlog_recover_do_efd_trans(xlog_t *log,
* EFIs and EFDs get queued up by adding entries into the AIL for them.
*/
STATIC int
xlog_recover_do_trans(
	xlog_t			*log,
	xlog_recover_t		*trans,
	int			pass)
{
	int			error = 0;
	xlog_recover_item_t	*item, *first_item;
	if ((error = xlog_recover_reorder_trans(log, trans)))
		return error;
...
...
@@ -2695,8 +2789,7 @@ xlog_recover_do_trans(xlog_t *log,
	} while (first_item != item);
	return error;
}
/*
* Free up any resources allocated by the transaction
...
...
@@ -2704,10 +2797,11 @@ xlog_recover_do_trans(xlog_t *log,
* Remember that EFIs, EFDs, and IUNLINKs are handled later.
*/
STATIC void
xlog_recover_free_trans(
	xlog_recover_t		*trans)
{
	xlog_recover_item_t	*first_item, *item, *free_item;
	int			i;

	item = first_item = trans->r_itemq;
	do {
...
...
@@ -2725,16 +2819,16 @@ xlog_recover_free_trans(xlog_recover_t *trans)
	} while (first_item != item);
	/* Free the transaction recover structure */
	kmem_free(trans, sizeof(xlog_recover_t));
}
STATIC int
xlog_recover_commit_trans(
	xlog_t			*log,
	xlog_recover_t		**q,
	xlog_recover_t		*trans,
	int			pass)
{
	int			error;
	if ((error = xlog_recover_unlink_tid(q, trans)))
		return error;
...
@@ -2742,18 +2836,16 @@ xlog_recover_commit_trans(xlog_t *log,
return
error
;
xlog_recover_free_trans
(
trans
);
/* no error */
return
0
;
}
/* xlog_recover_commit_trans */
}
/*ARGSUSED*/
STATIC
int
xlog_recover_unmount_trans
(
xlog_recover_t
*
trans
)
xlog_recover_unmount_trans
(
xlog_recover_t
*
trans
)
{
/* Do nothing now */
xlog_warn
(
"XFS: xlog_recover_unmount_trans: Unmount LR"
);
return
(
0
);
}
/* xlog_recover_unmount_trans */
return
0
;
}
/*
* There are two valid states of the r_state field. 0 indicates that the
...
...
@@ -2765,97 +2857,101 @@ xlog_recover_unmount_trans(xlog_recover_t *trans)
* NOTE: skip LRs with 0 data length.
*/
STATIC int
xlog_recover_process_data(
	xlog_t			*log,
	xlog_recover_t		*rhash[],
	xlog_rec_header_t	*rhead,
	xfs_caddr_t		dp,
	int			pass)
{
	xfs_caddr_t		lp;
	int			num_logops;
	xlog_op_header_t	*ohead;
	xlog_recover_t		*trans;
	xlog_tid_t		tid;
	int			error;
	unsigned long		hash;
	uint			flags;

	lp = dp + INT_GET(rhead->h_len, ARCH_CONVERT);
	num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT);

	/* check the log format matches our own - else we can't recover */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return (XFS_ERROR(EIO));

	while ((dp < lp) && num_logops) {
		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
		ohead = (xlog_op_header_t *)dp;
		dp += sizeof(xlog_op_header_t);
		if (ohead->oh_clientid != XFS_TRANSACTION &&
		    ohead->oh_clientid != XFS_LOG) {
			xlog_warn(
		"XFS: xlog_recover_process_data: bad clientid");
			ASSERT(0);
			return (XFS_ERROR(EIO));
		}
		tid = INT_GET(ohead->oh_tid, ARCH_CONVERT);
		hash = XLOG_RHASH(tid);
		trans = xlog_recover_find_tid(rhash[hash], tid);
		if (trans == NULL) {		   /* not found; add new tid */
			if (ohead->oh_flags & XLOG_START_TRANS)
				xlog_recover_new_tid(&rhash[hash], tid,
					INT_GET(rhead->h_lsn, ARCH_CONVERT));
		} else {
			ASSERT(dp + INT_GET(ohead->oh_len, ARCH_CONVERT) <= lp);
			flags = ohead->oh_flags & ~XLOG_END_TRANS;
			if (flags & XLOG_WAS_CONT_TRANS)
				flags &= ~XLOG_CONTINUE_TRANS;
			switch (flags) {
			case XLOG_COMMIT_TRANS:
				error = xlog_recover_commit_trans(log,
						&rhash[hash], trans, pass);
				break;
			case XLOG_UNMOUNT_TRANS:
				error = xlog_recover_unmount_trans(trans);
				break;
			case XLOG_WAS_CONT_TRANS:
				error = xlog_recover_add_to_cont_trans(trans,
						dp, INT_GET(ohead->oh_len,
							ARCH_CONVERT));
				break;
			case XLOG_START_TRANS:
				xlog_warn(
			"XFS: xlog_recover_process_data: bad transaction");
				ASSERT(0);
				error = XFS_ERROR(EIO);
				break;
			case 0:
			case XLOG_CONTINUE_TRANS:
				error = xlog_recover_add_to_trans(trans,
						dp, INT_GET(ohead->oh_len,
							ARCH_CONVERT));
				break;
			default:
				xlog_warn(
			"XFS: xlog_recover_process_data: bad flag");
				ASSERT(0);
				error = XFS_ERROR(EIO);
				break;
			}
			if (error)
				return error;
		}
		dp += INT_GET(ohead->oh_len, ARCH_CONVERT);
		num_logops--;
	}
	return 0;
}
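Each record body processed above is laid out as a sequence of operation headers, each followed by its payload, and the loop steps through them by length. A toy walk over the same layout (user-space, hypothetical stand-in types):

#include <stdio.h>

struct toy_op_header {
	unsigned int	oh_len;		/* payload bytes after the header */
	unsigned char	oh_flags;
};

static void
toy_walk_ops(const char *dp, const char *lp, int num_logops)
{
	while (dp < lp && num_logops) {
		const struct toy_op_header *ohead =
			(const struct toy_op_header *)dp;

		dp += sizeof(*ohead);		/* step past the header */
		printf("op: flags=0x%x len=%u\n",
			ohead->oh_flags, ohead->oh_len);
		dp += ohead->oh_len;		/* step past the payload */
		num_logops--;
	}
}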
/*
* Process an extent free intent item that was recovered from
* the log. We need to free the extents that it describes.
*/
STATIC void
xlog_recover_process_efi(
	xfs_mount_t		*mp,
	xfs_efi_log_item_t	*efip)
{
	xfs_efd_log_item_t	*efdp;
	xfs_trans_t		*tp;
...
...
@@ -2900,8 +2996,7 @@ xlog_recover_process_efi(xfs_mount_t *mp,
	efip->efi_flags |= XFS_EFI_RECOVERED;
	xfs_trans_commit(tp, 0, NULL);
}
/*
* Verify that once we've encountered something other than an EFI
...
...
@@ -2909,13 +3004,13 @@ xlog_recover_process_efi(xfs_mount_t *mp,
 */
#if defined(DEBUG)
STATIC void
xlog_recover_check_ail(
	xfs_mount_t		*mp,
	xfs_log_item_t		*lip,
	int			gen)
{
	int			orig_gen = gen;

	do {
		ASSERT(lip->li_type != XFS_LI_EFI);
		lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
...
...
@@ -2930,7 +3025,6 @@ xlog_recover_check_ail(xfs_mount_t *mp,
}
#endif	/* DEBUG */
/*
* When this is called, all of the EFIs which did not have
* corresponding EFDs should be in the AIL. What we do now
...
...
@@ -2950,7 +3044,8 @@ xlog_recover_check_ail(xfs_mount_t *mp,
* we see something other than an EFI in the AIL.
*/
STATIC void
xlog_recover_process_efis(
	xlog_t			*log)
{
	xfs_log_item_t		*lip;
	xfs_efi_log_item_t	*efip;
...
...
@@ -2986,8 +3081,7 @@ xlog_recover_process_efis(xlog_t *log)
		lip = xfs_trans_next_ail(mp, lip, &gen, NULL);
	}
	AIL_UNLOCK(mp, s);
}
/*
* This routine performs a transaction to null out a bad inode pointer
...
...
@@ -3030,8 +3124,7 @@ xlog_recover_clear_agi_bucket(
		  (offset + sizeof(xfs_agino_t) - 1));
	(void) xfs_trans_commit(tp, 0, NULL);
}
/*
* xlog_iunlink_recover
...
...
@@ -3046,7 +3139,8 @@ xlog_recover_clear_agi_bucket(
* atomic.
*/
void
xlog_recover_process_iunlinks(
	xlog_t		*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
...
...
@@ -3188,40 +3282,47 @@ xlog_recover_process_iunlinks(xlog_t *log)
	}
	mp->m_dmevmask = mp_dmevmask;
}
#ifdef DEBUG
STATIC void
xlog_pack_data_checksum(
	xlog_t		*log,
	xlog_in_core_t	*iclog,
	int		size)
{
	int		i;
	uint		*up;
	uint		chksum = 0;

	up = (uint *)iclog->ic_datap;
	/* divide length by 4 to get # words */
	for (i = 0; i < (size >> 2); i++) {
		chksum ^= INT_GET(*up, ARCH_CONVERT);
		up++;
	}
	INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum);
}
#else
#define xlog_pack_data_checksum(log, iclog, size)
#endif

/*
 * Stamp cycle number in every block
 */
void
xlog_pack_data(
	xlog_t			*log,
	xlog_in_core_t		*iclog)
{
	int			i, j, k;
	int			size = iclog->ic_offset + iclog->ic_roundoff;
	uint			cycle_lsn;
	xfs_caddr_t		dp;
	xlog_in_core_2_t	*xhdr;

	xlog_pack_data_checksum(log, iclog, size);

	cycle_lsn = CYCLE_LSN_NOCONV(iclog->ic_header.h_lsn, ARCH_CONVERT);
...
...
@@ -3234,7 +3335,7 @@ xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog)
	}

	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		xhdr = (xlog_in_core_2_t *)&iclog->ic_header;
		for ( ; i < BTOBB(size); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
...
...
@@ -3247,45 +3348,18 @@ xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog)
			xhdr[i].hic_xheader.xh_cycle = cycle_lsn;
		}
	}
}

#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
STATIC void
xlog_unpack_data_checksum(
	xlog_rec_header_t	*rhead,
	xfs_caddr_t		dp,
	xlog_t			*log)
{
	uint			*up = (uint *)dp;
	uint			chksum = 0;
	int			i;

	/* divide length by 4 to get # words */
	for (i = 0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) {
		chksum ^= INT_GET(*up, ARCH_CONVERT);
...
...
@@ -3306,9 +3380,38 @@ xlog_unpack_data(xlog_rec_header_t *rhead,
		    log->l_flags |= XLOG_CHKSUM_MISMATCH;
	    }
	}
}
#else
#define xlog_unpack_data_checksum(rhead, dp, log)
#endif
STATIC void
xlog_unpack_data(
	xlog_rec_header_t	*rhead,
	xfs_caddr_t		dp,
	xlog_t			*log)
{
	int			i, j, k;
	xlog_in_core_2_t	*xhdr;

	for (i = 0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(uint *)dp = *(uint *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}

	xlog_unpack_data_checksum(rhead, dp, log);
}
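Packing replaces the first word of every 512-byte block with the record's cycle number (saving the original words in the header's h_cycle_data array) so a torn write leaves a detectable cycle mismatch; unpacking restores them. A toy round-trip of that idea (user space, hypothetical stand-in types):

#include <string.h>

#define BBSIZE	512
#define NBLKS	4

struct toy_rec_header {
	unsigned int	h_cycle;
	unsigned int	h_cycle_data[NBLKS];	/* saved first words */
};

static void
toy_pack(struct toy_rec_header *hdr, char *data, int nblks)
{
	int i;

	for (i = 0; i < nblks; i++) {
		char *blk = data + i * BBSIZE;

		memcpy(&hdr->h_cycle_data[i], blk, sizeof(unsigned int));
		memcpy(blk, &hdr->h_cycle, sizeof(unsigned int));
	}
}

static void
toy_unpack(const struct toy_rec_header *hdr, char *data, int nblks)
{
	int i;

	for (i = 0; i < nblks; i++)
		memcpy(data + i * BBSIZE, &hdr->h_cycle_data[i],
		       sizeof(unsigned int));
}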
/*
* Read the log from tail to head and process the log records found.
...
...
@@ -3319,223 +3422,294 @@ xlog_unpack_data(xlog_rec_header_t *rhead,
* here.
*/
STATIC int
xlog_do_recovery_pass(
	xlog_t			*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass)
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no;
	xfs_caddr_t		bufaddr, offset;
	xfs_buf_t		*hbp, *dbp;
	int			error = 0, h_size;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	xlog_recover_t		*rhash[XLOG_RHASH_SIZE];

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size.  Use this to tell how many sectors make up the log header.
	 */
	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it.  Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_get_bp(log, 1);
		if (!hbp)
			return ENOMEM;
		if ((error = xlog_bread(log, tail_blk, 1, hbp)))
			goto bread_err1;
		offset = xlog_align(log, tail_blk, 1, hbp);
		rhead = (xlog_rec_header_t *)offset;
		ASSERT(INT_GET(rhead->h_magicno, ARCH_CONVERT) ==
			XLOG_HEADER_MAGIC_NUM);
		if ((INT_GET(rhead->h_version, ARCH_CONVERT) &
				(~XLOG_VERSION_OKBITS)) != 0) {
			xlog_warn(
		"XFS: xlog_do_recovery_pass: unrecognised log version number.");
			error = XFS_ERROR(EIO);
			goto bread_err1;
		}
		h_size = INT_GET(rhead->h_size, ARCH_CONVERT);

		if ((INT_GET(rhead->h_version, ARCH_CONVERT)
				& XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			xlog_put_bp(hbp);
			hbp = xlog_get_bp(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		ASSERT(log->l_sectbb_log == 0);
		hblks = 1;
		hbp = xlog_get_bp(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return ENOMEM;
	dbp = xlog_get_bp(log, BTOBB(h_size));
	if (!dbp) {
		xlog_put_bp(hbp);
		return ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
	if (tail_blk <= head_blk) {
		for (blk_no = tail_blk; blk_no < head_blk; ) {
			if ((error = xlog_bread(log, blk_no, hblks, hbp)))
				goto bread_err2;
			offset = xlog_align(log, blk_no, hblks, hbp);
			rhead = (xlog_rec_header_t *)offset;
			ASSERT(INT_GET(rhead->h_magicno, ARCH_CONVERT) ==
				XLOG_HEADER_MAGIC_NUM);
			ASSERT(BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) <=
				INT_MAX));
			/* blocks in data section */
			bblks = (int)BTOBB(INT_GET(rhead->h_len,
					ARCH_CONVERT));
			if (unlikely(
			    (INT_GET(rhead->h_magicno, ARCH_CONVERT) !=
					XLOG_HEADER_MAGIC_NUM) ||
			    (BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)
					> INT_MAX)) ||
			    (bblks <= 0) ||
			    (blk_no > log->l_logBBsize))) {
				XFS_ERROR_REPORT("xlog_do_recovery_pass(1)",
						XFS_ERRLEVEL_LOW, log->l_mp);
				error = EFSCORRUPTED;
				goto bread_err2;
			}

			if ((INT_GET(rhead->h_version, ARCH_CONVERT) &
					(~XLOG_VERSION_OKBITS)) != 0) {
				xlog_warn(
		"XFS: xlog_do_recovery_pass: unrecognised log version number.");
				error = XFS_ERROR(EIO);
				goto bread_err2;
			}

			/* blocks in data section */
			bblks = (int)BTOBB(INT_GET(rhead->h_len,
					ARCH_CONVERT));
			if (bblks > 0) {
				if ((error = xlog_bread(log, blk_no + hblks,
							bblks, dbp)))
					goto bread_err2;
				offset = xlog_align(log, blk_no + hblks,
							bblks, dbp);
				xlog_unpack_data(rhead, offset, log);
				if ((error = xlog_recover_process_data(log,
							rhash, rhead, offset,
							pass)))
					goto bread_err2;
			}
			blk_no += (bblks + hblks);
		}
	} else {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery as above.
		 */
		blk_no = tail_blk;
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				if ((error = xlog_bread(log, blk_no, hblks,
							hbp)))
					goto bread_err2;
				offset = xlog_align(log, blk_no, hblks, hbp);
			} else {
				/* This LR is split across physical log end */
				offset = NULL;
				split_hblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize -
							(int)blk_no;
					ASSERT(split_hblks > 0);
					if ((error = xlog_bread(log, blk_no,
							split_hblks, hbp)))
						goto bread_err2;
					offset = xlog_align(log, blk_no,
							split_hblks, hbp);
				}
				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				bufaddr = XFS_BUF_PTR(hbp);
				XFS_BUF_SET_PTR(hbp,
						bufaddr + BBTOB(split_hblks),
						BBTOB(hblks - split_hblks));
				wrapped_hblks = hblks - split_hblks;
				if ((error = xlog_bread(log, 0, wrapped_hblks,
							hbp)))
					goto bread_err2;
				XFS_BUF_SET_PTR(hbp, bufaddr, hblks);
				if (!offset)
					offset = xlog_align(log, 0,
							wrapped_hblks, hbp);
			}
			rhead = (xlog_rec_header_t *)offset;
			ASSERT(INT_GET(rhead->h_magicno, ARCH_CONVERT) ==
				XLOG_HEADER_MAGIC_NUM);
			ASSERT(BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT) <=
				INT_MAX));
			bblks = (int)BTOBB(INT_GET(rhead->h_len,
					ARCH_CONVERT));

			/* LR body must have data or it wouldn't have been
			 * written */
			ASSERT(bblks > 0);
			blk_no += hblks;	/* successfully read header */

			if (unlikely(
			    (INT_GET(rhead->h_magicno, ARCH_CONVERT) !=
					XLOG_HEADER_MAGIC_NUM) ||
			    (BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)
					> INT_MAX)) ||
			    (bblks <= 0))) {
				XFS_ERROR_REPORT("xlog_do_recovery_pass(2)",
						XFS_ERRLEVEL_LOW, log->l_mp);
				error = EFSCORRUPTED;
				goto bread_err2;
			}

			/* Read in data for log record */
			if (blk_no + bblks <= log->l_logBBsize) {
				if ((error = xlog_bread(log, blk_no, bblks,
							dbp)))
					goto bread_err2;
				offset = xlog_align(log, blk_no, bblks, dbp);
}
else
{
/* This log record is split across the
* physical end of log */
offset
=
NULL
;
split_bblks
=
0
;
if
(
blk_no
!=
log
->
l_logBBsize
)
{
/* some data is before the physical
* end of log */
ASSERT
(
!
wrapped_hblks
);
ASSERT
(
blk_no
<=
INT_MAX
);
split_bblks
=
log
->
l_logBBsize
-
(
int
)
blk_no
;
ASSERT
(
split_bblks
>
0
);
if
((
error
=
xlog_bread
(
log
,
blk_no
,
split_bblks
,
dbp
)))
goto
bread_err2
;
offset
=
xlog_align
(
log
,
blk_no
,
split_bblks
,
dbp
);
}
/*
* Note: this black magic still works with
* large sector sizes (non-512) only because:
* - we increased the buffer size originally
* by 1 sector giving us enough extra space
* for the second read;
* - the log start is guaranteed to be sector
* aligned;
* - we read the log end (LR header start)
* _first_, then the log start (LR header end)
* - order is important.
*/
bufaddr
=
XFS_BUF_PTR
(
dbp
);
XFS_BUF_SET_PTR
(
dbp
,
bufaddr
+
BBTOB
(
split_bblks
),
BBTOB
(
bblks
-
split_bblks
));
if
((
error
=
xlog_bread
(
log
,
wrapped_hblks
,
bblks
-
split_bblks
,
dbp
)))
goto
bread_err2
;
XFS_BUF_SET_PTR
(
dbp
,
bufaddr
,
XLOG_BIG_RECORD_BSIZE
);
if
(
!
offset
)
offset
=
xlog_align
(
log
,
wrapped_hblks
,
bblks
-
split_bblks
,
dbp
);
}
xlog_unpack_data
(
rhead
,
offset
,
log
);
if
((
error
=
xlog_recover_process_data
(
log
,
rhash
,
rhead
,
offset
,
pass
)))
goto
bread_err2
;
blk_no
+=
bblks
;
}
bufaddr
=
XFS_BUF_PTR
(
hbp
);
XFS_BUF_SET_PTR
(
hbp
,
bufaddr
+
BBTOB
(
split_hblks
),
BBTOB
(
hblks
-
split_hblks
));
wrapped_hblks
=
hblks
-
split_hblks
;
if
((
error
=
xlog_bread
(
log
,
0
,
wrapped_hblks
,
hbp
)))
goto
bread_err2
;
XFS_BUF_SET_PTR
(
hbp
,
bufaddr
,
hblks
);
}
rhead
=
(
xlog_rec_header_t
*
)
XFS_BUF_PTR
(
hbp
);
ASSERT
(
INT_GET
(
rhead
->
h_magicno
,
ARCH_CONVERT
)
==
XLOG_HEADER_MAGIC_NUM
);
ASSERT
(
BTOBB
(
INT_GET
(
rhead
->
h_len
,
ARCH_CONVERT
)
<=
INT_MAX
));
bblks
=
(
int
)
BTOBB
(
INT_GET
(
rhead
->
h_len
,
ARCH_CONVERT
));
/* LR body must have data or it wouldn't have been written */
ASSERT
(
bblks
>
0
);
blk_no
+=
hblks
;
/* successfully read header */
if
(
unlikely
((
INT_GET
(
rhead
->
h_magicno
,
ARCH_CONVERT
)
!=
XLOG_HEADER_MAGIC_NUM
)
||
(
BTOBB
(
INT_GET
(
rhead
->
h_len
,
ARCH_CONVERT
)
>
INT_MAX
))
||
(
bblks
<=
0
)))
{
XFS_ERROR_REPORT
(
"xlog_do_recovery_pass(2)"
,
XFS_ERRLEVEL_LOW
,
log
->
l_mp
);
error
=
EFSCORRUPTED
;
goto
bread_err2
;
}
/* Read in data for log record */
if
(
blk_no
+
bblks
<=
log
->
l_logBBsize
)
{
if
((
error
=
xlog_bread
(
log
,
blk_no
,
bblks
,
dbp
)))
goto
bread_err2
;
}
else
{
/* This log record is split across physical end of log */
split_bblks
=
0
;
if
(
blk_no
!=
log
->
l_logBBsize
)
{
/* some data is before physical end of log */
ASSERT
(
blk_no
<=
INT_MAX
);
split_bblks
=
log
->
l_logBBsize
-
(
int
)
blk_no
;
ASSERT
(
split_bblks
>
0
);
if
((
error
=
xlog_bread
(
log
,
blk_no
,
split_bblks
,
dbp
)))
goto
bread_err2
;
ASSERT
(
blk_no
>=
log
->
l_logBBsize
);
blk_no
-=
log
->
l_logBBsize
;
/* read first part of physical log */
while
(
blk_no
<
head_blk
)
{
if
((
error
=
xlog_bread
(
log
,
blk_no
,
hblks
,
hbp
)))
goto
bread_err2
;
offset
=
xlog_align
(
log
,
blk_no
,
hblks
,
hbp
);
rhead
=
(
xlog_rec_header_t
*
)
offset
;
ASSERT
(
INT_GET
(
rhead
->
h_magicno
,
ARCH_CONVERT
)
==
XLOG_HEADER_MAGIC_NUM
);
ASSERT
(
BTOBB
(
INT_GET
(
rhead
->
h_len
,
ARCH_CONVERT
)
<=
INT_MAX
));
bblks
=
(
int
)
BTOBB
(
INT_GET
(
rhead
->
h_len
,
ARCH_CONVERT
));
ASSERT
(
bblks
>
0
);
if
((
error
=
xlog_bread
(
log
,
blk_no
+
hblks
,
bblks
,
dbp
)))
goto
bread_err2
;
offset
=
xlog_align
(
log
,
blk_no
+
hblks
,
bblks
,
dbp
);
xlog_unpack_data
(
rhead
,
offset
,
log
);
if
((
error
=
xlog_recover_process_data
(
log
,
rhash
,
rhead
,
offset
,
pass
)))
goto
bread_err2
;
blk_no
+=
(
bblks
+
hblks
);
}
bufaddr
=
XFS_BUF_PTR
(
dbp
);
XFS_BUF_SET_PTR
(
dbp
,
bufaddr
+
BBTOB
(
split_bblks
),
BBTOB
(
bblks
-
split_bblks
));
if
((
error
=
xlog_bread
(
log
,
wrapped_hblks
,
bblks
-
split_bblks
,
dbp
)))
goto
bread_err2
;
XFS_BUF_SET_PTR
(
dbp
,
bufaddr
,
XLOG_BIG_RECORD_BSIZE
);
}
xlog_unpack_data
(
rhead
,
XFS_BUF_PTR
(
dbp
),
log
);
if
((
error
=
xlog_recover_process_data
(
log
,
rhash
,
rhead
,
XFS_BUF_PTR
(
dbp
),
pass
)))
goto
bread_err2
;
blk_no
+=
bblks
;
}
ASSERT
(
blk_no
>=
log
->
l_logBBsize
);
blk_no
-=
log
->
l_logBBsize
;
/* read first part of physical log */
while
(
blk_no
<
head_blk
)
{
if
((
error
=
xlog_bread
(
log
,
blk_no
,
hblks
,
hbp
)))
goto
bread_err2
;
rhead
=
(
xlog_rec_header_t
*
)
XFS_BUF_PTR
(
hbp
);
ASSERT
(
INT_GET
(
rhead
->
h_magicno
,
ARCH_CONVERT
)
==
XLOG_HEADER_MAGIC_NUM
);
ASSERT
(
BTOBB
(
INT_GET
(
rhead
->
h_len
,
ARCH_CONVERT
)
<=
INT_MAX
));
bblks
=
(
int
)
BTOBB
(
INT_GET
(
rhead
->
h_len
,
ARCH_CONVERT
));
ASSERT
(
bblks
>
0
);
if
((
error
=
xlog_bread
(
log
,
blk_no
+
hblks
,
bblks
,
dbp
)))
goto
bread_err2
;
xlog_unpack_data
(
rhead
,
XFS_BUF_PTR
(
dbp
),
log
);
if
((
error
=
xlog_recover_process_data
(
log
,
rhash
,
rhead
,
XFS_BUF_PTR
(
dbp
),
pass
)))
goto
bread_err2
;
blk_no
+=
(
bblks
+
hblks
);
}
}
bread_err2:
xlog_put_bp
(
dbp
);
bread_err1:
xlog_put_bp
(
hbp
);
return
error
;
}
bread_err2:
xlog_put_bp
(
dbp
);
bread_err1:
xlog_put_bp
(
hbp
);
return
error
;
}
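The wrap-around branch above is the heart of this function: a log record that straddles the physical end of the circular log is assembled with two reads into one buffer, end-of-log bytes first, then the wrapped remainder from block 0. Below is a minimal standalone sketch of the same technique; the types, names, and memcpy-backed reader are hypothetical stand-ins, not the kernel buffer API.

#include <string.h>

/* Hypothetical circular-log reader -- illustrates the idea only. */
typedef struct {
	unsigned char	*disk;		/* backing store for the log */
	int		size_bb;	/* log size in 512-byte basic blocks */
} clog_t;

#define SK_BBTOB(bb)	((bb) << 9)	/* basic blocks to bytes */

/* Read nbb blocks starting at blk into buf (no wrapping). */
static void
clog_bread(clog_t *log, int blk, int nbb, unsigned char *buf)
{
	memcpy(buf, log->disk + SK_BBTOB(blk), SK_BBTOB(nbb));
}

/*
 * Read nbb blocks starting at blk, wrapping at the physical end of
 * the log: read the tail-end piece first, then the remainder from
 * block 0, so buf ends up holding one contiguous record -- the same
 * ordering the "black magic" comment above insists on.
 */
static void
clog_bread_wrapped(clog_t *log, int blk, int nbb, unsigned char *buf)
{
	int split = log->size_bb - blk;		/* blocks before the end */

	if (nbb <= split) {			/* record does not wrap */
		clog_bread(log, blk, nbb, buf);
		return;
	}
	clog_bread(log, blk, split, buf);	/* end of the log first */
	clog_bread(log, 0, nbb - split, buf + SK_BBTOB(split));
}

The kernel version achieves the second read without a temporary copy by pointing the same buffer past the already-read bytes (XFS_BUF_SET_PTR) and restoring the pointer afterwards.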
/*
...
...
@@ -3552,9 +3726,10 @@ xlog_do_recovery_pass(xlog_t *log,
 * the log recovery has been completed.
 */
STATIC int
xlog_do_log_recovery(xlog_t *log, xfs_daddr_t head_blk, xfs_daddr_t tail_blk)
xlog_do_log_recovery(
	xlog_t		*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error;
#ifdef DEBUG
...
...
@@ -3599,9 +3774,10 @@ xlog_do_log_recovery(xlog_t *log,
 * Do the actual recovery
 */
STATIC int
xlog_do_recover(xlog_t *log, xfs_daddr_t head_blk, xfs_daddr_t tail_blk)
xlog_do_recover(
	xlog_t		*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error;
	xfs_buf_t	*bp;
...
...
@@ -3663,7 +3839,7 @@ xlog_do_recover(xlog_t *log,
	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}	/* xlog_do_recover */
}

/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
...
...
@@ -3671,22 +3847,18 @@ xlog_do_recover(xlog_t *log,
 * Return error or zero.
 */
int
xlog_recover(xlog_t *log, int readonly)
xlog_recover(
	xlog_t		*log,
	int		readonly)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	if ((error = xlog_find_tail(log, &head_blk, &tail_blk, readonly)))
		return error;

	if (tail_blk != head_blk) {
#ifndef __KERNEL__
		extern xfs_daddr_t HEAD_BLK, TAIL_BLK;
		head_blk = HEAD_BLK;
		tail_blk = TAIL_BLK;
#endif
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts.  note -- mount
...
...
@@ -3698,36 +3870,21 @@ xlog_recover(xlog_t *log, int readonly)
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
#ifdef __KERNEL__
		if ((error = xfs_dev_is_read_only(log->l_mp,
						  "recovery required"))) {
			return error;
		}
#else
		if (readonly) {
			return ENOSPC;
		}
#endif

#ifdef __KERNEL__
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
		cmn_err(CE_NOTE,
			"Starting XFS recovery on filesystem: %s (dev: %d/%d)",
			log->l_mp->m_fsname, MAJOR(log->l_dev),
			MINOR(log->l_dev));
#else
		cmn_err(CE_NOTE,
			"!Starting XFS recovery on filesystem: %s (dev: %d/%d)",
			log->l_mp->m_fsname, MAJOR(log->l_dev),
			MINOR(log->l_dev));
#endif
#endif
		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}	/* xlog_recover */
}

/*
 * In the first part of recovery we replay inodes and buffers and build
...
...
@@ -3739,7 +3896,9 @@ xlog_recover(xlog_t *log, int readonly)
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(xlog_t *log, int mfsi_flags)
xlog_recover_finish(
	xlog_t		*log,
	int		mfsi_flags)
{
	/*
	 * Now we're ready to do the transactions needed for the
...
...
@@ -3761,23 +3920,16 @@ xlog_recover_finish(xlog_t *log, int mfsi_flags)
			      (XFS_LOG_FORCE | XFS_LOG_SYNC));

		if ((mfsi_flags & XFS_MFSI_NOUNLINK) == 0) {
			xlog_recover_process_iunlinks(log);
		}

		xlog_recover_check_summary(log);
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
		cmn_err(CE_NOTE,
			"Ending XFS recovery on filesystem: %s (dev: %d/%d)",
			log->l_mp->m_fsname, MAJOR(log->l_dev),
			MINOR(log->l_dev));
#else
		cmn_err(CE_NOTE,
			"!Ending XFS recovery on filesystem: %s (dev: %d/%d)",
			log->l_mp->m_fsname, MAJOR(log->l_dev),
			MINOR(log->l_dev));
#endif
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		cmn_err(CE_DEBUG,
...
...
@@ -3785,7 +3937,7 @@ xlog_recover_finish(xlog_t *log, int mfsi_flags)
			log->l_mp->m_fsname);
	}
	return 0;
}	/* xlog_recover_finish */
}

#if defined(DEBUG)
...
...
@@ -3794,7 +3946,8 @@ xlog_recover_finish(xlog_t *log, int mfsi_flags)
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(xlog_t *log)
xlog_recover_check_summary(
	xlog_t		*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
...
...
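Throughout the reworked recovery pass, every xlog_bread() is now followed by xlog_align() to locate the requested blocks inside a sector-aligned buffer. The helper's body is not part of this diff; the sketch below is an assumption about what such an alignment helper computes, using made-up names that mirror the l_sectbb_log/l_sectbb_mask fields printed by xfsidbg later in this commit.

/*
 * Sketch only (hypothetical): with sectors larger than 512 bytes,
 * reads are rounded down to a sector boundary, so the caller's basic
 * blocks may start partway into the buffer.  The returned pointer is
 * the buffer start advanced by that sub-sector distance in bytes.
 */
static char *
sketch_xlog_align(int sectbb_log, int sectbb_mask, int blk_no, char *bufptr)
{
	if (!sectbb_log)			/* 512-byte sectors: no rounding */
		return bufptr;
	return bufptr + ((blk_no & sectbb_mask) << 9);	/* BBTOB */
}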
fs/xfs/xfs_mount.c
View file @
20d7d526
...
...
@@ -467,7 +467,11 @@ xfs_readsb(xfs_mount_t *mp)
	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
				BTOBB(sector_size), extra_flags);
	ASSERT(bp);
	if (!bp || XFS_BUF_ISERROR(bp)) {
		cmn_err(CE_WARN, "XFS: SB read failed");
		error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
		goto fail;
	}
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
...
...
@@ -482,9 +486,7 @@ xfs_readsb(xfs_mount_t *mp)
	error = xfs_mount_validate_sb(mp, &(mp->m_sb));
	if (error) {
		cmn_err(CE_WARN, "XFS: SB validate failed");
		XFS_BUF_UNMANAGE(bp);
		xfs_buf_relse(bp);
		return error;
		goto fail;
	}

	/*
...
...
@@ -494,9 +496,8 @@ xfs_readsb(xfs_mount_t *mp)
		cmn_err(CE_WARN,
			"XFS: device supports only %u byte sectors (not %u)",
			sector_size, mp->m_sb.sb_sectsize);
		XFS_BUF_UNMANAGE(bp);
		xfs_buf_relse(bp);
		return XFS_ERROR(ENOSYS);
		error = ENOSYS;
		goto fail;
	}

	/*
...
...
@@ -509,7 +510,11 @@ xfs_readsb(xfs_mount_t *mp)
		sector_size = mp->m_sb.sb_sectsize;
		bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
					BTOBB(sector_size), extra_flags);
		ASSERT(bp);
		if (!bp || XFS_BUF_ISERROR(bp)) {
			cmn_err(CE_WARN, "XFS: SB re-read failed");
			error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
			goto fail;
		}
		ASSERT(XFS_BUF_ISBUSY(bp));
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
	}
...
...
@@ -518,6 +523,13 @@ xfs_readsb(xfs_mount_t *mp)
	xfs_buf_relse(bp);
	ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
	return 0;

 fail:
	if (bp) {
		XFS_BUF_UNMANAGE(bp);
		xfs_buf_relse(bp);
	}
	return error;
}
...
...
@@ -546,16 +558,7 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
	mp->m_blockmask = sbp->sb_blocksize - 1;
	mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
	mp->m_blockwmask = mp->m_blockwsize - 1;
	if (XFS_SB_VERSION_HASLOGV2(sbp)) {
		if (sbp->sb_logsunit <= 1) {
			mp->m_lstripemask = 1;
		} else {
			mp->m_lstripemask =
				1 << xfs_highbit32(sbp->sb_logsunit >> BBSHIFT);
		}
	}
	INIT_LIST_HEAD(&mp->m_del_inodes);

	/*
	 * Setup for attributes, in case they get created.
...
...
@@ -601,7 +604,6 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
					sbp->sb_inopblock);
	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
}

/*
 * xfs_mountfs
 *
...
...
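The reworked xfs_readsb() above follows a probe-then-reread pattern: read the superblock at a guessed (minimum) sector size, validate it, and re-read at the device's real sector size if that turns out to be larger, with every failure path funnelled through a single fail: label that releases the buffer. A compact, self-contained sketch of the same control flow, with hypothetical helpers standing in for the buffer and validation layers:

#include <errno.h>

struct sb_sk { int sectsize; };	/* hypothetical superblock image */

/* Stubs for the sketch only -- not the XFS buffer API. */
static int dev_read(struct sb_sk *sb, int nbytes)
{
	(void)nbytes;
	sb->sectsize = 4096;	/* pretend the device has 4k sectors */
	return 0;
}
static int sb_validate(struct sb_sk *sb)
{
	return sb->sectsize ? 0 : EINVAL;
}

static int
readsb_sketch(struct sb_sk *sb, int guess_size)
{
	int error;

	/* probe with the smallest size the format allows */
	if ((error = dev_read(sb, guess_size)))
		goto fail;
	if ((error = sb_validate(sb)))
		goto fail;
	/* re-read at the size the superblock itself declares */
	if (sb->sectsize > guess_size) {
		if ((error = dev_read(sb, sb->sectsize)))
			goto fail;
	}
	return 0;
fail:
	/* single exit: one place to unmanage/release the buffer */
	return error;
}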
fs/xfs/xfs_mount.h
View file @
20d7d526
...
...
@@ -68,6 +68,7 @@ typedef struct xfs_trans_reservations {
	((xfs_agblock_t)(XFS_BB_TO_FSBT(mp, d) % (mp)->m_sb.sb_agblocks))
#else
struct cred;
struct log;
struct vfs;
struct vnode;
struct xfs_mount_args;
...
...
@@ -296,13 +297,14 @@ typedef struct xfs_mount {
	int			m_ihsize;	/* size of next field */
	struct xfs_ihash	*m_ihash;	/* fs private inode hash table*/
	struct xfs_inode	*m_inodes;	/* active inode list */
	struct list_head	m_del_inodes;	/* inodes to reclaim */
	mutex_t			m_ilock;	/* inode list mutex */
	uint			m_ireclaims;	/* count of calls to reclaim*/
	uint			m_readio_log;	/* min read size log bytes */
	uint			m_readio_blocks; /* min read size blocks */
	uint			m_writeio_log;	/* min write size log bytes */
	uint			m_writeio_blocks; /* min write size blocks */
	void			*m_log;		/* log specific stuff */
	struct log		*m_log;		/* log specific stuff */
	int			m_logbufs;	/* number of log buffers */
	int			m_logbsize;	/* size of each log buffer */
	uint			m_rsumlevels;	/* rt summary levels */
...
...
@@ -357,7 +359,6 @@ typedef struct xfs_mount {
#endif
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	int			m_lstripemask;	/* log stripe mask */
	int			m_sinoalign;	/* stripe unit inode alignmnt */
	int			m_attr_magicpct; /* 37% of the blocksize */
	int			m_dir_magicpct;	/* 37% of the dir blocksize */
...
...
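The new m_del_inodes field gives reclaim a dedicated list: xfs_reclaim() queues an inode with list_add_tail() and xfs_finish_reclaim_all() drains it with list_for_each_safe(), instead of scanning the whole active-inode chain. A userspace sketch of that producer/consumer idiom, with minimal stand-ins for the kernel's list_head macros rather than <linux/list.h>:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define INIT_LIST_HEAD(h)	((h)->next = (h)->prev = (h))
#define container_of(p, type, member) \
	((type *)((char *)(p) - offsetof(type, member)))

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev; entry->next = head;
	head->prev->next = entry; head->prev = entry;
}

struct inode_sk {			/* sketch of the xfs_inode fields */
	int		ino;
	struct list_head i_reclaim;	/* links into m_del_inodes */
};

int main(void)
{
	struct list_head m_del_inodes;	/* as added to xfs_mount above */
	struct inode_sk a = { 1 }, b = { 2 };
	struct list_head *curr, *next;

	INIT_LIST_HEAD(&m_del_inodes);
	/* xfs_reclaim() side: queue inodes for later reclaim */
	list_add_tail(&a.i_reclaim, &m_del_inodes);
	list_add_tail(&b.i_reclaim, &m_del_inodes);

	/* xfs_finish_reclaim_all() side: safe against unlinking curr */
	for (curr = m_del_inodes.next;
	     next = curr->next, curr != &m_del_inodes; curr = next) {
		struct inode_sk *ip =
			container_of(curr, struct inode_sk, i_reclaim);
		printf("reclaiming inode %d\n", ip->ino);
	}
	return 0;
}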
fs/xfs/xfs_vfsops.c
View file @
20d7d526
...
...
@@ -620,7 +620,7 @@ xfs_mntupdate(
	if (*flags & MS_RDONLY) {
		pagebuf_delwri_flush(mp->m_ddev_targp, 0, NULL);
		xfs_finish_reclaim_all(mp);
		xfs_finish_reclaim_all(mp, 0);

		do {
			VFS_SYNC(vfsp, SYNC_ATTR|SYNC_WAIT, NULL, error);
...
...
@@ -849,19 +849,14 @@ xfs_sync(
 * xfs sync routine for internal use
 *
 * This routine supports all of the flags defined for the generic VFS_SYNC
 * interface as explained above under xys_sync. In the interests of not
 * interface as explained above under xfs_sync. In the interests of not
 * changing interfaces within the 6.5 family, additional internallly-
 * required functions are specified within a separate xflags parameter,
 * only available by calling this routine.
 *
 * xflags:
 *	XFS_XSYNC_RELOC - Sync for relocation.  Don't try to get behavior
 *			  locks as this will cause you to hang.  Not all
 *			  combinations of flags are necessarily supported
 *			  when this is specified.
 */
int
xfs_syncsub(
STATIC int
xfs_sync_inodes(
	xfs_mount_t	*mp,
	int		flags,
	int		xflags,
...
...
@@ -877,12 +872,10 @@ xfs_syncsub(
	uint64_t	fflag;
	uint		lock_flags;
	uint		base_lock_flags;
	uint		log_flags;
	boolean_t	mount_locked;
	boolean_t	vnode_refed;
	int		preempt;
	xfs_dinode_t	*dip;
	xfs_buf_log_item_t *bip;
	xfs_iptr_t	*ipointer;
#ifdef DEBUG
	boolean_t	ipointer_in = B_FALSE;
...
...
@@ -961,16 +954,6 @@ xfs_syncsub(
		base_lock_flags |= XFS_IOLOCK_SHARED;
	}

	/*
	 * Sync out the log.  This ensures that the log is periodically
	 * flushed even if there is not enough activity to fill it up.
	 */
	if (flags & SYNC_WAIT) {
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
	} else {
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
	}

	XFS_MOUNT_ILOCK(mp);

	ip = mp->m_inodes;
...
...
@@ -1016,27 +999,23 @@ xfs_syncsub(
				ip = ip->i_mnext;
				continue;
			}

			if ((ip->i_update_core == 0) &&
			    ((ip->i_itemp == NULL) ||
			     !(ip->i_itemp->ili_format.ilf_fields &
			       XFS_ILOG_ALL))) {
				if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
					ip = ip->i_mnext;
				} else if ((xfs_ipincount(ip) == 0) &&
				if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
					ip = ip->i_mnext;
				} else if ((xfs_ipincount(ip) == 0) &&
					    xfs_iflock_nowait(ip)) {
					IPOINTER_INSERT(ip, mp);
					IPOINTER_INSERT(ip, mp);

					xfs_finish_reclaim(ip, 1,
						XFS_IFLUSH_DELWRI_ELSE_SYNC);
					xfs_finish_reclaim(ip, 1,
						XFS_IFLUSH_DELWRI_ELSE_ASYNC);

					XFS_MOUNT_ILOCK(mp);
					mount_locked = B_TRUE;
					IPOINTER_REMOVE(ip, mp);
				} else {
					xfs_iunlock(ip, XFS_ILOCK_EXCL);
					ip = ip->i_mnext;
				}
				continue;
					XFS_MOUNT_ILOCK(mp);
					mount_locked = B_TRUE;
					IPOINTER_REMOVE(ip, mp);
				} else {
					xfs_iunlock(ip, XFS_ILOCK_EXCL);
					ip = ip->i_mnext;
				}
				continue;
			}

			if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) {
...
...
@@ -1148,21 +1127,9 @@ xfs_syncsub(
				xfs_iunlock(ip, XFS_ILOCK_SHARED);

				if (XFS_FORCED_SHUTDOWN(mp)) {
					if (xflags & XFS_XSYNC_RELOC) {
						fs_tosspages(XFS_ITOBHV(ip),
							0, -1, FI_REMAPF);
					} else {
						VOP_TOSS_PAGES(vp, 0, -1,
							FI_REMAPF);
					}
					VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF);
				} else {
					if (xflags & XFS_XSYNC_RELOC) {
						fs_flushinval_pages(
							XFS_ITOBHV(ip),
							0, -1, FI_REMAPF);
					} else {
						VOP_FLUSHINVAL_PAGES(vp, 0,
							-1, FI_REMAPF);
					}
					VOP_FLUSHINVAL_PAGES(vp, 0, -1,
							FI_REMAPF);
				}

				xfs_ilock(ip, XFS_ILOCK_SHARED);
...
...
@@ -1418,16 +1385,55 @@ xfs_syncsub(
	ASSERT(ipointer_in == B_FALSE);
	kmem_free(ipointer, sizeof(xfs_iptr_t));
	return XFS_ERROR(last_error);
}

/*
 * xfs sync routine for internal use
 *
 * This routine supports all of the flags defined for the generic VFS_SYNC
 * interface as explained above under xfs_sync. In the interests of not
 * changing interfaces within the 6.5 family, additional internallly-
 * required functions are specified within a separate xflags parameter,
 * only available by calling this routine.
 *
 */
int
xfs_syncsub(
	xfs_mount_t	*mp,
	int		flags,
	int		xflags,
	int		*bypassed)
{
	int		error = 0;
	int		last_error = 0;
	uint		log_flags = XFS_LOG_FORCE;
	xfs_buf_t	*bp;
	xfs_buf_log_item_t	*bip;

	/*
	 * Sync out the log.  This ensures that the log is periodically
	 * flushed even if there is not enough activity to fill it up.
	 */
	if (flags & SYNC_WAIT)
		log_flags |= XFS_LOG_SYNC;

	xfs_log_force(mp, (xfs_lsn_t)0, log_flags);

	if (flags & (SYNC_ATTR|SYNC_DELWRI)) {
		if (flags & SYNC_BDFLUSH)
			xfs_finish_reclaim_all(mp, 1);
		else
			error = xfs_sync_inodes(mp, flags, xflags, bypassed);
	}

	/*
	 * Flushing out dirty data above probably generated more
	 * log activity, so if this isn't vfs_sync() then flush
	 * the log again.  If SYNC_WAIT is set then do it synchronously.
	 * the log again.
	 */
	if (!(flags & SYNC_BDFLUSH)) {
		log_flags = XFS_LOG_FORCE;
		if (flags & SYNC_WAIT) {
			log_flags |= XFS_LOG_SYNC;
		}
		if (flags & SYNC_DELWRI) {
			xfs_log_force(mp, (xfs_lsn_t)0, log_flags);
		}
...
...
@@ -1463,11 +1469,10 @@ xfs_syncsub(
			 * that point so it can become pinned in between
			 * there and here.
			 */
			if (XFS_BUF_ISPINNED(bp)) {
				xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
			}
			XFS_BUF_BFLAGS(bp) |= fflag;
			if (XFS_BUF_ISPINNED(bp))
				xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
			if (!(flags & SYNC_WAIT))
				XFS_BUF_BFLAGS(bp) |= XFS_B_ASYNC;
			error = xfs_bwrite(mp, bp);
		}
		if (error) {
...
...
@@ -1478,9 +1483,9 @@ xfs_syncsub(
		/*
		 * Now check to see if the log needs a "dummy" transaction.
		 */
		if (xfs_log_need_covered(mp)) {
			xfs_trans_t	*tp;
			xfs_inode_t	*ip;

			/*
			 * Put a dummy transaction in the log to tell
...
...
@@ -1491,7 +1496,6 @@ xfs_syncsub(
					XFS_ICHANGE_LOG_RES(mp),
					0, 0, 0))) {
				xfs_trans_cancel(tp, 0);
				kmem_free(ipointer, sizeof(xfs_iptr_t));
				return error;
			}
...
...
@@ -1503,6 +1507,7 @@ xfs_syncsub(
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
			error = xfs_trans_commit(tp, 0, NULL);
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		}

		/*
...
...
@@ -1516,7 +1521,6 @@ xfs_syncsub(
		}
	}

	kmem_free(ipointer, sizeof(xfs_iptr_t));
	return XFS_ERROR(last_error);
}
...
...
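The restructured xfs_syncsub() above makes the ordering explicit: force the log once up front, flush or reclaim dirty inodes (which generates more log traffic), force the log again, and finally commit a "dummy" transaction when xfs_log_need_covered() says the log should be marked idle. A condensed sketch of that control flow, with stubbed primitives standing in for the XFS calls (names and flag values here are invented for the sketch):

#include <stdio.h>

enum { SYNC_WAIT = 1, SYNC_BDFLUSH = 2, SYNC_ATTR = 4, SYNC_DELWRI = 8 };
enum { LOG_FORCE = 1, LOG_SYNC = 2 };

static void log_force(int f)      { printf("log force%s\n", (f & LOG_SYNC) ? " (sync)" : ""); }
static int  sync_inodes(int f)    { (void)f; printf("flush dirty inodes\n"); return 0; }
static void reclaim_all(void)     { printf("non-blocking reclaim\n"); }
static int  log_need_covered(void){ return 1; }
static void commit_dummy_trans(void) { printf("dummy transaction to cover log\n"); }

static int
syncsub_sketch(int flags)
{
	int log_flags = LOG_FORCE, error = 0;

	if (flags & SYNC_WAIT)
		log_flags |= LOG_SYNC;
	log_force(log_flags);		/* step 1: push pending log traffic */

	if (flags & (SYNC_ATTR | SYNC_DELWRI)) {
		if (flags & SYNC_BDFLUSH)
			reclaim_all();	/* background path: never block */
		else
			error = sync_inodes(flags);
	}

	if (!(flags & SYNC_BDFLUSH)) {
		log_force(log_flags);	/* step 2: cover the new activity */
		if (log_need_covered())
			commit_dummy_trans();	/* mark the log clean/idle */
	}
	return error;
}

int main(void) { return syncsub_sketch(SYNC_ATTR | SYNC_WAIT); }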
fs/xfs/xfs_vnodeops.c
View file @
20d7d526
...
...
@@ -658,7 +658,7 @@ xfs_setattr(
		if (vap->va_size > ip->i_d.di_size) {
			code = xfs_igrow_start(ip, vap->va_size, credp);
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		} else if (vap->va_size < ip->i_d.di_size) {
		} else if (vap->va_size <= ip->i_d.di_size) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
					    (xfs_fsize_t)vap->va_size);
...
...
@@ -701,7 +701,7 @@ xfs_setattr(
		if (vap->va_size > ip->i_d.di_size) {
			xfs_igrow_finish(tp, ip, vap->va_size,
			    !(flags & ATTR_DMI));
		} else if ((vap->va_size < ip->i_d.di_size) ||
		} else if ((vap->va_size <= ip->i_d.di_size) ||
			   ((vap->va_size == 0) && ip->i_d.di_nextents)) {
			/*
			 * signal a sync transaction unless
...
...
@@ -3786,27 +3786,30 @@ xfs_inode_flush(
			flush_flags = XFS_IFLUSH_SYNC;
		else
#endif
			flush_flags = XFS_IFLUSH_DELWRI;
			flush_flags = XFS_IFLUSH_DELWRI_ELSE_ASYNC;

		xfs_ifunlock(ip);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0);
		if (error)
			goto eagain;
			return error;
		xfs_buf_relse(bp);

		if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED) == 0)
			goto eagain;
			return EAGAIN;

		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) {
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			return EAGAIN;
		}
		if ((xfs_ipincount(ip) == 0) && xfs_iflock_nowait(ip))
			error = xfs_iflush(ip, flush_flags);

		error = xfs_iflush(ip, flush_flags);
		} else {
			error = EAGAIN;
		}
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
	} else {
 eagain:
		error = EAGAIN;
	}
	}
...
...
@@ -3934,6 +3937,8 @@ xfs_reclaim(
		/* Protect sync from us */
		XFS_MOUNT_ILOCK(mp);
		vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
		list_add_tail(&ip->i_reclaim, &mp->m_del_inodes);
		XFS_MOUNT_IUNLOCK(mp);
	}
	return 0;
...
...
@@ -4010,40 +4015,33 @@ xfs_finish_reclaim(
}

int
xfs_finish_reclaim_all(xfs_mount_t *mp)
xfs_finish_reclaim_all(xfs_mount_t *mp, int noblock)
{
	int		purged;
	struct list_head *curr, *next;
	xfs_inode_t	*ip;
	vnode_t		*vp;
	int		done = 0;

	while (!done) {
		purged = 0;
		XFS_MOUNT_ILOCK(mp);
		ip = mp->m_inodes;
		if (ip == NULL) {
		list_for_each_safe(curr, next, &mp->m_del_inodes) {
			ip = list_entry(curr, xfs_inode_t, i_reclaim);
			if (noblock) {
				if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0)
					continue;
				if (xfs_ipincount(ip) ||
				    !xfs_iflock_nowait(ip)) {
					xfs_iunlock(ip, XFS_ILOCK_EXCL);
					continue;
				}
			}
			XFS_MOUNT_IUNLOCK(mp);
			xfs_finish_reclaim(ip, noblock,
				XFS_IFLUSH_DELWRI_ELSE_ASYNC);
			purged = 1;
			break;
		}
		do {
			/* Make sure we skip markers inserted by sync */
			if (ip->i_mount == NULL) {
				ip = ip->i_mnext;
				continue;
			}
			/*
			 * It's up to our caller to purge the root
			 * and quota vnodes later.
			 */
			vp = XFS_ITOV_NULL(ip);
			if (!vp) {
				XFS_MOUNT_IUNLOCK(mp);
				xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
				purged = 1;
				break;
			}
		} while (ip != mp->m_inodes);

		done = !purged;
	}
...
...
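Both xfs_inode_flush() and the noblock path of xfs_finish_reclaim_all() above follow the same discipline: try to take the inode lock and the flush lock without sleeping, and skip the inode (return EAGAIN or continue) rather than block if it is pinned or contended. A small pthreads sketch of that trylock-and-skip pattern; the struct and field names are stand-ins for the XFS locks, not the kernel API:

#include <pthread.h>
#include <stdio.h>

struct inode_sk {
	pthread_mutex_t	ilock;		/* stands in for XFS_ILOCK_EXCL */
	pthread_mutex_t	flock;		/* stands in for the flush lock */
	int		pincount;	/* stands in for xfs_ipincount() */
	int		ino;
};

/* Returns 0 if flushed, 1 (EAGAIN-style) if the inode was skipped. */
static int
try_reclaim_one(struct inode_sk *ip)
{
	if (pthread_mutex_trylock(&ip->ilock) != 0)
		return 1;		/* contended: skip, never sleep */
	if (ip->pincount || pthread_mutex_trylock(&ip->flock) != 0) {
		pthread_mutex_unlock(&ip->ilock);
		return 1;		/* pinned or flush-locked: skip */
	}
	printf("flushing inode %d\n", ip->ino);
	pthread_mutex_unlock(&ip->flock);
	pthread_mutex_unlock(&ip->ilock);
	return 0;
}

int main(void)
{
	struct inode_sk ip = { PTHREAD_MUTEX_INITIALIZER,
			       PTHREAD_MUTEX_INITIALIZER, 0, 42 };
	return try_reclaim_one(&ip);
}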
fs/xfs/xfsidbg.c
View file @
20d7d526
...
...
@@ -4269,8 +4269,10 @@ xfsidbg_xlog(xlog_t *log)
	kdb_printf("iclog_bak: 0x%p  iclog_size: 0x%x (%d)  num iclogs: %d\n",
		log->l_iclog_bak, log->l_iclog_size, log->l_iclog_size,
		log->l_iclog_bufs);
	kdb_printf("l_iclog_hsize %d l_iclog_heads %d\n",
		log->l_iclog_hsize, log->l_iclog_heads);
	kdb_printf("l_stripemask %d l_iclog_hsize %d l_iclog_heads %d\n",
		log->l_stripemask, log->l_iclog_hsize, log->l_iclog_heads);
	kdb_printf("l_sectbb_log %u l_sectbb_mask %u\n",
		log->l_sectbb_log, log->l_sectbb_mask);
	kdb_printf("&grant_lock: 0x%p  resHeadQ: 0x%p  wrHeadQ: 0x%p\n",
		&log->l_grant_lock, log->l_reserve_headq, log->l_write_headq);
	kdb_printf("GResCycle: %d  GResBytes: %d  GWrCycle: %d  GWrBytes: %d\n",
...
...
@@ -4712,7 +4714,6 @@ xfsidbg_xmount(xfs_mount_t *mp)
		(xfs_dfiloff_t)mp->m_dirfreeblk);
	kdb_printf("chsize %d chash 0x%p\n",
		mp->m_chsize, mp->m_chash);
	kdb_printf("m_lstripemask %d\n", mp->m_lstripemask);
	kdb_printf("m_frozen %d m_active_trans %d\n",
		mp->m_frozen, mp->m_active_trans.counter);
	if (mp->m_fsname != NULL)
...
...