Commit 2f43bbd9 authored Sep 29, 2014 by Dave Chinner

Merge branch 'xfs-trans-recover-cleanup' into for-next
parents 33044dc4 b818cca1

Showing 1 changed file with 308 additions and 256 deletions

fs/xfs/xfs_log_recover.c  +308 -256
...
...
@@ -1445,160 +1445,6 @@ xlog_clear_stale_blocks(
  ******************************************************************************
  */
-STATIC xlog_recover_t *
-xlog_recover_find_tid(
-	struct hlist_head	*head,
-	xlog_tid_t		tid)
-{
-	xlog_recover_t		*trans;
-
-	hlist_for_each_entry(trans, head, r_list) {
-		if (trans->r_log_tid == tid)
-			return trans;
-	}
-	return NULL;
-}
-
-STATIC void
-xlog_recover_new_tid(
-	struct hlist_head	*head,
-	xlog_tid_t		tid,
-	xfs_lsn_t		lsn)
-{
-	xlog_recover_t		*trans;
-
-	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
-	trans->r_log_tid = tid;
-	trans->r_lsn = lsn;
-	INIT_LIST_HEAD(&trans->r_itemq);
-
-	INIT_HLIST_NODE(&trans->r_list);
-	hlist_add_head(&trans->r_list, head);
-}
-
-STATIC void
-xlog_recover_add_item(
-	struct list_head	*head)
-{
-	xlog_recover_item_t	*item;
-
-	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
-	INIT_LIST_HEAD(&item->ri_list);
-	list_add_tail(&item->ri_list, head);
-}
-
-STATIC int
-xlog_recover_add_to_cont_trans(
-	struct xlog		*log,
-	struct xlog_recover	*trans,
-	xfs_caddr_t		dp,
-	int			len)
-{
-	xlog_recover_item_t	*item;
-	xfs_caddr_t		ptr, old_ptr;
-	int			old_len;
-
-	if (list_empty(&trans->r_itemq)) {
-		/* finish copying rest of trans header */
-		xlog_recover_add_item(&trans->r_itemq);
-		ptr = (xfs_caddr_t) &trans->r_theader +
-				sizeof(xfs_trans_header_t) - len;
-		memcpy(ptr, dp, len); /* d, s, l */
-		return 0;
-	}
-	/* take the tail entry */
-	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
-
-	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
-	old_len = item->ri_buf[item->ri_cnt-1].i_len;
-
-	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
-	memcpy(&ptr[old_len], dp, len); /* d, s, l */
-	item->ri_buf[item->ri_cnt-1].i_len += len;
-	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
-	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
-	return 0;
-}
-
-/*
- * The next region to add is the start of a new region.  It could be
- * a whole region or it could be the first part of a new region.  Because
- * of this, the assumption here is that the type and size fields of all
- * format structures fit into the first 32 bits of the structure.
- *
- * This works because all regions must be 32 bit aligned.  Therefore, we
- * either have both fields or we have neither field.  In the case we have
- * neither field, the data part of the region is zero length.  We only have
- * a log_op_header and can throw away the header since a new one will appear
- * later.  If we have at least 4 bytes, then we can determine how many regions
- * will appear in the current log item.
- */
-STATIC int
-xlog_recover_add_to_trans(
-	struct xlog		*log,
-	struct xlog_recover	*trans,
-	xfs_caddr_t		dp,
-	int			len)
-{
-	xfs_inode_log_format_t	*in_f;			/* any will do */
-	xlog_recover_item_t	*item;
-	xfs_caddr_t		ptr;
-
-	if (!len)
-		return 0;
-	if (list_empty(&trans->r_itemq)) {
-		/* we need to catch log corruptions here */
-		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
-			xfs_warn(log->l_mp, "%s: bad header magic number",
-				__func__);
-			ASSERT(0);
-			return -EIO;
-		}
-		if (len == sizeof(xfs_trans_header_t))
-			xlog_recover_add_item(&trans->r_itemq);
-		memcpy(&trans->r_theader, dp, len); /* d, s, l */
-		return 0;
-	}
-
-	ptr = kmem_alloc(len, KM_SLEEP);
-	memcpy(ptr, dp, len);
-	in_f = (xfs_inode_log_format_t *)ptr;
-
-	/* take the tail entry */
-	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
-	if (item->ri_total != 0 &&
-	     item->ri_total == item->ri_cnt) {
-		/* tail item is in use, get a new one */
-		xlog_recover_add_item(&trans->r_itemq);
-		item = list_entry(trans->r_itemq.prev,
-					xlog_recover_item_t, ri_list);
-	}
-
-	if (item->ri_total == 0) {		/* first region to be added */
-		if (in_f->ilf_size == 0 ||
-		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
-			xfs_warn(log->l_mp,
-		"bad number of regions (%d) in inode log format",
-				  in_f->ilf_size);
-			ASSERT(0);
-			kmem_free(ptr);
-			return -EIO;
-		}
-
-		item->ri_total = in_f->ilf_size;
-		item->ri_buf =
-			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
-				    KM_SLEEP);
-	}
-	ASSERT(item->ri_total > item->ri_cnt);
-	/* Description region is ri_buf[0] */
-	item->ri_buf[item->ri_cnt].i_addr = ptr;
-	item->ri_buf[item->ri_cnt].i_len  = len;
-	item->ri_cnt++;
-	trace_xfs_log_recover_item_add(log, trans, item, 0);
-	return 0;
-}
-
 /*
  * Sort the log items in the transaction.
  *
...
...
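
The comment above xlog_recover_add_to_trans leans on one layout invariant: every log item format structure keeps its type and size fields in the first 32 bits, so peeking 4 bytes of a new region is enough to learn how many regions the item will carry. A minimal userspace sketch of that peek, with a made-up format struct standing in for xfs_inode_log_format_t (all names here are invented for illustration):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical stand-in for an XFS log item format structure: as in
	 * the real ones, the type and size fields occupy the first 32 bits. */
	struct fake_log_format {
		uint16_t	type;		/* item type */
		uint16_t	size;		/* number of regions in the item */
		uint32_t	payload;	/* rest of the format struct */
	};

	int main(void)
	{
		struct fake_log_format f = { .type = 0x123b, .size = 3 };
		uint32_t region[16];	/* region buffer, 32-bit aligned like log regions */

		memcpy(region, &f, sizeof(f));

		/* Any format struct "will do" for the peek, as with the
		 * xfs_inode_log_format_t cast in xlog_recover_add_to_trans():
		 * only the leading 32 bits are interpreted here. */
		struct fake_log_format *in_f = (struct fake_log_format *)region;
		assert(in_f->size == 3);
		printf("type 0x%x, %u regions expected\n", in_f->type, in_f->size);
		return 0;
	}

This is also why a region shorter than 4 bytes carries no usable data: it can only be a bare log_op_header, which the parser may discard.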
@@ -3254,31 +3100,6 @@ xlog_recover_do_icreate_pass2(
 	return 0;
 }

-/*
- * Free up any resources allocated by the transaction
- *
- * Remember that EFIs, EFDs, and IUNLINKs are handled later.
- */
-STATIC void
-xlog_recover_free_trans(
-	struct xlog_recover	*trans)
-{
-	xlog_recover_item_t	*item, *n;
-	int			i;
-
-	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
-		/* Free the regions in the item. */
-		list_del(&item->ri_list);
-		for (i = 0; i < item->ri_cnt; i++)
-			kmem_free(item->ri_buf[i].i_addr);
-		/* Free the item itself */
-		kmem_free(item->ri_buf);
-		kmem_free(item);
-	}
-	/* Free the transaction recover structure */
-	kmem_free(trans);
-}
-
 STATIC void
 xlog_recover_buffer_ra_pass2(
 	struct xlog                     *log,
...
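
xlog_recover_free_trans is deleted here only to reappear later in the file; the series moves it ahead of its new caller, xlog_recovery_process_trans. Its list_for_each_entry_safe walk is the standard pattern for unlinking and freeing entries mid-iteration. A userspace sketch of the same idea on a plain singly linked list, with hypothetical types:

	#include <stdlib.h>

	struct item {
		struct item	*next;
		void		*buf;	/* per-item allocation, like ri_buf */
	};

	/* Free every item on the list. The successor is saved before each
	 * node is freed - the userspace analogue of the
	 * list_for_each_entry_safe() loop in xlog_recover_free_trans(). */
	static void
	free_items(struct item *head)
	{
		struct item *it, *n;

		for (it = head; it != NULL; it = n) {
			n = it->next;	/* grab successor before freeing */
			free(it->buf);
			free(it);
		}
	}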
@@ -3528,21 +3349,308 @@ xlog_recover_commit_trans(
 	if (!list_empty(&done_list))
 		list_splice_init(&done_list, &trans->r_itemq);

 	xlog_recover_free_trans(trans);

 	error2 = xfs_buf_delwri_submit(&buffer_list);
 	return error ? error : error2;
 }

+STATIC void
+xlog_recover_add_item(
+	struct list_head	*head)
+{
+	xlog_recover_item_t	*item;
+
+	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
+	INIT_LIST_HEAD(&item->ri_list);
+	list_add_tail(&item->ri_list, head);
+}
+
 STATIC int
-xlog_recover_unmount_trans(
-	struct xlog		*log)
-{
-	/* Do nothing now */
-	xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
-	return 0;
-}
-
+xlog_recover_add_to_cont_trans(
+	struct xlog		*log,
+	struct xlog_recover	*trans,
+	xfs_caddr_t		dp,
+	int			len)
+{
+	xlog_recover_item_t	*item;
+	xfs_caddr_t		ptr, old_ptr;
+	int			old_len;
+
+	if (list_empty(&trans->r_itemq)) {
+		/* finish copying rest of trans header */
+		xlog_recover_add_item(&trans->r_itemq);
+		ptr = (xfs_caddr_t) &trans->r_theader +
+				sizeof(xfs_trans_header_t) - len;
+		memcpy(ptr, dp, len);
+		return 0;
+	}
+	/* take the tail entry */
+	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
+
+	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
+	old_len = item->ri_buf[item->ri_cnt-1].i_len;
+
+	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
+	memcpy(&ptr[old_len], dp, len);
+	item->ri_buf[item->ri_cnt-1].i_len += len;
+	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
+	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
+	return 0;
+}
+
+/*
+ * The next region to add is the start of a new region.  It could be
+ * a whole region or it could be the first part of a new region.  Because
+ * of this, the assumption here is that the type and size fields of all
+ * format structures fit into the first 32 bits of the structure.
+ *
+ * This works because all regions must be 32 bit aligned.  Therefore, we
+ * either have both fields or we have neither field.  In the case we have
+ * neither field, the data part of the region is zero length.  We only have
+ * a log_op_header and can throw away the header since a new one will appear
+ * later.  If we have at least 4 bytes, then we can determine how many regions
+ * will appear in the current log item.
+ */
+STATIC int
+xlog_recover_add_to_trans(
+	struct xlog		*log,
+	struct xlog_recover	*trans,
+	xfs_caddr_t		dp,
+	int			len)
+{
+	xfs_inode_log_format_t	*in_f;			/* any will do */
+	xlog_recover_item_t	*item;
+	xfs_caddr_t		ptr;
+
+	if (!len)
+		return 0;
+	if (list_empty(&trans->r_itemq)) {
+		/* we need to catch log corruptions here */
+		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
+			xfs_warn(log->l_mp, "%s: bad header magic number",
+				__func__);
+			ASSERT(0);
+			return -EIO;
+		}
+		if (len == sizeof(xfs_trans_header_t))
+			xlog_recover_add_item(&trans->r_itemq);
+		memcpy(&trans->r_theader, dp, len);
+		return 0;
+	}
+
+	ptr = kmem_alloc(len, KM_SLEEP);
+	memcpy(ptr, dp, len);
+	in_f = (xfs_inode_log_format_t *)ptr;
+
+	/* take the tail entry */
+	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
+	if (item->ri_total != 0 &&
+	     item->ri_total == item->ri_cnt) {
+		/* tail item is in use, get a new one */
+		xlog_recover_add_item(&trans->r_itemq);
+		item = list_entry(trans->r_itemq.prev,
+					xlog_recover_item_t, ri_list);
+	}
+
+	if (item->ri_total == 0) {		/* first region to be added */
+		if (in_f->ilf_size == 0 ||
+		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
+			xfs_warn(log->l_mp,
+		"bad number of regions (%d) in inode log format",
+				  in_f->ilf_size);
+			ASSERT(0);
+			kmem_free(ptr);
+			return -EIO;
+		}
+
+		item->ri_total = in_f->ilf_size;
+		item->ri_buf =
+			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
+				    KM_SLEEP);
+	}
+	ASSERT(item->ri_total > item->ri_cnt);
+	/* Description region is ri_buf[0] */
+	item->ri_buf[item->ri_cnt].i_addr = ptr;
+	item->ri_buf[item->ri_cnt].i_len  = len;
+	item->ri_cnt++;
+	trace_xfs_log_recover_item_add(log, trans, item, 0);
+	return 0;
+}
+
+/*
+ * Free up any resources allocated by the transaction
+ *
+ * Remember that EFIs, EFDs, and IUNLINKs are handled later.
+ */
+STATIC void
+xlog_recover_free_trans(
+	struct xlog_recover	*trans)
+{
+	xlog_recover_item_t	*item, *n;
+	int			i;
+
+	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
+		/* Free the regions in the item. */
+		list_del(&item->ri_list);
+		for (i = 0; i < item->ri_cnt; i++)
+			kmem_free(item->ri_buf[i].i_addr);
+		/* Free the item itself */
+		kmem_free(item->ri_buf);
+		kmem_free(item);
+	}
+	/* Free the transaction recover structure */
+	kmem_free(trans);
+}
+
+/*
+ * On error or completion, trans is freed.
+ */
+STATIC int
+xlog_recovery_process_trans(
+	struct xlog		*log,
+	struct xlog_recover	*trans,
+	xfs_caddr_t		dp,
+	unsigned int		len,
+	unsigned int		flags,
+	int			pass)
+{
+	int			error = 0;
+	bool			freeit = false;
+
+	/* mask off ophdr transaction container flags */
+	flags &= ~XLOG_END_TRANS;
+	if (flags & XLOG_WAS_CONT_TRANS)
+		flags &= ~XLOG_CONTINUE_TRANS;
+
+	/*
+	 * Callees must not free the trans structure. We'll decide if we need
+	 * to free it or not based on the operation being done and its result.
+	 */
+	switch (flags) {
+	/* expected flag values */
+	case 0:
+	case XLOG_CONTINUE_TRANS:
+		error = xlog_recover_add_to_trans(log, trans, dp, len);
+		break;
+	case XLOG_WAS_CONT_TRANS:
+		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
+		break;
+	case XLOG_COMMIT_TRANS:
+		error = xlog_recover_commit_trans(log, trans, pass);
+		/* success or fail, we are now done with this transaction. */
+		freeit = true;
+		break;
+
+	/* unexpected flag values */
+	case XLOG_UNMOUNT_TRANS:
+		/* just skip trans */
+		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
+		freeit = true;
+		break;
+	case XLOG_START_TRANS:
+	default:
+		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
+		ASSERT(0);
+		error = -EIO;
+		break;
+	}
+	if (error || freeit)
+		xlog_recover_free_trans(trans);
+	return error;
+}
+
+/*
+ * Lookup the transaction recovery structure associated with the ID in the
+ * current ophdr. If the transaction doesn't exist and the start flag is set in
+ * the ophdr, then allocate a new transaction for future ID matches to find.
+ * Either way, return what we found during the lookup - an existing transaction
+ * or nothing.
+ */
+STATIC struct xlog_recover *
+xlog_recover_ophdr_to_trans(
+	struct hlist_head	rhash[],
+	struct xlog_rec_header	*rhead,
+	struct xlog_op_header	*ohead)
+{
+	struct xlog_recover	*trans;
+	xlog_tid_t		tid;
+	struct hlist_head	*rhp;
+
+	tid = be32_to_cpu(ohead->oh_tid);
+	rhp = &rhash[XLOG_RHASH(tid)];
+	hlist_for_each_entry(trans, rhp, r_list) {
+		if (trans->r_log_tid == tid)
+			return trans;
+	}
+
+	/*
+	 * skip over non-start transaction headers - we could be
+	 * processing slack space before the next transaction starts
+	 */
+	if (!(ohead->oh_flags & XLOG_START_TRANS))
+		return NULL;
+
+	ASSERT(be32_to_cpu(ohead->oh_len) == 0);
+
+	/*
+	 * This is a new transaction so allocate a new recovery container to
+	 * hold the recovery ops that will follow.
+	 */
+	trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
+	trans->r_log_tid = tid;
+	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
+	INIT_LIST_HEAD(&trans->r_itemq);
+	INIT_HLIST_NODE(&trans->r_list);
+	hlist_add_head(&trans->r_list, rhp);
+
+	/*
+	 * Nothing more to do for this ophdr. Items to be added to this new
+	 * transaction will be in subsequent ophdr containers.
+	 */
+	return NULL;
+}
+
+STATIC int
+xlog_recover_process_ophdr(
+	struct xlog		*log,
+	struct hlist_head	rhash[],
+	struct xlog_rec_header	*rhead,
+	struct xlog_op_header	*ohead,
+	xfs_caddr_t		dp,
+	xfs_caddr_t		end,
+	int			pass)
+{
+	struct xlog_recover	*trans;
+	unsigned int		len;
+
+	/* Do we understand who wrote this op? */
+	if (ohead->oh_clientid != XFS_TRANSACTION &&
+	    ohead->oh_clientid != XFS_LOG) {
+		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
+			__func__, ohead->oh_clientid);
+		ASSERT(0);
+		return -EIO;
+	}
+
+	/*
+	 * Check the ophdr contains all the data it is supposed to contain.
+	 */
+	len = be32_to_cpu(ohead->oh_len);
+	if (dp + len > end) {
+		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
+		WARN_ON(1);
+		return -EIO;
+	}
+
+	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
+	if (!trans) {
+		/* nothing to do, so skip over this ophdr */
+		return 0;
+	}
+
+	return xlog_recovery_process_trans(log, trans, dp, len,
+					   ohead->oh_flags, pass);
+}
+
 /*
  * There are two valid states of the r_state field.  0 indicates that the
  * transaction structure is in a normal state. We have either seen the
...
...
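
The new xlog_recover_ophdr_to_trans folds the old xlog_recover_find_tid/xlog_recover_new_tid pair into a single lookup: a hit returns the open transaction, a miss allocates one only when the ophdr carries XLOG_START_TRANS, and a start record itself yields NULL since it holds no item data. A userspace sketch of that lookup-or-allocate-on-start shape, using a plain chained hash in place of the kernel hlist machinery (names are invented for illustration):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define RHASH_SIZE	16
	#define RHASH(tid)	((tid) % RHASH_SIZE)	/* stand-in for XLOG_RHASH() */

	struct rtrans {
		uint32_t	tid;
		struct rtrans	*next;	/* chain within one hash bucket */
	};

	/*
	 * Return the open transaction for @tid, or NULL. On a miss with
	 * @is_start set, allocate and chain a new one - and still return
	 * NULL, matching xlog_recover_ophdr_to_trans(): the start record
	 * carries no item data, so there is nothing to process yet.
	 */
	static struct rtrans *
	lookup_or_start(struct rtrans *rhash[], uint32_t tid, bool is_start)
	{
		struct rtrans **rhp = &rhash[RHASH(tid)];
		struct rtrans *t;

		for (t = *rhp; t != NULL; t = t->next)
			if (t->tid == tid)
				return t;

		if (!is_start)
			return NULL;	/* slack space before the next transaction */

		t = calloc(1, sizeof(*t));
		if (t == NULL)
			abort();	/* kernel KM_SLEEP allocations cannot fail */
		t->tid = tid;
		t->next = *rhp;
		*rhp = t;
		return NULL;		/* items arrive in later ophdrs */
	}

Collapsing lookup and creation into one helper is what lets the caller treat "no transaction" uniformly: a NULL return always means "skip this ophdr".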
@@ -3560,86 +3668,30 @@ xlog_recover_process_data(
 	xfs_caddr_t		dp,
 	int			pass)
 {
-	xfs_caddr_t		lp;
+	struct xlog_op_header	*ohead;
+	xfs_caddr_t		end;
 	int			num_logops;
-	xlog_op_header_t	*ohead;
-	xlog_recover_t		*trans;
-	xlog_tid_t		tid;
 	int			error;
-	unsigned long		hash;
-	uint			flags;

-	lp = dp + be32_to_cpu(rhead->h_len);
+	end = dp + be32_to_cpu(rhead->h_len);
 	num_logops = be32_to_cpu(rhead->h_num_logops);

 	/* check the log format matches our own - else we can't recover */
 	if (xlog_header_check_recover(log->l_mp, rhead))
 		return -EIO;

-	while ((dp < lp) && num_logops) {
-		ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
-		ohead = (xlog_op_header_t *)dp;
-		dp += sizeof(xlog_op_header_t);
-		if (ohead->oh_clientid != XFS_TRANSACTION &&
-		    ohead->oh_clientid != XFS_LOG) {
-			xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
-					__func__, ohead->oh_clientid);
-			ASSERT(0);
-			return -EIO;
-		}
-		tid = be32_to_cpu(ohead->oh_tid);
-		hash = XLOG_RHASH(tid);
-		trans = xlog_recover_find_tid(&rhash[hash], tid);
-		if (trans == NULL) {		/* not found; add new tid */
-			if (ohead->oh_flags & XLOG_START_TRANS)
-				xlog_recover_new_tid(&rhash[hash], tid,
-					be64_to_cpu(rhead->h_lsn));
-		} else {
-			if (dp + be32_to_cpu(ohead->oh_len) > lp) {
-				xfs_warn(log->l_mp, "%s: bad length 0x%x",
-					__func__, be32_to_cpu(ohead->oh_len));
-				WARN_ON(1);
-				return -EIO;
-			}
-			flags = ohead->oh_flags & ~XLOG_END_TRANS;
-			if (flags & XLOG_WAS_CONT_TRANS)
-				flags &= ~XLOG_CONTINUE_TRANS;
-			switch (flags) {
-			case XLOG_COMMIT_TRANS:
-				error = xlog_recover_commit_trans(log,
-								trans, pass);
-				break;
-			case XLOG_UNMOUNT_TRANS:
-				error = xlog_recover_unmount_trans(log);
-				break;
-			case XLOG_WAS_CONT_TRANS:
-				error = xlog_recover_add_to_cont_trans(log,
-						trans, dp,
-						be32_to_cpu(ohead->oh_len));
-				break;
-			case XLOG_START_TRANS:
-				xfs_warn(log->l_mp, "%s: bad transaction",
-					__func__);
-				ASSERT(0);
-				error = -EIO;
-				break;
-			case 0:
-			case XLOG_CONTINUE_TRANS:
-				error = xlog_recover_add_to_trans(log, trans,
-						dp, be32_to_cpu(ohead->oh_len));
-				break;
-			default:
-				xfs_warn(log->l_mp, "%s: bad flag 0x%x",
-					__func__, flags);
-				ASSERT(0);
-				error = -EIO;
-				break;
-			}
-			if (error) {
-				xlog_recover_free_trans(trans);
-				return error;
-			}
-		}
+	while ((dp < end) && num_logops) {
+
+		ohead = (struct xlog_op_header *)dp;
+		dp += sizeof(*ohead);
+		ASSERT(dp <= end);
+
+		/* errors will abort recovery */
+		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
+						   dp, end, pass);
+		if (error)
+			return error;
+
 		dp += be32_to_cpu(ohead->oh_len);
 		num_logops--;
 	}
...
...
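
After the refactor, xlog_recover_process_data reduces to a bounds-checked walk over the op headers in one log record, with all per-op policy delegated to xlog_recover_process_ophdr. A userspace sketch of that walk over variable-length records (the header type and callback are invented for illustration; as in xlog_recover_process_ophdr, the length is validated before the payload is touched):

	#include <stdint.h>
	#include <string.h>

	struct op_header {
		uint32_t	len;	/* payload bytes following this header */
	};

	/*
	 * Walk each header+payload pair in [dp, end), calling process() on
	 * the payload. Mirrors the shape of the new
	 * xlog_recover_process_data() loop: errors abort the walk.
	 */
	static int
	walk_ops(char *dp, char *end, int num_ops,
		 int (*process)(char *payload, uint32_t len))
	{
		while (dp < end && num_ops) {
			struct op_header hdr;

			if ((size_t)(end - dp) < sizeof(hdr))
				return -1;	/* truncated header */
			memcpy(&hdr, dp, sizeof(hdr));
			dp += sizeof(hdr);

			if ((size_t)(end - dp) < hdr.len)
				return -1;	/* payload overruns the record */
			if (process(dp, hdr.len))
				return -1;	/* per-op error aborts recovery */

			dp += hdr.len;
			num_ops--;
		}
		return 0;
	}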