Commit 0f238367
Authored Mar 27, 2019 by Kent Overstreet
Committed by Kent Overstreet, Oct 22, 2023
bcachefs: trans_for_each_iter()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

Parent: 7c26ecae
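The commit message carries no body, so here is a minimal sketch of the refactor the hunks below apply throughout fs/bcachefs: code that used to walk the linked list hanging off a single btree_iter (for_each_btree_iter() / for_each_linked_btree_iter()) now walks every iterator owned by the enclosing btree_trans via the new trans_for_each_iter(). The function below is hypothetical and only illustrates the pattern; the macro names, iter->trans and __bch2_btree_iter_unlock() are taken from the diff itself.

    /* Hypothetical illustration only - not part of this commit. */
    static void example_unlock_all(struct btree_iter *iter)
    {
            struct btree_iter *linked;

            /* Old pattern (these macros are removed by this commit): */
            for_each_btree_iter(iter, linked)
                    __bch2_btree_iter_unlock(linked);

            /* New pattern: walk every iterator owned by the transaction: */
            trans_for_each_iter(iter->trans, linked)
                    __bch2_btree_iter_unlock(linked);
    }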
Showing 16 changed files with 255 additions and 248 deletions (+255 -248)
fs/bcachefs/alloc_background.c       +1   -1
fs/bcachefs/btree_cache.c            +3   -3
fs/bcachefs/btree_iter.c             +54  -44
fs/bcachefs/btree_iter.h             +59  -44
fs/bcachefs/btree_locking.h          +3   -2
fs/bcachefs/btree_update_interior.c  +15  -15
fs/bcachefs/btree_update_leaf.c      +17  -47
fs/bcachefs/debug.c                  +2   -2
fs/bcachefs/ec.c                     +4   -4
fs/bcachefs/fs-io.c                  +20  -18
fs/bcachefs/fsck.c                   +66  -57
fs/bcachefs/inode.c                  +2   -2
fs/bcachefs/io.c                     +5   -5
fs/bcachefs/migrate.c                +1   -1
fs/bcachefs/move.c                   +2   -2
fs/bcachefs/quota.c                  +1   -1
fs/bcachefs/alloc_background.c

@@ -945,7 +945,7 @@ static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
 	bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
 retry:
 	k = bch2_btree_iter_peek_slot(iter);
-	ret = btree_iter_err(k);
+	ret = bkey_err(k);
 	if (ret)
 		return ret;
fs/bcachefs/btree_cache.c

@@ -814,7 +814,7 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 		 * We might have got -EINTR because trylock failed, and we're
 		 * holding other locks that would cause us to deadlock:
 		 */
-		for_each_linked_btree_iter(iter, linked)
+		trans_for_each_iter(iter->trans, linked)
 			if (btree_iter_cmp(iter, linked) < 0)
 				__bch2_btree_iter_unlock(linked);
@@ -839,13 +839,13 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
 			}
 		}

-		bch2_btree_iter_relock(iter);
+		bch2_btree_trans_relock(iter->trans);
 	}
 out:
 	if (btree_lock_want(iter, level + 1) == BTREE_NODE_UNLOCKED)
 		btree_node_unlock(iter, level + 1);

-	bch2_btree_iter_verify_locks(iter);
+	bch2_btree_trans_verify_locks(iter->trans);

 	BUG_ON((!may_drop_locks || !IS_ERR(ret)) &&
 	       (iter->uptodate >= BTREE_ITER_NEED_RELOCK ||
fs/bcachefs/btree_iter.c

@@ -70,7 +70,7 @@ void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
 	EBUG_ON(iter->l[b->level].b != b);
 	EBUG_ON(iter->l[b->level].lock_seq + 1 != b->lock.state.seq);

-	for_each_btree_iter_with_node(iter, b, linked)
+	trans_for_each_iter_with_node(iter->trans, b, linked)
 		linked->l[b->level].lock_seq += 2;

 	six_unlock_write(&b->lock);
@@ -83,7 +83,7 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
 	EBUG_ON(btree_node_read_locked(iter, b->level));

-	for_each_linked_btree_iter(iter, linked)
+	trans_for_each_iter(iter->trans, linked)
 		if (linked->l[b->level].b == b &&
 		    btree_node_read_locked(linked, b->level))
 			readers++;
@@ -187,7 +187,8 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter,
 	if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
 		iter->uptodate = BTREE_ITER_NEED_PEEK;

-	bch2_btree_iter_verify_locks(iter);
+	bch2_btree_trans_verify_locks(iter->trans);
+
 	return iter->uptodate < BTREE_ITER_NEED_RELOCK;
 }
@@ -202,7 +203,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 	bool ret = true;

 	/* Check if it's safe to block: */
-	for_each_btree_iter(iter, linked) {
+	trans_for_each_iter(iter->trans, linked) {
 		if (!linked->nodes_locked)
 			continue;
@@ -262,7 +263,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 /* Btree iterator locking: */

 #ifdef CONFIG_BCACHEFS_DEBUG
-void __bch2_btree_iter_verify_locks(struct btree_iter *iter)
+void bch2_btree_iter_verify_locks(struct btree_iter *iter)
 {
 	unsigned l;
@@ -279,35 +280,23 @@ void __bch2_btree_iter_verify_locks(struct btree_iter *iter)
 	}
 }

-void bch2_btree_iter_verify_locks(struct btree_iter *iter)
+void bch2_btree_trans_verify_locks(struct btree_trans *trans)
 {
-	struct btree_iter *linked;
+	struct btree_iter *iter;

-	for_each_btree_iter(iter, linked)
-		__bch2_btree_iter_verify_locks(linked);
+	trans_for_each_iter(trans, iter)
+		bch2_btree_iter_verify_locks(iter);
 }
 #endif

 __flatten
-static bool __bch2_btree_iter_relock(struct btree_iter *iter)
+static bool bch2_btree_iter_relock(struct btree_iter *iter)
 {
 	return iter->uptodate >= BTREE_ITER_NEED_RELOCK
 		? btree_iter_get_locks(iter, false)
 		: true;
 }

-bool bch2_btree_iter_relock(struct btree_iter *iter)
-{
-	struct btree_iter *linked;
-	bool ret = true;
-
-	for_each_btree_iter(iter, linked)
-		ret &= __bch2_btree_iter_relock(linked);
-
-	return ret;
-}
-
 bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
 			       unsigned new_locks_want)
 {
@@ -325,8 +314,9 @@ bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
 	 * on iterators that might lock ancestors before us to avoid getting
 	 * -EINTR later:
 	 */
-	for_each_linked_btree_iter(iter, linked)
-		if (linked->btree_id == iter->btree_id &&
+	trans_for_each_iter(iter->trans, linked)
+		if (linked != iter &&
+		    linked->btree_id == iter->btree_id &&
 		    btree_iter_cmp(linked, iter) <= 0 &&
 		    linked->locks_want < new_locks_want) {
 			linked->locks_want = new_locks_want;
@@ -371,7 +361,7 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter,
 	 * might have had to modify locks_want on linked iterators due to lock
 	 * ordering:
 	 */
-	for_each_btree_iter(iter, linked) {
+	trans_for_each_iter(iter->trans, linked) {
 		unsigned new_locks_want = downgrade_to ?:
 			(linked->flags & BTREE_ITER_INTENT ? 1 : 0);
@@ -394,19 +384,40 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter,
 		}
 	}

-	bch2_btree_iter_verify_locks(iter);
+	bch2_btree_trans_verify_locks(iter->trans);
 }

 int bch2_btree_iter_unlock(struct btree_iter *iter)
 {
 	struct btree_iter *linked;

-	for_each_btree_iter(iter, linked)
+	trans_for_each_iter(iter->trans, linked)
 		__bch2_btree_iter_unlock(linked);

-	return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
+	return btree_iter_err(iter);
 }

+bool bch2_btree_trans_relock(struct btree_trans *trans)
+{
+	struct btree_iter *iter;
+	bool ret = true;
+
+	trans_for_each_iter(trans, iter)
+		ret &= bch2_btree_iter_relock(iter);
+
+	return ret;
+}
+
+void bch2_btree_trans_unlock(struct btree_trans *trans)
+{
+	struct btree_iter *iter;
+
+	trans_for_each_iter(trans, iter)
+		__bch2_btree_iter_unlock(iter);
+}
+
+/* Btree transaction locking: */
+
 /* Btree iterator: */

 #ifdef CONFIG_BCACHEFS_DEBUG
@@ -464,7 +475,7 @@ void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
 {
 	struct btree_iter *linked;

-	for_each_btree_iter_with_node(iter, b, linked)
+	trans_for_each_iter_with_node(iter->trans, b, linked)
 		__bch2_btree_iter_verify(linked, b);
 }
@@ -618,7 +629,7 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
 	__bch2_btree_node_iter_fix(iter, b, node_iter, t,
 				   where, clobber_u64s, new_u64s);

-	for_each_btree_iter_with_node(iter, b, linked)
+	trans_for_each_iter_with_node(iter->trans, b, linked)
 		__bch2_btree_node_iter_fix(linked, b,
 					   &linked->l[b->level].iter, t,
 					   where, clobber_u64s, new_u64s);
@@ -776,7 +787,7 @@ void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
 	enum btree_node_locked_type t;
 	struct btree_iter *linked;

-	for_each_btree_iter(iter, linked)
+	trans_for_each_iter(iter->trans, linked)
 		if (btree_iter_pos_in_node(linked, b)) {
 			/*
 			 * bch2_btree_iter_node_drop() has already been called -
@@ -810,7 +821,7 @@ void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
 	iter->l[level].b = BTREE_ITER_NOT_END;
 	mark_btree_node_unlocked(iter, level);

-	for_each_btree_iter(iter, linked)
+	trans_for_each_iter(iter->trans, linked)
 		if (linked->l[level].b == b) {
 			__btree_node_unlock(linked, level);
 			linked->l[level].b = BTREE_ITER_NOT_END;
@@ -825,7 +836,7 @@ void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
 {
 	struct btree_iter *linked;

-	for_each_btree_iter_with_node(iter, b, linked)
+	trans_for_each_iter_with_node(iter->trans, b, linked)
 		__btree_iter_init(linked, b->level);
 }
@@ -1005,7 +1016,7 @@ static int btree_iter_traverse_error(struct btree_iter *iter, int ret)
 		iter = iter->next;
 	} while (iter != sorted_iters);

-	ret = btree_iter_linked(iter) ? -EINTR : 0;
+	ret = btree_trans_has_multiple_iters(iter->trans) ? -EINTR : 0;
 out:
 	bch2_btree_cache_cannibalize_unlock(c);
 	return ret;
@@ -1051,7 +1062,7 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
 	if (unlikely(iter->level >= BTREE_MAX_DEPTH))
 		return 0;

-	if (__bch2_btree_iter_relock(iter))
+	if (bch2_btree_iter_relock(iter))
 		return 0;

 	/*
@@ -1091,7 +1102,7 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
 	iter->uptodate = BTREE_ITER_NEED_PEEK;

-	bch2_btree_iter_verify_locks(iter);
+	bch2_btree_trans_verify_locks(iter->trans);
 	__bch2_btree_iter_verify(iter, iter->l[iter->level].b);
 	return 0;
 }
@@ -1104,7 +1115,7 @@ int __must_check bch2_btree_iter_traverse(struct btree_iter *iter)
 	if (unlikely(ret))
 		ret = btree_iter_traverse_error(iter, ret);

-	BUG_ON(ret == -EINTR && !btree_iter_linked(iter));
+	BUG_ON(ret == -EINTR && !btree_trans_has_multiple_iters(iter->trans));

 	return ret;
 }
@@ -1117,7 +1128,7 @@ static inline void bch2_btree_iter_checks(struct btree_iter *iter,
 		(iter->btree_id == BTREE_ID_EXTENTS &&
 		 type != BTREE_ITER_NODES));

-	bch2_btree_iter_verify_locks(iter);
+	bch2_btree_trans_verify_locks(iter->trans);
 }

 /* Iterate across nodes (leaf and interior nodes) */
@@ -1619,7 +1630,7 @@ static void bch2_btree_iter_unlink(struct btree_iter *iter)
 	if (!btree_iter_linked(iter))
 		return;

-	for_each_linked_btree_iter(iter, linked)
+	trans_for_each_iter(iter->trans, linked)
 		if (linked->next == iter) {
 			linked->next = iter->next;
 			iter->next = iter;
@@ -1686,7 +1697,7 @@ int bch2_trans_iter_put(struct btree_trans *trans,
 			struct btree_iter *iter)
 {
 	ssize_t idx = btree_trans_iter_idx(trans, iter);
-	int ret = (iter->flags & BTREE_ITER_ERROR) ? -EIO : 0;
+	int ret = btree_iter_err(iter);

 	trans->iters_live &= ~(1ULL << idx);
 	return ret;
@@ -1706,7 +1717,7 @@ static inline void __bch2_trans_iter_free(struct btree_trans *trans,
 int bch2_trans_iter_free(struct btree_trans *trans,
 			 struct btree_iter *iter)
 {
-	int ret = (iter->flags & BTREE_ITER_ERROR) ? -EIO : 0;
+	int ret = btree_iter_err(iter);

 	__bch2_trans_iter_free(trans, btree_trans_iter_idx(trans, iter));
 	return ret;
@@ -1715,7 +1726,7 @@ int bch2_trans_iter_free(struct btree_trans *trans,
 int bch2_trans_iter_free_on_commit(struct btree_trans *trans,
 				   struct btree_iter *iter)
 {
-	int ret = (iter->flags & BTREE_ITER_ERROR) ? -EIO : 0;
+	int ret = btree_iter_err(iter);

 	trans->iters_unlink_on_commit |=
 		1ULL << btree_trans_iter_idx(trans, iter);
@@ -1966,8 +1977,7 @@ int bch2_trans_unlock(struct btree_trans *trans)
 		unsigned idx = __ffs(iters);
 		struct btree_iter *iter = &trans->iters[idx];

-		if (iter->flags & BTREE_ITER_ERROR)
-			ret = -EIO;
+		ret = ret ?: btree_iter_err(iter);

 		__bch2_btree_iter_unlock(iter);
 		iters ^= 1 << idx;
fs/bcachefs/btree_iter.h

@@ -23,11 +23,44 @@ static inline struct btree *btree_node_parent(struct btree_iter *iter,
 	return btree_iter_node(iter, b->level + 1);
 }

+static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
+{
+	return hweight64(trans->iters_linked) > 1;
+}
+
 static inline bool btree_iter_linked(const struct btree_iter *iter)
 {
 	return iter->next != iter;
 }

+static inline int btree_iter_err(const struct btree_iter *iter)
+{
+	return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
+}
+
+/* Iterate over iters within a transaction: */
+
+static inline struct btree_iter *
+__trans_next_iter(struct btree_trans *trans, struct btree_iter *iter)
+{
+	unsigned idx;
+
+	/* XXX expensive pointer subtraction: */
+
+	for (idx = iter - trans->iters;
+	     idx < trans->nr_iters;
+	     idx++)
+		if (trans->iters_linked & (1ULL << idx))
+			return &trans->iters[idx];
+
+	return NULL;
+}
+
+#define trans_for_each_iter(_trans, _iter)				\
+	for (_iter = (_trans)->iters;					\
+	     (_iter = __trans_next_iter((_trans), _iter));		\
+	     _iter++)
+
 static inline bool __iter_has_node(const struct btree_iter *iter,
 				   const struct btree *b)
 {
@@ -44,59 +77,39 @@ static inline bool __iter_has_node(const struct btree_iter *iter,
 }

 static inline struct btree_iter *
-__next_linked_iter(struct btree_iter *iter, struct btree_iter *linked)
+__trans_next_iter_with_node(struct btree_trans *trans, struct btree *b,
+			    struct btree_iter *iter)
 {
-	return linked->next != iter ? linked->next : NULL;
-}
-
-static inline struct btree_iter *
-__next_iter_with_node(struct btree_iter *iter, struct btree *b,
-		      struct btree_iter *linked)
-{
-	while (linked && !__iter_has_node(linked, b))
-		linked = __next_linked_iter(iter, linked);
+	unsigned idx;

-	return linked;
+	/* XXX expensive pointer subtraction: */
+
+	for (idx = iter - trans->iters;
+	     idx < trans->nr_iters;
+	     idx++) {
+		if (!(trans->iters_linked & (1ULL << idx)))
+			continue;
+
+		iter = &trans->iters[idx];
+		if (__iter_has_node(iter, b))
+			return iter;
+	}
+
+	return NULL;
 }

-/**
- * for_each_btree_iter - iterate over all iterators linked with @_iter,
- * including @_iter
- */
-#define for_each_btree_iter(_iter, _linked)				\
-	for ((_linked) = (_iter); (_linked);				\
-	     (_linked) = __next_linked_iter(_iter, _linked))
-
-/**
- * for_each_btree_iter_with_node - iterate over all iterators linked with @_iter
- * that also point to @_b
- *
- * @_b is assumed to be locked by @_iter
- *
- * Filters out iterators that don't have a valid btree_node iterator for @_b -
- * i.e. iterators for which bch2_btree_node_relock() would not succeed.
- */
-#define for_each_btree_iter_with_node(_iter, _b, _linked)		\
-	for ((_linked) = (_iter);					\
-	     ((_linked) = __next_iter_with_node(_iter, _b, _linked));	\
-	     (_linked) = __next_linked_iter(_iter, _linked))
-
-/**
- * for_each_linked_btree_iter - iterate over all iterators linked with @_iter,
- * _not_ including @_iter
- */
-#define for_each_linked_btree_iter(_iter, _linked)			\
-	for ((_linked) = (_iter)->next;					\
-	     (_linked) != (_iter);					\
-	     (_linked) = (_linked)->next)
+#define trans_for_each_iter_with_node(_trans, _b, _iter)		\
+	for (_iter = (_trans)->iters;					\
+	     (_iter = __trans_next_iter_with_node((_trans), (_b), _iter));\
+	     _iter++)

 #ifdef CONFIG_BCACHEFS_DEBUG
 void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
-void bch2_btree_iter_verify_locks(struct btree_iter *);
+void bch2_btree_trans_verify_locks(struct btree_trans *);
 #else
 static inline void bch2_btree_iter_verify(struct btree_iter *iter,
 					  struct btree *b) {}
-static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
+static inline void bch2_btree_trans_verify_locks(struct btree_trans *iter) {}
 #endif

 void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
@@ -104,7 +117,9 @@ void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
 			      unsigned, unsigned);

 int bch2_btree_iter_unlock(struct btree_iter *);
-bool bch2_btree_iter_relock(struct btree_iter *);
+
+bool bch2_btree_trans_relock(struct btree_trans *);
+void bch2_btree_trans_unlock(struct btree_trans *);

 bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
 bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *, unsigned);
@@ -252,7 +267,7 @@ static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
 	     !IS_ERR_OR_NULL((_k).k);					\
 	     (_k) = __bch2_btree_iter_next(_iter, _flags))

-static inline int btree_iter_err(struct bkey_s_c k)
+static inline int bkey_err(struct bkey_s_c k)
 {
 	return PTR_ERR_OR_ZERO(k.k);
 }
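A note on the new iteration helpers above: trans_for_each_iter() is driven by __trans_next_iter(), which scans the transaction's iters_linked bitmap for the next allocated iterator, so the walk no longer depends on per-iterator next pointers. A short, hypothetical usage sketch (only the macro and the btree_trans/btree_iter types come from the header above):

    /* Hypothetical example - counts the live iterators in a transaction. */
    static unsigned example_count_iters(struct btree_trans *trans)
    {
            struct btree_iter *iter;
            unsigned nr = 0;

            trans_for_each_iter(trans, iter)
                    nr++;

            return nr;
    }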
fs/bcachefs/btree_locking.h

@@ -163,8 +163,9 @@ static inline bool btree_node_lock_increment(struct btree_iter *iter,
 {
 	struct btree_iter *linked;

-	for_each_linked_btree_iter(iter, linked)
-		if (linked->l[level].b == b &&
+	trans_for_each_iter(iter->trans, linked)
+		if (linked != iter &&
+		    linked->l[level].b == b &&
 		    btree_node_locked_type(linked, level) >= want) {
 			six_lock_increment(&b->lock, want);
 			return true;
fs/bcachefs/btree_update_interior.c

@@ -246,7 +246,7 @@ void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
 {
 	struct btree_iter *linked;

-	for_each_btree_iter(iter, linked)
+	trans_for_each_iter(iter->trans, linked)
 		BUG_ON(linked->l[b->level].b == b);

 	/*
@@ -1438,7 +1438,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
 	bch2_btree_node_free_inmem(c, b, iter);

-	bch2_btree_iter_verify_locks(iter);
+	bch2_btree_trans_verify_locks(iter->trans);

 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_split],
 			       start_time);
@@ -1474,7 +1474,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as, struct btree *b,
 	btree_update_updated_node(as, b);

-	for_each_btree_iter_with_node(iter, b, linked)
+	trans_for_each_iter_with_node(iter->trans, b, linked)
 		bch2_btree_node_iter_peek(&linked->l[b->level].iter, b);

 	bch2_btree_iter_verify(iter, b);
@@ -1559,7 +1559,7 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
 	 * We already have a disk reservation and open buckets pinned; this
 	 * allocation must not block:
 	 */
-	for_each_btree_iter(iter, linked)
+	trans_for_each_iter(iter->trans, linked)
 		if (linked->btree_id == BTREE_ID_EXTENTS)
 			flags |= BTREE_INSERT_USE_RESERVE;
@@ -1571,10 +1571,10 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
 		if (flags & BTREE_INSERT_NOUNLOCK)
 			return -EINTR;

-		bch2_btree_iter_unlock(iter);
+		bch2_btree_trans_unlock(iter->trans);
 		down_read(&c->gc_lock);

-		if (btree_iter_linked(iter))
+		if (!bch2_btree_trans_relock(iter->trans))
 			ret = -EINTR;
 	}
@@ -1753,7 +1753,7 @@ void __bch2_foreground_maybe_merge(struct bch_fs *c,
 	if (!(flags & BTREE_INSERT_GC_LOCK_HELD))
 		up_read(&c->gc_lock);
 out:
-	bch2_btree_iter_verify_locks(iter);
+	bch2_btree_trans_verify_locks(iter->trans);

 	/*
 	 * Don't downgrade locks here: we're called after successful insert,
@@ -2036,10 +2036,10 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
 		return -EINTR;

 	if (!down_read_trylock(&c->gc_lock)) {
-		bch2_btree_iter_unlock(iter);
+		bch2_btree_trans_unlock(iter->trans);
 		down_read(&c->gc_lock);

-		if (!bch2_btree_iter_relock(iter)) {
+		if (!bch2_btree_trans_relock(iter->trans)) {
 			ret = -EINTR;
 			goto err;
 		}
@@ -2050,16 +2050,16 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
 		/* bch2_btree_reserve_get will unlock */
 		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
 		if (ret) {
-			ret = -EINTR;
-
-			bch2_btree_iter_unlock(iter);
+			bch2_btree_trans_unlock(iter->trans);
 			up_read(&c->gc_lock);
 			closure_sync(&cl);
 			down_read(&c->gc_lock);

-			if (!bch2_btree_iter_relock(iter))
+			if (!bch2_btree_trans_relock(iter->trans)) {
+				ret = -EINTR;
 				goto err;
+			}
 		}

 		new_hash = bch2_btree_node_mem_alloc(c);
 	}
@@ -2079,12 +2079,12 @@ int bch2_btree_node_update_key(struct bch_fs *c, struct btree_iter *iter,
 		if (ret != -EINTR)
 			goto err;

-		bch2_btree_iter_unlock(iter);
+		bch2_btree_trans_unlock(iter->trans);
 		up_read(&c->gc_lock);
 		closure_sync(&cl);
 		down_read(&c->gc_lock);

-		if (!bch2_btree_iter_relock(iter))
+		if (!bch2_btree_trans_relock(iter->trans))
 			goto err;
 	}
fs/bcachefs/btree_update_leaf.c

@@ -51,25 +51,6 @@ static void btree_trans_unlock_write(struct btree_trans *trans)
 		bch2_btree_node_unlock_write(i->iter->l[0].b, i->iter);
 }

-static bool btree_trans_relock(struct btree_trans *trans)
-{
-	struct btree_insert_entry *i;
-
-	trans_for_each_update_iter(trans, i)
-		return bch2_btree_iter_relock(i->iter);
-	return true;
-}
-
-static void btree_trans_unlock(struct btree_trans *trans)
-{
-	struct btree_insert_entry *i;
-
-	trans_for_each_update_iter(trans, i) {
-		bch2_btree_iter_unlock(i->iter);
-		break;
-	}
-}
-
 static inline int btree_trans_cmp(struct btree_insert_entry l,
 				  struct btree_insert_entry r)
 {
@@ -422,8 +403,6 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
 		EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
 			!(trans->flags & BTREE_INSERT_ATOMIC));
-
-		bch2_btree_iter_verify_locks(i->iter);
 	}

 	BUG_ON(debug_check_bkeys(c) &&
@@ -451,14 +430,14 @@ static int bch2_trans_journal_preres_get(struct btree_trans *trans)
 	if (ret != -EAGAIN)
 		return ret;

-	btree_trans_unlock(trans);
+	bch2_btree_trans_unlock(trans);

 	ret = bch2_journal_preres_get(&c->journal,
 			&trans->journal_preres, u64s, 0);
 	if (ret)
 		return ret;

-	if (!btree_trans_relock(trans)) {
+	if (!bch2_btree_trans_relock(trans)) {
 		trans_restart(" (iter relock after journal preres get blocked)");
 		return -EINTR;
 	}
@@ -617,12 +596,9 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 	 * have been traversed/locked, depending on what the caller was
 	 * doing:
 	 */
-	trans_for_each_update_iter(trans, i) {
-		for_each_btree_iter(i->iter, linked)
-			if (linked->uptodate < BTREE_ITER_NEED_RELOCK)
-				linked->flags |= BTREE_ITER_NOUNLOCK;
-		break;
-	}
+	trans_for_each_iter(trans, linked)
+		if (linked->uptodate < BTREE_ITER_NEED_RELOCK)
+			linked->flags |= BTREE_ITER_NOUNLOCK;

 	trans_for_each_update_iter(trans, i)
@@ -707,20 +683,20 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 			return ret;
 		}

-		if (btree_trans_relock(trans))
+		if (bch2_btree_trans_relock(trans))
 			return 0;

 		trans_restart(" (iter relock after marking replicas)");
 		ret = -EINTR;
 		break;
 	case BTREE_INSERT_NEED_JOURNAL_RES:
-		btree_trans_unlock(trans);
+		bch2_btree_trans_unlock(trans);

 		ret = bch2_trans_journal_res_get(trans, JOURNAL_RES_GET_CHECK);
 		if (ret)
 			return ret;

-		if (btree_trans_relock(trans))
+		if (bch2_btree_trans_relock(trans))
 			return 0;

 		trans_restart(" (iter relock after journal res get blocked)");
@@ -785,11 +761,10 @@ static int __bch2_trans_commit(struct btree_trans *trans,
 			goto err;
 		}

-		if (i->iter->flags & BTREE_ITER_ERROR) {
-			ret = -EIO;
+		ret = btree_iter_err(i->iter);
+		if (ret)
 			goto err;
-		}
 	}

 	ret = do_btree_insert_at(trans, stopped_at);
 	if (unlikely(ret))
@@ -802,16 +777,10 @@ static int __bch2_trans_commit(struct btree_trans *trans,
 		bch2_btree_iter_downgrade(i->iter);
 err:
 	/* make sure we didn't drop or screw up locks: */
-	trans_for_each_update_iter(trans, i) {
-		bch2_btree_iter_verify_locks(i->iter);
-		break;
-	}
+	bch2_btree_trans_verify_locks(trans);

-	trans_for_each_update_iter(trans, i) {
-		for_each_btree_iter(i->iter, linked)
-			linked->flags &= ~BTREE_ITER_NOUNLOCK;
-		break;
-	}
+	trans_for_each_iter(trans, linked)
+		linked->flags &= ~BTREE_ITER_NOUNLOCK;

 	return ret;
 }
@@ -847,13 +816,14 @@ int bch2_trans_commit(struct btree_trans *trans,
 	trans_for_each_update(trans, i)
 		btree_insert_entry_checks(trans, i);
+	bch2_btree_trans_verify_locks(trans);

 	if (unlikely(!(trans->flags & BTREE_INSERT_NOCHECK_RW) &&
 		     !percpu_ref_tryget(&c->writes))) {
 		if (likely(!(trans->flags & BTREE_INSERT_LAZY_RW)))
 			return -EROFS;

-		btree_trans_unlock(trans);
+		bch2_btree_trans_unlock(trans);

 		ret = bch2_fs_read_write_early(c);
 		if (ret)
@@ -861,7 +831,7 @@ int bch2_trans_commit(struct btree_trans *trans,
 		percpu_ref_get(&c->writes);

-		if (!btree_trans_relock(trans)) {
+		if (!bch2_btree_trans_relock(trans)) {
 			ret = -EINTR;
 			goto err;
 		}
@@ -962,7 +932,7 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
 	iter = bch2_trans_get_iter(&trans, id, start, BTREE_ITER_INTENT);

 	while ((k = bch2_btree_iter_peek(iter)).k &&
-	       !(ret = btree_iter_err(k)) &&
+	       !(ret = bkey_err(k)) &&
 	       bkey_cmp(iter->pos, end) < 0) {
 		unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
 		/* really shouldn't be using a bare, unpadded bkey_i */
fs/bcachefs/debug.c

@@ -226,7 +226,7 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
 	iter = bch2_trans_get_iter(&trans, i->id, i->from, BTREE_ITER_PREFETCH);
 	k = bch2_btree_iter_peek(iter);

-	while (k.k && !(err = btree_iter_err(k))) {
+	while (k.k && !(err = bkey_err(k))) {
 		bch2_bkey_val_to_text(&PBUF(i->buf), i->c, k);
 		i->bytes = strlen(i->buf);
 		BUG_ON(i->bytes >= PAGE_SIZE);
@@ -333,7 +333,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
 	iter = bch2_trans_get_iter(&trans, i->id, i->from, BTREE_ITER_PREFETCH);

 	while ((k = bch2_btree_iter_peek(iter)).k &&
-	       !(err = btree_iter_err(k))) {
+	       !(err = bkey_err(k))) {
 		struct btree_iter_level *l = &iter->l[0];
 		struct bkey_packed *_k =
 			bch2_btree_node_iter_peek(&l->iter, l->b);
fs/bcachefs/ec.c

@@ -426,7 +426,7 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
 					POS(0, stripe_idx),
 					BTREE_ITER_SLOTS);
 	k = bch2_btree_iter_peek_slot(iter);
-	if (btree_iter_err(k) || k.k->type != KEY_TYPE_stripe) {
+	if (bkey_err(k) || k.k->type != KEY_TYPE_stripe) {
 		__bcache_io_error(c,
 			"error doing reconstruct read: stripe not found");
 		kfree(buf);
@@ -541,7 +541,7 @@ static int ec_stripe_mem_alloc(struct bch_fs *c,
 	if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT|__GFP_NOWARN))
 		return 0;

-	bch2_btree_iter_unlock(iter);
+	bch2_btree_trans_unlock(iter->trans);

 	if (!__ec_stripe_mem_alloc(c, idx, GFP_KERNEL))
 		return -EINTR;
@@ -750,7 +750,7 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
 				   BTREE_ITER_INTENT);

 	while ((k = bch2_btree_iter_peek(iter)).k &&
-	       !(ret = btree_iter_err(k)) &&
+	       !(ret = bkey_err(k)) &&
 	       bkey_cmp(bkey_start_pos(k.k), pos->p) < 0) {
 		idx = extent_matches_stripe(c, &s->key.v, k);
 		if (idx < 0) {
@@ -1170,7 +1170,7 @@ static int __bch2_stripe_write_key(struct btree_trans *trans,
 	bch2_btree_iter_set_pos(iter, POS(0, idx));

 	k = bch2_btree_iter_peek_slot(iter);
-	ret = btree_iter_err(k);
+	ret = bkey_err(k);
 	if (ret)
 		return ret;
fs/bcachefs/fs-io.c

@@ -251,7 +251,7 @@ static int sum_sector_overwrites(struct btree_trans *trans,
 	 * carefully not advancing past @new and thus whatever leaf node
 	 * @_iter currently points to:
 	 */
-	BUG_ON(btree_iter_err(old));
+	BUG_ON(bkey_err(old));

 	if (allocating &&
 	    !*allocating &&
@@ -322,10 +322,10 @@ static int bch2_extent_update(struct btree_trans *trans,
 	if (i_sectors_delta ||
 	    new_i_size > inode->ei_inode.bi_size) {
 		if (c->opts.new_inode_updates) {
-			bch2_btree_iter_unlock(extent_iter);
+			bch2_btree_trans_unlock(trans);
 			mutex_lock(&inode->ei_update_lock);

-			if (!bch2_btree_iter_relock(extent_iter)) {
+			if (!bch2_btree_trans_relock(trans)) {
 				mutex_unlock(&inode->ei_update_lock);
 				return -EINTR;
 			}
@@ -921,10 +921,11 @@ static void readpage_bio_extend(struct readpages_iter *iter,
 	}
 }

-static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
+static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 		       struct bch_read_bio *rbio, u64 inum,
 		       struct readpages_iter *readpages_iter)
 {
+	struct bch_fs *c = trans->c;
 	struct bio *bio = &rbio->bio;
 	int flags = BCH_READ_RETRY_IF_STALE|
 		BCH_READ_MAY_PROMOTE;
@@ -943,7 +944,7 @@ static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
 		BUG_ON(!k.k);

 		if (IS_ERR(k.k)) {
-			int ret = bch2_btree_iter_unlock(iter);
+			int ret = btree_iter_err(iter);
 			BUG_ON(!ret);
 			bcache_io_error(c, bio, "btree IO error %i", ret);
 			bio_endio(bio);
@@ -951,7 +952,7 @@ static void bchfs_read(struct bch_fs *c, struct btree_iter *iter,
 		}

 		bkey_reassemble(&tmp.k, k);
-		bch2_btree_iter_unlock(iter);
+		bch2_btree_trans_unlock(trans);
 		k = bkey_i_to_s_c(&tmp.k);

 		if (readpages_iter) {
@@ -1030,7 +1031,8 @@ void bch2_readahead(struct readahead_control *ractl)
 		rbio->bio.bi_end_io = bch2_readpages_end_io;
 		__bio_add_page(&rbio->bio, page, PAGE_SIZE, 0);

-		bchfs_read(c, iter, rbio, inode->v.i_ino, &readpages_iter);
+		bchfs_read(&trans, iter, rbio, inode->v.i_ino,
+			   &readpages_iter);
 	}

 	bch2_pagecache_add_put(&inode->ei_pagecache_lock);
@@ -1054,7 +1056,7 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
 	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
 				   BTREE_ITER_SLOTS);

-	bchfs_read(c, iter, rbio, inum, NULL);
+	bchfs_read(&trans, iter, rbio, inum, NULL);

 	bch2_trans_exit(&trans);
 }
@@ -2098,7 +2100,7 @@ static int __bch2_fpunch(struct bch_fs *c, struct bch_inode_info *inode,
 				   BTREE_ITER_INTENT);

 	while ((k = bch2_btree_iter_peek(iter)).k &&
-	       !(ret = btree_iter_err(k)) &&
+	       !(ret = bkey_err(k)) &&
 	       bkey_cmp(iter->pos, end) < 0) {
 		struct disk_reservation disk_res =
 			bch2_disk_reservation_init(c, 0);
@@ -2437,14 +2439,14 @@ static long bch2_fcollapse(struct bch_inode_info *inode,
 		ret = bch2_btree_iter_traverse(dst);
 		if (ret)
-			goto btree_iter_err;
+			goto bkey_err;

 		bch2_btree_iter_set_pos(src,
 			POS(dst->pos.inode, dst->pos.offset + (len >> 9)));

 		k = bch2_btree_iter_peek_slot(src);
-		if ((ret = btree_iter_err(k)))
-			goto btree_iter_err;
+		if ((ret = bkey_err(k)))
+			goto bkey_err;

 		bkey_reassemble(&copy.k, k);
@@ -2465,7 +2467,7 @@ static long bch2_fcollapse(struct bch_inode_info *inode,
 					   dst, &copy.k,
 					   0, true, true, NULL);
 		bch2_disk_reservation_put(c, &disk_res);
-btree_iter_err:
+bkey_err:
 		if (ret == -EINTR)
 			ret = 0;
 		if (ret)
@@ -2559,8 +2561,8 @@ static long bch2_fallocate(struct bch_inode_info *inode, int mode,
 		struct bkey_s_c k;

 		k = bch2_btree_iter_peek_slot(iter);
-		if ((ret = btree_iter_err(k)))
-			goto btree_iter_err;
+		if ((ret = bkey_err(k)))
+			goto bkey_err;

 		/* already reserved */
 		if (k.k->type == KEY_TYPE_reservation &&
@@ -2591,7 +2593,7 @@ static long bch2_fallocate(struct bch_inode_info *inode, int mode,
 							&quota_res,
 							sectors, true);
 			if (unlikely(ret))
-				goto btree_iter_err;
+				goto bkey_err;
 		}

 		if (reservation.v.nr_replicas < replicas ||
@@ -2599,7 +2601,7 @@ static long bch2_fallocate(struct bch_inode_info *inode, int mode,
 			ret = bch2_disk_reservation_get(c, &disk_res, sectors,
 							replicas, 0);
 			if (unlikely(ret))
-				goto btree_iter_err;
+				goto bkey_err;

 			reservation.v.nr_replicas = disk_res.nr_replicas;
 		}
@@ -2608,7 +2610,7 @@ static long bch2_fallocate(struct bch_inode_info *inode, int mode,
 					  &disk_res, &quota_res,
 					  iter, &reservation.k_i,
 					  0, true, true, NULL);
-btree_iter_err:
+bkey_err:
 		bch2_quota_reservation_put(c, inode, &quota_res);
 		bch2_disk_reservation_put(c, &disk_res);
 		if (ret == -EINTR)
fs/bcachefs/fsck.c

@@ -33,9 +33,10 @@ static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum)
 	return bch2_trans_iter_free(trans, iter) ?: sectors;
 }

-static int remove_dirent(struct bch_fs *c, struct btree_iter *iter,
+static int remove_dirent(struct btree_trans *trans,
 			 struct bkey_s_c_dirent dirent)
 {
+	struct bch_fs *c = trans->c;
 	struct qstr name;
 	struct bch_inode_unpacked dir_inode;
 	struct bch_hash_info dir_hash_info;
@@ -52,8 +53,8 @@ static int remove_dirent(struct bch_fs *c, struct btree_iter *iter,
 	buf[name.len] = '\0';
 	name.name = buf;

-	/* Unlock iter so we don't deadlock, after copying name: */
-	bch2_btree_iter_unlock(iter);
+	/* Unlock so we don't deadlock, after copying name: */
+	bch2_btree_trans_unlock(trans);

 	ret = bch2_inode_find_by_inum(c, dir_inum, &dir_inode);
 	if (ret) {
@@ -143,29 +144,33 @@ static int walk_inode(struct bch_fs *c, struct inode_walker *w, u64 inum)
 struct hash_check {
 	struct bch_hash_info info;
-	struct btree_trans *trans;

 	/* start of current chain of hash collisions: */
 	struct btree_iter *chain;

 	/* next offset in current chain of hash collisions: */
-	u64 next;
+	u64 chain_end;
 };

-static void hash_check_init(const struct bch_hash_desc desc,
-			    struct btree_trans *trans,
+static void hash_check_init(struct hash_check *h)
+{
+	h->chain = NULL;
+}
+
+static void hash_stop_chain(struct btree_trans *trans,
 			    struct hash_check *h)
 {
-	h->trans = trans;
-	h->chain = bch2_trans_get_iter(trans, desc.btree_id, POS_MIN, 0);
-	h->next = -1;
+	if (h->chain)
+		bch2_trans_iter_free(trans, h->chain);
+	h->chain = NULL;
 }

-static void hash_check_set_inode(struct hash_check *h, struct bch_fs *c,
+static void hash_check_set_inode(struct btree_trans *trans,
+				 struct hash_check *h,
 				 const struct bch_inode_unpacked *bi)
 {
-	h->info = bch2_hash_info_init(c, bi);
-	h->next = -1;
+	h->info = bch2_hash_info_init(trans->c, bi);
+	hash_stop_chain(trans, h);
 }

 static int hash_redo_key(const struct bch_hash_desc desc,
@@ -186,8 +191,6 @@ static int hash_redo_key(const struct bch_hash_desc desc,
 	if (ret)
 		goto err;

-	bch2_btree_iter_unlock(k_iter);
-
 	bch2_hash_set(trans, desc, &h->info, k_iter->pos.inode,
 		      tmp, BCH_HASH_SET_MUST_CREATE);
 	ret = bch2_trans_commit(trans, NULL, NULL,
@@ -232,7 +235,7 @@ static int hash_check_duplicates(struct btree_trans *trans,
 	if (!bkey_cmp(h->chain->pos, k_iter->pos))
 		return 0;

-	iter = bch2_trans_copy_iter(h->trans, h->chain);
+	iter = bch2_trans_copy_iter(trans, h->chain);
 	BUG_ON(IS_ERR(iter));

 	for_each_btree_key_continue(iter, 0, k2) {
@@ -252,23 +255,39 @@ static int hash_check_duplicates(struct btree_trans *trans,
 		}
 	}
 fsck_err:
-	bch2_trans_iter_free(h->trans, iter);
+	bch2_trans_iter_free(trans, iter);
 	return ret;
 }

-static bool key_has_correct_hash(const struct bch_hash_desc desc,
-				 struct hash_check *h, struct bch_fs *c,
-				 struct btree_iter *k_iter, struct bkey_s_c k)
+static void hash_set_chain_start(struct btree_trans *trans,
+				 const struct bch_hash_desc desc,
+				 struct hash_check *h,
+				 struct btree_iter *k_iter, struct bkey_s_c k)
 {
-	u64 hash;
-
-	if (k.k->type != KEY_TYPE_whiteout &&
-	    k.k->type != desc.key_type)
-		return true;
+	bool hole = (k.k->type != KEY_TYPE_whiteout &&
+		     k.k->type != desc.key_type);

-	if (k.k->p.offset != h->next)
-		bch2_btree_iter_copy(h->chain, k_iter);
-	h->next = k.k->p.offset + 1;
+	if (hole || k.k->p.offset > h->chain_end + 1)
+		hash_stop_chain(trans, h);
+
+	if (!hole) {
+		if (!h->chain) {
+			h->chain = bch2_trans_copy_iter(trans, k_iter);
+			BUG_ON(IS_ERR(h->chain));
+		}
+
+		h->chain_end = k.k->p.offset;
+	}
+}
+
+static bool key_has_correct_hash(struct btree_trans *trans,
+				 const struct bch_hash_desc desc,
+				 struct hash_check *h,
+				 struct btree_iter *k_iter, struct bkey_s_c k)
+{
+	u64 hash;
+
+	hash_set_chain_start(trans, desc, h, k_iter, k);

 	if (k.k->type != desc.key_type)
 		return true;
@@ -288,13 +307,7 @@ static int hash_check_key(struct btree_trans *trans,
 	u64 hashed;
 	int ret = 0;

-	if (k.k->type != KEY_TYPE_whiteout &&
-	    k.k->type != desc.key_type)
-		return 0;
-
-	if (k.k->p.offset != h->next)
-		bch2_btree_iter_copy(h->chain, k_iter);
-	h->next = k.k->p.offset + 1;
+	hash_set_chain_start(trans, desc, h, k_iter, k);

 	if (k.k->type != desc.key_type)
 		return 0;
@@ -332,7 +345,7 @@ static int check_dirent_hash(struct btree_trans *trans, struct hash_check *h,
 	unsigned len;
 	u64 hash;

-	if (key_has_correct_hash(bch2_dirent_hash_desc, h, c, iter, *k))
+	if (key_has_correct_hash(trans, bch2_dirent_hash_desc, h, iter, *k))
 		return 0;

 	len = bch2_dirent_name_bytes(bkey_s_c_to_dirent(*k));
@@ -526,7 +539,7 @@ static int check_dirents(struct bch_fs *c)
 	iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
 				   POS(BCACHEFS_ROOT_INO, 0), 0);

-	hash_check_init(bch2_dirent_hash_desc, &trans, &h);
+	hash_check_init(&h);

 	for_each_btree_key_continue(iter, 0, k) {
 		struct bkey_s_c_dirent d;
@@ -554,7 +567,7 @@ static int check_dirents(struct bch_fs *c)
 		}

 		if (w.first_this_inode && w.have_inode)
-			hash_check_set_inode(&h, c, &w.inode);
+			hash_check_set_inode(&trans, &h, &w.inode);

 		ret = check_dirent_hash(&trans, &h, iter, &k);
 		if (ret > 0) {
@@ -587,7 +600,7 @@ static int check_dirents(struct bch_fs *c)
 				".. dirent") ||
 		    fsck_err_on(memchr(d.v->d_name, '/', name_len), c,
 				"dirent name has invalid chars")) {
-			ret = remove_dirent(c, iter, d);
+			ret = remove_dirent(&trans, d);
 			if (ret)
 				goto err;
 			continue;
@@ -597,7 +610,7 @@ static int check_dirents(struct bch_fs *c)
 			     "dirent points to own directory:\n%s",
 			     (bch2_bkey_val_to_text(&PBUF(buf), c,
 						    k), buf))) {
-			ret = remove_dirent(c, iter, d);
+			ret = remove_dirent(&trans, d);
 			if (ret)
 				goto err;
 			continue;
@@ -614,7 +627,7 @@ static int check_dirents(struct bch_fs *c)
 			     "dirent points to missing inode:\n%s",
 			     (bch2_bkey_val_to_text(&PBUF(buf), c,
 						    k), buf))) {
-			ret = remove_dirent(c, iter, d);
+			ret = remove_dirent(&trans, d);
 			if (ret)
 				goto err;
 			continue;
@@ -650,6 +663,8 @@ static int check_dirents(struct bch_fs *c)
 		}
 	}

+	hash_stop_chain(&trans, &h);
+
 err:
 fsck_err:
 	return bch2_trans_exit(&trans) ?: ret;
@@ -677,7 +692,7 @@ static int check_xattrs(struct bch_fs *c)
 	iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
 				   POS(BCACHEFS_ROOT_INO, 0), 0);

-	hash_check_init(bch2_xattr_hash_desc, &trans, &h);
+	hash_check_init(&h);

 	for_each_btree_key_continue
(
iter
,
0
,
k
)
{
for_each_btree_key_continue
(
iter
,
0
,
k
)
{
ret
=
walk_inode
(
c
,
&
w
,
k
.
k
->
p
.
inode
);
ret
=
walk_inode
(
c
,
&
w
,
k
.
k
->
p
.
inode
);
...
@@ -694,7 +709,7 @@ static int check_xattrs(struct bch_fs *c)
...
@@ -694,7 +709,7 @@ static int check_xattrs(struct bch_fs *c)
}
}
if
(
w
.
first_this_inode
&&
w
.
have_inode
)
if
(
w
.
first_this_inode
&&
w
.
have_inode
)
hash_check_set_inode
(
&
h
,
c
,
&
w
.
inode
);
hash_check_set_inode
(
&
trans
,
&
h
,
&
w
.
inode
);
ret
=
hash_check_key
(
&
trans
,
bch2_xattr_hash_desc
,
ret
=
hash_check_key
(
&
trans
,
bch2_xattr_hash_desc
,
&
h
,
iter
,
k
);
&
h
,
iter
,
k
);
...
@@ -926,7 +941,7 @@ static int check_directory_structure(struct bch_fs *c,
...
@@ -926,7 +941,7 @@ static int check_directory_structure(struct bch_fs *c,
if
(
fsck_err_on
(
inode_bitmap_test
(
&
dirs_done
,
d_inum
),
c
,
if
(
fsck_err_on
(
inode_bitmap_test
(
&
dirs_done
,
d_inum
),
c
,
"directory %llu has multiple hardlinks"
,
"directory %llu has multiple hardlinks"
,
d_inum
))
{
d_inum
))
{
ret
=
remove_dirent
(
c
,
iter
,
dirent
);
ret
=
remove_dirent
(
&
trans
,
dirent
);
if
(
ret
)
if
(
ret
)
goto
err
;
goto
err
;
continue
;
continue
;
...
@@ -972,7 +987,7 @@ static int check_directory_structure(struct bch_fs *c,
...
@@ -972,7 +987,7 @@ static int check_directory_structure(struct bch_fs *c,
if
(
fsck_err_on
(
!
inode_bitmap_test
(
&
dirs_done
,
k
.
k
->
p
.
inode
),
c
,
if
(
fsck_err_on
(
!
inode_bitmap_test
(
&
dirs_done
,
k
.
k
->
p
.
inode
),
c
,
"unreachable directory found (inum %llu)"
,
"unreachable directory found (inum %llu)"
,
k
.
k
->
p
.
inode
))
{
k
.
k
->
p
.
inode
))
{
bch2_btree_
iter_unlock
(
iter
);
bch2_btree_
trans_unlock
(
&
trans
);
ret
=
reattach_inode
(
c
,
lostfound_inode
,
k
.
k
->
p
.
inode
);
ret
=
reattach_inode
(
c
,
lostfound_inode
,
k
.
k
->
p
.
inode
);
if
(
ret
)
{
if
(
ret
)
{
...
@@ -1187,6 +1202,9 @@ static int check_inode(struct btree_trans *trans,
...
@@ -1187,6 +1202,9 @@ static int check_inode(struct btree_trans *trans,
int
ret
=
0
;
int
ret
=
0
;
ret
=
bch2_inode_unpack
(
inode
,
&
u
);
ret
=
bch2_inode_unpack
(
inode
,
&
u
);
bch2_btree_trans_unlock
(
trans
);
if
(
bch2_fs_inconsistent_on
(
ret
,
c
,
if
(
bch2_fs_inconsistent_on
(
ret
,
c
,
"error unpacking inode %llu in fsck"
,
"error unpacking inode %llu in fsck"
,
inode
.
k
->
p
.
inode
))
inode
.
k
->
p
.
inode
))
...
@@ -1306,7 +1324,7 @@ static int bch2_gc_walk_inodes(struct bch_fs *c,
...
@@ -1306,7 +1324,7 @@ static int bch2_gc_walk_inodes(struct bch_fs *c,
nlinks_iter
=
genradix_iter_init
(
links
,
0
);
nlinks_iter
=
genradix_iter_init
(
links
,
0
);
while
((
k
=
bch2_btree_iter_peek
(
iter
)).
k
&&
while
((
k
=
bch2_btree_iter_peek
(
iter
)).
k
&&
!
(
ret2
=
b
tree_iter
_err
(
k
)))
{
!
(
ret2
=
b
key
_err
(
k
)))
{
peek_nlinks:
link
=
genradix_iter_peek
(
&
nlinks_iter
,
links
);
peek_nlinks:
link
=
genradix_iter_peek
(
&
nlinks_iter
,
links
);
if
(
!
link
&&
(
!
k
.
k
||
iter
->
pos
.
inode
>=
range_end
))
if
(
!
link
&&
(
!
k
.
k
||
iter
->
pos
.
inode
>=
range_end
))
...
@@ -1326,12 +1344,6 @@ peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links);
...
@@ -1326,12 +1344,6 @@ peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links);
link
=
&
zero_links
;
link
=
&
zero_links
;
if
(
k
.
k
&&
k
.
k
->
type
==
KEY_TYPE_inode
)
{
if
(
k
.
k
&&
k
.
k
->
type
==
KEY_TYPE_inode
)
{
/*
* Avoid potential deadlocks with iter for
* truncate/rm/etc.:
*/
bch2_btree_iter_unlock
(
iter
);
ret
=
check_inode
(
&
trans
,
lostfound_inode
,
iter
,
ret
=
check_inode
(
&
trans
,
lostfound_inode
,
iter
,
bkey_s_c_to_inode
(
k
),
link
);
bkey_s_c_to_inode
(
k
),
link
);
BUG_ON
(
ret
==
-
EINTR
);
BUG_ON
(
ret
==
-
EINTR
);
...
@@ -1402,7 +1414,7 @@ static int check_inodes_fast(struct bch_fs *c)
...
@@ -1402,7 +1414,7 @@ static int check_inodes_fast(struct bch_fs *c)
struct
btree_iter
*
iter
;
struct
btree_iter
*
iter
;
struct
bkey_s_c
k
;
struct
bkey_s_c
k
;
struct
bkey_s_c_inode
inode
;
struct
bkey_s_c_inode
inode
;
int
ret
=
0
;
int
ret
=
0
,
ret2
;
bch2_trans_init
(
&
trans
,
c
);
bch2_trans_init
(
&
trans
,
c
);
...
@@ -1426,12 +1438,9 @@ static int check_inodes_fast(struct bch_fs *c)
...
@@ -1426,12 +1438,9 @@ static int check_inodes_fast(struct bch_fs *c)
}
}
}
}
if
(
!
ret
)
ret2
=
bch2_trans_exit
(
&
trans
);
ret
=
bch2_btree_iter_unlock
(
iter
);
bch2_trans_exit
(
&
trans
);
return
ret
;
return
ret
?:
ret2
;
}
}
/*
/*
...
...
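The fsck.c hunks above fold the old inline chain bookkeeping into a new hash_set_chain_start() helper: it remembers where a run of hash-chained keys begins (h->chain / h->chain_end) and drops the chain when it sees a hole or a gap in slot offsets. The following is a minimal, self-contained sketch of that bookkeeping pattern only; the types and helper names are stand-ins, not the bcachefs structures.

/* Toy model of the chain bookkeeping factored into hash_set_chain_start():
 * remember where a run of contiguous slot offsets began, and reset the
 * marker whenever a hole or a gap in offsets appears.  Stand-in types only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_chain {
	bool     active;  /* do we currently have a chain start? */
	uint64_t start;   /* offset where the current run began   */
	uint64_t end;     /* last offset seen in the current run  */
};

static void toy_set_chain_start(struct toy_chain *c, bool hole, uint64_t offset)
{
	/* a hole, or a gap in offsets, terminates the current chain */
	if (hole || offset > c->end + 1)
		c->active = false;

	if (!hole) {
		if (!c->active) {       /* analogous to "if (!h->chain)" */
			c->active = true;
			c->start = offset;
		}
		c->end = offset;        /* analogous to h->chain_end */
	}
}

int main(void)
{
	struct toy_chain c = { 0 };
	const uint64_t offsets[] = { 10, 11, 12, 15, 16 };
	const bool     holes[]   = { false, false, true, false, false };

	for (unsigned i = 0; i < 5; i++) {
		toy_set_chain_start(&c, holes[i], offsets[i]);
		printf("offset %llu: chain %s, start %llu\n",
		       (unsigned long long)offsets[i],
		       c.active ? "active" : "reset",
		       (unsigned long long)c.start);
	}
	return 0;
}

The other point of the refactor visible in the diff is ownership: the chain iterator is now obtained from the transaction with bch2_trans_copy_iter() and released back to it, rather than being copied into a caller-owned iterator with bch2_btree_iter_copy().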
fs/bcachefs/inode.c
View file @
0f238367
...
@@ -323,7 +323,7 @@ int __bch2_inode_create(struct btree_trans *trans,
 	while (1) {
 		struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
 
-		ret = btree_iter_err(k);
+		ret = bkey_err(k);
 		if (ret)
 			return ret;
...
@@ -399,7 +399,7 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
 		struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
 		u32 bi_generation = 0;
 
-		ret = btree_iter_err(k);
+		ret = bkey_err(k);
 		if (ret)
 			break;
...
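Both inode.c hunks are instances of the mechanical change seen throughout this commit: btree_iter_err(k) becomes bkey_err(k), i.e. the error is decoded from the returned key itself rather than queried through the iterator. A rough sketch of that error-in-the-return-value idea follows; the encoding mimics the kernel's ERR_PTR convention but is an illustrative assumption, not the actual bkey_err() definition.

/* Illustrative only: an error-or-key return where the error is encoded in
 * the returned handle itself, so callers can write ret = toy_key_err(k).
 * Stand-in types, not the bcachefs implementation.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct toy_key_data { uint64_t offset; };
struct toy_key { const struct toy_key_data *k; };

static inline struct toy_key toy_err_key(int err)
{
	/* smuggle a negative errno through the pointer field */
	return (struct toy_key){ .k = (const struct toy_key_data *)(intptr_t)err };
}

static inline int toy_key_err(struct toy_key k)
{
	intptr_t v = (intptr_t)k.k;

	return (v < 0 && v >= -4095) ? (int)v : 0;   /* 0 means a valid key (or none) */
}

int main(void)
{
	struct toy_key_data d = { .offset = 42 };
	struct toy_key good = { .k = &d };
	struct toy_key bad  = toy_err_key(-EINTR);

	printf("good: err=%d\n", toy_key_err(good));  /* 0      */
	printf("bad:  err=%d\n", toy_key_err(bad));   /* -EINTR */
	return 0;
}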
fs/bcachefs/io.c
View file @
0f238367
...
@@ -1279,7 +1279,7 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio
 	rbio->bio.bi_status = 0;
 
 	k = bch2_btree_iter_peek_slot(iter);
-	if (btree_iter_err(k))
+	if (bkey_err(k))
 		goto err;
 
 	bkey_reassemble(&tmp.k, k);
...
@@ -1332,7 +1332,7 @@ static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio,
 		bkey_reassemble(&tmp.k, k);
 		k = bkey_i_to_s_c(&tmp.k);
-		bch2_btree_iter_unlock(iter);
+		bch2_btree_trans_unlock(&trans);
 
 		bytes = min_t(unsigned, bvec_iter.bi_size,
 			      (k.k->p.offset - bvec_iter.bi_sector) << 9);
...
@@ -1357,7 +1357,7 @@ static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio,
 	 * If we get here, it better have been because there was an error
 	 * reading a btree node
 	 */
-	BUG_ON(!(iter->flags & BTREE_ITER_ERROR));
+	BUG_ON(!btree_iter_err(iter));
 	__bcache_io_error(c, "btree IO error");
 err:
 	rbio->bio.bi_status = BLK_STS_IOERR;
...
@@ -1893,7 +1893,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
 		 */
 		bkey_reassemble(&tmp.k, k);
 		k = bkey_i_to_s_c(&tmp.k);
-		bch2_btree_iter_unlock(iter);
+		bch2_btree_trans_unlock(&trans);
 
 		bytes = min_t(unsigned, rbio->bio.bi_iter.bi_size,
 			      (k.k->p.offset - rbio->bio.bi_iter.bi_sector) << 9);
...
@@ -1915,7 +1915,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
 	 * If we get here, it better have been because there was an error
 	 * reading a btree node
 	 */
-	BUG_ON(!(iter->flags & BTREE_ITER_ERROR));
+	BUG_ON(!btree_iter_err(iter));
 	bcache_io_error(c, &rbio->bio, "btree IO error");
 	bch2_trans_exit(&trans);
...
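In io.c the retry paths now call bch2_btree_trans_unlock(&trans) instead of bch2_btree_iter_unlock(iter): locks are dropped for every iterator owned by the transaction, which is the trans_for_each_iter() pattern this commit is named after. Below is a self-contained toy of that ownership model, with stand-in types rather than the real struct btree_trans.

/* Toy model: a "transaction" owns a small set of iterators, and unlock
 * walks all of them instead of being called per iterator.  Stand-in
 * structures only.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_ITERS 8

struct toy_iter {
	bool in_use;
	bool locked;
};

struct toy_trans {
	struct toy_iter iters[TOY_MAX_ITERS];
};

/* visit every live iterator belonging to the transaction */
#define toy_trans_for_each_iter(trans, it)                      \
	for ((it) = (trans)->iters;                             \
	     (it) < (trans)->iters + TOY_MAX_ITERS;             \
	     (it)++)                                            \
		if ((it)->in_use)

static void toy_trans_unlock(struct toy_trans *trans)
{
	struct toy_iter *it;

	toy_trans_for_each_iter(trans, it)
		it->locked = false;
}

int main(void)
{
	struct toy_trans trans = { 0 };
	struct toy_iter *it;

	trans.iters[0] = (struct toy_iter){ .in_use = true, .locked = true };
	trans.iters[3] = (struct toy_iter){ .in_use = true, .locked = true };

	toy_trans_unlock(&trans);

	toy_trans_for_each_iter(&trans, it)
		printf("iter %td locked=%d\n", it - trans.iters, it->locked);
	return 0;
}

The design choice this illustrates: lock state becomes a property of the transaction, so one walk over its live iterators replaces scattered per-iterator unlock calls.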
fs/bcachefs/migrate.c
View file @
0f238367
...
@@ -52,7 +52,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
 	while ((k = bch2_btree_iter_peek(iter)).k &&
-	       !(ret = btree_iter_err(k))) {
+	       !(ret = bkey_err(k))) {
 		if (!bkey_extent_is_data(k.k) ||
 		    !bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx)) {
 			ret = bch2_mark_bkey_replicas(c, k);
...
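migrate.c (and move.c below) keep the existing loop shape, only swapping the error accessor: peek a key, stop when there is no key or when the key carries an error. A minimal sketch of that loop idiom follows, with hypothetical toy_peek()/toy_key_err() helpers standing in for the bcachefs calls.

/* Sketch of the peek-then-check loop shape used in bch2_dev_usrdata_drop()
 * and bch2_move_data(): stop when there is no next key, or when the key
 * itself reports an error.  toy_peek()/toy_key_err() are stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_key { const int *k; };        /* NULL .k == no more keys */

static int values[] = { 1, 2, 3 };
static size_t pos;

static struct toy_key toy_peek(void)
{
	if (pos < 3)
		return (struct toy_key){ .k = &values[pos] };
	return (struct toy_key){ .k = NULL };  /* iteration done */
}

static int toy_key_err(struct toy_key k)
{
	(void)k;
	return 0;  /* a real implementation would decode an errno here */
}

int main(void)
{
	struct toy_key k;
	int ret = 0;

	while ((k = toy_peek()).k && !(ret = toy_key_err(k))) {
		printf("got %d\n", *k.k);
		pos++;  /* advance, as the btree iterator would */
	}
	return ret;
}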
fs/bcachefs/move.c
View file @
0f238367
...
@@ -77,7 +77,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
 		bool did_work = false;
 		int nr;
 
-		ret = btree_iter_err(k);
+		ret = bkey_err(k);
 		if (ret)
 			break;
...
@@ -539,7 +539,7 @@ int bch2_move_data(struct bch_fs *c,
 		if (!k.k)
 			break;
-		ret = btree_iter_err(k);
+		ret = bkey_err(k);
 		if (ret)
 			break;
 		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
...
fs/bcachefs/quota.c
View file @
0f238367
...
@@ -732,7 +732,7 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
 			BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
 	k = bch2_btree_iter_peek_slot(iter);
-	ret = btree_iter_err(k);
+	ret = bkey_err(k);
 	if (unlikely(ret))
 		return ret;
...