Kirill Smelkov / linux / Commits

Commit 9e5e5b9e
authored Mar 25, 2019 by Kent Overstreet
committed by Kent Overstreet, Oct 22, 2023
bcachefs: Btree iterators now always have a btree_trans

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
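In practical terms, a btree_iter no longer carries its own struct bch_fs pointer; it carries a pointer to the btree_trans it belongs to, and anything that needs the filesystem handle reaches it as iter->trans->c. Below is a minimal, self-contained sketch of that pattern; the stand-in struct bodies and the iter_fs() helper are illustrative only, not part of the commit (the real definitions live in fs/bcachefs/btree_types.h).

/* Illustration only -- not part of this commit. */
struct bch_fs { int dummy; };			/* stand-in body */

struct btree_trans {
	struct bch_fs		*c;		/* the transaction owns the fs handle */
};

struct btree_iter {
	/* before: struct bch_fs *c;  -- the iterator held the fs directly */
	struct btree_trans	*trans;		/* after: the iterator points at its trans */
};

/*
 * Hypothetical helper showing the new access path at call sites:
 * old code read iter->c, new code reads iter->trans->c.
 */
static inline struct bch_fs *iter_fs(struct btree_iter *iter)
{
	return iter->trans->c;
}

Correspondingly, bch2_btree_iter_init() now takes the btree_trans rather than the bch_fs, so the trans pointer is wired up when the iterator is created (see the btree_iter.c hunks below).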
parent 424eb881
Showing 3 changed files with 26 additions and 34 deletions
fs/bcachefs/btree_iter.c	+18 -18
fs/bcachefs/btree_iter.h	 +6 -15
fs/bcachefs/btree_types.h	 +2  -1
fs/bcachefs/btree_iter.c
@@ -78,7 +78,6 @@ void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
 
 void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
 {
-	struct bch_fs *c = iter->c;
 	struct btree_iter *linked;
 	unsigned readers = 0;
 
@@ -97,7 +96,7 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
 	 */
 	atomic64_sub(__SIX_VAL(read_lock, readers),
 		     &b->lock.state.counter);
-	btree_node_lock_type(c, b, SIX_LOCK_write);
+	btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
 	atomic64_add(__SIX_VAL(read_lock, readers),
 		     &b->lock.state.counter);
 }
@@ -199,7 +198,6 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
			    enum six_lock_type type,
			    bool may_drop_locks)
 {
-	struct bch_fs *c = iter->c;
 	struct btree_iter *linked;
 	bool ret = true;
 
@@ -254,7 +252,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 	}
 
 	if (ret)
-		__btree_node_lock_type(c, b, type);
+		__btree_node_lock_type(iter->trans->c, b, type);
 	else
 		trans_restart();
 
@@ -644,8 +642,8 @@ static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
 
 	ret = bkey_disassemble(l->b, k, u);
 
-	if (debug_check_bkeys(iter->c))
-		bch2_bkey_debugcheck(iter->c, l->b, ret);
+	if (debug_check_bkeys(iter->trans->c))
+		bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
 
 	return ret;
 }
@@ -834,7 +832,7 @@ void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
 static inline int btree_iter_lock_root(struct btree_iter *iter,
				       unsigned depth_want)
 {
-	struct bch_fs *c = iter->c;
+	struct bch_fs *c = iter->trans->c;
 	struct btree *b;
 	enum six_lock_type lock_type;
 	unsigned i;
@@ -882,11 +880,12 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
 noinline
 static void btree_iter_prefetch(struct btree_iter *iter)
 {
+	struct bch_fs *c = iter->trans->c;
 	struct btree_iter_level *l = &iter->l[iter->level];
 	struct btree_node_iter node_iter = l->iter;
 	struct bkey_packed *k;
 	BKEY_PADDED(k) tmp;
-	unsigned nr = test_bit(BCH_FS_STARTED, &iter->c->flags)
+	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (iter->level > 1 ? 0 :  2)
		: (iter->level > 1 ? 1 : 16);
 	bool was_locked = btree_node_locked(iter, iter->level);
@@ -901,8 +900,7 @@ static void btree_iter_prefetch(struct btree_iter *iter)
			break;
 
		bch2_bkey_unpack(l->b, &tmp.k, k);
-		bch2_btree_node_prefetch(iter->c, iter, &tmp.k,
-					 iter->level - 1);
+		bch2_btree_node_prefetch(c, iter, &tmp.k, iter->level - 1);
 	}
 
 	if (!was_locked)
@@ -911,6 +909,7 @@ static void btree_iter_prefetch(struct btree_iter *iter)
 
 static inline int btree_iter_down(struct btree_iter *iter)
 {
+	struct bch_fs *c = iter->trans->c;
 	struct btree_iter_level *l = &iter->l[iter->level];
 	struct btree *b;
 	unsigned level = iter->level - 1;
@@ -922,7 +921,7 @@ static inline int btree_iter_down(struct btree_iter *iter)
 	bch2_bkey_unpack(l->b, &tmp.k,
			 bch2_btree_node_iter_peek(&l->iter, l->b));
 
-	b = bch2_btree_node_get(iter->c, iter, &tmp.k, level, lock_type, true);
+	b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type, true);
 	if (unlikely(IS_ERR(b)))
		return PTR_ERR(b);
 
@@ -946,7 +945,7 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *);
 
 static int btree_iter_traverse_error(struct btree_iter *iter, int ret)
 {
-	struct bch_fs *c = iter->c;
+	struct bch_fs *c = iter->trans->c;
 	struct btree_iter *linked, *sorted_iters, **i;
 retry_all:
 	bch2_btree_iter_unlock(iter);
@@ -1275,9 +1274,9 @@ static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter)
			__bch2_btree_node_iter_peek_all(&l->iter, l->b));
 	}
 
-	if (debug_check_bkeys(iter->c) &&
+	if (debug_check_bkeys(iter->trans->c) &&
	    !bkey_deleted(ret.k))
-		bch2_bkey_debugcheck(iter->c, l->b, ret);
+		bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
 
 	return ret;
 }
@@ -1582,17 +1581,18 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
 	return __bch2_btree_iter_peek_slot(iter);
 }
 
-static inline void bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
-					enum btree_id btree_id,
+static inline void bch2_btree_iter_init(struct btree_trans *trans,
+					struct btree_iter *iter, enum btree_id btree_id,
					struct bpos pos, unsigned flags)
 {
+	struct bch_fs *c = trans->c;
 	unsigned i;
 
 	if (btree_id == BTREE_ID_EXTENTS &&
	    !(flags & BTREE_ITER_NODES))
		flags |= BTREE_ITER_IS_EXTENTS;
 
-	iter->c = c;
+	iter->trans = trans;
 	iter->pos = pos;
 	bkey_init(&iter->k);
 	iter->k.p = pos;
@@ -1828,7 +1828,7 @@ static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
		iter = &trans->iters[idx];
 
		iter->id = iter_id;
 
-		bch2_btree_iter_init(iter, trans->c, btree_id, pos, flags);
+		bch2_btree_iter_init(trans, iter, btree_id, pos, flags);
 	} else {
		iter = &trans->iters[idx];
fs/bcachefs/btree_iter.h
@@ -193,17 +193,19 @@ static inline int btree_iter_cmp(const struct btree_iter *l,
 	return __btree_iter_cmp(l->btree_id, l->pos, r);
 }
 
+int bch2_trans_unlock(struct btree_trans *);
+
 /*
  * Unlocks before scheduling
  * Note: does not revalidate iterator
  */
-static inline void bch2_btree_iter_cond_resched(struct btree_iter *iter)
+static inline void bch2_trans_cond_resched(struct btree_trans *trans)
 {
 	if (need_resched()) {
-		bch2_btree_iter_unlock(iter);
+		bch2_trans_unlock(trans);
		schedule();
 	} else if (race_fault()) {
-		bch2_btree_iter_unlock(iter);
+		bch2_trans_unlock(trans);
 	}
 }
 
@@ -231,7 +233,7 @@ static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
 static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
						     unsigned flags)
 {
-	bch2_btree_iter_cond_resched(iter);
+	bch2_trans_cond_resched(iter->trans);
 
 	return flags & BTREE_ITER_SLOTS
		? bch2_btree_iter_next_slot(iter)
@@ -307,20 +309,9 @@ static inline void bch2_trans_begin_updates(struct btree_trans *trans)
 }
 
 void *bch2_trans_kmalloc(struct btree_trans *, size_t);
-int bch2_trans_unlock(struct btree_trans *);
 void bch2_trans_init(struct btree_trans *, struct bch_fs *);
 int bch2_trans_exit(struct btree_trans *);
 
-static inline void bch2_trans_cond_resched(struct btree_trans *trans)
-{
-	if (need_resched()) {
-		bch2_trans_unlock(trans);
-		schedule();
-	} else if (race_fault()) {
-		bch2_trans_unlock(trans);
-	}
-}
-
 #ifdef TRACE_TRANSACTION_RESTARTS
 #define bch2_trans_begin(_trans)					\
 do {									\
fs/bcachefs/btree_types.h
@@ -11,6 +11,7 @@
 
 struct open_bucket;
 struct btree_update;
+struct btree_trans;
 
 #define MAX_BSETS		3U
 
@@ -209,7 +210,7 @@ enum btree_iter_uptodate {
  * @nodes_intent_locked - bitmask indicating which locks are intent locks
  */
 struct btree_iter {
-	struct bch_fs		*c;
+	struct btree_trans	*trans;
 	struct bpos		pos;
 
 	u8			flags;