Commit a00fd8c5
authored Aug 21, 2018 by Kent Overstreet
committed by Kent Overstreet, Oct 22, 2023
bcachefs: Comparison function cleanups

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

parent f84306a5
Showing 6 changed files with 183 additions and 155 deletions:

    fs/bcachefs/bset.c                  +45 -32
    fs/bcachefs/bset.h                  +25 -56
    fs/bcachefs/btree_io.c              +1  -4
    fs/bcachefs/btree_iter.c            +71 -60
    fs/bcachefs/btree_update_interior.c +3  -3
    fs/bcachefs/tests.c                 +38 -0
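The pattern running through this diff: the old boolean position predicates (btree_iter_pos_cmp(), btree_iter_pos_cmp_packed(), btree_iter_pos_cmp_p_or_unp(), each threading a strictly_greater flag) are replaced by three-way comparison functions (bkey_iter_cmp(), bkey_iter_pos_cmp(), bkey_iter_cmp_p_or_unp()) that return <0/0/>0 and fold the deleted-keys-sort-first rule into the comparison by chaining tie-breakers with GNU C's "?:" operator. A minimal standalone sketch of that comparator shape, using simplified stand-in types rather than the real packed bcachefs keys:

#include <stdio.h>

/* Simplified stand-in for a bkey: a position plus a deleted flag. */
struct key { int pos; int deleted; };

/* Kernel-style three-way compare: < 0, 0, or > 0. */
static int cmp_pos(const struct key *l, const struct key *r)
{
	return (l->pos > r->pos) - (l->pos < r->pos);
}

/*
 * The shape bkey_iter_cmp() converges on: each "?:" (a GNU C extension)
 * falls through to the next tie-breaker only when the previous compare
 * returned 0. Deleted keys sort first, and pointer identity breaks any
 * remaining tie so prev() can order keys within one node iterator.
 */
static int key_iter_cmp(const struct key *l, const struct key *r)
{
	return cmp_pos(l, r) ?:
		(int) r->deleted - (int) l->deleted ?:
		(l > r) - (l < r);
}

int main(void)
{
	struct key a = { .pos = 7, .deleted = 1 };
	struct key b = { .pos = 7, .deleted = 0 };

	/* Equal positions: the deleted key compares first (result < 0). */
	printf("%d\n", key_iter_cmp(&a, &b));
	return 0;
}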
fs/bcachefs/bset.c
@@ -143,7 +143,7 @@ static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
 		bkey_unpack_key(b, k);
 
-	if (n && __btree_node_iter_cmp(b, k, n) > 0) {
+	if (n && bkey_iter_cmp(b, k, n) > 0) {
 		struct btree_node_iter_set *set;
 		struct bkey ku = bkey_unpack_key(b, k);
 		struct bkey nu = bkey_unpack_key(b, n);
@@ -202,10 +202,10 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
 	struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
 
 #if 0
 	BUG_ON(prev &&
-	       __btree_node_iter_cmp(b, prev, insert) > 0);
+	       bkey_iter_cmp(b, prev, insert) > 0);
 #else
-	if (prev && __btree_node_iter_cmp(b, prev, insert) > 0) {
+	if (prev && bkey_iter_cmp(b, prev, insert) > 0) {
 		struct bkey k1 = bkey_unpack_key(b, prev);
 		struct bkey k2 = bkey_unpack_key(b, insert);
 		char buf1[100];
@@ -224,10 +224,10 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
 #endif
 #if 0
 	BUG_ON(next != btree_bkey_last(b, t) &&
-	       __btree_node_iter_cmp(b, insert, next) > 0);
+	       bkey_iter_cmp(b, insert, next) > 0);
 #else
 	if (next != btree_bkey_last(b, t) &&
-	    __btree_node_iter_cmp(b, insert, next) > 0) {
+	    bkey_iter_cmp(b, insert, next) > 0) {
 		struct bkey k1 = bkey_unpack_key(b, insert);
 		struct bkey k2 = bkey_unpack_key(b, next);
 		char buf1[100];
@@ -1272,7 +1272,7 @@ void bch2_bset_delete(struct btree *b,
 __flatten
 static struct bkey_packed *bset_search_write_set(const struct btree *b,
 				struct bset_tree *t,
-				struct bpos search,
+				struct bpos *search,
 				const struct bkey_packed *packed_search)
 {
 	unsigned l = 0, r = t->size;
@@ -1280,7 +1280,7 @@ static struct bkey_packed *bset_search_write_set(const struct btree *b,
 	while (l + 1 != r) {
 		unsigned m = (l + r) >> 1;
 
-		if (bkey_cmp(rw_aux_tree(b, t)[m].k, search) < 0)
+		if (bkey_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
 			l = m;
 		else
 			r = m;
@@ -1302,7 +1302,7 @@ static int bset_search_tree_slowpath(const struct btree *b,
 __flatten
 static struct bkey_packed *bset_search_tree(const struct btree *b,
 				struct bset_tree *t,
-				struct bpos search,
+				struct bpos *search,
 				const struct bkey_packed *packed_search)
 {
 	struct ro_aux_tree *base = ro_aux_tree_base(b, t);
@@ -1343,7 +1343,7 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
 					    bkey_mantissa(packed_search, f, n));
 		else
 			n = n * 2 + bset_search_tree_slowpath(b, t,
-						&search, packed_search, n);
+						search, packed_search, n);
 	} while (n < t->size);
 
 	inorder = __eytzinger1_to_inorder(n >> 1, t->size, t->extra);
@@ -1370,10 +1370,9 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
 __always_inline __flatten
 static struct bkey_packed *bch2_bset_search(struct btree *b,
 				struct bset_tree *t,
-				struct bpos search,
+				struct bpos *search,
 				struct bkey_packed *packed_search,
-				const struct bkey_packed *lossy_packed_search,
-				bool strictly_greater)
+				const struct bkey_packed *lossy_packed_search)
 {
 	struct bkey_packed *m;
@@ -1407,7 +1406,7 @@ static struct bkey_packed *bch2_bset_search(struct btree *b,
 		 * start and end - handle that here:
 		 */
 
-		if (bkey_cmp(search, t->max_key) > 0)
+		if (bkey_cmp(*search, t->max_key) > 0)
 			return btree_bkey_last(b, t);
 
 		m = bset_search_tree(b, t, search, lossy_packed_search);
@@ -1416,21 +1415,21 @@ static struct bkey_packed *bch2_bset_search(struct btree *b,
 	if (lossy_packed_search)
 		while (m != btree_bkey_last(b, t) &&
-		       !btree_iter_pos_cmp_p_or_unp(b, search,
-				lossy_packed_search, m, strictly_greater))
+		       bkey_iter_cmp_p_or_unp(b, search,
+				lossy_packed_search, m) > 0)
 			m = bkey_next(m);
 
 	if (!packed_search)
 		while (m != btree_bkey_last(b, t) &&
-		       !btree_iter_pos_cmp_packed(b, &search, m, strictly_greater))
+		       bkey_iter_pos_cmp(b, search, m) > 0)
 			m = bkey_next(m);
 
 	if (btree_keys_expensive_checks(b)) {
 		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
 
 		BUG_ON(prev &&
-		       btree_iter_pos_cmp_p_or_unp(b, search, packed_search,
-						   prev, strictly_greater));
+		       bkey_iter_cmp_p_or_unp(b, search, packed_search,
+					      prev) <= 0);
 	}
 
 	return m;
@@ -1438,6 +1437,25 @@ static struct bkey_packed *bch2_bset_search(struct btree *b,
 
 /* Btree node iterator */
 
+static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
+			      struct btree *b,
+			      const struct bkey_packed *k,
+			      const struct bkey_packed *end)
+{
+	if (k != end) {
+		struct btree_node_iter_set *pos;
+
+		btree_node_iter_for_each(iter, pos)
+			;
+
+		BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
+		*pos = (struct btree_node_iter_set) {
+			__btree_node_key_to_offset(b, k),
+			__btree_node_key_to_offset(b, end)
+		};
+	}
+}
+
 void bch2_btree_node_iter_push(struct btree_node_iter *iter,
 			       struct btree *b,
 			       const struct bkey_packed *k,
@@ -1449,17 +1467,15 @@ void bch2_btree_node_iter_push(struct btree_node_iter *iter,
 noinline __flatten __attribute__((cold))
 static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
-			      struct btree *b, struct bpos search,
-			      bool strictly_greater)
+			      struct btree *b, struct bpos *search)
 {
 	struct bset_tree *t;
 
-	trace_bkey_pack_pos_fail(&search);
+	trace_bkey_pack_pos_fail(search);
 
 	for_each_bset(b, t)
 		__bch2_btree_node_iter_push(iter, b,
-			bch2_bset_search(b, t, search, NULL, NULL,
-					 strictly_greater),
+			bch2_bset_search(b, t, search, NULL, NULL),
 			btree_bkey_last(b, t));
 
 	bch2_btree_node_iter_sort(iter, b);
@@ -1506,18 +1522,17 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
  * past any extents that compare equal to the position we searched for.
  */
 void bch2_btree_node_iter_init(struct btree_node_iter *iter,
-			       struct btree *b, struct bpos search,
-			       bool strictly_greater)
+			       struct btree *b, struct bpos *search)
 {
 	struct bset_tree *t;
 	struct bkey_packed p, *packed_search = NULL;
 
-	EBUG_ON(bkey_cmp(search, b->data->min_key) < 0);
+	EBUG_ON(bkey_cmp(*search, b->data->min_key) < 0);
 	bset_aux_tree_verify(b);
 
 	memset(iter, 0, sizeof(*iter));
 
-	switch (bch2_bkey_pack_pos_lossy(&p, search, b)) {
+	switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
 	case BKEY_PACK_POS_EXACT:
 		packed_search = &p;
 		break;
@@ -1525,16 +1540,14 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
 		packed_search = NULL;
 		break;
 	case BKEY_PACK_POS_FAIL:
-		btree_node_iter_init_pack_failed(iter, b, search,
-						 strictly_greater);
+		btree_node_iter_init_pack_failed(iter, b, search);
 		return;
 	}
 
 	for_each_bset(b, t)
 		__bch2_btree_node_iter_push(iter, b,
 					    bch2_bset_search(b, t, search,
-							     packed_search, &p,
-							     strictly_greater),
+							     packed_search, &p),
 					    btree_bkey_last(b, t));
 
 	bch2_btree_node_iter_sort(iter, b);
@@ -1668,7 +1681,7 @@ struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
 			bch2_btree_node_iter_bset_pos(iter, b, t),
 			min_key_type);
 		if (k &&
-		    (!prev || __btree_node_iter_cmp(b, k, prev) > 0)) {
+		    (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
 			prev = k;
 			end = t->end_offset;
 		}
fs/bcachefs/bset.h
@@ -368,41 +368,6 @@ static inline int bkey_cmp_p_or_unp(const struct btree *b,
 	return __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
 }
 
-/* Returns true if @k is after iterator position @pos */
-static inline bool btree_iter_pos_cmp(struct btree_iter *iter,
-				      const struct bkey *k)
-{
-	int cmp = bkey_cmp(k->p, iter->pos);
-
-	return cmp > 0 ||
-		(cmp == 0 &&
-		 !(iter->flags & BTREE_ITER_IS_EXTENTS) && !bkey_deleted(k));
-}
-
-/* Returns true if @k is after iterator position @pos */
-static inline bool btree_iter_pos_cmp_packed(const struct btree *b,
-					     struct bpos *pos,
-					     const struct bkey_packed *k,
-					     bool strictly_greater)
-{
-	int cmp = bkey_cmp_left_packed(b, k, pos);
-
-	return cmp > 0 ||
-		(cmp == 0 && !strictly_greater && !bkey_deleted(k));
-}
-
-static inline bool btree_iter_pos_cmp_p_or_unp(const struct btree *b,
-					struct bpos pos,
-					const struct bkey_packed *pos_packed,
-					const struct bkey_packed *k,
-					bool strictly_greater)
-{
-	int cmp = bkey_cmp_p_or_unp(b, k, pos_packed, &pos);
-
-	return cmp > 0 ||
-		(cmp == 0 && !strictly_greater && !bkey_deleted(k));
-}
-
 static inline struct bset_tree *
 bch2_bkey_to_bset_inlined(struct btree *b, struct bkey_packed *k)
 {
@@ -459,7 +424,7 @@ void bch2_btree_node_iter_push(struct btree_node_iter *, struct btree *,
 			       const struct bkey_packed *,
 			       const struct bkey_packed *);
 void bch2_btree_node_iter_init(struct btree_node_iter *, struct btree *,
-			       struct bpos, bool);
+			       struct bpos *);
 void bch2_btree_node_iter_init_from_start(struct btree_node_iter *,
 					  struct btree *);
 struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *,
@@ -488,11 +453,16 @@ static inline bool bch2_btree_node_iter_end(struct btree_node_iter *iter)
 	return __btree_node_iter_set_end(iter, 0);
 }
 
-static inline int __btree_node_iter_cmp(struct btree *b,
-					const struct bkey_packed *l,
-					const struct bkey_packed *r)
+/*
+ * When keys compare equal, deleted keys compare first:
+ *
+ * XXX: only need to compare pointers for keys that are both within a
+ * btree_node_iterator - we need to break ties for prev() to work correctly
+ */
+static inline int bkey_iter_cmp(struct btree *b,
+				const struct bkey_packed *l,
+				const struct bkey_packed *r)
 {
-	/* When keys compare equal deleted keys come first */
 	return bkey_cmp_packed(b, l, r) ?:
-		(int) bkey_deleted(r) - (int) bkey_deleted(l);
+		(int) bkey_deleted(r) - (int) bkey_deleted(l) ?:
+		(l > r) - (l < r);
@@ -502,28 +472,27 @@ static inline int btree_node_iter_cmp(struct btree *b,
 				      struct btree_node_iter_set l,
 				      struct btree_node_iter_set r)
 {
-	return __btree_node_iter_cmp(b,
+	return bkey_iter_cmp(b,
 			__btree_node_offset_to_key(b, l.k),
 			__btree_node_offset_to_key(b, r.k));
 }
 
-static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
-			      struct btree *b,
-			      const struct bkey_packed *k,
-			      const struct bkey_packed *end)
+/* These assume l (the search key) is not a deleted key: */
+static inline int bkey_iter_pos_cmp(struct btree *b,
+			struct bpos *l,
+			const struct bkey_packed *r)
 {
-	if (k != end) {
-		struct btree_node_iter_set *pos;
-
-		btree_node_iter_for_each(iter, pos)
-			;
-
-		BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
-		*pos = (struct btree_node_iter_set) {
-			__btree_node_key_to_offset(b, k),
-			__btree_node_key_to_offset(b, end)
-		};
-	}
+	return -bkey_cmp_left_packed(b, r, l) ?: (int) bkey_deleted(r);
+}
+
+static inline int bkey_iter_cmp_p_or_unp(struct btree *b,
+				struct bpos *l,
+				const struct bkey_packed *l_packed,
+				const struct bkey_packed *r)
+{
+	return -bkey_cmp_p_or_unp(b, r, l_packed, l) ?: (int) bkey_deleted(r);
 }
 
 static inline struct bkey_packed *
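Note how the new bset.h helpers invert the underlying compare (the leading minus in -bkey_cmp_left_packed(b, r, l)) so the search position is always the logical left operand: callers that previously wrote !btree_iter_pos_cmp_packed(...) now write bkey_iter_pos_cmp(...) > 0 ("the search position is still past this key, keep advancing"). A hedged caller-side sketch of that translation, reusing the simplified struct key stand-in from the sketch above (not the bcachefs API; shown here for the strictly_greater == false case):

#include <stdio.h>

/* Same simplified stand-in as in the earlier sketch. */
struct key { int pos; int deleted; };

/* Old shape: boolean "is @k after position @pos", with a flag. */
int old_pos_cmp(const struct key *k, int pos, int strictly_greater)
{
	int cmp = (k->pos > pos) - (k->pos < pos);

	return cmp > 0 ||
		(cmp == 0 && !strictly_greater && !k->deleted);
}

/* New shape: three-way compare; > 0 means @pos is strictly after @r. */
int key_iter_pos_cmp(int pos, const struct key *r)
{
	return -((r->pos > pos) - (r->pos < pos)) ?: (int) r->deleted;
}

int main(void)
{
	struct key k = { .pos = 4, .deleted = 0 };

	/* An advance loop "while (!old_pos_cmp(k, pos, 0))" becomes
	 * "while (key_iter_pos_cmp(pos, k) > 0)": both print 1 here,
	 * i.e. both say the search position 5 is still past key 4. */
	printf("%d %d\n", !old_pos_cmp(&k, 5, 0), key_iter_pos_cmp(5, &k) > 0);
	return 0;
}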
fs/bcachefs/btree_io.c
@@ -21,10 +21,7 @@
 
 /* btree_node_iter_large: */
 
-#define btree_node_iter_cmp_heap(h, _l, _r)				\
-	__btree_node_iter_cmp(b,					\
-			__btree_node_offset_to_key(b, (_l).k),		\
-			__btree_node_offset_to_key(b, (_r).k))
+#define btree_node_iter_cmp_heap(h, _l, _r) btree_node_iter_cmp(b, _l, _r)
 
 void bch2_btree_node_iter_large_push(struct btree_node_iter_large *iter,
 				     struct btree *b,
fs/bcachefs/btree_iter.c
@@ -24,6 +24,30 @@ static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
 		iter->l[l].b != BTREE_ITER_NOT_END;
 }
 
+/* Returns < 0 if @k is before iter pos, > 0 if @k is after: */
+static inline int __btree_iter_pos_cmp(struct btree_iter *iter,
+				       const struct btree *b,
+				       const struct bkey_packed *k,
+				       bool interior_node)
+{
+	int cmp = bkey_cmp_left_packed(b, k, &iter->pos);
+
+	if (cmp)
+		return cmp;
+	if (bkey_deleted(k))
+		return -1;
+	if (iter->flags & BTREE_ITER_IS_EXTENTS)
+		return -1;
+	return 1;
+}
+
+static inline int btree_iter_pos_cmp(struct btree_iter *iter,
+				     const struct btree *b,
+				     const struct bkey_packed *k)
+{
+	return __btree_iter_pos_cmp(iter, b, k, b->level != 0);
+}
+
 /* Btree node locking: */
 
 /*
@@ -390,8 +414,7 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
 	k = b->level || iter->flags & BTREE_ITER_IS_EXTENTS
 		? bch2_btree_node_iter_prev_filter(&tmp, b, KEY_TYPE_DISCARD)
 		: bch2_btree_node_iter_prev_all(&tmp, b);
-	if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k,
-				iter->flags & BTREE_ITER_IS_EXTENTS)) {
+	if (k && btree_iter_pos_cmp(iter, b, k) > 0) {
 		char buf[100];
 		struct bkey uk = bkey_unpack_key(b, k);
@@ -401,8 +424,7 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
 	}
 
 	k = bch2_btree_node_iter_peek_all(&l->iter, b);
-	if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k,
-				iter->flags & BTREE_ITER_IS_EXTENTS)) {
+	if (k && btree_iter_pos_cmp(iter, b, k) < 0) {
 		char buf[100];
 		struct bkey uk = bkey_unpack_key(b, k);
@@ -454,8 +476,7 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
 	/* didn't find the bset in the iterator - might have to readd it: */
 	if (new_u64s &&
-	    btree_iter_pos_cmp_packed(b, &iter->pos, where,
-				      iter->flags & BTREE_ITER_IS_EXTENTS)) {
+	    btree_iter_pos_cmp(iter, b, where) > 0) {
 		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 
 		bch2_btree_node_iter_push(node_iter, b, where, end);
@@ -475,8 +496,7 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
 			return;
 
 		if (new_u64s &&
-		    btree_iter_pos_cmp_packed(b, &iter->pos, where,
-				iter->flags & BTREE_ITER_IS_EXTENTS)) {
+		    btree_iter_pos_cmp(iter, b, where) > 0) {
 			set->k = offset;
 		} else if (set->k < offset + clobber_u64s) {
 			set->k = offset + new_u64s;
@@ -516,9 +536,8 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
 	 * always point to the key for the child node the btree iterator points
 	 * to.
	 */
-	if (b->level && new_u64s && !bkey_deleted(where) &&
-	    btree_iter_pos_cmp_packed(b, &iter->pos, where,
-				iter->flags & BTREE_ITER_IS_EXTENTS)) {
+	if (b->level && new_u64s &&
+	    btree_iter_pos_cmp(iter, b, where) > 0) {
 		struct bset_tree *t, *where_set = bch2_bkey_to_bset_inlined(b, where);
 		struct bkey_packed *k;
@@ -529,7 +548,7 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
 		k = bch2_bkey_prev_all(b, t,
 			bch2_btree_node_iter_bset_pos(node_iter, b, t));
 
-		if (k && __btree_node_iter_cmp(b, k, where) > 0) {
+		if (k && bkey_iter_cmp(b, k, where) > 0) {
 			struct btree_node_iter_set *set;
 			unsigned offset = __btree_node_key_to_offset(b, bkey_next(k));
@@ -610,9 +629,23 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter,
 			bch2_btree_node_iter_peek(&l->iter, l->b));
 }
 
-static inline void __btree_iter_advance(struct btree_iter_level *l)
+static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
+					     struct btree_iter_level *l,
+					     int max_advance)
 {
-	bch2_btree_node_iter_advance(&l->iter, l->b);
+	struct bkey_packed *k;
+	int nr_advanced = 0;
+
+	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
+	       btree_iter_pos_cmp(iter, l->b, k) < 0) {
+		if (max_advance > 0 && nr_advanced >= max_advance)
+			return false;
+
+		bch2_btree_node_iter_advance(&l->iter, l->b);
+		nr_advanced++;
+	}
+
+	return true;
 }
 
 /*
@@ -657,7 +690,8 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
 static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
 					     struct btree *b)
 {
-	return !btree_iter_pos_cmp(iter, &b->key.k) &&
+	return __btree_iter_pos_cmp(iter, NULL,
+				    bkey_to_packed(&b->key), true) < 0 &&
 		bkey_cmp(b->key.k.p, POS_MAX);
 }
@@ -670,16 +704,18 @@ static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
 }
 
 static inline void __btree_iter_init(struct btree_iter *iter,
-				     struct btree *b)
+				     unsigned level)
 {
-	struct btree_iter_level *l = &iter->l[b->level];
+	struct btree_iter_level *l = &iter->l[level];
+
+	bch2_btree_node_iter_init(&l->iter, l->b, &iter->pos);
 
-	bch2_btree_node_iter_init(&l->iter, b, iter->pos,
-				  iter->flags & BTREE_ITER_IS_EXTENTS);
+	if (iter->flags & BTREE_ITER_IS_EXTENTS)
+		btree_iter_advance_to_pos(iter, l, -1);
 
 	/* Skip to first non whiteout: */
-	if (b->level)
-		bch2_btree_node_iter_peek(&l->iter, b);
+	if (level)
+		bch2_btree_node_iter_peek(&l->iter, l->b);
 
 	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 }
@@ -694,7 +730,7 @@ static inline void btree_iter_node_set(struct btree_iter *iter,
 	iter->l[b->level].lock_seq = b->lock.state.seq;
 	iter->l[b->level].b = b;
 
-	__btree_iter_init(iter, b);
+	__btree_iter_init(iter, b->level);
 }
 
 /*
@@ -748,7 +784,7 @@ void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
 	struct btree_iter *linked;
 
 	for_each_btree_iter_with_node(iter, b, linked)
-		__btree_iter_init(linked, b);
+		__btree_iter_init(linked, b->level);
 }
 
 static inline int btree_iter_lock_root(struct btree_iter *iter,
@@ -987,15 +1023,8 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
 	 *
 	 * XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary
	 */
-	if (btree_iter_node(iter, iter->level)) {
-		struct btree_iter_level *l = &iter->l[iter->level];
-		struct bkey_s_c k;
-		struct bkey u;
-
-		while ((k = __btree_iter_peek_all(iter, l, &u)).k &&
-		       !btree_iter_pos_cmp(iter, k.k))
-			__btree_iter_advance(l);
-	}
+	if (btree_iter_node(iter, iter->level))
+		btree_iter_advance_to_pos(iter, &iter->l[iter->level], -1);
 
 	/*
 	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
@@ -1138,7 +1167,6 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
 void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
 {
 	struct btree_iter_level *l = &iter->l[0];
-	struct bkey_packed *k;
 
 	EBUG_ON(iter->level != 0);
 	EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
@@ -1148,12 +1176,10 @@ void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
 	iter->pos = new_pos;
 	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 
-	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
-	       !btree_iter_pos_cmp_packed(l->b, &iter->pos, k,
-					  iter->flags & BTREE_ITER_IS_EXTENTS))
-		__btree_iter_advance(l);
+	btree_iter_advance_to_pos(iter, l, -1);
 
-	if (!k && btree_iter_pos_after_node(iter, l->b))
+	if (bch2_btree_node_iter_end(&l->iter) &&
+	    btree_iter_pos_after_node(iter, l->b))
 		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
 }
@@ -1170,30 +1196,15 @@ void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
 	level = btree_iter_up_until_locked(iter, true);
 
 	if (btree_iter_node(iter, level)) {
-		unsigned nr_advanced = 0;
-		struct btree_iter_level *l = &iter->l[level];
-		struct bkey_s_c k;
-		struct bkey u;
-
 		/*
 		 * We might have to skip over many keys, or just a few: try
 		 * advancing the node iterator, and if we have to skip over too
 		 * many keys just reinit it (or if we're rewinding, since that
 		 * is expensive).
		 */
-		if (cmp > 0) {
-			while ((k = __btree_iter_peek_all(iter, l, &u)).k &&
-			       !btree_iter_pos_cmp(iter, k.k)) {
-				if (nr_advanced > 8)
-					goto reinit_node;
-
-				__btree_iter_advance(l);
-				nr_advanced++;
-			}
-		} else {
-reinit_node:
-			__btree_iter_init(iter, iter->l[level].b);
-		}
+		if (cmp < 0 ||
+		    !btree_iter_advance_to_pos(iter, &iter->l[level], 8))
+			__btree_iter_init(iter, level);
 
 		/* Don't leave it locked if we're not supposed to: */
 		if (btree_lock_want(iter, level) == BTREE_NODE_UNLOCKED)
@@ -1296,7 +1307,7 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
 	}
 
 	do {
-		__btree_iter_advance(l);
+		bch2_btree_node_iter_advance(&l->iter, l->b);
 		p = bch2_btree_node_iter_peek_all(&l->iter, l->b);
 		if (unlikely(!p))
 			return bch2_btree_iter_peek_next_leaf(iter);
@@ -1367,7 +1378,7 @@ __bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
 	while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k &&
 	       bkey_deleted(k.k) &&
 	       bkey_cmp(bkey_start_pos(k.k), iter->pos) == 0)
-		__btree_iter_advance(l);
+		bch2_btree_node_iter_advance(&l->iter, l->b);
 
 	/*
 	 * iterator is now at the correct position for inserting at iter->pos,
@@ -1464,7 +1475,7 @@ __bch2_btree_iter_peek_slot(struct btree_iter *iter)
 	while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k &&
 	       bkey_deleted(k.k) &&
 	       bkey_cmp(k.k->p, iter->pos) == 0)
-		__btree_iter_advance(l);
+		bch2_btree_node_iter_advance(&l->iter, l->b);
 
 	/*
 	 * If we got to the end of the node, check if we need to traverse to the
@@ -1528,7 +1539,7 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
 	}
 
 	if (!bkey_deleted(&iter->k))
-		__btree_iter_advance(&iter->l[0]);
+		bch2_btree_node_iter_advance(&iter->l[0].iter, iter->l[0].b);
 
 	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
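Throughout btree_iter.c the open-coded advance loops collapse into the new btree_iter_advance_to_pos(): step the node iterator while keys still compare before the search position, but give up after max_advance steps (negative means unbounded) so that bch2_btree_iter_set_pos() can fall back to reinitializing the iterator when the skip is long or the position rewound. A simplified sketch of that bounded-advance idea over a hypothetical flat sorted array, not the real bcachefs iterator:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical flat iterator over a sorted array, standing in for the
 * btree node iterator. */
struct flat_iter { const int *keys; size_t nr, idx; };

/*
 * Mirrors the shape of btree_iter_advance_to_pos(): advance while the
 * current key is still before pos, but give up after max_advance steps
 * (max_advance < 0 means unbounded). On failure the caller falls back
 * to reinitializing the iterator, as __btree_iter_init() does above.
 */
static bool advance_to_pos(struct flat_iter *it, int pos, int max_advance)
{
	int nr_advanced = 0;

	while (it->idx < it->nr && it->keys[it->idx] < pos) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;
		it->idx++;
		nr_advanced++;
	}
	return true;
}

int main(void)
{
	const int keys[] = { 1, 3, 5, 7, 9 };
	struct flat_iter it = { keys, 5, 0 };

	assert(advance_to_pos(&it, 6, -1) && it.idx == 3);	/* lands on 7 */
	assert(!advance_to_pos(&it, 100, 1));	/* too far: caller reinits */
	return 0;
}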
fs/bcachefs/btree_update_interior.c
@@ -35,7 +35,7 @@ static void btree_node_interior_verify(struct btree *b)
 
 	BUG_ON(!b->level);
 
-	bch2_btree_node_iter_init(&iter, b, b->key.k.p, false);
+	bch2_btree_node_iter_init(&iter, b, &b->key.k.p);
 #if 1
 	BUG_ON(!(k = bch2_btree_node_iter_peek(&iter, b)) ||
 	       bkey_cmp_left_packed(b, k, &b->key.k.p));
@@ -1191,7 +1191,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b,
 			     gc_pos_btree_node(b), &stats, 0, 0);
 
 	while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
-	       !btree_iter_pos_cmp_packed(b, &insert->k.p, k, false))
+	       bkey_iter_pos_cmp(b, &insert->k.p, k) > 0)
 		bch2_btree_node_iter_advance(node_iter, b);
 
 	/*
@@ -1322,7 +1322,7 @@ static void btree_split_insert_keys(struct btree_update *as, struct btree *b,
 
 	BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
 
-	bch2_btree_node_iter_init(&node_iter, b, k->k.p, false);
+	bch2_btree_node_iter_init(&node_iter, b, &k->k.p);
 
 	while (!bch2_keylist_empty(keys)) {
 		k = bch2_keylist_front(keys);
fs/bcachefs/tests.c
@@ -271,6 +271,42 @@ static void test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 	bch2_btree_iter_unlock(&iter);
 }
 
+/*
+ * XXX: we really want to make sure we've got a btree with depth > 0 for these
+ * tests
+ */
+static void test_peek_end(struct bch_fs *c, u64 nr)
+{
+	struct btree_iter iter;
+	struct bkey_s_c k;
+
+	bch2_btree_iter_init(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0);
+
+	k = bch2_btree_iter_peek(&iter);
+	BUG_ON(k.k);
+
+	k = bch2_btree_iter_peek(&iter);
+	BUG_ON(k.k);
+
+	bch2_btree_iter_unlock(&iter);
+}
+
+static void test_peek_end_extents(struct bch_fs *c, u64 nr)
+{
+	struct btree_iter iter;
+	struct bkey_s_c k;
+
+	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0);
+
+	k = bch2_btree_iter_peek(&iter);
+	BUG_ON(k.k);
+
+	k = bch2_btree_iter_peek(&iter);
+	BUG_ON(k.k);
+
+	bch2_btree_iter_unlock(&iter);
+}
+
 /* extent unit tests */
 
 u64 test_version;
@@ -555,6 +591,8 @@ void bch2_btree_perf_test(struct bch_fs *c, const char *testname,
 	perf_test(test_iterate_extents);
 	perf_test(test_iterate_slots);
 	perf_test(test_iterate_slots_extents);
+	perf_test(test_peek_end);
+	perf_test(test_peek_end_extents);
 
 	perf_test(test_extent_overwrite_front);
 	perf_test(test_extent_overwrite_back);