Commit d0cc3def
authored Jan 13, 2019 by Kent Overstreet
committed by Kent Overstreet on Oct 22, 2023
bcachefs: More allocator startup improvements
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

parent b8adb833
Showing 10 changed files with 120 additions and 102 deletions (+120 −102)
fs/bcachefs/alloc_background.c       +72  −59
fs/bcachefs/alloc_background.h        +1   −1
fs/bcachefs/btree_cache.c             +4   −0
fs/bcachefs/btree_io.c                +4   −8
fs/bcachefs/btree_io.h               +24  −29
fs/bcachefs/btree_iter.h              +1   −0
fs/bcachefs/btree_locking.h           +0   −1
fs/bcachefs/btree_update_interior.c  +13   −0
fs/bcachefs/buckets.c                 +1   −1
fs/bcachefs/util.c                    +0   −3
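The central interface change, visible in the fs/bcachefs/alloc_background.{c,h} hunks below, is that bch2_alloc_write() now takes a nowait flag and reports progress through a *wrote out-parameter, and allocator startup loops on it until no further progress is made. The following is a minimal sketch of that calling convention, condensed from the __bch2_fs_allocator_start() hunk below; the helper name is hypothetical and the bucket-scanning step is elided:

	/*
	 * Sketch only - assumes the bcachefs-internal declarations from
	 * fs/bcachefs/alloc_background.h; alloc_write_until_stable() is a
	 * hypothetical name, not part of this commit.
	 */
	static int alloc_write_until_stable(struct bch_fs *c)
	{
		bool wrote;
		int ret;

		while (1) {
			wrote = false;

			/* ... scan devices and invalidate buckets here ... */

			/* nowait == true maps to BTREE_INSERT_NOWAIT in bch2_alloc_write() */
			ret = bch2_alloc_write(c, true, &wrote);

			if (!wrote && ret)
				return ret;	/* error and no progress: give up */
			if (!wrote && !ret)
				break;		/* nothing left to write: done */
		}

		return 0;
	}

Ordinary callers, as the last alloc_background.c hunk shows, pass nowait=false and can ignore *wrote.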
fs/bcachefs/alloc_background.c

@@ -347,12 +347,14 @@ int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
 	return ret;
 }

-int bch2_alloc_write(struct bch_fs *c)
+int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
 {
 	struct bch_dev *ca;
 	unsigned i;
 	int ret = 0;

+	*wrote = false;
+
 	for_each_rw_member(ca, c, i) {
 		struct btree_iter iter;
 		struct bucket_array *buckets;
@@ -370,9 +372,14 @@ int bch2_alloc_write(struct bch_fs *c)
 			if (!buckets->b[b].mark.dirty)
 				continue;

-			ret = __bch2_alloc_write_key(c, ca, b, &iter, NULL, 0);
+			ret = __bch2_alloc_write_key(c, ca, b, &iter, NULL,
+						     nowait
+						     ? BTREE_INSERT_NOWAIT
+						     : 0);
 			if (ret)
 				break;
+
+			*wrote = true;
 		}
 		up_read(&ca->bucket_lock);
 		bch2_btree_iter_unlock(&iter);
@@ -1270,20 +1277,23 @@ static void flush_held_btree_writes(struct bch_fs *c)
 	struct bucket_table *tbl;
 	struct rhash_head *pos;
 	struct btree *b;
-	bool flush_updates;
-	size_t i, nr_pending_updates;
+	bool nodes_blocked;
+	size_t i;
+	struct closure cl;
+
+	closure_init_stack(&cl);

 	clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
 again:
 	pr_debug("flushing dirty btree nodes");
 	cond_resched();
+	closure_wait(&c->btree_interior_update_wait, &cl);

-	flush_updates = false;
-	nr_pending_updates = bch2_btree_interior_updates_nr_pending(c);
+	nodes_blocked = false;

 	rcu_read_lock();
 	for_each_cached_btree(b, c, tbl, i, pos)
-		if (btree_node_dirty(b) && (!b->written || b->level)) {
+		if (btree_node_need_write(b)) {
 			if (btree_node_may_write(b)) {
 				rcu_read_unlock();
 				btree_node_lock_type(c, b, SIX_LOCK_read);
@@ -1291,7 +1301,7 @@ static void flush_held_btree_writes(struct bch_fs *c)
 				six_unlock_read(&b->lock);
 				goto again;
 			} else {
-				flush_updates = true;
+				nodes_blocked = true;
 			}
 		}
 	rcu_read_unlock();
@@ -1299,17 +1309,16 @@ static void flush_held_btree_writes(struct bch_fs *c)
 	if (c->btree_roots_dirty)
 		bch2_journal_meta(&c->journal);

-	/*
-	 * This is ugly, but it's needed to flush btree node writes
-	 * without spinning...
-	 */
-	if (flush_updates) {
-		closure_wait_event(&c->btree_interior_update_wait,
-				   bch2_btree_interior_updates_nr_pending(c) <
-				   nr_pending_updates);
+	if (nodes_blocked) {
+		closure_sync(&cl);
 		goto again;
 	}

+	closure_wake_up(&c->btree_interior_update_wait);
+	closure_sync(&cl);
+
+	closure_wait_event(&c->btree_interior_update_wait,
+			   !bch2_btree_interior_updates_nr_pending(c));
 }

 static void allocator_start_issue_discards(struct bch_fs *c)
@@ -1331,13 +1340,10 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 	unsigned dev_iter;
 	u64 journal_seq = 0;
 	long bu;
-	bool invalidating_data = false;
 	int ret = 0;

-	if (test_alloc_startup(c)) {
-		invalidating_data = true;
+	if (test_alloc_startup(c))
 		goto not_enough;
-	}

 	/* Scan for buckets that are already invalidated: */
 	for_each_rw_member(ca, c, dev_iter) {
@@ -1384,21 +1390,6 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 not_enough:
 	pr_debug("not enough empty buckets; scanning for reclaimable buckets");

-	for_each_rw_member(ca, c, dev_iter) {
-		find_reclaimable_buckets(c, ca);
-
-		while (!fifo_full(&ca->free[RESERVE_BTREE]) &&
-		       (bu = next_alloc_bucket(ca)) >= 0) {
-			invalidating_data |=
-				bch2_invalidate_one_bucket(c, ca, bu,
-							   &journal_seq);
-
-			fifo_push(&ca->free[RESERVE_BTREE], bu);
-			bucket_set_dirty(ca, bu);
-		}
-	}
-
-	pr_debug("done scanning for reclaimable buckets");
-
 	/*
 	 * We're moving buckets to freelists _before_ they've been marked as
 	 * invalidated on disk - we have to so that we can allocate new btree
@@ -1408,38 +1399,59 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 	 * have cached data in them, which is live until they're marked as
 	 * invalidated on disk:
 	 */
-	if (invalidating_data) {
-		pr_debug("invalidating existing data");
-		set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
-	} else {
-		pr_debug("issuing discards");
-		allocator_start_issue_discards(c);
-	}
+	set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);

-	/*
-	 * XXX: it's possible for this to deadlock waiting on journal reclaim,
-	 * since we're holding btree writes. What then?
-	 */
-	ret = bch2_alloc_write(c);
-	if (ret)
-		return ret;
+	while (1) {
+		bool wrote = false;

-	if (invalidating_data) {
-		pr_debug("flushing journal");
+		for_each_rw_member(ca, c, dev_iter) {
+			find_reclaimable_buckets(c, ca);

-		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
-		if (ret)
-			return ret;
+			while (!fifo_full(&ca->free[RESERVE_BTREE]) &&
+			       (bu = next_alloc_bucket(ca)) >= 0) {
+				bch2_invalidate_one_bucket(c, ca, bu,
+							   &journal_seq);
+
+				fifo_push(&ca->free[RESERVE_BTREE], bu);
+				bucket_set_dirty(ca, bu);
+			}
+		}
+
+		pr_debug("done scanning for reclaimable buckets");
+
+		/*
+		 * XXX: it's possible for this to deadlock waiting on journal reclaim,
+		 * since we're holding btree writes. What then?
+		 */
+		ret = bch2_alloc_write(c, true, &wrote);

-		pr_debug("issuing discards");
-		allocator_start_issue_discards(c);
+		/*
+		 * If bch2_alloc_write() did anything, it may have used some
+		 * buckets, and we need the RESERVE_BTREE freelist full - so we
+		 * need to loop and scan again.
+		 * And if it errored, it may have been because there weren't
+		 * enough buckets, so just scan and loop again as long as it
+		 * made some progress:
+		 */
+		if (!wrote && ret)
+			return ret;
+		if (!wrote && !ret)
+			break;
 	}

+	pr_debug("flushing journal");
+
+	ret = bch2_journal_flush(&c->journal);
+	if (ret)
+		return ret;
+
+	pr_debug("issuing discards");
+	allocator_start_issue_discards(c);
+
 	set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);

 	/* now flush dirty btree nodes: */
-	if (invalidating_data)
-		flush_held_btree_writes(c);
+	flush_held_btree_writes(c);

 	return 0;
 }
@@ -1448,6 +1460,7 @@ int bch2_fs_allocator_start(struct bch_fs *c)
 {
 	struct bch_dev *ca;
 	unsigned i;
+	bool wrote;
 	int ret;

 	down_read(&c->gc_lock);
@@ -1465,7 +1478,7 @@ int bch2_fs_allocator_start(struct bch_fs *c)
 		}
 	}

-	return bch2_alloc_write(c);
+	return bch2_alloc_write(c, false, &wrote);
 }

 void bch2_fs_allocator_background_init(struct bch_fs *c)
fs/bcachefs/alloc_background.h

@@ -55,7 +55,7 @@ void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
 void bch2_dev_allocator_stop(struct bch_dev *);
 int bch2_dev_allocator_start(struct bch_dev *);

-int bch2_alloc_write(struct bch_fs *);
+int bch2_alloc_write(struct bch_fs *, bool, bool *);
 int bch2_fs_allocator_start(struct bch_fs *);
 void bch2_fs_allocator_background_init(struct bch_fs *);
fs/bcachefs/btree_cache.c

@@ -171,6 +171,10 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 	if (!btree_node_may_write(b))
 		goto out_unlock;

+	if (btree_node_dirty(b) &&
+	    test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
+		goto out_unlock;
+
 	if (btree_node_dirty(b) ||
 	    btree_node_write_in_flight(b) ||
 	    btree_node_read_in_flight(b)) {
fs/bcachefs/btree_io.c

@@ -1330,8 +1330,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 		if (!(old & (1 << BTREE_NODE_dirty)))
 			return;

-		if (b->written &&
-		    !btree_node_may_write(b))
+		if (!btree_node_may_write(b))
 			return;

 		if (old & (1 << BTREE_NODE_write_in_flight)) {
@@ -1347,7 +1346,6 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 	} while (cmpxchg_acquire(&b->flags, old, new) != old);

 	BUG_ON(btree_node_fake(b));
-	BUG_ON(!list_empty(&b->write_blocked));
 	BUG_ON((b->will_make_reachable != 0) != !b->written);
 	BUG_ON(b->written >= c->opts.btree_node_size);
@@ -1685,15 +1683,13 @@ ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
 		unsigned long flags = READ_ONCE(b->flags);
 		unsigned idx = (flags & (1 << BTREE_NODE_write_idx)) != 0;

-		if (//!(flags & (1 << BTREE_NODE_dirty)) &&
-		    !b->writes[0].wait.list.first &&
-		    !b->writes[1].wait.list.first &&
-		    !(b->will_make_reachable & 1))
+		if (!(flags & (1 << BTREE_NODE_dirty)))
 			continue;

-		pr_buf(&out, "%p d %u l %u w %u b %u r %u:%lu c %u p %u\n",
+		pr_buf(&out, "%p d %u n %u l %u w %u b %u r %u:%lu c %u p %u\n",
 		       b,
 		       (flags & (1 << BTREE_NODE_dirty)) != 0,
+		       (flags & (1 << BTREE_NODE_need_write)) != 0,
 		       b->level,
 		       b->written,
 		       !list_empty_careful(&b->write_blocked),
fs/bcachefs/btree_io.h

@@ -3,6 +3,7 @@
 #define _BCACHEFS_BTREE_IO_H

 #include "bset.h"
+#include "btree_locking.h"
 #include "extents.h"
 #include "io_types.h"
@@ -48,7 +49,7 @@ static inline void btree_node_wait_on_io(struct btree *b)
 static inline bool btree_node_may_write(struct btree *b)
 {
 	return list_empty_careful(&b->write_blocked) &&
-	       !b->will_make_reachable;
+	       (!b->written || !b->will_make_reachable);
 }

 enum compact_mode {
@@ -100,42 +101,36 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
 void bch2_btree_node_write(struct bch_fs *, struct btree *,
 			   enum six_lock_type);

-/*
- * btree_node_dirty() can be cleared with only a read lock,
- * and for bch2_btree_node_write_cond() we want to set need_write iff it's
- * still dirty:
- */
-static inline void set_btree_node_need_write_if_dirty(struct btree *b)
+static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b)
 {
-	unsigned long old, new, v = READ_ONCE(b->flags);
-
-	do {
-		old = new = v;
-
-		if (!(old & (1 << BTREE_NODE_dirty)))
-			break;
+	while (b->written && btree_node_need_write(b) &&
+	       btree_node_may_write(b)) {
+		if (!btree_node_write_in_flight(b)) {
+			bch2_btree_node_write(c, b, SIX_LOCK_read);
+			return;
+		}

-		new |= (1 << BTREE_NODE_need_write);
-	} while ((v = cmpxchg(&b->flags, old, new)) != old);
+		six_unlock_read(&b->lock);
+		btree_node_wait_on_io(b);
+		btree_node_lock_type(c, b, SIX_LOCK_read);
+	}
 }

 #define bch2_btree_node_write_cond(_c, _b, cond)			\
 do {									\
-	while ((_b)->written && btree_node_dirty(_b) && (cond)) {	\
-		if (!btree_node_may_write(_b)) {			\
-			set_btree_node_need_write_if_dirty(_b);		\
-			break;						\
-		}							\
+	unsigned long old, new, v = READ_ONCE((_b)->flags);		\
 									\
-		if (!btree_node_write_in_flight(_b)) {			\
-			bch2_btree_node_write(_c, _b, SIX_LOCK_read);	\
+	do {								\
+		old = new = v;						\
+									\
+		if (!(old & (1 << BTREE_NODE_dirty)) || !(cond))	\
 			break;						\
-		}							\
 									\
-		six_unlock_read(&(_b)->lock);				\
-		btree_node_wait_on_io(_b);				\
-		btree_node_lock_type(c, b, SIX_LOCK_read);		\
-	}								\
+		new |= (1 << BTREE_NODE_need_write);			\
+	} while ((v = cmpxchg(&(_b)->flags, old, new)) != old);	\
+									\
+	btree_node_write_if_need(_c, _b);				\
 } while (0)

 void bch2_btree_flush_all_reads(struct bch_fs *);
fs/bcachefs/btree_iter.h

@@ -2,6 +2,7 @@
 #ifndef _BCACHEFS_BTREE_ITER_H
 #define _BCACHEFS_BTREE_ITER_H

+#include "bset.h"
 #include "btree_types.h"

 static inline void btree_iter_set_dirty(struct btree_iter *iter,
fs/bcachefs/btree_locking.h

@@ -11,7 +11,6 @@
  */

 #include "btree_iter.h"
-#include "btree_io.h"
 #include "six.h"

 /* matches six lock types */
fs/bcachefs/btree_update_interior.c

@@ -367,6 +367,7 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev
 	set_btree_node_accessed(b);
 	set_btree_node_dirty(b);
+	set_btree_node_need_write(b);

 	bch2_bset_init_first(b, &b->data->keys);
 	memset(&b->nr, 0, sizeof(b->nr));
@@ -655,6 +656,12 @@ static void btree_update_nodes_written(struct closure *cl)
 		closure_wait(&btree_current_write(b)->wait, cl);

 		list_del(&as->write_blocked_list);
+
+		/*
+		 * for flush_held_btree_writes() waiting on updates to flush or
+		 * nodes to be writeable:
+		 */
+		closure_wake_up(&c->btree_interior_update_wait);
 		mutex_unlock(&c->btree_interior_update_lock);

 		/*
@@ -958,6 +965,12 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
 	list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
 		list_del(&p->write_blocked_list);
 		btree_update_reparent(as, p);
+
+		/*
+		 * for flush_held_btree_writes() waiting on updates to flush or
+		 * nodes to be writeable:
+		 */
+		closure_wake_up(&c->btree_interior_update_wait);
 	}

 	clear_btree_node_dirty(b);
fs/bcachefs/buckets.c

@@ -1038,7 +1038,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	size_t reserve_none	= max_t(size_t, 1, nbuckets >> 9);
 	size_t copygc_reserve	= max_t(size_t, 2, nbuckets >> 7);
 	size_t free_inc_nr	= max(max_t(size_t, 1, nbuckets >> 12),
-				      btree_reserve);
+				      btree_reserve * 2);
 	bool resize = ca->buckets[0] != NULL,
 	     start_copygc = ca->copygc_thread != NULL;
 	int ret = -ENOMEM;
fs/bcachefs/util.c

@@ -25,9 +25,6 @@
 #include "eytzinger.h"
 #include "util.h"

-#define simple_strtoint(c, end, base)	simple_strtol(c, end, base)
-#define simple_strtouint(c, end, base)	simple_strtoul(c, end, base)
-
 static const char si_units[] = "?kMGTPEZY";

 static int __bch2_strtoh(const char *cp, u64 *res,