Commit 7b3f84ea
Authored Oct 06, 2018 by Kent Overstreet
Committed by Kent Overstreet, Oct 22, 2023

bcachefs: Split out alloc_background.c
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

parent f43cc5be
Showing 21 changed files with 843 additions and 815 deletions (+843, -815)
fs/bcachefs/Makefile                  +2    -1
fs/bcachefs/alloc_background.c        +7    -759
fs/bcachefs/alloc_background.h        +62   -0
fs/bcachefs/alloc_foreground.c        +741  -0
fs/bcachefs/alloc_foreground.h        +15   -42
fs/bcachefs/bkey_methods.c            +1    -1
fs/bcachefs/btree_gc.c                +1    -1
fs/bcachefs/btree_update_interior.c   +1    -1
fs/bcachefs/buckets.c                 +1    -1
fs/bcachefs/chardev.c                 +0    -1
fs/bcachefs/fs-io.c                   +1    -0
fs/bcachefs/io.c                      +1    -1
fs/bcachefs/io.h                      +0    -1
fs/bcachefs/journal.c                 +1    -1
fs/bcachefs/journal_io.c              +2    -1
fs/bcachefs/move.c                    +1    -0
fs/bcachefs/movinggc.c                +1    -0
fs/bcachefs/rebalance.c               +1    -1
fs/bcachefs/recovery.c                +1    -1
fs/bcachefs/super.c                   +2    -1
fs/bcachefs/sysfs.c                   +1    -1
fs/bcachefs/Makefile

@@ -3,7 +3,8 @@ obj-$(CONFIG_BCACHEFS_FS) += bcachefs.o
 bcachefs-y		:=	\
 	acl.o			\
-	alloc.o			\
+	alloc_background.o	\
+	alloc_foreground.o	\
 	bkey.o			\
 	bkey_methods.o		\
 	bset.o			\
fs/bcachefs/alloc.c → fs/bcachefs/alloc_background.c

-/*
+// SPDX-License-Identifier: GPL-2.0
- * Primary bucket allocation code
- *
- * Copyright 2012 Google, Inc.
- *
- * Allocation in bcache is done in terms of buckets:
- *
- * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
- * btree pointers - they must match for the pointer to be considered valid.
- *
- * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
- * bucket simply by incrementing its gen.
- *
- * The gens (along with the priorities; it's really the gens are important but
- * the code is named as if it's the priorities) are written in an arbitrary list
- * of buckets on disk, with a pointer to them in the journal header.
- *
- * When we invalidate a bucket, we have to write its new gen to disk and wait
- * for that write to complete before we use it - otherwise after a crash we
- * could have pointers that appeared to be good but pointed to data that had
- * been overwritten.
- *
- * Since the gens and priorities are all stored contiguously on disk, we can
- * batch this up: We fill up the free_inc list with freshly invalidated buckets,
- * call prio_write(), and when prio_write() finishes we pull buckets off the
- * free_inc list and optionally discard them.
- *
- * free_inc isn't the only freelist - if it was, we'd often have to sleep while
- * priorities and gens were being written before we could allocate. c->free is a
- * smaller freelist, and buckets on that list are always ready to be used.
- *
- * If we've got discards enabled, that happens when a bucket moves from the
- * free_inc list to the free list.
- *
- * It's important to ensure that gens don't wrap around - with respect to
- * either the oldest gen in the btree or the gen on disk. This is quite
- * difficult to do in practice, but we explicitly guard against it anyways - if
- * a bucket is in danger of wrapping around we simply skip invalidating it that
- * time around, and we garbage collect or rewrite the priorities sooner than we
- * would have otherwise.
- *
- * bch2_bucket_alloc() allocates a single bucket from a specific device.
- *
- * bch2_bucket_alloc_set() allocates one or more buckets from different devices
- * in a given filesystem.
- *
- * invalidate_buckets() drives all the processes described above. It's called
- * from bch2_bucket_alloc() and a few other places that need to make sure free
- * buckets are ready.
- *
- * invalidate_buckets_(lru|fifo)() find buckets that are available to be
- * invalidated, and then invalidate them and stick them on the free_inc list -
- * in either lru or fifo order.
- */
 
 #include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
 #include "btree_cache.h"
 #include "btree_io.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
 #include "btree_gc.h"
 #include "buckets.h"
-#include "checksum.h"
 #include "clock.h"
 #include "debug.h"
-#include "disk_groups.h"
 #include "error.h"
-#include "extents.h"
-#include "io.h"
-#include "journal.h"
 #include "journal_io.h"
-#include "super-io.h"
 #include "trace.h"
 
-#include <linux/blkdev.h>
 #include <linux/kthread.h>
 #include <linux/math64.h>
 #include <linux/random.h>

@@ -496,23 +436,6 @@ static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
  * commands to the newly free buckets, then puts them on the various freelists.
  */
 
-static void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
-				   size_t bucket)
-{
-	if (expensive_debug_checks(c) &&
-	    test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
-		size_t iter;
-		long i;
-		unsigned j;
-
-		for (j = 0; j < RESERVE_NR; j++)
-			fifo_for_each_entry(i, &ca->free[j], iter)
-				BUG_ON(i == bucket);
-		fifo_for_each_entry(i, &ca->free_inc, iter)
-			BUG_ON(i == bucket);
-	}
-}
-
 #define BUCKET_GC_GEN_MAX	96U
 
 /**

@@ -1044,668 +967,6 @@ static int bch2_allocator_thread(void *arg)
 	return 0;
 }
 
-/* Allocation */
-
-/*
- * Open buckets represent a bucket that's currently being allocated from. They
- * serve two purposes:
- *
- * - They track buckets that have been partially allocated, allowing for
- *   sub-bucket sized allocations - they're used by the sector allocator below
- *
- * - They provide a reference to the buckets they own that mark and sweep GC
- *   can find, until the new allocation has a pointer to it inserted into the
- *   btree
- *
- * When allocating some space with the sector allocator, the allocation comes
- * with a reference to an open bucket - the caller is required to put that
- * reference _after_ doing the index update that makes its allocation reachable.
- */
-void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
...
-static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
...
-/* _only_ for allocating the journal on a new device: */
-long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
...
-static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
...
-/**
- * bch_bucket_alloc - allocate a single bucket from a specific device
- *
- * Returns index of bucket on success, 0 on failure
- * */
-int bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
-		      enum alloc_reserve reserve,
-		      bool may_alloc_partial,
-		      struct closure *cl)
...
-static int __dev_alloc_cmp(struct write_point *wp,
-			   unsigned l, unsigned r)
...
-#define dev_alloc_cmp(l, r) __dev_alloc_cmp(wp, l, r)
-
-struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
-					 struct write_point *wp,
-					 struct bch_devs_mask *devs)
...
-void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
-		     struct write_point *wp)
...
-static enum bucket_alloc_ret bch2_bucket_alloc_set(struct bch_fs *c,
-					struct write_point *wp,
-					unsigned nr_replicas,
-					enum alloc_reserve reserve,
-					struct bch_devs_mask *devs,
-					struct closure *cl)
...
-/* Sector allocator */
-
-static void writepoint_drop_ptr(struct bch_fs *c,
-				struct write_point *wp,
-				unsigned i)
...
-static void writepoint_drop_ptrs(struct bch_fs *c,
-				 struct write_point *wp,
-				 u16 target, bool in_target)
...
-static void verify_not_stale(struct bch_fs *c, const struct write_point *wp)
...
-static int open_bucket_add_buckets(struct bch_fs *c,
-				   u16 target,
-				   struct write_point *wp,
-				   struct bch_devs_list *devs_have,
-				   unsigned nr_replicas,
-				   enum alloc_reserve reserve,
-				   struct closure *cl)
...
-static struct write_point *__writepoint_find(struct hlist_head *head,
-					     unsigned long write_point)
...
-static struct hlist_head *writepoint_hash(struct bch_fs *c,
-					  unsigned long write_point)
...
-static struct write_point *writepoint_find(struct bch_fs *c,
-					   unsigned long write_point)
...
-/*
- * Get us an open_bucket we can allocate from, return with it locked:
- */
-struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
-					     unsigned target,
-					     struct write_point_specifier write_point,
-					     struct bch_devs_list *devs_have,
-					     unsigned nr_replicas,
-					     unsigned nr_replicas_required,
-					     enum alloc_reserve reserve,
-					     unsigned flags,
-					     struct closure *cl)
...
-/*
- * Append pointers to the space we just allocated to @k, and mark @sectors space
- * as allocated out of @ob
- */
-void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
-				    struct bkey_i_extent *e, unsigned sectors)
...
-void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
...
 
 /* Startup/shutdown (ro/rw): */
 
 void bch2_recalc_capacity(struct bch_fs *c)

@@ -1792,19 +1053,6 @@ void bch2_recalc_capacity(struct bch_fs *c)
 	closure_wake_up(&c->freelist_wait);
 }
 
-static void bch2_stop_write_point(struct bch_fs *c, struct bch_dev *ca,
-				  struct write_point *wp)
-{
-	struct bch_devs_mask not_self;
-
-	bitmap_complement(not_self.d, ca->self.d, BCH_SB_MEMBERS_MAX);
-
-	mutex_lock(&wp->lock);
-	wp->first_ptr = wp->nr_ptrs;
-	writepoint_drop_ptrs(c, wp, dev_to_target(ca->dev_idx), true);
-	mutex_unlock(&wp->lock);
-}
-
 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
 {
 	struct open_bucket *ob;

@@ -1842,11 +1090,11 @@ void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
 	/* Next, close write points that point to this device... */
 	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
-		bch2_stop_write_point(c, ca, &c->write_points[i]);
+		bch2_writepoint_stop(c, ca, &c->write_points[i]);
 
-	bch2_stop_write_point(c, ca, &ca->copygc_write_point);
-	bch2_stop_write_point(c, ca, &c->rebalance_write_point);
-	bch2_stop_write_point(c, ca, &c->btree_write_point);
+	bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
+	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
+	bch2_writepoint_stop(c, ca, &c->btree_write_point);
 
 	mutex_lock(&c->btree_reserve_cache_lock);
 	while (c->btree_reserve_cache_nr) {
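The block comment being moved above describes how bucket reuse is driven by an 8-bit generation number: a btree pointer is only treated as valid while its gen matches the bucket's current gen, so bumping the gen invalidates every stale pointer at once. A minimal standalone sketch of that rule, with hypothetical demo_* names that are not part of the patch:

/* Illustrative sketch only - hypothetical names, not bcachefs code. */
#include <stdbool.h>
#include <stdint.h>

struct demo_bucket { uint8_t gen; };	/* per-bucket generation, 8 bits as described above */
struct demo_ptr    { uint8_t gen; };	/* gen copied into the pointer at allocation time */

/* A pointer is usable only while its gen still matches the bucket's gen. */
static bool demo_ptr_valid(struct demo_bucket b, struct demo_ptr p)
{
	return b.gen == p.gen;
}

/* Reusing a bucket: bump the gen and every old pointer becomes stale. */
static void demo_bucket_invalidate(struct demo_bucket *b)
{
	b->gen++;	/* wraps at 8 bits - hence the wraparound guard the comment mentions */
}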
fs/bcachefs/alloc_background.h (new file, mode 100644)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_ALLOC_BACKGROUND_H
#define _BCACHEFS_ALLOC_BACKGROUND_H

#include "bcachefs.h"
#include "alloc_types.h"
#include "debug.h"

#define ALLOC_SCAN_BATCH(ca)		((ca)->mi.nbuckets >> 9)

const char *bch2_alloc_invalid(const struct bch_fs *, struct bkey_s_c);
int bch2_alloc_to_text(struct bch_fs *, char *, size_t, struct bkey_s_c);

#define bch2_bkey_alloc_ops (struct bkey_ops) {		\
	.key_invalid	= bch2_alloc_invalid,		\
	.val_to_text	= bch2_alloc_to_text,		\
}

int bch2_alloc_read(struct bch_fs *, struct list_head *);
int bch2_alloc_replay_key(struct bch_fs *, struct bpos);

static inline void bch2_wake_allocator(struct bch_dev *ca)
{
	struct task_struct *p;

	rcu_read_lock();
	p = rcu_dereference(ca->alloc_thread);
	if (p)
		wake_up_process(p);
	rcu_read_unlock();
}

static inline void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
					  size_t bucket)
{
	if (expensive_debug_checks(c) &&
	    test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
		size_t iter;
		long i;
		unsigned j;

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each_entry(i, &ca->free[j], iter)
				BUG_ON(i == bucket);
		fifo_for_each_entry(i, &ca->free_inc, iter)
			BUG_ON(i == bucket);
	}
}

void bch2_recalc_capacity(struct bch_fs *);

void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);

void bch2_dev_allocator_stop(struct bch_dev *);
int bch2_dev_allocator_start(struct bch_dev *);

int bch2_alloc_write(struct bch_fs *);
int bch2_fs_allocator_start(struct bch_fs *);
void bch2_fs_allocator_init(struct bch_fs *);

#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
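A usage note on the header above: the foreground code wakes the per-device allocator thread through bch2_wake_allocator() after it consumes a bucket from a freelist (see bch2_bucket_alloc() in the new alloc_foreground.c below). A hedged sketch of that call pattern, where everything except bch2_wake_allocator() is hypothetical:

/* Hypothetical caller sketch - only bch2_wake_allocator() comes from alloc_background.h. */
static void demo_after_bucket_taken(struct bch_dev *ca)
{
	/*
	 * A bucket was just popped off one of ca->free[]; poke the allocator
	 * thread so it can invalidate and refill more buckets in the background.
	 */
	bch2_wake_allocator(ca);
}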
fs/bcachefs/alloc_foreground.c (new file, mode 100644)

// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens are important but
 * the code is named as if it's the priorities) are written in an arbitrary list
 * of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * It's important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyways - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 *
 * invalidate_buckets() drives all the processes described above. It's called
 * from bch2_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "io.h"
#include "trace.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

enum bucket_alloc_ret {
	ALLOC_SUCCESS		= 0,
	OPEN_BUCKETS_EMPTY	= -1,
	FREELIST_EMPTY		= -2,	/* Allocator thread not keeping up */
	NO_DEVICES		= -3,	/* -EROFS */
};

/*
 * Open buckets represent a bucket that's currently being allocated from. They
 * serve two purposes:
 *
 * - They track buckets that have been partially allocated, allowing for
 *   sub-bucket sized allocations - they're used by the sector allocator below
 *
 * - They provide a reference to the buckets they own that mark and sweep GC
 *   can find, until the new allocation has a pointer to it inserted into the
 *   btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */

void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	percpu_down_read(&c->usage_lock);
	spin_lock(&ob->lock);

	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), false,
			       gc_pos_alloc(c, ob), 0);
	ob->valid = false;

	spin_unlock(&ob->lock);
	percpu_up_read(&c->usage_lock);

	spin_lock(&c->freelist_lock);
	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;
	c->open_buckets_nr_free++;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
	return ob;
}

/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	struct bucket_array *buckets;
	ssize_t b;

	rcu_read_lock();
	buckets = bucket_array(ca);

	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
		if (is_available_bucket(buckets->b[b].mark))
			goto success;
	b = -1;
success:
	rcu_read_unlock();
	return b;
}

static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_ALLOC:
		return 0;
	case RESERVE_BTREE:
		return BTREE_NODE_RESERVE / 2;
	default:
		return BTREE_NODE_RESERVE;
	}
}

/**
 * bch_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns index of bucket on success, 0 on failure
 * */
int bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
		      enum alloc_reserve reserve,
		      bool may_alloc_partial,
		      struct closure *cl)
{
	struct bucket_array *buckets;
	struct open_bucket *ob;
	long bucket;

	spin_lock(&c->freelist_lock);

	if (may_alloc_partial &&
	    ca->open_buckets_partial_nr) {
		int ret = ca->open_buckets_partial[--ca->open_buckets_partial_nr];

		c->open_buckets[ret].on_partial_list = false;
		spin_unlock(&c->freelist_lock);
		return ret;
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);
		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return OPEN_BUCKETS_EMPTY;
	}

	if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
		goto out;

	switch (reserve) {
	case RESERVE_ALLOC:
		if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_BTREE:
		if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
		    ca->free[RESERVE_BTREE].size &&
		    fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return FREELIST_EMPTY;
out:
	verify_not_on_freelist(c, ca, bucket);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);
	buckets = bucket_array(ca);

	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->ptr		= (struct bch_extent_ptr) {
		.gen	= buckets->b[bucket].mark.gen,
		.offset	= bucket_to_sector(ca, bucket),
		.dev	= ca->dev_idx,
	};

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);
	spin_unlock(&ob->lock);

	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob - c->open_buckets;
}

static int __dev_alloc_cmp(struct write_point *wp,
			   unsigned l, unsigned r)
{
	return ((wp->next_alloc[l] > wp->next_alloc[r]) -
		(wp->next_alloc[l] < wp->next_alloc[r]));
}

#define dev_alloc_cmp(l, r) __dev_alloc_cmp(wp, l, r)

struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
					 struct write_point *wp,
					 struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device_rcu(ca, c, i, devs)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_alloc_cmp);
	return ret;
}

void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
		     struct write_point *wp)
{
	u64 *v = wp->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_free(c, ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = wp->next_alloc;
	     v < wp->next_alloc + ARRAY_SIZE(wp->next_alloc);
	     v++)
		*v = *v < scale ? 0 : *v - scale;
}

static enum bucket_alloc_ret bch2_bucket_alloc_set(struct bch_fs *c,
					struct write_point *wp,
					unsigned nr_replicas,
					enum alloc_reserve reserve,
					struct bch_devs_mask *devs,
					struct closure *cl)
{
	enum bucket_alloc_ret ret = NO_DEVICES;
	struct dev_alloc_list devs_sorted;
	struct bch_dev *ca;
	unsigned i, nr_ptrs_effective = 0;
	bool have_cache_dev = false;

	BUG_ON(nr_replicas > ARRAY_SIZE(wp->ptrs));

	for (i = wp->first_ptr; i < wp->nr_ptrs; i++) {
		ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);

		nr_ptrs_effective += ca->mi.durability;
		have_cache_dev |= !ca->mi.durability;
	}

	if (nr_ptrs_effective >= nr_replicas)
		return ALLOC_SUCCESS;

	devs_sorted = bch2_wp_alloc_list(c, wp, devs);

	for (i = 0; i < devs_sorted.nr; i++) {
		int ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability &&
		    (have_cache_dev ||
		     wp->type != BCH_DATA_USER))
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				       wp->type == BCH_DATA_USER, cl);
		if (ob < 0) {
			ret = ob;
			if (ret == OPEN_BUCKETS_EMPTY)
				break;
			continue;
		}

		BUG_ON(ob <= 0 || ob > U8_MAX);
		BUG_ON(wp->nr_ptrs >= ARRAY_SIZE(wp->ptrs));

		wp->ptrs[wp->nr_ptrs++] = c->open_buckets + ob;

		bch2_wp_rescale(c, ca, wp);

		nr_ptrs_effective += ca->mi.durability;
		have_cache_dev |= !ca->mi.durability;

		__clear_bit(ca->dev_idx, devs->d);

		if (nr_ptrs_effective >= nr_replicas) {
			ret = ALLOC_SUCCESS;
			break;
		}
	}

	EBUG_ON(reserve == RESERVE_MOVINGGC &&
		ret != ALLOC_SUCCESS &&
		ret != OPEN_BUCKETS_EMPTY);

	switch (ret) {
	case ALLOC_SUCCESS:
		return 0;
	case NO_DEVICES:
		return -EROFS;
	case FREELIST_EMPTY:
	case OPEN_BUCKETS_EMPTY:
		return cl ? -EAGAIN : -ENOSPC;
	default:
		BUG();
	}
}

/* Sector allocator */

static void bch2_writepoint_drop_ptr(struct bch_fs *c,
				     struct write_point *wp,
				     unsigned i)
{
	struct open_bucket *ob = wp->ptrs[i];
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	BUG_ON(ca->open_buckets_partial_nr >=
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (wp->type == BCH_DATA_USER) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}

	array_remove_item(wp->ptrs, wp->nr_ptrs, i);

	if (i < wp->first_ptr)
		wp->first_ptr--;
}

void bch2_writepoint_drop_ptrs(struct bch_fs *c,
			       struct write_point *wp,
			       u16 target, bool in_target)
{
	int i;

	for (i = wp->first_ptr - 1; i >= 0; --i)
		if (bch2_dev_in_target(c, wp->ptrs[i]->ptr.dev,
				       target) == in_target)
			bch2_writepoint_drop_ptr(c, wp, i);
}

static void verify_not_stale(struct bch_fs *c, const struct write_point *wp)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct open_bucket *ob;
	unsigned i;

	writepoint_for_each_ptr_all(wp, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		BUG_ON(ptr_stale(ca, &ob->ptr));
	}
#endif
}

static int open_bucket_add_buckets(struct bch_fs *c,
				   u16 target,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   unsigned nr_replicas,
				   enum alloc_reserve reserve,
				   struct closure *cl)
{
	struct bch_devs_mask devs = c->rw_devs[wp->type];
	const struct bch_devs_mask *t;
	struct open_bucket *ob;
	unsigned i;
	int ret;

	percpu_down_read(&c->usage_lock);
	rcu_read_lock();

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	writepoint_for_each_ptr_all(wp, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	t = bch2_target_to_mask(c, target);
	if (t)
		bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX);

	ret = bch2_bucket_alloc_set(c, wp, nr_replicas, reserve, &devs, cl);

	rcu_read_unlock();
	percpu_up_read(&c->usage_lock);

	return ret;
}

void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
{
	struct bch_devs_mask not_self;

	bitmap_complement(not_self.d, ca->self.d, BCH_SB_MEMBERS_MAX);

	mutex_lock(&wp->lock);
	wp->first_ptr = wp->nr_ptrs;
	bch2_writepoint_drop_ptrs(c, wp, dev_to_target(ca->dev_idx), true);
	mutex_unlock(&wp->lock);
}

static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			return wp;

	return NULL;
}

static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}

	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points);
	     wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}

/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned target,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct write_point *wp;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned nr_ptrs_have, nr_ptrs_effective;
	int ret, i, cache_idx = -1;

	BUG_ON(!nr_replicas || !nr_replicas_required);

	wp = writepoint_find(c, write_point.v);

	wp->first_ptr = 0;

	/* does writepoint have ptrs we can't use? */
	writepoint_for_each_ptr(wp, ob, i)
		if (bch2_dev_list_has_dev(*devs_have, ob->ptr.dev)) {
			swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
			wp->first_ptr++;
		}

	nr_ptrs_have = wp->first_ptr;

	/* does writepoint have ptrs we don't want to use? */
	if (target)
		writepoint_for_each_ptr(wp, ob, i)
			if (!bch2_dev_in_target(c, ob->ptr.dev, target)) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
			}

	if (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS) {
		ret = open_bucket_add_buckets(c, target, wp, devs_have,
					      nr_replicas, reserve, cl);
	} else {
		ret = open_bucket_add_buckets(c, target, wp, devs_have,
					      nr_replicas, reserve, NULL);
		if (!ret)
			goto alloc_done;

		wp->first_ptr = nr_ptrs_have;

		ret = open_bucket_add_buckets(c, 0, wp, devs_have,
					      nr_replicas, reserve, cl);
	}

	if (ret && ret != -EROFS)
		goto err;
alloc_done:
	/* check for more than one cache: */
	for (i = wp->nr_ptrs - 1; i >= wp->first_ptr; --i) {
		ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);

		if (ca->mi.durability)
			continue;

		/*
		 * if we ended up with more than one cache device, prefer the
		 * one in the target we want:
		 */
		if (cache_idx >= 0) {
			if (!bch2_dev_in_target(c, wp->ptrs[i]->ptr.dev,
						target)) {
				bch2_writepoint_drop_ptr(c, wp, i);
			} else {
				bch2_writepoint_drop_ptr(c, wp, cache_idx);
				cache_idx = i;
			}
		} else {
			cache_idx = i;
		}
	}

	/* we might have more effective replicas than required: */
	nr_ptrs_effective = 0;
	writepoint_for_each_ptr(wp, ob, i) {
		ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		nr_ptrs_effective += ca->mi.durability;
	}

	if (ret == -EROFS &&
	    nr_ptrs_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	if (nr_ptrs_effective > nr_replicas) {
		writepoint_for_each_ptr(wp, ob, i) {
			ca = bch_dev_bkey_exists(c, ob->ptr.dev);

			if (ca->mi.durability &&
			    ca->mi.durability <= nr_ptrs_effective - nr_replicas &&
			    !bch2_dev_in_target(c, ob->ptr.dev, target)) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
				nr_ptrs_effective -= ca->mi.durability;
			}
		}
	}

	if (nr_ptrs_effective > nr_replicas) {
		writepoint_for_each_ptr(wp, ob, i) {
			ca = bch_dev_bkey_exists(c, ob->ptr.dev);

			if (ca->mi.durability &&
			    ca->mi.durability <= nr_ptrs_effective - nr_replicas) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
				nr_ptrs_effective -= ca->mi.durability;
			}
		}
	}

	/* Remove pointers we don't want to use: */
	if (target)
		bch2_writepoint_drop_ptrs(c, wp, target, false);

	BUG_ON(wp->first_ptr >= wp->nr_ptrs);
	BUG_ON(nr_ptrs_effective < nr_replicas_required);

	wp->sectors_free = UINT_MAX;

	writepoint_for_each_ptr(wp, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	verify_not_stale(c, wp);

	return wp;
err:
	mutex_unlock(&wp->lock);
	return ERR_PTR(ret);
}

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i_extent *e, unsigned sectors)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	writepoint_for_each_ptr(wp, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr tmp = ob->ptr;

		EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));

		tmp.cached = bkey_extent_is_cached(&e->k) ||
			(!ca->mi.durability && wp->type == BCH_DATA_USER);

		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
		extent_ptr_append(e, tmp);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	int i;

	for (i = wp->nr_ptrs - 1; i >= 0; --i) {
		struct open_bucket *ob = wp->ptrs[i];

		if (!ob->sectors_free) {
			array_remove_item(wp->ptrs, wp->nr_ptrs, i);
			bch2_open_bucket_put(c, ob);
		}
	}

	mutex_unlock(&wp->lock);
}
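The new file boils down to a start/append/done cycle for foreground writes: bch2_alloc_sectors_start() returns a locked write point with space reserved, bch2_alloc_sectors_append_ptrs() adds pointers into the extent being written, and bch2_alloc_sectors_done() unlocks the write point and releases open buckets that are out of space. A rough caller sketch under those assumptions; the demo function, write point id, and replica counts are hypothetical, not an actual bcachefs write path:

/* Hypothetical sketch of the foreground allocation cycle shown above. */
static int demo_alloc_for_write(struct bch_fs *c, struct bkey_i_extent *e,
				unsigned sectors, struct closure *cl)
{
	struct bch_devs_list devs_have = { .nr = 0 };	/* no devices to avoid */
	struct write_point *wp;

	/* Reserve space: returns the write point locked, or an ERR_PTR. */
	wp = bch2_alloc_sectors_start(c, 0 /* no target */,
				      writepoint_hashed(0x1234 /* hypothetical id */),
				      &devs_have,
				      2 /* nr_replicas */, 1 /* nr_replicas_required */,
				      RESERVE_NONE, 0 /* flags */, cl);
	if (IS_ERR(wp))
		return PTR_ERR(wp);

	/* Never ask for more than the write point has left. */
	sectors = min(sectors, wp->sectors_free);

	/* Append pointers for @sectors of the reserved space to the extent. */
	bch2_alloc_sectors_append_ptrs(c, wp, e, sectors);

	/* Unlock the write point; exhausted open buckets are put here. */
	bch2_alloc_sectors_done(c, wp);

	return 0;
}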
fs/bcachefs/alloc.h → fs/bcachefs/alloc_foreground.h

-#ifndef _BCACHEFS_ALLOC_H
-#define _BCACHEFS_ALLOC_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_ALLOC_FOREGROUND_H
+#define _BCACHEFS_ALLOC_FOREGROUND_H
 
 #include "bcachefs.h"
 #include "alloc_types.h"

@@ -9,16 +10,6 @@ struct bch_dev;
 struct bch_fs;
 struct bch_devs_list;
 
-#define ALLOC_SCAN_BATCH(ca)		((ca)->mi.nbuckets >> 9)
-
-const char *bch2_alloc_invalid(const struct bch_fs *, struct bkey_s_c);
-int bch2_alloc_to_text(struct bch_fs *, char *, size_t, struct bkey_s_c);
-
-#define bch2_bkey_alloc_ops (struct bkey_ops) {		\
-	.key_invalid	= bch2_alloc_invalid,		\
-	.val_to_text	= bch2_alloc_to_text,		\
-}
-
 struct dev_alloc_list {
 	unsigned	nr;
 	u8		devs[BCH_SB_MEMBERS_MAX];

@@ -30,16 +21,6 @@ struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *,
 void bch2_wp_rescale(struct bch_fs *, struct bch_dev *,
 		     struct write_point *);
 
-int bch2_alloc_read(struct bch_fs *, struct list_head *);
-int bch2_alloc_replay_key(struct bch_fs *, struct bpos);
-
-enum bucket_alloc_ret {
-	ALLOC_SUCCESS		= 0,
-	OPEN_BUCKETS_EMPTY	= -1,
-	FREELIST_EMPTY		= -2,	/* Allocator thread not keeping up */
-	NO_DEVICES		= -3,	/* -EROFS */
-};
-
 long bch2_bucket_alloc_new_fs(struct bch_dev *);
 
 int bch2_bucket_alloc(struct bch_fs *, struct bch_dev *, enum alloc_reserve, bool,

@@ -100,15 +81,19 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
 				    struct bkey_i_extent *, unsigned);
 void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);
 
-static inline void bch2_wake_allocator(struct bch_dev *ca)
+void bch2_writepoint_stop(struct bch_fs *, struct bch_dev *,
+			  struct write_point *);
+
+void bch2_writepoint_drop_ptrs(struct bch_fs *,
+			       struct write_point *, u16, bool);
+
+static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
+						 unsigned long write_point)
 {
-	struct task_struct *p;
+	unsigned hash =
+		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
 
-	rcu_read_lock();
-	p = rcu_dereference(ca->alloc_thread);
-	if (p)
-		wake_up_process(p);
-	rcu_read_unlock();
+	return &c->write_points_hash[hash];
 }
 
 static inline struct write_point_specifier writepoint_hashed(unsigned long v)

@@ -121,14 +106,6 @@ static inline struct write_point_specifier writepoint_ptr(struct write_point *wp
 	return (struct write_point_specifier) { .v = (unsigned long) wp };
 }
 
-void bch2_recalc_capacity(struct bch_fs *);
-
-void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
-void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
-
-void bch2_dev_allocator_stop(struct bch_dev *);
-int bch2_dev_allocator_start(struct bch_dev *);
-
 static inline void writepoint_init(struct write_point *wp,
 				   enum bch_data_type type)
 {

@@ -136,8 +113,4 @@ static inline void writepoint_init(struct write_point *wp,
 	wp->type = type;
 }
 
-int bch2_alloc_write(struct bch_fs *);
-int bch2_fs_allocator_start(struct bch_fs *);
-void bch2_fs_allocator_init(struct bch_fs *);
-
-#endif /* _BCACHEFS_ALLOC_H */
+#endif /* _BCACHEFS_ALLOC_FOREGROUND_H */
fs/bcachefs/bkey_methods.c

@@ -3,7 +3,7 @@
 #include "bcachefs.h"
 #include "bkey_methods.h"
 #include "btree_types.h"
-#include "alloc.h"
+#include "alloc_background.h"
 #include "dirent.h"
 #include "error.h"
 #include "extents.h"
fs/bcachefs/btree_gc.c

@@ -5,7 +5,7 @@
  */
 
 #include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_background.h"
 #include "bkey_methods.h"
 #include "btree_locking.h"
 #include "btree_update_interior.h"
fs/bcachefs/btree_update_interior.c

 // SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_foreground.h"
 #include "bkey_methods.h"
 #include "btree_cache.h"
 #include "btree_gc.h"
fs/bcachefs/buckets.c

@@ -64,7 +64,7 @@
  */
 
 #include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_background.h"
 #include "btree_gc.h"
 #include "buckets.h"
 #include "error.h"
fs/bcachefs/chardev.c

@@ -2,7 +2,6 @@
 #ifndef NO_BCACHEFS_CHARDEV
 
 #include "bcachefs.h"
-#include "alloc.h"
 #include "bcachefs_ioctl.h"
 #include "buckets.h"
 #include "chardev.h"
fs/bcachefs/fs-io.c

@@ -2,6 +2,7 @@
 #ifndef NO_BCACHEFS_FS
 
 #include "bcachefs.h"
+#include "alloc_foreground.h"
 #include "btree_update.h"
 #include "buckets.h"
 #include "clock.h"
fs/bcachefs/io.c

@@ -7,7 +7,7 @@
  */
 
 #include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_foreground.h"
 #include "bset.h"
 #include "btree_update.h"
 #include "buckets.h"
fs/bcachefs/io.h

@@ -2,7 +2,6 @@
 #ifndef _BCACHEFS_IO_H
 #define _BCACHEFS_IO_H
 
-#include "alloc.h"
 #include "checksum.h"
 #include "io_types.h"
fs/bcachefs/journal.c

@@ -6,7 +6,7 @@
  */
 
 #include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_foreground.h"
 #include "bkey_methods.h"
 #include "btree_gc.h"
 #include "buckets.h"
fs/bcachefs/journal_io.c

 // SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
 #include "btree_gc.h"
 #include "btree_update.h"
 #include "buckets.h"
fs/bcachefs/move.c

 // SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
+#include "alloc_foreground.h"
 #include "btree_gc.h"
 #include "btree_update.h"
 #include "buckets.h"
fs/bcachefs/movinggc.c

@@ -6,6 +6,7 @@
  */
 
 #include "bcachefs.h"
+#include "alloc_foreground.h"
 #include "btree_iter.h"
 #include "btree_update.h"
 #include "buckets.h"
fs/bcachefs/rebalance.c

 // SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_foreground.h"
 #include "btree_iter.h"
 #include "buckets.h"
 #include "clock.h"
fs/bcachefs/recovery.c

 // SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_background.h"
 #include "btree_gc.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
fs/bcachefs/super.c

@@ -8,7 +8,8 @@
  */
 
 #include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_background.h"
+#include "alloc_foreground.h"
 #include "btree_cache.h"
 #include "btree_gc.h"
 #include "btree_update_interior.h"
fs/bcachefs/sysfs.c

@@ -9,7 +9,7 @@
 #ifndef NO_BCACHEFS_SYSFS
 
 #include "bcachefs.h"
-#include "alloc.h"
+#include "alloc_background.h"
 #include "compress.h"
 #include "sysfs.h"
 #include "btree_cache.h"