Commit 23f80d2b
authored Dec 17, 2018 by Kent Overstreet
committed by Kent Overstreet, Oct 22, 2023

bcachefs: Factor out acc_u64s()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

parent e47c0171

Showing 4 changed files with 79 additions and 169 deletions:

    fs/bcachefs/btree_gc.c   +49  -58
    fs/bcachefs/buckets.c    +13  -109
    fs/bcachefs/buckets.h     +0  -2
    fs/bcachefs/util.h       +17  -0
fs/bcachefs/btree_gc.c

@@ -482,29 +482,35 @@ static void bch2_gc_free(struct bch_fs *c)
 	c->usage[1] = NULL;
 }
 
-static void fs_usage_reset(struct bch_fs_usage *fs_usage)
+/*
+ * Accumulate percpu counters onto one cpu's copy - only valid when access
+ * against any percpu counter is guarded against
+ */
+static u64 *acc_percpu_u64s(u64 __percpu *p, unsigned nr)
 {
-	unsigned offset = offsetof(typeof(*fs_usage), s.gc_start);
+	u64 *ret;
+	int cpu;
 
-	memset((void *) fs_usage + offset, 0,
-	       sizeof(*fs_usage) - offset);
-}
+	preempt_disable();
+	ret = this_cpu_ptr(p);
+	preempt_enable();
 
-static void fs_usage_cpy(struct bch_fs_usage *dst,
-			 struct bch_fs_usage *src)
-{
-	unsigned offset = offsetof(typeof(*dst), s.gc_start);
+	for_each_possible_cpu(cpu) {
+		u64 *i = per_cpu_ptr(p, cpu);
 
-	memcpy((void *) dst + offset,
-	       (void *) src + offset,
-	       sizeof(*dst) - offset);
+		if (i != ret) {
+			acc_u64s(ret, i, nr);
+			memset(i, 0, nr * sizeof(u64));
+		}
+	}
+
+	return ret;
 }
 
 static void bch2_gc_done_nocheck(struct bch_fs *c)
 {
 	struct bch_dev *ca;
 	unsigned i;
-	int cpu;
 
 	{
 		struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
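As an aside for readers new to the percpu idiom, here is a minimal userspace sketch of the acc_percpu_u64s() contract: fold every CPU's copy of a u64 counter array onto the current CPU's copy and zero the others, returning the surviving copy. The kernel percpu machinery (this_cpu_ptr, per_cpu_ptr, for_each_possible_cpu) is simulated with a plain 2-D array; NCPUS, NR and the test values are invented for the demo.

	/* Userspace sketch of the acc_percpu_u64s() pattern. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define NCPUS	4
	#define NR	3

	static uint64_t percpu[NCPUS][NR];	/* stand-in for u64 __percpu * */

	static uint64_t *acc_percpu_u64s(unsigned this_cpu, unsigned nr)
	{
		uint64_t *ret = percpu[this_cpu];	/* ~ this_cpu_ptr(p) */

		for (unsigned cpu = 0; cpu < NCPUS; cpu++) {	/* ~ for_each_possible_cpu */
			uint64_t *i = percpu[cpu];		/* ~ per_cpu_ptr(p, cpu) */

			if (i != ret) {
				for (unsigned j = 0; j < nr; j++)
					ret[j] += i[j];		/* ~ acc_u64s(ret, i, nr) */
				memset(i, 0, nr * sizeof(uint64_t));
			}
		}
		return ret;
	}

	int main(void)
	{
		for (unsigned cpu = 0; cpu < NCPUS; cpu++)
			for (unsigned j = 0; j < NR; j++)
				percpu[cpu][j] = cpu + 1;	/* fake counters */

		uint64_t *sum = acc_percpu_u64s(0, NR);

		printf("%llu %llu %llu\n",	/* prints: 10 10 10 */
		       (unsigned long long) sum[0],
		       (unsigned long long) sum[1],
		       (unsigned long long) sum[2]);
		return 0;
	}

As the comment in the patch says, this is only valid while writers to the percpu counters are excluded; the sketch sidesteps that by being single-threaded.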
@@ -534,42 +540,39 @@ static void bch2_gc_done_nocheck(struct bch_fs *c)
 	};
 
 	for_each_member_device(ca, c, i) {
-		struct bch_dev_usage *p;
+		unsigned nr = sizeof(struct bch_dev_usage) / sizeof(u64);
+		struct bch_dev_usage *dst = (void *)
+			acc_percpu_u64s((void *) ca->usage[0], nr);
+		struct bch_dev_usage *src = (void *)
+			acc_percpu_u64s((void *) ca->usage[1], nr);
 
-		for_each_possible_cpu(cpu) {
-			p = per_cpu_ptr(ca->usage[0], cpu);
-			memset(p, 0, sizeof(*p));
-		}
-
-		preempt_disable();
-		*this_cpu_ptr(ca->usage[0]) = __bch2_dev_usage_read(ca, 1);
-		preempt_enable();
+		*dst = *src;
 	}
 
 	{
-		struct bch_fs_usage src = __bch2_fs_usage_read(c, 1);
+		unsigned nr = sizeof(struct bch_fs_usage) / sizeof(u64);
+		struct bch_fs_usage *dst = (void *)
+			acc_percpu_u64s((void *) c->usage[0], nr);
+		struct bch_fs_usage *src = (void *)
+			acc_percpu_u64s((void *) c->usage[1], nr);
+		unsigned offset = offsetof(typeof(*dst), s.gc_start);
 
-		for_each_possible_cpu(cpu)
-			fs_usage_reset(per_cpu_ptr(c->usage[0], cpu));
-
-		preempt_disable();
-		fs_usage_cpy(this_cpu_ptr(c->usage[0]), &src);
-		preempt_enable();
+		memcpy((void *) dst + offset,
+		       (void *) src + offset,
+		       sizeof(*dst) - offset);
 	}
 }
 
 static void bch2_gc_done(struct bch_fs *c, bool initial)
 {
 	struct bch_dev *ca;
 	unsigned i;
-	int cpu;
 
 #define copy_field(_f, _msg, ...)					\
-	if (dst._f != src._f) {						\
+	if (dst->_f != src->_f) {					\
 		bch_err(c, _msg ": got %llu, should be %llu, fixing"	\
-			, ##__VA_ARGS__, dst._f, src._f);		\
-		dst._f = src._f;					\
+			, ##__VA_ARGS__, dst->_f, src->_f);		\
+		dst->_f = src->_f;					\
 	}
 #define copy_stripe_field(_f, _msg, ...)				\
 	if (dst->_f != src->_f) {					\
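The copy_field() change is mechanical: dst and src become pointers in the new code, so the macro's member accesses switch from . to ->. Its shape (compare, report, repair in place) is easy to try in userspace; the sketch below uses an invented struct and fprintf() in place of bch_err(), keeping the same GNU ,##__VA_ARGS__ comma-swallowing the kernel macro relies on.

	/* Userspace sketch of the copy_field() shape; all names invented. */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	struct usage { uint64_t hidden, data; };

	#define copy_field(_f, _msg, ...)				\
		if (dst->_f != src->_f) {				\
			fprintf(stderr, _msg ": got %" PRIu64		\
				", should be %" PRIu64 ", fixing\n",	\
				##__VA_ARGS__, dst->_f, src->_f);	\
			dst->_f = src->_f;				\
		}

	int main(void)
	{
		struct usage a = { .hidden = 7, .data = 40 };
		struct usage b = { .hidden = 7, .data = 42 };
		struct usage *dst = &a, *src = &b;

		copy_field(hidden, "hidden");	/* equal: stays silent */
		copy_field(data, "data");	/* differs: reported, then fixed */
		return a.data == 42 ? 0 : 1;
	}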
@@ -650,9 +653,11 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 	};
 
 	for_each_member_device(ca, c, i) {
-		struct bch_dev_usage dst = __bch2_dev_usage_read(ca, 0);
-		struct bch_dev_usage src = __bch2_dev_usage_read(ca, 1);
-		struct bch_dev_usage *p;
+		unsigned nr = sizeof(struct bch_dev_usage) / sizeof(u64);
+		struct bch_dev_usage *dst = (void *)
+			acc_percpu_u64s((void *) ca->usage[0], nr);
+		struct bch_dev_usage *src = (void *)
+			acc_percpu_u64s((void *) ca->usage[1], nr);
 		unsigned b;
 
 		for (b = 0; b < BCH_DATA_NR; b++)
@@ -666,21 +671,14 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 			       "sectors[%s]", bch2_data_types[b]);
 		copy_dev_field(sectors_fragmented, "sectors_fragmented");
-
-		for_each_possible_cpu(cpu) {
-			p = per_cpu_ptr(ca->usage[0], cpu);
-			memset(p, 0, sizeof(*p));
-		}
-
-		preempt_disable();
-		p = this_cpu_ptr(ca->usage[0]);
-		*p = dst;
-		preempt_enable();
 	}
 
 	{
-		struct bch_fs_usage dst = __bch2_fs_usage_read(c, 0);
-		struct bch_fs_usage src = __bch2_fs_usage_read(c, 1);
+		unsigned nr = sizeof(struct bch_fs_usage) / sizeof(u64);
+		struct bch_fs_usage *dst = (void *)
+			acc_percpu_u64s((void *) c->usage[0], nr);
+		struct bch_fs_usage *src = (void *)
+			acc_percpu_u64s((void *) c->usage[1], nr);
 		unsigned r, b;
 
 		copy_fs_field(s.hidden, "hidden");
@@ -703,13 +701,6 @@ static void bch2_gc_done(struct bch_fs *c, bool initial)
 		for (b = 0; b < BCH_DATA_NR; b++)
 			copy_fs_field(buckets[b], "buckets[%s]",
 				      bch2_data_types[b]);
-
-		for_each_possible_cpu(cpu)
-			fs_usage_reset(per_cpu_ptr(c->usage[0], cpu));
-
-		preempt_disable();
-		fs_usage_cpy(this_cpu_ptr(c->usage[0]), &dst);
-		preempt_enable();
 	}
 out:
 	percpu_up_write(&c->mark_lock);
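The memcpy in bch2_gc_done_nocheck() above copies only the tail of the usage struct, from s.gc_start onward, so every field before that member survives. A self-contained sketch of the offsetof() tail-copy trick, with an invented struct and char-pointer arithmetic in place of the kernel's GNU void-pointer arithmetic:

	/* Copy a struct from a chosen member through to the end,
	 * preserving the earlier fields of dst.  Names are invented. */
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct stats {
		uint64_t capacity;	/* before gc_start: preserved */
		uint64_t gc_start;	/* copy starts at this member */
		uint64_t dirty, cached;
	};

	int main(void)
	{
		struct stats dst = { .capacity = 100, .gc_start = 1, .dirty = 2, .cached = 3 };
		struct stats src = { .capacity = 999, .gc_start = 7, .dirty = 8, .cached = 9 };
		size_t offset = offsetof(struct stats, gc_start);

		memcpy((char *) &dst + offset,
		       (const char *) &src + offset,
		       sizeof(dst) - offset);

		printf("%llu %llu %llu %llu\n",	/* prints: 100 7 8 9 */
		       (unsigned long long) dst.capacity,
		       (unsigned long long) dst.gc_start,
		       (unsigned long long) dst.dirty,
		       (unsigned long long) dst.cached);
		return 0;
	}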
fs/bcachefs/buckets.c

@@ -78,77 +78,6 @@
 static inline u64 __bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);
 
-#ifdef DEBUG_BUCKETS
-
-#define lg_local_lock	lg_global_lock
-#define lg_local_unlock	lg_global_unlock
-
-static void bch2_fs_stats_verify(struct bch_fs *c)
-{
-	struct bch_fs_usage stats =
-		_bch2_fs_usage_read(c);
-	unsigned i, j;
-
-	for (i = 0; i < ARRAY_SIZE(stats.replicas); i++) {
-		for (j = 0; j < ARRAY_SIZE(stats.replicas[i].data); j++)
-			if ((s64) stats.replicas[i].data[j] < 0)
-				panic("replicas %u %s sectors underflow: %lli\n",
-				      i + 1, bch_data_types[j],
-				      stats.replicas[i].data[j]);
-
-		if ((s64) stats.replicas[i].persistent_reserved < 0)
-			panic("replicas %u reserved underflow: %lli\n",
-			      i + 1, stats.replicas[i].persistent_reserved);
-	}
-
-	for (j = 0; j < ARRAY_SIZE(stats.buckets); j++)
-		if ((s64) stats.replicas[i].data_buckets[j] < 0)
-			panic("%s buckets underflow: %lli\n",
-			      bch_data_types[j],
-			      stats.buckets[j]);
-
-	if ((s64) stats.s.online_reserved < 0)
-		panic("sectors_online_reserved underflow: %lli\n",
-		      stats.s.online_reserved);
-}
-
-static void bch2_dev_stats_verify(struct bch_dev *ca)
-{
-	struct bch_dev_usage stats =
-		__bch2_dev_usage_read(ca);
-	u64 n = ca->mi.nbuckets - ca->mi.first_bucket;
-	unsigned i;
-
-	for (i = 0; i < ARRAY_SIZE(stats.buckets); i++)
-		BUG_ON(stats.buckets[i] > n);
-	BUG_ON(stats.buckets_alloc > n);
-	BUG_ON(stats.buckets_unavailable > n);
-}
-
-static void bch2_disk_reservations_verify(struct bch_fs *c, int flags)
-{
-	if (!(flags & BCH_DISK_RESERVATION_NOFAIL)) {
-		u64 used = __bch2_fs_sectors_used(c);
-		u64 cached = 0;
-		u64 avail = atomic64_read(&c->sectors_available);
-		int cpu;
-
-		for_each_possible_cpu(cpu)
-			cached += per_cpu_ptr(c->usage_percpu, cpu)->available_cache;
-
-		if (used + avail + cached > c->capacity)
-			panic("used %llu avail %llu cached %llu capacity %llu\n",
-			      used, avail, cached, c->capacity);
-	}
-}
-
-#else
-
-static void bch2_fs_stats_verify(struct bch_fs *c) {}
-static void bch2_dev_stats_verify(struct bch_dev *ca) {}
-static void bch2_disk_reservations_verify(struct bch_fs *c, int flags) {}
-
-#endif
-
 /*
  * Clear journal_seq_valid for buckets for which it's not needed, to prevent
  * wraparound:
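The deleted block is the common kernel idiom for debug-only verification: real implementations under a config #ifdef, empty static inline stubs otherwise, so call sites stay unconditional and compile to nothing in release builds. (These particular helpers had also drifted out of sync with current field names, e.g. c->usage_percpu.) A generic sketch of the idiom, with invented names:

	/* Debug-stub pattern: checks exist only when the debug macro is
	 * defined.  DEBUG_COUNTERS and verify_counters() are invented. */
	#include <assert.h>
	#include <stdint.h>

	struct counters { int64_t used, avail; };

	#ifdef DEBUG_COUNTERS
	static void verify_counters(const struct counters *c)
	{
		assert(c->used >= 0);	/* catch counter underflow early */
		assert(c->avail >= 0);
	}
	#else
	static inline void verify_counters(const struct counters *c) {}
	#endif

	int main(void)
	{
		struct counters c = { .used = 10, .avail = 5 };

		c.used -= 3;
		verify_counters(&c);	/* no-op unless built with -DDEBUG_COUNTERS */
		return 0;
	}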
@@ -186,43 +115,23 @@ void bch2_bucket_seq_cleanup(struct bch_fs *c)
 	}
 }
 
-#define bch2_usage_add(_acc, _stats)					\
-do {									\
-	typeof(_acc) _a = (_acc), _s = (_stats);			\
-	unsigned i;							\
-									\
-	for (i = 0; i < sizeof(*_a) / sizeof(u64); i++)			\
-		((u64 *) (_a))[i] += ((u64 *) (_s))[i];			\
-} while (0)
-
 #define bch2_usage_read_raw(_stats)					\
 ({									\
 	typeof(*this_cpu_ptr(_stats)) _acc;				\
-	int cpu;							\
 									\
 	memset(&_acc, 0, sizeof(_acc));					\
-									\
-	for_each_possible_cpu(cpu)					\
-		bch2_usage_add(&_acc, per_cpu_ptr((_stats), cpu));	\
+	acc_u64s_percpu((u64 *) &_acc,					\
+			(u64 __percpu *) _stats,			\
+			sizeof(_acc) / sizeof(u64));			\
 									\
 	_acc;								\
 })
 
-struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *ca, bool gc)
-{
-	return bch2_usage_read_raw(ca->usage[gc]);
-}
-
 struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *c, struct bch_dev *ca)
 {
 	return bch2_usage_read_raw(ca->usage[0]);
 }
 
-struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *c, bool gc)
-{
-	return bch2_usage_read_raw(c->usage[gc]);
-}
-
 struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *c)
 {
 	return bch2_usage_read_raw(c->usage[0]);
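bch2_usage_read_raw() is a GCC statement-expression macro: it declares a local aggregate of whatever type the percpu pointer targets, folds every CPU's copy into it, and yields the aggregate by value. A userspace sketch of the same shape, with the percpu data simulated by an array of per-slot structs and all names invented:

	/* Statement-expression snapshot of "percpu" counters. */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define NCPUS 4

	struct dev_usage { uint64_t buckets, sectors; };

	static struct dev_usage usage[NCPUS];	/* stand-in for __percpu data */

	#define usage_read_raw(_stats)					\
	({								\
		typeof((_stats)[0]) _acc;				\
									\
		memset(&_acc, 0, sizeof(_acc));				\
		for (int _cpu = 0; _cpu < NCPUS; _cpu++) {		\
			uint64_t *_a = (uint64_t *) &_acc;		\
			const uint64_t *_s =				\
				(const uint64_t *) &(_stats)[_cpu];	\
									\
			for (unsigned _i = 0;				\
			     _i < sizeof(_acc) / sizeof(uint64_t); _i++)\
				_a[_i] += _s[_i];			\
		}							\
		_acc;							\
	})

	int main(void)
	{
		for (int cpu = 0; cpu < NCPUS; cpu++)
			usage[cpu] = (struct dev_usage) { .buckets = 1, .sectors = 2 };

		struct dev_usage total = usage_read_raw(usage);	/* by-value snapshot */

		printf("%llu %llu\n",				/* prints: 4 8 */
		       (unsigned long long) total.buckets,
		       (unsigned long long) total.sectors);
		return 0;
	}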
@@ -326,12 +235,16 @@ void bch2_fs_usage_apply(struct bch_fs *c,
 	}
 
 	preempt_disable();
-	bch2_usage_add(this_cpu_ptr(c->usage[0]), fs_usage);
-
-	if (gc_visited(c, gc_pos))
-		bch2_usage_add(this_cpu_ptr(c->usage[1]), fs_usage);
-
-	bch2_fs_stats_verify(c);
+	acc_u64s((u64 *) this_cpu_ptr(c->usage[0]),
+		 (u64 *) fs_usage,
+		 sizeof(*fs_usage) / sizeof(u64));
+
+	if (gc_visited(c, gc_pos)) {
+		BUG_ON(!c->usage[1]);
+		acc_u64s((u64 *) this_cpu_ptr(c->usage[1]),
+			 (u64 *) fs_usage,
+			 sizeof(*fs_usage) / sizeof(u64));
+	}
 	preempt_enable();
 
 	memset(fs_usage, 0, sizeof(*fs_usage));
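The (u64 *) casts above treat the whole usage struct as a flat array of u64s, with sizeof(*fs_usage) / sizeof(u64) as the element count. That is only sound when the struct consists entirely of u64s with no padding. A userspace sketch of a compile-time guard for part of that assumption (the guard and the struct are invented here, not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	struct fs_usage {
		uint64_t hidden;
		uint64_t data;
		uint64_t cached;
		uint64_t reserved;
	};

	/* Catches a size that is not a whole number of u64s; it cannot
	 * catch, say, two u32s packed into one u64-sized slot. */
	static_assert(sizeof(struct fs_usage) % sizeof(uint64_t) == 0,
		      "fs_usage must be a whole number of u64s");

	static void acc_u64s(uint64_t *acc, const uint64_t *src, unsigned nr)
	{
		for (unsigned i = 0; i < nr; i++)
			acc[i] += src[i];
	}

	int main(void)
	{
		struct fs_usage a = { 1, 2, 3, 4 }, b = { 10, 20, 30, 40 };

		acc_u64s((uint64_t *) &a, (const uint64_t *) &b,
			 sizeof(a) / sizeof(uint64_t));		/* a = {11,22,33,44} */
		return a.hidden == 11 && a.reserved == 44 ? 0 : 1;
	}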
@@ -392,8 +305,6 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 	if (!is_available_bucket(old) &&
 	    is_available_bucket(new))
 		bch2_wake_allocator(ca);
-
-	bch2_dev_stats_verify(ca);
 }
 
 void bch2_dev_usage_from_buckets(struct bch_fs *c, struct bch_dev *ca)
@@ -1011,8 +922,6 @@ void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
 {
 	percpu_down_read(&c->mark_lock);
 	this_cpu_sub(c->usage[0]->s.online_reserved,
 		     res->sectors);
-
-	bch2_fs_stats_verify(c);
 	percpu_up_read(&c->mark_lock);
 
 	res->sectors = 0;
@@ -1055,8 +964,6 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 	this_cpu_add(c->usage[0]->s.online_reserved, sectors);
 	res->sectors			+= sectors;
 
-	bch2_disk_reservations_verify(c, flags);
-	bch2_fs_stats_verify(c);
 	preempt_enable();
 	percpu_up_read(&c->mark_lock);
 
 	return 0;
@@ -1089,14 +996,11 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 		this_cpu_add(c->usage[0]->s.online_reserved, sectors);
 		res->sectors			+= sectors;
 		ret = 0;
-
-		bch2_disk_reservations_verify(c, flags);
 	} else {
 		atomic64_set(&c->sectors_available,
 			     sectors_available);
 		ret = -ENOSPC;
 	}
 
-	bch2_fs_stats_verify(c);
 	percpu_up_write(&c->mark_lock);
 
 	if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
fs/bcachefs/buckets.h

@@ -180,7 +180,6 @@ static inline bool bucket_needs_journal_commit(struct bucket_mark m,
 /* Device usage: */
 
-struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *, bool);
 struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);
 
 static inline u64 __dev_buckets_available(struct bch_dev *ca,

@@ -219,7 +218,6 @@ static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
 /* Filesystem usage: */
 
-struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *, bool);
 struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);
 
 u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);
fs/bcachefs/util.h

@@ -700,4 +700,21 @@ do {									\
 	}								\
 } while (0)
 
+static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
+{
+	unsigned i;
+
+	for (i = 0; i < nr; i++)
+		acc[i] += src[i];
+}
+
+static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
+				   unsigned nr)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
+}
+
 #endif /* _BCACHEFS_UTIL_H */
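The whole patch reduces to these two helpers. A userspace transliteration, with the u64 __percpu * argument replaced by a plain array of per-slot buffers and for_each_possible_cpu() by a counted loop (NCPUS and the test values are invented for the demo):

	#include <stdint.h>
	#include <stdio.h>

	#define NCPUS 4

	static inline void acc_u64s(uint64_t *acc, const uint64_t *src, unsigned nr)
	{
		for (unsigned i = 0; i < nr; i++)
			acc[i] += src[i];
	}

	static inline void acc_u64s_percpu(uint64_t *acc, uint64_t (*src)[2],
					   unsigned nr)
	{
		for (int cpu = 0; cpu < NCPUS; cpu++)	/* ~ for_each_possible_cpu */
			acc_u64s(acc, src[cpu], nr);	/* ~ per_cpu_ptr(src, cpu) */
	}

	int main(void)
	{
		uint64_t percpu[NCPUS][2] = { {1, 2}, {3, 4}, {5, 6}, {7, 8} };
		uint64_t total[2] = { 0, 0 };

		acc_u64s_percpu(total, percpu, 2);	/* non-destructive: copies stay */

		printf("%llu %llu\n",			/* prints: 16 20 */
		       (unsigned long long) total[0],
		       (unsigned long long) total[1]);
		return 0;
	}

Unlike acc_percpu_u64s() in btree_gc.c, this pair only reads the per-CPU copies; the destructive fold-and-zero variant is layered on top of acc_u64s().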