/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"

static inline void btree_iter_set_dirty(struct btree_iter *iter,
					enum btree_iter_uptodate u)
{
	/* uptodate only ever moves towards "less up to date" here: */
	iter->uptodate = max_t(unsigned, iter->uptodate, u);
}

static inline struct btree *btree_iter_node(struct btree_iter *iter,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_iter *iter,
					const struct btree *b, unsigned level)
{
	/*
	 * We don't compare the low bits of the lock sequence numbers because
	 * @iter might have taken a write lock on @b, and we don't want to skip
	 * the linked iterator if the sequence numbers were equal before taking
	 * that write lock. The lock sequence number is incremented by taking
	 * and releasing write locks and is even when unlocked: e.g. if @iter
	 * recorded lock_seq 6 and then write locked @b (seq now 7),
	 * 6 >> 1 == 7 >> 1 still matches.
	 */
	return iter->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}

static inline struct btree *btree_node_parent(struct btree_iter *iter,
					      struct btree *b)
{
	return btree_iter_node(iter, b->c.level + 1);
}

static inline bool btree_trans_has_multiple_iters(const struct btree_trans *trans)
{
	return hweight64(trans->iters_linked) > 1;
}

static inline int btree_iter_err(const struct btree_iter *iter)
{
	return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
}

/* Iterate over iters within a transaction: */

#define trans_for_each_iter_all(_trans, _iter)				\
	for (_iter = (_trans)->iters;					\
	     _iter < (_trans)->iters + (_trans)->nr_iters;		\
	     _iter++)

static inline struct btree_iter *
__trans_next_iter(struct btree_trans *trans, unsigned idx)
{
	EBUG_ON(idx < trans->nr_iters && trans->iters[idx].idx != idx);

	for (; idx < trans->nr_iters; idx++)
		if (trans->iters_linked & (1ULL << idx))
			return &trans->iters[idx];

	return NULL;
}

#define trans_for_each_iter(_trans, _iter)				\
	for (_iter = __trans_next_iter((_trans), 0);			\
	     (_iter);							\
	     _iter = __trans_next_iter((_trans), (_iter)->idx + 1))
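
/*
 * Example (sketch): walk every live iterator in a transaction and drop
 * unneeded intent locks:
 *
 *	struct btree_iter *iter;
 *
 *	trans_for_each_iter(trans, iter)
 *		bch2_btree_iter_downgrade(iter);
 */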

static inline bool __iter_has_node(const struct btree_iter *iter,
				   const struct btree *b)
{
	return iter->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(iter, b, b->c.level);
}

static inline struct btree_iter *
__trans_next_iter_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned idx)
{
	struct btree_iter *iter = __trans_next_iter(trans, idx);

	while (iter && !__iter_has_node(iter, b))
		iter = __trans_next_iter(trans, iter->idx + 1);

	return iter;
}

#define trans_for_each_iter_with_node(_trans, _b, _iter)		\
	for (_iter = __trans_next_iter_with_node((_trans), (_b), 0);	\
	     (_iter);							\
	     _iter = __trans_next_iter_with_node((_trans), (_b),	\
						 (_iter)->idx + 1))

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
void bch2_btree_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_iter_verify(struct btree_iter *iter,
					  struct btree *b) {}
static inline void bch2_btree_trans_verify_locks(struct btree_trans *trans) {}
#endif

void bch2_btree_iter_fix_key_modified(struct btree_iter *, struct btree *,
				      struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
			      struct btree_node_iter *, struct bkey_packed *,
			      unsigned, unsigned);

bool bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);

bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *, unsigned);

/*
 * Ensure the iterator holds intent locks on at least the first
 * @new_locks_want btree levels; returns true on success:
 */
static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter,
					   unsigned new_locks_want)
{
	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	return iter->locks_want < new_locks_want
		? (!iter->trans->nounlock
		   ? __bch2_btree_iter_upgrade(iter, new_locks_want)
		   : __bch2_btree_iter_upgrade_nounlock(iter, new_locks_want))
		: iter->uptodate <= BTREE_ITER_NEED_PEEK;
}

void __bch2_btree_iter_downgrade(struct btree_iter *, unsigned);

static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
{
	if (iter->locks_want > ((iter->flags & BTREE_ITER_INTENT) ? 1 : 0))
		__bch2_btree_iter_downgrade(iter, 0);
}

void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);

void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);

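/*
 * Traverse the iterator to its current position, locking btree nodes as
 * needed. The inline wrapper is the fast path: it does nothing when the
 * iterator is already sufficiently up to date:
 */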
int __must_check __bch2_btree_iter_traverse(struct btree_iter *);

static inline int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
	return iter->uptodate >= BTREE_ITER_NEED_RELOCK
		? __bch2_btree_iter_traverse(iter)
		: 0;
}

int bch2_btree_iter_traverse_all(struct btree_trans *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);

struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);

void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void __bch2_btree_iter_set_pos(struct btree_iter *, struct bpos, bool);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);

static inline struct bpos btree_type_successor(enum btree_id id,
					       struct bpos pos)
{
	if (id == BTREE_ID_INODES) {
		pos.inode++;
		pos.offset = 0;
	} else if (!btree_node_type_is_extents(id)) {
		pos = bkey_successor(pos);
	}

	return pos;
}
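
/*
 * Example: on the inodes btree the successor of POS(7, 123) is POS(8, 0);
 * on other non-extents btrees it's the next bkey position; on extents
 * btrees the position is returned unchanged, presumably because extent
 * keys span ranges of offsets rather than single offsets.
 */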

static inline struct bpos btree_type_predecessor(enum btree_id id,
						 struct bpos pos)
{
	if (id == BTREE_ID_INODES) {
		--pos.inode;
		pos.offset = 0;
	} else {
		pos = bkey_predecessor(pos);
	}

	return pos;
}

/* Iterators are ordered by btree id first, then by position: */
static inline int __btree_iter_cmp(enum btree_id id,
				   struct bpos pos,
				   const struct btree_iter *r)
{
	if (id != r->btree_id)
		return id < r->btree_id ? -1 : 1;
	return bkey_cmp(pos, r->pos);
}

static inline int btree_iter_cmp(const struct btree_iter *l,
				 const struct btree_iter *r)
{
	return __btree_iter_cmp(l->btree_id, l->pos, r);
}

/*
 * Unlocks before scheduling
 * Note: does not revalidate iterator
 */
static inline int bch2_trans_cond_resched(struct btree_trans *trans)
{
	if (need_resched() || race_fault()) {
		bch2_trans_unlock(trans);
		schedule();
		return bch2_trans_relock(trans) ? 0 : -EINTR;
	} else {
		return 0;
	}
}
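
/*
 * Example (sketch): call from a long-running loop; -EINTR means the
 * transaction lost its locks and must be restarted from the top
 * ("retry" is a hypothetical label in the caller):
 *
 *	ret = bch2_trans_cond_resched(&trans);
 *	if (ret)
 *		goto retry;
 */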

#define __for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      _locks_want, _depth, _flags, _b)		\
	for ((_iter) = bch2_trans_get_node_iter((_trans), (_btree_id),	\
				_start, _locks_want, _depth, _flags),	\
	     _b = bch2_btree_iter_peek_node(_iter);			\
	     (_b);							\
	     (_b) = bch2_btree_iter_next_node(_iter, _depth))

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b)					\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      0, 0, _flags, _b)
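
/*
 * Example (sketch): walk every node of the extents btree starting from
 * POS_MIN (process_node() is hypothetical):
 *
 *	struct btree *b;
 *
 *	for_each_btree_node(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, b)
 *		process_node(b);
 */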

static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
						     unsigned flags)
{
	return flags & BTREE_ITER_SLOTS
		? bch2_btree_iter_peek_slot(iter)
		: bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
						     unsigned flags)
{
	return flags & BTREE_ITER_SLOTS
		? bch2_btree_iter_next_slot(iter)
		: bch2_btree_iter_next(iter);
}

/* Extract the error code, if any, encoded in a bkey_s_c from peek/next: */
static inline int bkey_err(struct bkey_s_c k)
{
	return PTR_ERR_OR_ZERO(k.k);
}

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _ret)			\
	for ((_ret) = PTR_ERR_OR_ZERO((_iter) =				\
			bch2_trans_get_iter((_trans), (_btree_id),	\
					    (_start), (_flags))) ?:	\
		      PTR_ERR_OR_ZERO(((_k) =				\
			__bch2_btree_iter_peek(_iter, _flags)).k);	\
	     !(_ret) && (_k).k;						\
	     (_ret) = PTR_ERR_OR_ZERO(((_k) =				\
			__bch2_btree_iter_next(_iter, _flags)).k))
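
/*
 * Example (sketch): iterate over keys in the extents btree; assumes an
 * initialized btree_trans (process_key() is hypothetical); on exit _ret
 * holds any error encountered:
 *
 *	struct btree_iter *iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k, ret)
 *		if (process_key(k))
 *			break;
 */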

#define for_each_btree_key_continue(_iter, _flags, _k, _ret)		\
	for ((_k) = __bch2_btree_iter_peek(_iter, _flags);		\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     (_k) = __bch2_btree_iter_next(_iter, _flags))

/* new multiple iterator interface: */

int bch2_trans_iter_put(struct btree_trans *, struct btree_iter *);
int bch2_trans_iter_free(struct btree_trans *, struct btree_iter *);

void bch2_trans_unlink_iters(struct btree_trans *);

struct btree_iter *bch2_trans_get_iter(struct btree_trans *, enum btree_id,
				       struct bpos, unsigned);
struct btree_iter *bch2_trans_copy_iter(struct btree_trans *,
					struct btree_iter *);
struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *,
				enum btree_id, struct bpos,
				unsigned, unsigned, unsigned);

#define TRANS_RESET_ITERS		(1 << 0)
#define TRANS_RESET_MEM			(1 << 1)
#define TRANS_RESET_NOTRAVERSE		(1 << 2)

void bch2_trans_reset(struct btree_trans *, unsigned);

static inline void bch2_trans_begin(struct btree_trans *trans)
{
	bch2_trans_reset(trans, TRANS_RESET_ITERS|TRANS_RESET_MEM);
}
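
/*
 * Example (sketch): typical transaction lifecycle; operations returning
 * -EINTR are restarted from the top (do_btree_work() is hypothetical):
 *
 *	struct btree_trans trans;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_btree_work(&trans);
 *	} while (ret == -EINTR);
 *	ret = bch2_trans_exit(&trans) ?: ret;
 */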

void *bch2_trans_kmalloc(struct btree_trans *, size_t);
void bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned, size_t);
int bch2_trans_exit(struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */