/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_UPDATE_H
#define _BCACHEFS_BTREE_UPDATE_H

#include "btree_iter.h"
#include "journal.h"

struct bch_fs;
struct btree;

void bch2_btree_node_lock_for_insert(struct bch_fs *, struct btree *,
				     struct btree_iter *);
bool bch2_btree_bset_insert_key(struct btree_iter *, struct btree *,
				struct btree_node_iter *, struct bkey_i *);
void bch2_btree_journal_key(struct btree_trans *, struct btree_iter *,
			    struct bkey_i *);

void bch2_deferred_update_free(struct bch_fs *,
			       struct deferred_update *);
struct deferred_update *
bch2_deferred_update_alloc(struct bch_fs *, enum btree_id, unsigned);

23 24 25 26 27 28
/* Build an insert entry bound to a btree iterator: */
#define BTREE_INSERT_ENTRY(_iter, _k)					\
	((struct btree_insert_entry) {					\
		.iter		= (_iter),				\
		.k		= (_k),					\
	})

/* Build a deferred insert entry (no iterator; uses a deferred_update): */
#define BTREE_INSERT_DEFERRED(_d, _k)					\
	((struct btree_insert_entry) {					\
		.k		= (_k),					\
		.d		= (_d),					\
		.deferred	= true,					\
	})

enum {
	__BTREE_INSERT_ATOMIC,
	__BTREE_INSERT_NOUNLOCK,
	__BTREE_INSERT_NOFAIL,
40
	__BTREE_INSERT_NOCHECK_RW,
Kent Overstreet's avatar
Kent Overstreet committed
41
	__BTREE_INSERT_LAZY_RW,
42 43 44
	__BTREE_INSERT_USE_RESERVE,
	__BTREE_INSERT_USE_ALLOC_RESERVE,
	__BTREE_INSERT_JOURNAL_REPLAY,
45
	__BTREE_INSERT_JOURNAL_RESERVED,
46
	__BTREE_INSERT_NOMARK_INSERT,
47
	__BTREE_INSERT_NOMARK_OVERWRITES,
48
	__BTREE_INSERT_NOMARK,
49 50
	__BTREE_INSERT_MARK_INMEM,
	__BTREE_INSERT_NO_CLEAR_REPLICAS,
51
	__BTREE_INSERT_BUCKET_INVALIDATE,
52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
	__BTREE_INSERT_NOWAIT,
	__BTREE_INSERT_GC_LOCK_HELD,
	__BCH_HASH_SET_MUST_CREATE,
	__BCH_HASH_SET_MUST_REPLACE,
};

/*
 * Don't drop/retake locks before doing btree update, instead return -EINTR if
 * we had to drop locks for any reason
 */
#define BTREE_INSERT_ATOMIC		(1 << __BTREE_INSERT_ATOMIC)

/*
 * Don't drop locks _after_ successfully updating btree:
 */
#define BTREE_INSERT_NOUNLOCK		(1 << __BTREE_INSERT_NOUNLOCK)

/* Don't check for -ENOSPC: */
#define BTREE_INSERT_NOFAIL		(1 << __BTREE_INSERT_NOFAIL)

#define BTREE_INSERT_NOCHECK_RW		(1 << __BTREE_INSERT_NOCHECK_RW)
#define BTREE_INSERT_LAZY_RW		(1 << __BTREE_INSERT_LAZY_RW)

/* for copygc, or when merging btree nodes */
#define BTREE_INSERT_USE_RESERVE	(1 << __BTREE_INSERT_USE_RESERVE)
#define BTREE_INSERT_USE_ALLOC_RESERVE	(1 << __BTREE_INSERT_USE_ALLOC_RESERVE)

/* Insert is for journal replay - don't get journal reservations: */
#define BTREE_INSERT_JOURNAL_REPLAY	(1 << __BTREE_INSERT_JOURNAL_REPLAY)

#define BTREE_INSERT_JOURNAL_RESERVED	(1 << __BTREE_INSERT_JOURNAL_RESERVED)

/* Don't mark new key, just overwrites: */
#define BTREE_INSERT_NOMARK_INSERT	(1 << __BTREE_INSERT_NOMARK_INSERT)

/* Don't mark overwrites, just new key: */
#define BTREE_INSERT_NOMARK_OVERWRITES	(1 << __BTREE_INSERT_NOMARK_OVERWRITES)

/* Don't call mark new key at all: */
#define BTREE_INSERT_NOMARK		(1 << __BTREE_INSERT_NOMARK)

/* Don't mark transactionally: */
#define BTREE_INSERT_MARK_INMEM		(1 << __BTREE_INSERT_MARK_INMEM)

#define BTREE_INSERT_NO_CLEAR_REPLICAS	(1 << __BTREE_INSERT_NO_CLEAR_REPLICAS)

#define BTREE_INSERT_BUCKET_INVALIDATE	(1 << __BTREE_INSERT_BUCKET_INVALIDATE)

/* Don't block on allocation failure (for new btree nodes): */
#define BTREE_INSERT_NOWAIT		(1 << __BTREE_INSERT_NOWAIT)
#define BTREE_INSERT_GC_LOCK_HELD	(1 << __BTREE_INSERT_GC_LOCK_HELD)

#define BCH_HASH_SET_MUST_CREATE	(1 << __BCH_HASH_SET_MUST_CREATE)
#define BCH_HASH_SET_MUST_REPLACE	(1 << __BCH_HASH_SET_MUST_REPLACE)

int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
108 109

int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
110
		     struct disk_reservation *, u64 *, int flags);
111

112 113
int bch2_btree_delete_at_range(struct btree_trans *, struct btree_iter *,
			       struct bpos, u64 *);
114
int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
115
			    struct bpos, struct bpos, u64 *);
116 117 118 119

int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
			    __le64, unsigned);
int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
120
			       struct btree *, struct bkey_i_btree_ptr *);
121 122 123 124 125

int bch2_trans_commit(struct btree_trans *,
		      struct disk_reservation *,
		      u64 *, unsigned);

126 127 128
struct btree_insert_entry *bch2_trans_update(struct btree_trans *,
					     struct btree_insert_entry);

/*
 * Run _do inside a btree transaction and commit, retrying from the top
 * (bch2_trans_begin) for as long as _do or the commit returns -EINTR.
 * Evaluates to the final return code.
 */
#define bch2_trans_do(_c, _journal_seq, _flags, _do)			\
({									\
	struct btree_trans trans;					\
	int _ret;							\
									\
	bch2_trans_init(&trans, (_c), 0, 0);				\
									\
	do {								\
		bch2_trans_begin(&trans);				\
									\
		_ret = (_do) ?:	bch2_trans_commit(&trans, NULL,		\
					(_journal_seq), (_flags));	\
	} while (_ret == -EINTR);					\
									\
	bch2_trans_exit(&trans);					\
	_ret;								\
})

/*
 * We sort transaction entries so that if multiple iterators point to the same
 * leaf node they'll be adjacent:
 */
static inline bool same_leaf_as_prev(struct btree_trans *trans,
				     struct btree_insert_entry *i)
{
	return i != trans->updates &&
		!i->deferred &&
		i[0].iter->l[0].b == i[-1].iter->l[0].b;
}

/*
 * Advance _i past entries rejected by _filter; evaluates to true while _i
 * still points at a valid entry in the transaction's update array.
 *
 * Note: _trans is now fully parenthesized ((_trans)->nr_updates) — the
 * original (_trans->nr_updates) would mis-bind for non-trivial arguments.
 */
#define __trans_next_update(_trans, _i, _filter)			\
({									\
	while ((_i) < (_trans)->updates + (_trans)->nr_updates && !(_filter))\
		(_i)++;							\
									\
	(_i) < (_trans)->updates + (_trans)->nr_updates;		\
})

#define __trans_for_each_update(_trans, _i, _filter)			\
	for ((_i) = (_trans)->updates;					\
	     __trans_next_update(_trans, _i, _filter);			\
	     (_i)++)

/* Every update in the transaction: */
#define trans_for_each_update(trans, i)					\
	__trans_for_each_update(trans, i, true)

/* Only updates that carry a btree iterator (skips deferred entries): */
#define trans_for_each_update_iter(trans, i)				\
	__trans_for_each_update(trans, i, !(i)->deferred)

/* Only the first iterator-backed update for each distinct leaf node: */
#define trans_for_each_update_leaf(trans, i)				\
	__trans_for_each_update(trans, i, !(i)->deferred &&		\
			       !same_leaf_as_prev(trans, i))

#endif /* _BCACHEFS_BTREE_UPDATE_H */