// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_update.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_locking.h"
#include "buckets.h"
#include "debug.h"
#include "errcode.h"
#include "error.h"
#include "extents.h"
#include "keylist.h"
#include "snapshot.h"
#include "trace.h"

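/*
 * Sort order for pending updates within a transaction: by btree, then
 * cached vs. uncached, then level (descending, so leaf updates sort
 * last), then key position.
 */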
static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
					 const struct btree_insert_entry *r)
{
	return   cmp_int(l->btree_id,	r->btree_id) ?:
		 cmp_int(l->cached,	r->cached) ?:
		 -cmp_int(l->level,	r->level) ?:
		 bpos_cmp(l->k->k.p,	r->k->k.p);
}

static int __must_check
bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
			  struct bkey_i *, enum btree_iter_update_trigger_flags,
			  unsigned long ip);

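/*
 * Try to merge the existing key @k with the front of the key being
 * inserted: on success the existing key is deleted and *insert is pointed
 * at the merged key. Skipped during journal replay, and when either
 * position has snapshot overwrites (merging would change what descendent
 * snapshots see).
 */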
static noinline int extent_front_merge(struct btree_trans *trans,
				       struct btree_iter *iter,
				       struct bkey_s_c k,
				       struct bkey_i **insert,
				       enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *update;
	int ret;

	if (unlikely(trans->journal_replay_not_finished))
		return 0;

	update = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(update);
	if (ret)
		return ret;

	if (!bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(*insert)))
		return 0;

	ret =   bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p) ?:
		bch2_key_has_snapshot_overwrites(trans, iter->btree_id, (*insert)->k.p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	ret = bch2_btree_delete_at(trans, iter, flags);
	if (ret)
		return ret;

	*insert = update;
	return 0;
}

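/*
 * Try to merge the key being inserted with the existing key @k that
 * follows it; on success @insert is extended in place. As with front
 * merging, this is skipped during journal replay and when snapshot
 * overwrites would make the merge unsafe.
 */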
static noinline int extent_back_merge(struct btree_trans *trans,
				      struct btree_iter *iter,
				      struct bkey_i *insert,
				      struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	int ret;

	if (unlikely(trans->journal_replay_not_finished))
		return 0;

	ret =   bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:
		bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	bch2_bkey_merge(c, bkey_i_to_s(insert), k);
	return 0;
}

/*
 * When deleting, check if we need to emit a whiteout (because we're overwriting
 * something in an ancestor snapshot)
 */
static int need_whiteout_for_snapshot(struct btree_trans *trans,
				      enum btree_id btree_id, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 snapshot = pos.snapshot;
	int ret;

	if (!bch2_snapshot_parent(trans->c, pos.snapshot))
		return 0;

	pos.snapshot++;

	for_each_btree_key_norestart(trans, iter, btree_id, pos,
			   BTREE_ITER_all_snapshots|
			   BTREE_ITER_nopreserve, k, ret) {
		if (!bkey_eq(k.k->p, pos))
			break;

		if (bch2_snapshot_is_ancestor(trans->c, snapshot,
					      k.k->p.snapshot)) {
			ret = !bkey_whiteout(k.k);
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

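/*
 * For use when splitting extents in existing snapshots:
 *
 * Walk the keys at @old_pos in descendent snapshots of @old_pos.snapshot;
 * for every descendent snapshot in which @old_pos is overwritten (i.e.
 * not visible there), emit a whiteout at @new_pos so the repositioned
 * fragment doesn't become visible in that snapshot.
 */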
int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
				   enum btree_id id,
				   struct bpos old_pos,
				   struct bpos new_pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter old_iter, new_iter = { NULL };
	struct bkey_s_c old_k, new_k;
	snapshot_id_list s;
	struct bkey_i *update;
	int ret = 0;

	if (!bch2_snapshot_has_children(c, old_pos.snapshot))
		return 0;

	darray_init(&s);

	bch2_trans_iter_init(trans, &old_iter, id, old_pos,
			     BTREE_ITER_not_extents|
			     BTREE_ITER_all_snapshots);
	while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
	       !(ret = bkey_err(old_k)) &&
	       bkey_eq(old_pos, old_k.k->p)) {
		struct bpos whiteout_pos =
			SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot);

		if (!bch2_snapshot_is_ancestor(c, old_k.k->p.snapshot, old_pos.snapshot) ||
		    snapshot_list_has_ancestor(c, &s, old_k.k->p.snapshot))
			continue;

		new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
					   BTREE_ITER_not_extents|
					   BTREE_ITER_intent);
		ret = bkey_err(new_k);
		if (ret)
			break;

		if (new_k.k->type == KEY_TYPE_deleted) {
			update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
			ret = PTR_ERR_OR_ZERO(update);
			if (ret)
				break;

			bkey_init(&update->k);
			update->k.p		= whiteout_pos;
			update->k.type		= KEY_TYPE_whiteout;

			ret = bch2_trans_update(trans, &new_iter, update,
						BTREE_UPDATE_internal_snapshot_node);
		}
		bch2_trans_iter_exit(trans, &new_iter);

		ret = snapshot_list_add(c, &s, old_k.k->p.snapshot);
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &new_iter);
	bch2_trans_iter_exit(trans, &old_iter);
	darray_exit(&s);

	return ret;
}

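/*
 * Handle one existing extent @old being overwritten by @new: emit updates
 * for the surviving front, middle (different snapshot) and back fragments,
 * plus a whiteout or deletion covering the overwritten range. If a
 * compressed extent is being split, extra disk reservation is noted so the
 * commit path can account for the temporarily duplicated compressed data.
 */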
int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
				       struct btree_iter *iter,
				       enum btree_iter_update_trigger_flags flags,
				       struct bkey_s_c old,
				       struct bkey_s_c new)
{
	enum btree_id btree_id = iter->btree_id;
	struct bkey_i *update;
	struct bpos new_start = bkey_start_pos(new.k);
	unsigned front_split = bkey_lt(bkey_start_pos(old.k), new_start);
	unsigned back_split  = bkey_gt(old.k->p, new.k->p);
	unsigned middle_split = (front_split || back_split) &&
		old.k->p.snapshot != new.k->p.snapshot;
	unsigned nr_splits = front_split + back_split + middle_split;
	int ret = 0, compressed_sectors;

	/*
	 * If we're going to be splitting a compressed extent, note it
	 * so that __bch2_trans_commit() can increase our disk
	 * reservation:
	 */
	if (nr_splits > 1 &&
	    (compressed_sectors = bch2_bkey_sectors_compressed(old)))
		trans->extra_disk_res += compressed_sectors * (nr_splits - 1);

	if (front_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_back(new_start, update);

		ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
					old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_internal_snapshot_node|flags);
		if (ret)
			return ret;
	}

	/* If we're overwriting in a different snapshot - middle split: */
	if (middle_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new_start, update);
		bch2_cut_back(new.k->p, update);

		ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
					old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					  BTREE_UPDATE_internal_snapshot_node|flags);
		if (ret)
			return ret;
	}

	if (bkey_le(old.k->p, new.k->p)) {
		update = bch2_trans_kmalloc(trans, sizeof(*update));
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bkey_init(&update->k);
		update->k.p = old.k->p;
		update->k.p.snapshot = new.k->p.snapshot;

		if (new.k->p.snapshot != old.k->p.snapshot) {
			update->k.type = KEY_TYPE_whiteout;
		} else if (btree_type_has_snapshots(btree_id)) {
			ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
			if (ret < 0)
				return ret;
			if (ret)
				update->k.type = KEY_TYPE_whiteout;
		}

		ret = bch2_btree_insert_nonextent(trans, btree_id, update,
					  BTREE_UPDATE_internal_snapshot_node|flags);
		if (ret)
			return ret;
	}

	if (back_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new.k->p, update);

		ret = bch2_trans_update_by_path(trans, iter->path, update,
					  BTREE_UPDATE_internal_snapshot_node|
					  flags, _RET_IP_);
		if (ret)
			return ret;
	}

	return 0;
}

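/*
 * Extent variant of bch2_trans_update(): walk all existing extents
 * overlapping the insert, splitting and whiting out as needed via
 * bch2_trans_update_extent_overwrite(), and opportunistically merge with
 * the neighbouring extents at either end before the final insert.
 */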
static int bch2_trans_update_extent(struct btree_trans *trans,
				    struct btree_iter *orig_iter,
				    struct bkey_i *insert,
				    enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	enum btree_id btree_id = orig_iter->btree_id;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
			     BTREE_ITER_intent|
			     BTREE_ITER_with_updates|
			     BTREE_ITER_not_extents);
	k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
	if ((ret = bkey_err(k)))
		goto err;
	if (!k.k)
		goto out;

	if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
		if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
			ret = extent_front_merge(trans, &iter, k, &insert, flags);
			if (ret)
				goto err;
		}

		goto next;
	}

	while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
		bool done = bkey_lt(insert->k.p, k.k->p);

		ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert));
		if (ret)
			goto err;

		if (done)
			goto out;
next:
		bch2_btree_iter_advance(&iter);
		k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
		if ((ret = bkey_err(k)))
			goto err;
		if (!k.k)
			goto out;
	}

	if (bch2_bkey_maybe_mergable(&insert->k, k.k)) {
		ret = extent_back_merge(trans, &iter, insert, k);
		if (ret)
			goto err;
	}
out:
	if (!bkey_deleted(&insert->k))
		ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags);
err:
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

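/*
 * A key is being added to the key cache that doesn't yet exist in the
 * btree: for cache coherency the key must exist in the btree as well, so
 * emit a matching update directly to the btree and mark the key cache
 * update as already flushed, so triggers only run once.
 */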
static noinline int flush_new_cached_update(struct btree_trans *trans,
					    struct btree_insert_entry *i,
					    enum btree_iter_update_trigger_flags flags,
					    unsigned long ip)
{
	struct bkey k;
	int ret;

	btree_path_idx_t path_idx =
		bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
			      BTREE_ITER_intent, _THIS_IP_);
	ret = bch2_btree_path_traverse(trans, path_idx, 0);
	if (ret)
		goto out;

	struct btree_path *btree_path = trans->paths + path_idx;

	/*
	 * The old key in the insert entry might actually refer to an existing
	 * key in the btree that has been deleted from cache and not yet
	 * flushed. Check for this and skip the flush so we don't run triggers
	 * against a stale key.
	 */
	bch2_btree_path_peek_slot_exact(btree_path, &k);
	if (!bkey_deleted(&k))
		goto out;

	i->key_cache_already_flushed = true;
	i->flags |= BTREE_TRIGGER_norun;

	btree_path_set_should_be_locked(trans, btree_path);
	ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
out:
	bch2_path_put(trans, path_idx, true);
	return ret;
}

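/*
 * Core update path: record @k as a pending update against @path_idx in the
 * transaction's sorted update list, either replacing an existing update at
 * the same position or inserting a new entry, and snapshot the old key so
 * triggers can see what is being overwritten.
 */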
static int __must_check
bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
			  struct bkey_i *k, enum btree_iter_update_trigger_flags flags,
			  unsigned long ip)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i, n;
	int cmp;

	struct btree_path *path = trans->paths + path_idx;
	EBUG_ON(!path->should_be_locked);
	EBUG_ON(trans->nr_updates >= trans->nr_paths);
	EBUG_ON(!bpos_eq(k->k.p, path->pos));

	n = (struct btree_insert_entry) {
		.flags		= flags,
		.bkey_type	= __btree_node_type(path->level, path->btree_id),
		.btree_id	= path->btree_id,
		.level		= path->level,
		.cached		= path->cached,
		.path		= path_idx,
		.k		= k,
		.ip_allocated	= ip,
	};

#ifdef CONFIG_BCACHEFS_DEBUG
	trans_for_each_update(trans, i)
		BUG_ON(i != trans->updates &&
		       btree_insert_entry_cmp(i - 1, i) >= 0);
#endif

	/*
	 * Pending updates are kept sorted: first, find position of new update,
	 * then delete/trim any updates the new update overwrites:
	 */
	for (i = trans->updates; i < trans->updates + trans->nr_updates; i++) {
		cmp = btree_insert_entry_cmp(&n, i);
		if (cmp <= 0)
			break;
	}

	/* Check bounds first: cmp is uninitialized if there were no updates */
	bool overwrite = i < trans->updates + trans->nr_updates && !cmp;

	if (overwrite) {
		EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);

		bch2_path_put(trans, i->path, true);
		i->flags	= n.flags;
		i->cached	= n.cached;
		i->k		= n.k;
		i->path		= n.path;
		i->ip_allocated	= n.ip_allocated;
	} else {
		array_insert_item(trans->updates, trans->nr_updates,
				  i - trans->updates, n);

		i->old_v = bch2_btree_path_peek_slot_exact(path, &i->old_k).v;
		i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;

		if (unlikely(trans->journal_replay_not_finished)) {
			struct bkey_i *j_k =
				bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p);

			if (j_k) {
				i->old_k = j_k->k;
				i->old_v = &j_k->v;
			}
		}
	}

	__btree_path_get(trans, trans->paths + i->path, true);

	trace_update_by_path(trans, path, i, overwrite);

	/*
	 * If a key is present in the key cache, it must also exist in the
	 * btree - this is necessary for cache coherency. When iterating over
	 * a btree that's cached in the key cache, the btree iter code checks
	 * the key cache - but the key has to exist in the btree for that to
	 * work:
	 */
	if (path->cached && !i->old_btree_u64s)
		return flush_new_cached_update(trans, i, flags, ip);

	return 0;
}

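/*
 * Updates to btrees that use the key cache are redirected here: make sure
 * @iter has a traversed and locked key cache path at the update position,
 * allocating one if needed. If the existing cached key is already dirty,
 * we raced with key cache reclaim and the transaction is restarted.
 */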
static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
						    struct btree_iter *iter,
						    struct btree_path *path)
{
	struct btree_path *key_cache_path = btree_iter_key_cache_path(trans, iter);

	if (!key_cache_path ||
	    !key_cache_path->should_be_locked ||
	    !bpos_eq(key_cache_path->pos, iter->pos)) {
		struct bkey_cached *ck;
		int ret;

		if (!iter->key_cache_path)
			iter->key_cache_path =
				bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
					      BTREE_ITER_intent|
					      BTREE_ITER_cached, _THIS_IP_);

		iter->key_cache_path =
			bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
						iter->flags & BTREE_ITER_intent,
						_THIS_IP_);

		ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_cached);
		if (unlikely(ret))
			return ret;

		ck = (void *) trans->paths[iter->key_cache_path].l[0].b;

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
		}

		btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
	}

	return 0;
}

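/*
 * Main entry point for queueing an update within a transaction: dispatches
 * to the extent path for extent iterators, converts deletions to whiteouts
 * when an ancestor snapshot would otherwise show through, and redirects
 * updates to cached btrees into the key cache.
 */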
int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
				   struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
	btree_path_idx_t path_idx = iter->update_path ?: iter->path;
	int ret;

	if (iter->flags & BTREE_ITER_is_extents)
		return bch2_trans_update_extent(trans, iter, k, flags);

	if (bkey_deleted(&k->k) &&
	    !(flags & BTREE_UPDATE_key_cache_reclaim) &&
	    (iter->flags & BTREE_ITER_filter_snapshots)) {
		ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
		if (unlikely(ret < 0))
			return ret;

		if (ret)
			k->k.type = KEY_TYPE_whiteout;
	}

	/*
	 * Ensure that updates to cached btrees go to the key cache:
	 */
	struct btree_path *path = trans->paths + path_idx;
	if (!(flags & BTREE_UPDATE_key_cache_reclaim) &&
	    !path->cached &&
	    !path->level &&
	    btree_id_cached(trans->c, path->btree_id)) {
		ret = bch2_trans_update_get_key_cache(trans, iter, path);
		if (ret)
			return ret;

		path_idx = iter->key_cache_path;
	}

	return bch2_trans_update_by_path(trans, path_idx, k, flags, _RET_IP_);
}

int bch2_btree_insert_clone_trans(struct btree_trans *trans,
				  enum btree_id btree,
				  struct bkey_i *k)
{
	struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(&k->k));
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	bkey_copy(n, k);
	return bch2_btree_insert_trans(trans, btree, n, 0);
}

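/*
 * Reserve @u64s worth of space in the transaction's buffer of pending
 * journal entries, reallocating and growing it geometrically as needed,
 * and return a pointer to the newly reserved entry.
 */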
struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
{
	unsigned new_top = trans->journal_entries_u64s + u64s;
	unsigned old_size = trans->journal_entries_size;

	if (new_top > trans->journal_entries_size) {
		trans->journal_entries_size = roundup_pow_of_two(new_top);

		btree_trans_stats(trans)->journal_entries_size = trans->journal_entries_size;
	}

	struct jset_entry *n =
		bch2_trans_kmalloc_nomemzero(trans,
				trans->journal_entries_size * sizeof(u64));
	if (IS_ERR(n))
		return ERR_CAST(n);

	if (trans->journal_entries)
		memcpy(n, trans->journal_entries, old_size * sizeof(u64));
	trans->journal_entries = n;

	struct jset_entry *e = btree_trans_journal_entries_top(trans);
	trans->journal_entries_u64s = new_top;
	return e;
}

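/*
 * Position @iter at the empty slot immediately after the last key in
 * @btree, for appending; fails with -BCH_ERR_ENOSPC_btree_slot if that
 * slot is past @end. On success the iterator is left pointing at the slot
 * and must be exited by the caller.
 */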
int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
			     enum btree_id btree, struct bpos end)
{
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_intent);
	k = bch2_btree_iter_prev(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	bch2_btree_iter_advance(iter);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	BUG_ON(k.k->type != KEY_TYPE_deleted);

	if (bkey_gt(k.k->p, end)) {
		ret = -BCH_ERR_ENOSPC_btree_slot;
		goto err;
	}

	return 0;
err:
	bch2_trans_iter_exit(trans, iter);
	return ret;
}

void bch2_trans_commit_hook(struct btree_trans *trans,
			    struct btree_trans_commit_hook *h)
{
	h->next = trans->hooks;
	trans->hooks = h;
}

int bch2_btree_insert_nonextent(struct btree_trans *trans,
				enum btree_id btree, struct bkey_i *k,
				enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, k->k.p,
			     BTREE_ITER_cached|
			     BTREE_ITER_not_extents|
			     BTREE_ITER_intent);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
			    struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
			     BTREE_ITER_intent|flags);
	int ret = bch2_btree_iter_traverse(&iter) ?:
		  bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/**
 * bch2_btree_insert - insert a key into a btree, in its own transaction
 * @c:			pointer to struct bch_fs
 * @id:			btree to insert into
 * @k:			key to insert
 * @disk_res:		must be non-NULL whenever inserting or potentially
 *			splitting data extents
 * @flags:		transaction commit flags
 * @iter_flags:		btree iter update trigger flags
 *
 * Returns:		0 on success, error code on failure
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k,
		      struct disk_reservation *disk_res, int flags,
		      enum btree_iter_update_trigger_flags iter_flags)
{
	return bch2_trans_commit_do(c, disk_res, NULL, flags,
			     bch2_btree_insert_trans(trans, id, k, iter_flags));
}

int bch2_btree_delete_extent_at(struct btree_trans *trans, struct btree_iter *iter,
				unsigned len, unsigned update_flags)
{
	struct bkey_i *k;

	k = bch2_trans_kmalloc(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.p = iter->pos;
	bch2_key_resize(&k->k, len);
	return bch2_trans_update(trans, iter, k, update_flags);
}

int bch2_btree_delete_at(struct btree_trans *trans,
			 struct btree_iter *iter, unsigned update_flags)
{
	return bch2_btree_delete_extent_at(trans, iter, 0, update_flags);
}

int bch2_btree_delete(struct btree_trans *trans,
		      enum btree_id btree, struct bpos pos,
		      unsigned update_flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, pos,
			     BTREE_ITER_cached|
			     BTREE_ITER_intent);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(trans, &iter, update_flags);
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}

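/*
 * Delete every key in [start, end), committing each deletion as its own
 * transaction; extents overlapping the range boundaries are trimmed
 * rather than deleted whole.
 */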
int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
				  struct bpos start, struct bpos end,
				  unsigned update_flags,
				  u64 *journal_seq)
{
	u32 restart_count = trans->restart_count;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent);
	while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(trans->c, 0);
		struct bkey_i delete;

		ret = bkey_err(k);
		if (ret)
			goto err;

		bkey_init(&delete.k);

		/*
		 * This could probably be more efficient for extents:
		 */

		/*
		 * For extents, iter.pos won't necessarily be the same as
		 * bkey_start_pos(k.k) (for non extents they always will be the
		 * same). It's important that we delete starting from iter.pos
		 * because the range we want to delete could start in the middle
		 * of k.
		 *
		 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
		 * bkey_start_pos(k.k)).
		 */
		delete.k.p = iter.pos;

		if (iter.flags & BTREE_ITER_is_extents)
			bch2_key_resize(&delete.k,
					bpos_min(end, k.k->p).offset -
					iter.pos.offset);

		ret   = bch2_trans_update(trans, &iter, &delete, update_flags) ?:
			bch2_trans_commit(trans, &disk_res, journal_seq,
					  BCH_TRANS_COMMIT_no_enospc);
		bch2_disk_reservation_put(trans->c, &disk_res);
err:
		/*
		 * the bch2_trans_begin() call is in a weird place because we
		 * need to call it after every transaction commit, to avoid path
		 * overflow, but don't want to call it if the delete operation
		 * is a no-op and we have no work to do:
		 */
		bch2_trans_begin(trans);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret ?: trans_was_restarted(trans, restart_count);
}

/*
 * bch2_btree_delete_range - delete everything within a given range
 *
 * Range is a half open interval - [start, end)
 */
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
			    struct bpos start, struct bpos end,
			    unsigned update_flags,
			    u64 *journal_seq)
{
	int ret = bch2_trans_run(c,
			bch2_btree_delete_range_trans(trans, id, start, end,
						      update_flags, journal_seq));
	if (ret == -BCH_ERR_transaction_restart_nested)
		ret = 0;
	return ret;
}

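/*
 * Set or clear a single bitset key (KEY_TYPE_set vs. KEY_TYPE_deleted) at
 * @pos - for btrees (e.g. the need_discard btree) that only record whether
 * a position is present. The _buffered variant below goes through the
 * btree write buffer instead of a btree iterator.
 */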
int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
		       struct bpos pos, bool set)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
	int ret = PTR_ERR_OR_ZERO(k);
	if (ret)
		return ret;

	bkey_init(&k->k);
	k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	k->k.p = pos;

	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);

	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree,
				struct bpos pos, bool set)
{
	struct bkey_i k;

	bkey_init(&k.k);
	k.k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	k.k.p = pos;

	return bch2_trans_update_buffered(trans, btree, &k);
}

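/*
 * Append a free-form log message to the journal as a BCH_JSET_ENTRY_log
 * entry, via the current transaction's journal entry buffer.
 */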
static int __bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf, unsigned u64s)
{
	struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
	int ret = PTR_ERR_OR_ZERO(e);
	if (ret)
		return ret;

	struct jset_entry_log *l = container_of(e, struct jset_entry_log, entry);
	journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 1, u64s);
	memcpy(l->d, buf->buf, buf->pos);
	return 0;
}

__printf(3, 0)
static int
__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
		  va_list args)
{
	struct printbuf buf = PRINTBUF;
	prt_vprintf(&buf, fmt, args);

	unsigned u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));
	prt_chars(&buf, '\0', u64s * sizeof(u64) - buf.pos);

	int ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
	if (ret)
		goto err;

	if (!test_bit(JOURNAL_running, &c->journal.flags)) {
		ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
		if (ret)
			goto err;

		struct jset_entry_log *l = (void *) &darray_top(c->journal.early_journal_entries);
		journal_entry_init(&l->entry, BCH_JSET_ENTRY_log, 0, 1, u64s);
		memcpy(l->d, buf.buf, buf.pos);
		c->journal.early_journal_entries.nr += jset_u64s(u64s);
	} else {
		ret = bch2_trans_commit_do(c, NULL, NULL,
			BCH_TRANS_COMMIT_lazy_rw|commit_flags,
			__bch2_trans_log_msg(trans, &buf, u64s));
	}
err:
	printbuf_exit(&buf);
	return ret;
}

__printf(2, 3)
int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, 0, fmt, args);
	va_end(args);
	return ret;
}

/*
 * Use for logging messages during recovery to enable reserved space and avoid
 * blocking.
 */
__printf(2, 3)
int bch2_journal_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, BCH_WATERMARK_reclaim, fmt, args);
	va_end(args);
	return ret;
}