// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-rsv.h"
#include "space-info.h"
#include "transaction.h"
#include "block-group.h"
#include "fs.h"
#include "accessors.h"

/*
 * HOW DO BLOCK RESERVES WORK
 *
 *   Think of block_rsv's as buckets for logically grouped metadata
 *   reservations.  Each block_rsv has a ->size and a ->reserved.  ->size is
 *   how large we want our block rsv to be, while ->reserved is how much space
 *   is currently reserved for this block reserve.
 *
 *   ->failfast exists for the truncate case, and is described below.
 *
 * NORMAL OPERATION
 *
 *   -> Reserve
 *     Entrance: btrfs_block_rsv_add, btrfs_block_rsv_refill
 *
 *     We call into btrfs_reserve_metadata_bytes() with our bytes, which are
 *     accounted for in space_info->bytes_may_use, and then add the bytes to
 *     ->reserved, and to ->size in the case of btrfs_block_rsv_add.
 *
 *     ->size is an over-estimation of how much we may use for a particular
 *     operation.
 *
 *   -> Use
 *     Entrance: btrfs_use_block_rsv
 *
 *     When we do a btrfs_alloc_tree_block() we call into btrfs_use_block_rsv()
 *     to determine the appropriate block_rsv to use, and then verify that
 *     ->reserved has enough space for our tree block allocation.  Once
 *     successful we subtract fs_info->nodesize from ->reserved.
 *
 *   -> Finish
 *     Entrance: btrfs_block_rsv_release
 *
 *     Once we are finished with our operation, we subtract our individual
 *     reservation from ->size, then subtract ->size from ->reserved and free
 *     up the excess if there is any.
 *
 *     There is some logic here to refill the delayed refs rsv or the global
 *     rsv as needed, otherwise the excess is subtracted from
 *     space_info->bytes_may_use.
 *
 * TYPES OF BLOCK RESERVES
 *
 * BLOCK_RSV_TRANS, BLOCK_RSV_DELOPS, BLOCK_RSV_CHUNK
 *   These behave normally, as described above, just within the confines of
 *   the lifetime of their particular operation (the whole transaction handle
 *   lifetime for BLOCK_RSV_TRANS, for example).
 *
 * BLOCK_RSV_GLOBAL
 *   It is impossible to properly account for all the space that may be
 *   required to make our extent tree updates.  This block reserve acts as an
 *   overflow buffer in case our delayed refs reserve does not reserve enough
 *   space to update the extent tree.
 *
 *   We can steal from this in some cases as well, notably on evict() or
 *   truncate() in order to help users recover from ENOSPC conditions.
 *
 * BLOCK_RSV_DELALLOC
 *   The individual item sizes are determined by the per-inode size
 *   calculations, which are described with the delalloc code.  This is pretty
 *   straightforward, it's just that the calculation of ->size encodes a lot
 *   of different items, and thus it gets used when updating inodes, inserting
 *   file extents, and inserting checksums.
 *
 * BLOCK_RSV_DELREFS
 *   We keep a running tally of how many delayed refs we have on the system.
 *   We assume each one of these delayed refs is going to use a full
 *   reservation.  We use the transaction items and pre-reserve space for
 *   every operation, and use this reservation to refill any gap between
 *   ->size and ->reserved that may exist.
 *
 *   From there it's straightforward: removing a delayed ref means we remove
 *   its count from ->size and free up reservations as necessary.  Since this
 *   is the most dynamic block reserve in the system, we will try to refill
 *   this block reserve first with any excess returned by any other block
 *   reserve.
 *
 * BLOCK_RSV_EMPTY
 *   This is the fallback block reserve that makes us try to reserve space if
 *   we don't have a specific bucket for this allocation.  It is mostly used
 *   for updating the device tree and such, since that is a separate pool
 *   we're content to just reserve space from the space_info on demand.
 *
 * BLOCK_RSV_TEMP
 *   This is used by things like truncate and iput.  We will temporarily
 *   allocate a block reserve, set it to some size, and then truncate bytes
 *   until we have no space left.  With ->failfast set we'll simply return
 *   ENOSPC from btrfs_use_block_rsv() to signal that we need to unwind and
 *   try to make a new reservation.  This is because these operations are
 *   unbounded, so we want to do as much work as we can, and then back off
 *   and re-reserve.
 */
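
/*
 * A minimal sketch of the normal operation described above, assuming a
 * caller that already holds a valid fs_info (the 1M size is made up and the
 * snippet is illustrative only, it is not code that runs anywhere in btrfs):
 *
 *	struct btrfs_block_rsv rsv;
 *	int ret;
 *
 *	btrfs_init_metadata_block_rsv(fs_info, &rsv, BTRFS_BLOCK_RSV_TEMP);
 *
 *	Reserve:  ret = btrfs_block_rsv_add(fs_info, &rsv, SZ_1M,
 *					    BTRFS_RESERVE_FLUSH_ALL);
 *
 *	Use:      btrfs_alloc_tree_block() ends up in btrfs_use_block_rsv(),
 *		  which subtracts fs_info->nodesize from rsv.reserved.
 *
 *	Finish:   btrfs_block_rsv_release(fs_info, &rsv, (u64)-1, NULL);
 */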

static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes,
				    u64 *qgroup_to_release_ret)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 qgroup_to_release = 0;
	u64 ret;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1) {
		num_bytes = block_rsv->size;
		qgroup_to_release = block_rsv->qgroup_rsv_size;
	}
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = true;
	} else {
		num_bytes = 0;
	}
	if (qgroup_to_release_ret &&
	    block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
		qgroup_to_release = block_rsv->qgroup_rsv_reserved -
				    block_rsv->qgroup_rsv_size;
		block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
	} else {
		qgroup_to_release = 0;
	}
	spin_unlock(&block_rsv->lock);

	ret = num_bytes;
	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = true;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			btrfs_space_info_free_bytes_may_use(fs_info,
							    space_info,
							    num_bytes);
	}
	if (qgroup_to_release_ret)
		*qgroup_to_release_ret = qgroup_to_release;
	return ret;
}
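
/*
 * Worked example of the release math above, with made-up numbers: if
 * block_rsv has ->size == 4M and ->reserved == 4M and the caller releases
 * num_bytes == 1M, ->size drops to 3M, so the 1M by which ->reserved now
 * exceeds ->size is clipped off, spilled into @dest up to
 * dest->size - dest->reserved, and anything that does not fit there is
 * handed back to space_info->bytes_may_use.  Had the rsv been under-filled
 * (say ->reserved == 2M), num_bytes would come out 0 and nothing would be
 * freed.
 */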

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
			    struct btrfs_block_rsv *dst, u64 num_bytes,
			    bool update_size)
{
	int ret;

	ret = btrfs_block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	btrfs_block_rsv_add_bytes(dst, num_bytes, update_size);
	return 0;
}
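
/*
 * btrfs_block_rsv_migrate() moves bytes that are already reserved from one
 * bucket to another; nothing is reserved from or returned to the underlying
 * space_info, which is why a failure leaves @dst untouched.  A hypothetical
 * sketch (dst_rsv and bytes are made up; this mirrors how the delayed inode
 * code borrows from a transaction's reservation):
 *
 *	ret = btrfs_block_rsv_migrate(trans->block_rsv, dst_rsv, bytes, true);
 *	if (ret == -ENOSPC)
 *		the source rsv held fewer than bytes, reserve fresh space
 */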

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type)
{
	btrfs_init_block_rsv(rsv, type);
	rsv->space_info = btrfs_find_space_info(fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type)
{
	struct btrfs_block_rsv *block_rsv;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
	kfree(rsv);
}
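
/*
 * Together, the two helpers above give the BLOCK_RSV_TEMP pattern described
 * at the top of this file.  A rough sketch, with a hypothetical min_size,
 * a hypothetical flush mode, and error handling trimmed:
 *
 *	struct btrfs_block_rsv *rsv;
 *
 *	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
 *	if (!rsv)
 *		return -ENOMEM;
 *	rsv->failfast = true;
 *	ret = btrfs_block_rsv_add(fs_info, rsv, min_size,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	do bounded work until btrfs_use_block_rsv() returns ENOSPC, then
 *	unwind and re-reserve
 *	btrfs_free_block_rsv(fs_info, rsv);	(releases any remainder too)
 */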

int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   num_bytes, flush);
	if (!ret)
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	num_bytes = mult_perc(block_rsv->size, min_percent);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}
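
/*
 * Example of the percentage check above: with min_percent == 50, a block
 * rsv whose ->size is SZ_1M keeps passing only while ->reserved is at
 * least 512K; mult_perc() computes size * min_percent / 100.
 */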

int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   num_bytes, flush);
	if (!ret) {
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
		return 0;
	}

	return ret;
}
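
/*
 * Note the contrast with btrfs_block_rsv_add(): the refill above only tops
 * ->reserved up to num_bytes and never grows ->size.  For example, with
 * ->reserved at 64K and num_bytes == 256K, only the missing 192K is
 * reserved from the space_info; if ->reserved already covers num_bytes the
 * call is a no-op that returns 0.
 */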

u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			    u64 *qgroup_to_release)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *target = NULL;

	/*
	 * If we are the delayed ops block reserve then push to the global
	 * rsv, otherwise dump into the global delayed refs reserve if it is
	 * not full.
	 */
	if (block_rsv->type == BTRFS_BLOCK_RSV_DELOPS)
		target = global_rsv;
	else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv))
		target = delayed_rsv;

	if (target && block_rsv->space_info != target->space_info)
		target = NULL;

	return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes,
				       qgroup_to_release);
}
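
/*
 * For example, releasing unused bytes from the transaction rsv while the
 * delayed refs rsv is under-filled tops up the delayed refs rsv first and
 * only returns the remainder to space_info->bytes_may_use, whereas a
 * DELOPS release prefers the global rsv, per the target selection above.
 */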

int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = false;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = true;
	spin_unlock(&block_rsv->lock);
}

void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	struct btrfs_root *root, *tmp;
	u64 num_bytes = btrfs_root_used(&fs_info->tree_root->root_item);
	unsigned int min_items = 1;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree.  If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 *
	 * We also are going to need to modify the minimum of the tree root and
	 * any global roots we could touch.
	 */
	read_lock(&fs_info->global_root_lock);
	rbtree_postorder_for_each_entry_safe(root, tmp, &fs_info->global_root_tree,
					     rb_node) {
		if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID ||
		    btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID ||
		    btrfs_root_id(root) == BTRFS_FREE_SPACE_TREE_OBJECTID) {
			num_bytes += btrfs_root_used(&root->root_item);
			min_items++;
		}
	}
	read_unlock(&fs_info->global_root_lock);

	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
		num_bytes += btrfs_root_used(&fs_info->block_group_root->root_item);
		min_items++;
	}

	if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
		num_bytes += btrfs_root_used(&fs_info->stripe_root->root_item);
		min_items++;
	}

	/*
	 * We also want to reserve enough space so we can do the fallback
	 * global reserve for an unlink, which is an additional
	 * BTRFS_UNLINK_METADATA_UNITS items.
	 *
	 * In addition, we need space for the delayed ref updates from the
	 * unlink, so add BTRFS_UNLINK_METADATA_UNITS units for delayed refs,
	 * one for each unlink metadata item.
	 */
	min_items += BTRFS_UNLINK_METADATA_UNITS;

	num_bytes = max_t(u64, num_bytes,
			  btrfs_calc_insert_metadata_size(fs_info, min_items) +
			  btrfs_calc_delayed_ref_bytes(fs_info,
					       BTRFS_UNLINK_METADATA_UNITS));

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      num_bytes);
		block_rsv->reserved = block_rsv->size;
	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      -num_bytes);
		block_rsv->reserved = block_rsv->size;
		btrfs_try_granting_tickets(fs_info, sinfo);
	}

	block_rsv->full = (block_rsv->reserved == block_rsv->size);

	if (block_rsv->size >= sinfo->total_bytes)
		sinfo->force_alloc = CHUNK_ALLOC_FORCE;
	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}
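
/*
 * To make the sizing above concrete: on a filesystem with a single set of
 * global roots and neither a block group tree nor a raid stripe tree,
 * min_items ends up as 1 (tree root) + 3 (extent, csum and free space
 * roots) + BTRFS_UNLINK_METADATA_UNITS.  Assuming a 16K nodesize, each
 * insert unit costs 16K * 2 * BTRFS_MAX_LEVEL == 256K, so on a small
 * filesystem the max_t() floor dominates the summed root byte counts, and
 * in all cases the result is clamped to SZ_512M.
 */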

void btrfs_init_root_block_rsv(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	switch (btrfs_root_id(root)) {
	case BTRFS_CSUM_TREE_OBJECTID:
	case BTRFS_EXTENT_TREE_OBJECTID:
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
		root->block_rsv = &fs_info->delayed_refs_rsv;
		break;
	case BTRFS_ROOT_TREE_OBJECTID:
	case BTRFS_DEV_TREE_OBJECTID:
	case BTRFS_QUOTA_TREE_OBJECTID:
		root->block_rsv = &fs_info->global_block_rsv;
		break;
	case BTRFS_CHUNK_TREE_OBJECTID:
		root->block_rsv = &fs_info->chunk_block_rsv;
		break;
	default:
		root->block_rsv = NULL;
		break;
	}
}

void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;
	fs_info->delayed_refs_rsv.space_info = space_info;

	btrfs_update_global_block_rsv(fs_info);
}

void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	btrfs_block_rsv_release(fs_info, &fs_info->global_block_rsv, (u64)-1,
				NULL);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.size > 0);
}

static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    (root == fs_info->uuid_root) ||
	    (trans->adding_csums && btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &fs_info->empty_block_rsv;

	return block_rsv;
}

struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
		goto try_reserve;
again:
	ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		btrfs_update_global_block_rsv(fs_info);
		goto again;
	}

	/*
	 * The global reserve still exists to save us from ourselves, so don't
	 * warn_on if we are short on our delayed refs reserve.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_DELREFS &&
	    btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL * 10,
				/*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv %d returned %d\n",
				block_rsv->type, ret);
	}
try_reserve:
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   blocksize, BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes, try to use some from the
	 * global reserve if its space info is the same as that of the chosen
	 * block reserve.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = btrfs_block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}

	/*
	 * All hope is lost, but of course our reservations are overly
	 * pessimistic, so instead of possibly having an ENOSPC abort here, try
	 * one last time to force a reservation if there's enough actual space
	 * on disk to make the reservation.
	 */
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info, blocksize,
					   BTRFS_RESERVE_FLUSH_EMERGENCY);
	if (!ret)
		return block_rsv;

	return ERR_PTR(ret);
}
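
/*
 * The fallback chain above, in order: consume ->reserved from the rsv that
 * get_block_rsv() picked (unless ->failfast is set, which errors out
 * immediately); if that rsv is the global one, recompute its size once and
 * retry; reserve directly from the space_info with BTRFS_RESERVE_NO_FLUSH;
 * steal from the global rsv when the space infos match; and finally retry
 * with BTRFS_RESERVE_FLUSH_EMERGENCY before giving up with an ERR_PTR.
 */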
555  
btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info * fs_info,struct btrfs_block_rsv * rsv)556  int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info,
557  				       struct btrfs_block_rsv *rsv)
558  {
559  	u64 needed_bytes;
560  	int ret;
561  
562  	/* 1 for slack space, 1 for updating the inode */
563  	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
564  		btrfs_calc_metadata_size(fs_info, 1);
565  
566  	spin_lock(&rsv->lock);
567  	if (rsv->reserved < needed_bytes)
568  		ret = -ENOSPC;
569  	else
570  		ret = 0;
571  	spin_unlock(&rsv->lock);
572  	return ret;
573  }
574
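
/*
 * Assuming a 16K nodesize, the check above needs 16K * 2 * BTRFS_MAX_LEVEL
 * (256K) of insert slack plus 16K * BTRFS_MAX_LEVEL (128K) for the inode
 * update, i.e. truncation of the free space cache only proceeds while the
 * rsv still holds at least 384K.
 */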