// SPDX-License-Identifier: GPL-2.0

#include <linux/sizes.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
#include "zoned.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"

#ifdef CONFIG_BTRFS_DEBUG
int btrfs_should_fragment_free_space(const struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
	       (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
		block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif

/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(const struct btrfs_fs_info *fs_info, u64 flags)
{
	const struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format.  If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		spin_unlock(&fs_info->balance_lock);
		return extended_to_chunk(target);
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	/* Select the highest-redundancy RAID level. */
	if (allowed & BTRFS_BLOCK_GROUP_RAID1C4)
		allowed = BTRFS_BLOCK_GROUP_RAID1C4;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3)
		allowed = BTRFS_BLOCK_GROUP_RAID1C3;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_DUP)
		allowed = BTRFS_BLOCK_GROUP_DUP;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
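
/*
 * Illustrative sketch (made-up input, not called anywhere): assume a
 * filesystem with 2 rw devices whose data chunks carry both the RAID1 and
 * RAID0 bits in extended format:
 *
 *	u64 flags = BTRFS_BLOCK_GROUP_DATA |
 *		    BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0;
 *
 *	flags = btrfs_reduce_alloc_profile(fs_info, flags);
 *
 * With no restripe target set, both profiles survive the devs_min mask, and
 * the if/else ladder keeps only the higher-redundancy RAID1 bit, so the
 * result is BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 in chunk format.
 */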

u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}

void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	refcount_inc(&cache->refs);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (refcount_dec_and_test(&cache->refs)) {
		WARN_ON(cache->pinned > 0);
		/*
		 * If there was a failure to cleanup a log tree, very likely due
		 * to an IO failure on a writeback attempt of one or more of its
		 * extent buffers, we could not do proper (and cheap) unaccounting
		 * of their reserved space, so don't warn on reserved > 0 in that
		 * case.
		 */
		if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
			WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		kfree(cache->free_space_ctl);
		btrfs_free_chunk_map(cache->physical_map);
		kfree(cache);
	}
}

/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;
	bool leftmost = true;

	ASSERT(block_group->length != 0);

	write_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_root.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			write_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color_cached(&block_group->cache_node,
			       &info->block_group_cache_tree, leftmost);

	write_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	read_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_root.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	read_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
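
/*
 * Illustrative sketch (hypothetical caller, not part of this file): every
 * successful lookup returns a referenced block group that must be released
 * with btrfs_put_block_group():
 *
 *	struct btrfs_block_group *bg;
 *
 *	bg = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (!bg)
 *		return -ENOENT;
 *	... use bg->start, bg->length, bg->flags ...
 *	btrfs_put_block_group(bg);
 */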

struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	read_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		read_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	read_unlock(&fs_info->block_group_cache_lock);
	return cache;
}
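
/*
 * Illustrative sketch (hypothetical caller): btrfs_next_block_group() drops
 * the reference on the group it was given and returns the next group already
 * referenced, so walking all block groups is simply:
 *
 *	struct btrfs_block_group *bg;
 *
 *	for (bg = btrfs_lookup_first_block_group(fs_info, 0); bg;
 *	     bg = btrfs_next_block_group(bg)) {
 *		... inspect bg, the iterator handles the refcounting ...
 *	}
 */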

/*
 * Check if we can do a NOCOW write for a given extent.
 *
 * @fs_info:       The filesystem information object.
 * @bytenr:        Logical start address of the extent.
 *
 * Check if we can do a NOCOW write for the given extent, and increment the
 * number of NOCOW writers in the block group that contains the extent, as long
 * as the block group exists and it's currently not in read-only mode.
 *
 * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller
 *          is responsible for calling btrfs_dec_nocow_writers() later.
 *
 *          Or NULL if we cannot do a NOCOW write.
 */
struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool can_nocow = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return NULL;

	spin_lock(&bg->lock);
	if (bg->ro)
		can_nocow = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	if (!can_nocow) {
		btrfs_put_block_group(bg);
		return NULL;
	}

	/* No put on block group, done by btrfs_dec_nocow_writers(). */
	return bg;
}

/*
 * Decrement the number of NOCOW writers in a block group.
 *
 * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
 * and on the block group returned by that call. Typically this is called after
 * creating an ordered extent for a NOCOW write, to prevent races with scrub and
 * relocation.
 *
 * After this call, the caller should not use the block group anymore. If it
 * wants to use it, then it should get a reference on it before calling this
 * function.
 */
void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
{
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);

	/* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
	btrfs_put_block_group(bg);
}
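
/*
 * Illustrative sketch of the NOCOW writer protocol (the caller and the
 * fallback helper are hypothetical):
 *
 *	struct btrfs_block_group *bg;
 *
 *	bg = btrfs_inc_nocow_writers(fs_info, disk_bytenr);
 *	if (!bg)
 *		return fallback_to_cow();
 *	... create the ordered extent for the NOCOW write ...
 *	btrfs_dec_nocow_writers(bg);
 *
 * Between the two calls, btrfs_wait_nocow_writers() used by scrub and
 * relocation blocks until nocow_writers drops back to zero.
 */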

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}

void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}

struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once.  So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes.  Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;
	int progress;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	/*
	 * We've already failed to allocate from this block group, so even if
	 * there's enough space in the block group it isn't contiguous enough to
	 * allow for an allocation, so wait for at least the next wakeup tick,
	 * or for the thing to be done.
	 */
	progress = atomic_read(&caching_ctl->progress);

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (progress != atomic_read(&caching_ctl->progress) &&
		    (cache->free_space_ctl->free_space >= num_bytes)));

	btrfs_put_caching_control(caching_ctl);
}

static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
				       struct btrfs_caching_control *caching_ctl)
{
	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
}

static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
	ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	btrfs_put_caching_control(caching_ctl);
	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif
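
/*
 * Worked example for fragment_free_space() (debug builds only, numbers made
 * up): on a metadata block group with a 16K nodesize, chunk is 16K and step
 * is 32K, so the loop removes [start, start + 16K) from the free space
 * cache, keeps the following 16K free, removes the next 16K, and so on.
 * The result is that no contiguous free extent larger than 16K remains,
 * which is exactly what the FRAGMENT_* mount options want to exercise.
 */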

/*
 * Add a free space range to the in memory free space cache of a block group.
 * This checks if the range contains super block locations and any such
 * locations are not added to the free space cache.
 *
 * @block_group:      The target block group.
 * @start:            Start offset of the range.
 * @end:              End offset of the range (exclusive).
 * @total_added_ret:  Optional pointer to return the total amount of space
 *                    added to the block group's free space cache.
 *
 * Returns 0 on success or < 0 on error.
 */
int btrfs_add_new_free_space(struct btrfs_block_group *block_group, u64 start,
			     u64 end, u64 *total_added_ret)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size;
	int ret;

	if (total_added_ret)
		*total_added_ret = 0;

	while (start < end) {
		if (!find_first_extent_bit(&info->excluded_extents, start,
					   &extent_start, &extent_end,
					   EXTENT_DIRTY | EXTENT_UPTODATE,
					   NULL))
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			if (ret)
				return ret;
			if (total_added_ret)
				*total_added_ret += size;
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		if (ret)
			return ret;
		if (total_added_ret)
			*total_added_ret += size;
	}

	return 0;
}
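
/*
 * Worked example for btrfs_add_new_free_space() (addresses made up): for a
 * range [0, 100M) where the excluded_extents tree marks a super block
 * mirror at [64M, 64M + 64K - 1], the loop adds [0, 64M) as free space,
 * skips over the excluded extent, and the trailing block adds
 * [64M + 64K, 100M), so *total_added_ret ends up as 100M - 64K.
 */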

/*
 * Get an arbitrary extent item index / max_index through the block group
 *
 * @block_group   the block group to sample from
 * @index:        the integral step through the block group to grab from
 * @max_index:    the granularity of the sampling
 * @key:          return value parameter for the item we find
 *
 * Pre-conditions on indices:
 * 0 <= index <= max_index
 * 0 < max_index
 *
 * Returns: 0 on success, 1 if the search didn't yield a useful item, negative
 * error code on error.
 */
static int sample_block_group_extent_item(struct btrfs_caching_control *caching_ctl,
					  struct btrfs_block_group *block_group,
					  int index, int max_index,
					  struct btrfs_key *found_key)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	u64 search_offset;
	u64 search_end = block_group->start + block_group->length;
	struct btrfs_path *path;
	struct btrfs_key search_key;
	int ret = 0;

	ASSERT(index >= 0);
	ASSERT(index <= max_index);
	ASSERT(max_index > 0);
	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	extent_root = btrfs_extent_root(fs_info, max_t(u64, block_group->start,
						       BTRFS_SUPER_INFO_OFFSET));

	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	search_offset = index * div_u64(block_group->length, max_index);
	search_key.objectid = block_group->start + search_offset;
	search_key.type = BTRFS_EXTENT_ITEM_KEY;
	search_key.offset = 0;

	btrfs_for_each_slot(extent_root, &search_key, found_key, path, ret) {
		/* Success; sampled an extent item in the block group */
		if (found_key->type == BTRFS_EXTENT_ITEM_KEY &&
		    found_key->objectid >= block_group->start &&
		    found_key->objectid + found_key->offset <= search_end)
			break;

		/* We can't possibly find a valid extent item anymore */
		if (found_key->objectid >= search_end) {
			ret = 1;
			break;
		}
	}

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);
	btrfs_free_path(path);
	return ret;
}
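
/*
 * Worked example for the sampling above (sizes made up): with max_index == 5
 * and a 1G block group, div_u64(length, max_index) is about 204.8M, so the
 * five search keys start roughly 0, 205M, 410M, 614M and 819M into the block
 * group, and each btrfs_for_each_slot() walk stops at the first extent item
 * at or after that offset (or sets ret to 1 once past search_end).
 */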

/*
 * Best effort attempt to compute a block group's size class while caching it.
 *
 * @block_group: the block group we are caching
 *
 * We cannot infer the size class while adding free space extents, because that
 * logic doesn't care about contiguous file extents (it doesn't differentiate
 * between a 100M extent and 100 contiguous 1M extents). So we need to read the
 * file extent items. Reading all of them is quite wasteful, because usually
 * only a handful are enough to give a good answer. Therefore, we just grab 5 of
 * them at even steps through the block group and pick the smallest size class
 * we see. Since size class is best effort, and not guaranteed in general,
 * inaccuracy is acceptable.
 *
 * To be more explicit about why this algorithm makes sense:
 *
 * If we are caching in a block group from disk, then there are three major cases
 * to consider:
 * 1. the block group is well behaved and all extents in it are the same size
 *    class.
 * 2. the block group is mostly one size class with rare exceptions for last
 *    ditch allocations
 * 3. the block group was populated before size classes and can have a totally
 *    arbitrary mix of size classes.
 *
 * In case 1, looking at any extent in the block group will yield the correct
 * result. For the mixed cases, taking the minimum size class seems like a good
 * approximation, since gaps from frees will be usable to the size class. For
 * 2., a small handful of file extents is likely to yield the right answer. For
 * 3, we can either read every file extent, or admit that this is best effort
 * anyway and try to stay fast.
 *
 * Returns: 0 on success, negative error code on error.
 */
static int load_block_group_size_class(struct btrfs_caching_control *caching_ctl,
				       struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_key key;
	int i;
	u64 min_size = block_group->length;
	enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE;
	int ret;

	if (!btrfs_block_group_should_use_size_class(block_group))
		return 0;

	lockdep_assert_held(&caching_ctl->mutex);
	lockdep_assert_held_read(&fs_info->commit_root_sem);
	for (i = 0; i < 5; ++i) {
		ret = sample_block_group_extent_item(caching_ctl, block_group, i, 5, &key);
		if (ret < 0)
			goto out;
		if (ret > 0)
			continue;
		min_size = min_t(u64, min_size, key.offset);
		size_class = btrfs_calc_block_group_size_class(min_size);
	}
	if (size_class != BTRFS_BG_SZ_NONE) {
		spin_lock(&block_group->lock);
		block_group->size_class = size_class;
		spin_unlock(&block_group->lock);
	}
out:
	return ret;
}

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
	extent_root = btrfs_extent_root(fs_info, last);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			u64 space_added;

			ret = btrfs_add_new_free_space(block_group, last,
						       key.objectid, &space_added);
			if (ret)
				goto out;
			total_found += space_added;
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup) {
					atomic_inc(&caching_ctl->progress);
					wake_up(&caching_ctl->wait);
				}
			}
		}
		path->slots[0]++;
	}

	ret = btrfs_add_new_free_space(block_group, last,
				       block_group->start + block_group->length,
				       NULL);
out:
	btrfs_free_path(path);
	return ret;
}
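
/*
 * Worked example for load_extent_tree_free() (layout made up): free space is
 * derived from the gaps between extent items. If the block group spans
 * [1G, 2G) and the commit root holds the items (1G EXTENT_ITEM 16M) and
 * (1G+100M EXTENT_ITEM 4M), the loop records [1G+16M, 1G+100M) as free and
 * the final btrfs_add_new_free_space() call records the tail [1G+104M, 2G).
 */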

static inline void btrfs_free_excluded_extents(const struct btrfs_block_group *bg)
{
	clear_extent_bits(&bg->fs_info->excluded_extents, bg->start,
			  bg->start + bg->length - 1, EXTENT_UPTODATE);
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	load_block_group_size_class(caching_ctl, block_group);
	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		ret = load_free_space_cache(block_group);
		if (ret == 1) {
			ret = 0;
			goto done;
		}

		/*
		 * We failed to load the space cache, set ourselves to
		 * CACHE_STARTED and carry on.
		 */
		spin_lock(&block_group->lock);
		block_group->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&block_group->lock);
		wake_up(&caching_ctl->wait);
	}

	/*
	 * If we are in the transaction that populated the free space tree we
	 * can't actually cache from the free space tree as our commit root and
	 * real root are the same, so we could change the contents of the blocks
	 * while caching.  Instead do the slow caching in this case, and after
	 * the transaction has committed we will be safe.
	 */
	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);
done:
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl = NULL;
	int ret = 0;

	/* Allocator for zoned filesystems does not use the cache at all */
	if (btrfs_is_zoned(fs_info))
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	refcount_set(&caching_ctl->count, 2);
	atomic_set(&caching_ctl->progress, 0);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		kfree(caching_ctl);

		caching_ctl = cache->caching_ctl;
		if (caching_ctl)
			refcount_inc(&caching_ctl->count);
		spin_unlock(&cache->lock);
		goto out;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	write_lock(&fs_info->block_group_cache_lock);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	write_unlock(&fs_info->block_group_cache_lock);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
out:
	if (wait && caching_ctl)
		ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
	if (caching_ctl)
		btrfs_put_caching_control(caching_ctl);

	return ret;
}
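
/*
 * Illustrative sketch (hypothetical caller): allocator paths typically kick
 * off caching without waiting and only block when they really need the
 * result:
 *
 *	ret = btrfs_cache_block_group(cache, false);	(start async caching)
 *	...
 *	ret = btrfs_cache_block_group(cache, true);	(wait for completion)
 *
 * The refcount_set(&caching_ctl->count, 2) above covers the reference this
 * function drops at the out label and the one dropped at the end of
 * caching_thread(); the caching_block_groups list takes its own extra
 * reference.
 */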

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *            in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}

static struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
{
	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
		return fs_info->block_group_root;
	return btrfs_extent_root(fs_info, 0);
}

static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = btrfs_block_group_root(fs_info);
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_chunk_map *map)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_map;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, map->start);
	if (!block_group)
		return -ENOENT;

	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	ret = btrfs_remove_free_space_inode(trans, inode, block_group);
	if (ret)
		goto out;

	write_lock(&fs_info->block_group_cache_lock);
	rb_erase_cached(&block_group->cache_node,
			&fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	/* Once for the block groups rbtree */
	btrfs_put_block_group(block_group);

	write_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);

	write_lock(&fs_info->block_group_cache_lock);
	caching_ctl = btrfs_get_caching_control(block_group);
	if (!caching_ctl) {
		struct btrfs_caching_control *ctl;

		list_for_each_entry(ctl, &fs_info->caching_block_groups, list) {
			if (ctl->block_group == block_group) {
				caching_ctl = ctl;
				refcount_inc(&caching_ctl->count);
				break;
			}
		}
	}
	if (caching_ctl)
		list_del_init(&caching_ctl->list);
	write_unlock(&fs_info->block_group_cache_lock);

	if (caching_ctl) {
		/* Once for the caching bgs list and once for us. */
		btrfs_put_caching_control(caching_ctl);
		btrfs_put_caching_control(caching_ctl);
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length - block_group->zone_unusable);
		WARN_ON(block_group->space_info->bytes_zone_unusable
			< block_group->zone_unusable);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -=
		(block_group->length - block_group->zone_unusable);
	btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info,
						    -block_group->zone_unusable);
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed. This is to prevent races with tasks that
	 * freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task ends up removing
	 * the block group's extent map before the task calling this function
	 * deletes the block group item from the extent tree, allowing for
	 * another task to attempt to create another block group with the same
	 * item key (and failing with -EEXIST and a transaction abort).
	 */
	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	ret = remove_block_group_item(trans, path, block_group);
	if (ret < 0)
		goto out;

	spin_lock(&block_group->lock);
	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);

	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree so no one can find it anymore and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the chunk map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is needed to
	 * avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_map = (atomic_read(&block_group->frozen) == 0);
	spin_unlock(&block_group->lock);

	if (remove_map)
		btrfs_remove_chunk_map(fs_info, map);

out:
	/* Once for the lookup reference */
	btrfs_put_block_group(block_group);
	if (remove_rsv)
		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
	btrfs_free_path(path);
	return ret;
}

struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct btrfs_root *root = btrfs_block_group_root(fs_info);
	struct btrfs_chunk_map *map;
	unsigned int num_items;

	map = btrfs_find_chunk_map(fs_info, chunk_offset, 1);
	ASSERT(map != NULL);
	ASSERT(map->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	num_items = 3 + map->num_stripes;
	btrfs_free_chunk_map(map);

	return btrfs_start_transaction_fallback_global_rsv(root, num_items);
}
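
/*
 * Worked example for the reservation above (layout made up): removing a
 * RAID1 block group whose chunk map has 2 stripes needs
 * num_items = 3 + 2 = 5 metadata units: the orphan item, the block group
 * item, the free space item, and one device extent item per stripe.
 */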

/*
 * Mark block group @cache read-only, so later write won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
 */
static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->swap_extents) {
		ret = -ETXTBSY;
		goto out;
	}

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->length - cache->reserved - cache->pinned -
		    cache->bytes_super - cache->zone_unusable - cache->used;

	/*
	 * Data never overcommits, even in mixed mode, so do just the straight
	 * check of left over space in how much we have allocated.
	 */
	if (force) {
		ret = 0;
	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
		u64 sinfo_used = btrfs_space_info_used(sinfo, true);

		/*
		 * Here we make sure if we mark this bg RO, we still have enough
		 * free space as buffer.
		 */
		if (sinfo_used + num_bytes <= sinfo->total_bytes)
			ret = 0;
	} else {
		/*
		 * We overcommit metadata, so we need to do the
		 * btrfs_can_overcommit check here, and we need to pass in
		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
		 * leeway to allow us to mark this block group as read only.
		 */
		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
					 BTRFS_RESERVE_NO_FLUSH))
			ret = 0;
	}

	if (!ret) {
		sinfo->bytes_readonly += num_bytes;
		if (btrfs_is_zoned(cache->fs_info)) {
			/* Migrate zone_unusable bytes to readonly */
			sinfo->bytes_readonly += cache->zone_unusable;
			btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
								    -cache->zone_unusable);
			cache->zone_unusable = 0;
		}
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
		btrfs_info(cache->fs_info,
			"unable to make block group %llu ro", cache->start);
		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
	}
	return ret;
}

static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
				 const struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_transaction *prev_trans = NULL;
	const u64 start = bg->start;
	const u64 end = start + bg->length - 1;
	int ret;

	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->list.prev != &fs_info->trans_list) {
		prev_trans = list_last_entry(&trans->transaction->list,
					     struct btrfs_transaction, list);
		refcount_inc(&prev_trans->use_count);
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * Hold the unused_bg_unpin_mutex lock to avoid racing with
	 * btrfs_finish_extent_commit(). If we are at transaction N, another
	 * task might be running finish_extent_commit() for the previous
	 * transaction N - 1, and have seen a range belonging to the block
	 * group in pinned_extents before we were able to clear the whole block
	 * group range from pinned_extents. This means that task can lookup for
	 * the block group after we unpinned it from pinned_extents and removed
	 * it, leading to an error at unpin_extent_range().
	 */
	mutex_lock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans) {
		ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
					EXTENT_DIRTY);
		if (ret)
			goto out;
	}

	ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
				EXTENT_DIRTY);
out:
	mutex_unlock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans)
		btrfs_put_transaction(prev_trans);

	return ret == 0;
}
1461  
1462  /*
1463   * Process the unused_bgs list and remove any that don't have any allocated
1464   * space inside of them.
1465   */
btrfs_delete_unused_bgs(struct btrfs_fs_info * fs_info)1466  void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
1467  {
1468  	LIST_HEAD(retry_list);
1469  	struct btrfs_block_group *block_group;
1470  	struct btrfs_space_info *space_info;
1471  	struct btrfs_trans_handle *trans;
1472  	const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
1473  	int ret = 0;
1474  
1475  	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1476  		return;
1477  
1478  	if (btrfs_fs_closing(fs_info))
1479  		return;
1480  
1481  	/*
1482  	 * Long running balances can keep us blocked here for eternity, so
1483  	 * simply skip deletion if we're unable to get the mutex.
1484  	 */
1485  	if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
1486  		return;
1487  
1488  	spin_lock(&fs_info->unused_bgs_lock);
1489  	while (!list_empty(&fs_info->unused_bgs)) {
1490  		u64 used;
1491  		int trimming;
1492  
1493  		block_group = list_first_entry(&fs_info->unused_bgs,
1494  					       struct btrfs_block_group,
1495  					       bg_list);
1496  		list_del_init(&block_group->bg_list);
1497  
1498  		space_info = block_group->space_info;
1499  
1500  		if (ret || btrfs_mixed_space_info(space_info)) {
1501  			btrfs_put_block_group(block_group);
1502  			continue;
1503  		}
1504  		spin_unlock(&fs_info->unused_bgs_lock);
1505  
1506  		btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
1507  
1508  		/* Don't want to race with allocators so take the groups_sem */
1509  		down_write(&space_info->groups_sem);
1510  
1511  		/*
1512  		 * Async discard moves the final block group discard to be prior
1513  		 * to the unused_bgs code path.  Therefore, if it's not fully
1514  		 * trimmed, punt it back to the async discard lists.
1515  		 */
1516  		if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
1517  		    !btrfs_is_free_space_trimmed(block_group)) {
1518  			trace_btrfs_skip_unused_block_group(block_group);
1519  			up_write(&space_info->groups_sem);
1520  			/* Requeue if we failed because of async discard */
1521  			btrfs_discard_queue_work(&fs_info->discard_ctl,
1522  						 block_group);
1523  			goto next;
1524  		}
1525  
1526  		spin_lock(&space_info->lock);
1527  		spin_lock(&block_group->lock);
1528  		if (btrfs_is_block_group_used(block_group) || block_group->ro ||
1529  		    list_is_singular(&block_group->list)) {
1530  			/*
1531  			 * We want to bail if we made new allocations or have
1532  			 * outstanding allocations in this block group.  We do
1533  			 * the ro check in case balance is currently acting on
1534  			 * this block group.
1535  			 *
1536  			 * Also bail out if this is the only block group for its
1537  			 * type, because otherwise we would lose profile
1538  			 * information from fs_info->avail_*_alloc_bits and the
1539  			 * next block group of this type would be created with a
1540  			 * "single" profile (even if we're in a raid fs) because
1541  			 * fs_info->avail_*_alloc_bits would be 0.
1542  			 */
1543  			trace_btrfs_skip_unused_block_group(block_group);
1544  			spin_unlock(&block_group->lock);
1545  			spin_unlock(&space_info->lock);
1546  			up_write(&space_info->groups_sem);
1547  			goto next;
1548  		}
1549  
1550  		/*
1551  		 * The block group may be unused but there may be space reserved
1552  		 * accounting with the existence of that block group, that is,
1553  		 * space_info->bytes_may_use was incremented by a task but no
1554  		 * space was yet allocated from the block group by the task.
1555  		 * That space may or may not be allocated, as we are generally
1556  		 * pessimistic about space reservation for metadata as well as
1557  		 * for data when using compression (as we reserve space based on
1558  		 * the worst case, when data can't be compressed, and before
1559  		 * actually attempting compression, before starting writeback).
1560  		 *
1561  		 * So check if the total space of the space_info minus the size
1562  		 * of this block group is less than the used space of the
1563  		 * space_info - if that's the case, then it means we have tasks
1564  		 * that might be relying on the block group in order to allocate
1565  		 * extents, and add back the block group to the unused list when
1566  		 * we finish, so that we retry later in case no tasks ended up
1567  		 * needing to allocate extents from the block group.
1568  		 */
1569  		used = btrfs_space_info_used(space_info, true);
1570  		if (space_info->total_bytes - block_group->length < used &&
1571  		    block_group->zone_unusable < block_group->length) {
1572  			/*
1573  			 * Add a reference for the list, compensate for the ref
1574  			 * drop under the "next" label for the
1575  			 * fs_info->unused_bgs list.
1576  			 */
1577  			btrfs_get_block_group(block_group);
1578  			list_add_tail(&block_group->bg_list, &retry_list);
1579  
1580  			trace_btrfs_skip_unused_block_group(block_group);
1581  			spin_unlock(&block_group->lock);
1582  			spin_unlock(&space_info->lock);
1583  			up_write(&space_info->groups_sem);
1584  			goto next;
1585  		}
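		/*
		 * Worked example for the check above (numbers illustrative):
		 * with total_bytes = 10GiB, used (including bytes_may_use) =
		 * 9.5GiB and a 1GiB block group, 10GiB - 1GiB = 9GiB < 9.5GiB,
		 * so pending reservations might still need this group and we
		 * requeue it via retry_list instead of deleting it. The
		 * zone_unusable clause exempts groups that are entirely
		 * unusable on zoned filesystems, since they cannot back
		 * reservations anyway. Standalone shape of the predicate:
		 */
#if 0
		bool may_back_reservations =
			space_info->total_bytes - block_group->length < used &&
			block_group->zone_unusable < block_group->length;
#endif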
1586  
1587  		spin_unlock(&block_group->lock);
1588  		spin_unlock(&space_info->lock);
1589  
1590  		/* We don't want to force the issue, only flip if it's ok. */
1591  		ret = inc_block_group_ro(block_group, 0);
1592  		up_write(&space_info->groups_sem);
1593  		if (ret < 0) {
1594  			ret = 0;
1595  			goto next;
1596  		}
1597  
1598  		ret = btrfs_zone_finish(block_group);
1599  		if (ret < 0) {
1600  			btrfs_dec_block_group_ro(block_group);
1601  			if (ret == -EAGAIN)
1602  				ret = 0;
1603  			goto next;
1604  		}
1605  
1606  		/*
1607  		 * Want to do this before we do anything else so we can recover
1608  		 * properly if we fail to join the transaction.
1609  		 */
1610  		trans = btrfs_start_trans_remove_block_group(fs_info,
1611  						     block_group->start);
1612  		if (IS_ERR(trans)) {
1613  			btrfs_dec_block_group_ro(block_group);
1614  			ret = PTR_ERR(trans);
1615  			goto next;
1616  		}
1617  
1618  		/*
1619  		 * We could have pending pinned extents for this block group,
1620  		 * just delete them, we don't care about them anymore.
1621  		 */
1622  		if (!clean_pinned_extents(trans, block_group)) {
1623  			btrfs_dec_block_group_ro(block_group);
1624  			goto end_trans;
1625  		}
1626  
1627  		/*
1628  		 * At this point, the block_group is read only and should fail
1629  		 * new allocations.  However, btrfs_finish_extent_commit() can
1630  		 * cause this block_group to be placed back on the discard
1631  		 * lists because now the block_group isn't fully discarded.
1632  		 * Bail here and try again later after discarding everything.
1633  		 */
1634  		spin_lock(&fs_info->discard_ctl.lock);
1635  		if (!list_empty(&block_group->discard_list)) {
1636  			spin_unlock(&fs_info->discard_ctl.lock);
1637  			btrfs_dec_block_group_ro(block_group);
1638  			btrfs_discard_queue_work(&fs_info->discard_ctl,
1639  						 block_group);
1640  			goto end_trans;
1641  		}
1642  		spin_unlock(&fs_info->discard_ctl.lock);
1643  
1644  		/* Reset pinned so btrfs_put_block_group doesn't complain */
1645  		spin_lock(&space_info->lock);
1646  		spin_lock(&block_group->lock);
1647  
1648  		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
1649  						     -block_group->pinned);
1650  		space_info->bytes_readonly += block_group->pinned;
1651  		block_group->pinned = 0;
1652  
1653  		spin_unlock(&block_group->lock);
1654  		spin_unlock(&space_info->lock);
1655  
1656  		/*
1657  		 * The normal path is that an unused block group is passed here,
1658  		 * then trimming is handled in the transaction commit path.
1659  		 * Async discard interposes before this to do the trimming
1660  		 * before coming down the unused block group path as trimming
1661  		 * will no longer be done later in the transaction commit path.
1662  		 */
1663  		if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
1664  			goto flip_async;
1665  
1666  		/*
1667  		 * DISCARD can flip during remount. On zoned filesystems, we
1668  		 * need to reset sequential-required zones.
1669  		 */
1670  		trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) ||
1671  				btrfs_is_zoned(fs_info);
1672  
1673  		/* Implicit trim during transaction commit. */
1674  		if (trimming)
1675  			btrfs_freeze_block_group(block_group);
1676  
1677  		/*
1678  		 * btrfs_remove_chunk() will abort the transaction if things go
1679  		 * horribly wrong.
1680  		 */
1681  		ret = btrfs_remove_chunk(trans, block_group->start);
1682  
1683  		if (ret) {
1684  			if (trimming)
1685  				btrfs_unfreeze_block_group(block_group);
1686  			goto end_trans;
1687  		}
1688  
1689  		/*
1690  		 * If we're not mounted with -odiscard, we can just forget
1691  		 * about this block group. Otherwise we'll need to wait
1692  		 * until transaction commit to do the actual discard.
1693  		 */
1694  		if (trimming) {
1695  			spin_lock(&fs_info->unused_bgs_lock);
1696  			/*
1697  			 * A concurrent scrub might have added us to the list
1698  			 * fs_info->unused_bgs, so use a list_move operation
1699  			 * to add the block group to the deleted_bgs list.
1700  			 */
1701  			list_move(&block_group->bg_list,
1702  				  &trans->transaction->deleted_bgs);
1703  			spin_unlock(&fs_info->unused_bgs_lock);
1704  			btrfs_get_block_group(block_group);
1705  		}
1706  end_trans:
1707  		btrfs_end_transaction(trans);
1708  next:
1709  		btrfs_put_block_group(block_group);
1710  		spin_lock(&fs_info->unused_bgs_lock);
1711  	}
1712  	list_splice_tail(&retry_list, &fs_info->unused_bgs);
1713  	spin_unlock(&fs_info->unused_bgs_lock);
1714  	mutex_unlock(&fs_info->reclaim_bgs_lock);
1715  	return;
1716  
1717  flip_async:
1718  	btrfs_end_transaction(trans);
1719  	spin_lock(&fs_info->unused_bgs_lock);
1720  	list_splice_tail(&retry_list, &fs_info->unused_bgs);
1721  	spin_unlock(&fs_info->unused_bgs_lock);
1722  	mutex_unlock(&fs_info->reclaim_bgs_lock);
1723  	btrfs_put_block_group(block_group);
1724  	btrfs_discard_punt_unused_bgs_list(fs_info);
1725  }
1726  
1727  void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
1728  {
1729  	struct btrfs_fs_info *fs_info = bg->fs_info;
1730  
1731  	spin_lock(&fs_info->unused_bgs_lock);
1732  	if (list_empty(&bg->bg_list)) {
1733  		btrfs_get_block_group(bg);
1734  		trace_btrfs_add_unused_block_group(bg);
1735  		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
1736  	} else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) {
1737  		/* Pull out the block group from the reclaim_bgs list. */
1738  		trace_btrfs_add_unused_block_group(bg);
1739  		list_move_tail(&bg->bg_list, &fs_info->unused_bgs);
1740  	}
1741  	spin_unlock(&fs_info->unused_bgs_lock);
1742  }
1743  
1744  /*
1745   * We want block groups with a low number of used bytes to be in the beginning
1746   * of the list, so they will get reclaimed first.
1747   */
1748  static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
1749  			   const struct list_head *b)
1750  {
1751  	const struct btrfs_block_group *bg1, *bg2;
1752  
1753  	bg1 = list_entry(a, struct btrfs_block_group, bg_list);
1754  	bg2 = list_entry(b, struct btrfs_block_group, bg_list);
1755  
1756  	return bg1->used > bg2->used;
1757  }
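
/*
 * How the comparator above drives list_sort(): a positive return sorts
 * @a after @b, so groups with fewer used bytes bubble to the head and
 * get reclaimed first. list_sort() is stable, so returning only 0 or 1
 * (instead of a full three-way compare) is sufficient. The sketch below
 * is illustrative and mirrors the call made in btrfs_reclaim_bgs_work():
 */
#if 0
	/* used = {3, 1, 2} across the list -> sorted order used = {1, 2, 3} */
	list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
#endif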
1758  
1759  static inline bool btrfs_should_reclaim(const struct btrfs_fs_info *fs_info)
1760  {
1761  	if (btrfs_is_zoned(fs_info))
1762  		return btrfs_zoned_should_reclaim(fs_info);
1763  	return true;
1764  }
1765  
1766  static bool should_reclaim_block_group(const struct btrfs_block_group *bg, u64 bytes_freed)
1767  {
1768  	const int thresh_pct = btrfs_calc_reclaim_threshold(bg->space_info);
1769  	u64 thresh_bytes = mult_perc(bg->length, thresh_pct);
1770  	const u64 new_val = bg->used;
1771  	const u64 old_val = new_val + bytes_freed;
1772  
1773  	if (thresh_bytes == 0)
1774  		return false;
1775  
1776  	/*
1777  	 * If we were below the threshold before don't reclaim, we are likely a
1778  	 * brand new block group and we don't want to relocate new block groups.
1779  	 */
1780  	if (old_val < thresh_bytes)
1781  		return false;
1782  	if (new_val >= thresh_bytes)
1783  		return false;
1784  	return true;
1785  }
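
/*
 * Worked example for the predicate above, assuming a hypothetical 75%
 * reclaim threshold on a 1GiB block group: thresh_bytes = 768MiB, so a
 * group going from 800MiB used (old_val >= thresh) down to 700MiB used
 * (new_val < thresh) is picked for reclaim, while a group that started
 * below 768MiB never is. A standalone model (the mult_perc() stand-in
 * below trades a little precision for simplicity):
 */
#if 0
static bool model_should_reclaim(u64 length, u64 used, u64 freed, int pct)
{
	const u64 thresh = length / 100 * pct;

	if (thresh == 0)
		return false;
	/* Reclaim only on a downward crossing of the threshold. */
	return (used + freed) >= thresh && used < thresh;
}
#endif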
1786  
1787  void btrfs_reclaim_bgs_work(struct work_struct *work)
1788  {
1789  	struct btrfs_fs_info *fs_info =
1790  		container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
1791  	struct btrfs_block_group *bg;
1792  	struct btrfs_space_info *space_info;
1793  	LIST_HEAD(retry_list);
1794  
1795  	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1796  		return;
1797  
1798  	if (btrfs_fs_closing(fs_info))
1799  		return;
1800  
1801  	if (!btrfs_should_reclaim(fs_info))
1802  		return;
1803  
1804  	sb_start_write(fs_info->sb);
1805  
1806  	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
1807  		sb_end_write(fs_info->sb);
1808  		return;
1809  	}
1810  
1811  	/*
1812  	 * Long running balances can keep us blocked here for eternity, so
1813  	 * simply skip reclaim if we're unable to get the mutex.
1814  	 */
1815  	if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) {
1816  		btrfs_exclop_finish(fs_info);
1817  		sb_end_write(fs_info->sb);
1818  		return;
1819  	}
1820  
1821  	spin_lock(&fs_info->unused_bgs_lock);
1822  	/*
1823  	 * Sort happens under lock because we can't simply splice it and sort.
1824  	 * The block groups might still be in use and reachable via bg_list,
1825  	 * and their presence in the reclaim_bgs list must be preserved.
1826  	 */
1827  	list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
1828  	while (!list_empty(&fs_info->reclaim_bgs)) {
1829  		u64 zone_unusable;
1830  		u64 reclaimed;
1831  		int ret = 0;
1832  
1833  		bg = list_first_entry(&fs_info->reclaim_bgs,
1834  				      struct btrfs_block_group,
1835  				      bg_list);
1836  		list_del_init(&bg->bg_list);
1837  
1838  		space_info = bg->space_info;
1839  		spin_unlock(&fs_info->unused_bgs_lock);
1840  
1841  		/* Don't race with allocators so take the groups_sem */
1842  		down_write(&space_info->groups_sem);
1843  
1844  		spin_lock(&space_info->lock);
1845  		spin_lock(&bg->lock);
1846  		if (bg->reserved || bg->pinned || bg->ro) {
1847  			/*
1848  			 * We want to bail if we made new allocations or have
1849  			 * outstanding allocations in this block group.  We do
1850  			 * the ro check in case balance is currently acting on
1851  			 * this block group.
1852  			 */
1853  			spin_unlock(&bg->lock);
1854  			spin_unlock(&space_info->lock);
1855  			up_write(&space_info->groups_sem);
1856  			goto next;
1857  		}
1858  		if (bg->used == 0) {
1859  			/*
1860  			 * It is possible that we trigger relocation on a block
1861  			 * group as its extents are deleted and it first goes
1862  			 * below the threshold, then shortly after goes empty.
1863  			 *
1864  			 * In this case, relocating it does delete it, but has
1865  			 * some overhead in relocation specific metadata, looking
1866  			 * for the non-existent extents and running some extra
1867  			 * transactions, which we can avoid by using one of the
1868  			 * other mechanisms for dealing with empty block groups.
1869  			 */
1870  			if (!btrfs_test_opt(fs_info, DISCARD_ASYNC))
1871  				btrfs_mark_bg_unused(bg);
1872  			spin_unlock(&bg->lock);
1873  			spin_unlock(&space_info->lock);
1874  			up_write(&space_info->groups_sem);
1875  			goto next;
1876  
1877  		}
1878  		/*
1879  		 * The block group might no longer meet the reclaim condition by
1880  		 * the time we get around to reclaiming it, so to avoid
1881  		 * reclaiming overly full block_groups, skip reclaiming them.
1882  		 *
1883  		 * Since the decision making process also depends on the amount
1884  		 * being freed, pass in a fake giant value to skip that extra
1885  		 * check, which is more meaningful when adding to the list in
1886  		 * the first place.
1887  		 */
1888  		if (!should_reclaim_block_group(bg, bg->length)) {
1889  			spin_unlock(&bg->lock);
1890  			spin_unlock(&space_info->lock);
1891  			up_write(&space_info->groups_sem);
1892  			goto next;
1893  		}
1894  		spin_unlock(&bg->lock);
1895  		spin_unlock(&space_info->lock);
1896  
1897  		/*
1898  		 * Get out fast, in case we're read-only or unmounting the
1899  		 * filesystem. It is OK to drop block groups from the list even
1900  		 * for the read-only case. As we did sb_start_write(),
1901  		 * "mount -o remount,ro" won't happen and read-only filesystem
1902  		 * means it is forced read-only due to a fatal error. So, it
1903  		 * never gets back to read-write to let us reclaim again.
1904  		 */
1905  		if (btrfs_need_cleaner_sleep(fs_info)) {
1906  			up_write(&space_info->groups_sem);
1907  			goto next;
1908  		}
1909  
1910  		/*
1911  		 * Cache the zone_unusable value before turning the block group
1912  		 * to read only. As soon as the block group is read only, its
1913  		 * zone_unusable value gets moved to the block group's read-only
1914  		 * bytes and isn't available for calculations anymore.
1915  		 */
1916  		zone_unusable = bg->zone_unusable;
1917  		ret = inc_block_group_ro(bg, 0);
1918  		up_write(&space_info->groups_sem);
1919  		if (ret < 0)
1920  			goto next;
1921  
1922  		btrfs_info(fs_info,
1923  			"reclaiming chunk %llu with %llu%% used %llu%% unusable",
1924  				bg->start,
1925  				div64_u64(bg->used * 100, bg->length),
1926  				div64_u64(zone_unusable * 100, bg->length));
1927  		trace_btrfs_reclaim_block_group(bg);
1928  		reclaimed = bg->used;
1929  		ret = btrfs_relocate_chunk(fs_info, bg->start);
1930  		if (ret) {
1931  			btrfs_dec_block_group_ro(bg);
1932  			btrfs_err(fs_info, "error relocating chunk %llu",
1933  				  bg->start);
1934  			reclaimed = 0;
1935  			spin_lock(&space_info->lock);
1936  			space_info->reclaim_errors++;
1937  			if (READ_ONCE(space_info->periodic_reclaim))
1938  				space_info->periodic_reclaim_ready = false;
1939  			spin_unlock(&space_info->lock);
1940  		}
1941  		spin_lock(&space_info->lock);
1942  		space_info->reclaim_count++;
1943  		space_info->reclaim_bytes += reclaimed;
1944  		spin_unlock(&space_info->lock);
1945  
1946  next:
1947  		if (ret && !READ_ONCE(space_info->periodic_reclaim)) {
1948  			/* Refcount held by the reclaim_bgs list after splice. */
1949  			spin_lock(&fs_info->unused_bgs_lock);
1950  			/*
1951  			 * This block group might have been added to the unused
1952  			 * list during the above process. If not, move it back
1953  			 * to the reclaim list.
1954  			 */
1955  			if (list_empty(&bg->bg_list)) {
1956  				btrfs_get_block_group(bg);
1957  				list_add_tail(&bg->bg_list, &retry_list);
1958  			}
1959  			spin_unlock(&fs_info->unused_bgs_lock);
1960  		}
1961  		btrfs_put_block_group(bg);
1962  
1963  		mutex_unlock(&fs_info->reclaim_bgs_lock);
1964  		/*
1965  		 * Reclaiming all the block groups in the list can take really
1966  		 * long.  Prioritize cleaning up unused block groups.
1967  		 */
1968  		btrfs_delete_unused_bgs(fs_info);
1969  		/*
1970  		 * If we are interrupted by a balance, we can just bail out. The
1971  		 * cleaner thread will restart it if necessary.
1972  		 */
1973  		if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
1974  			goto end;
1975  		spin_lock(&fs_info->unused_bgs_lock);
1976  	}
1977  	spin_unlock(&fs_info->unused_bgs_lock);
1978  	mutex_unlock(&fs_info->reclaim_bgs_lock);
1979  end:
1980  	spin_lock(&fs_info->unused_bgs_lock);
1981  	list_splice_tail(&retry_list, &fs_info->reclaim_bgs);
1982  	spin_unlock(&fs_info->unused_bgs_lock);
1983  	btrfs_exclop_finish(fs_info);
1984  	sb_end_write(fs_info->sb);
1985  }
1986  
1987  void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
1988  {
1989  	btrfs_reclaim_sweep(fs_info);
1990  	spin_lock(&fs_info->unused_bgs_lock);
1991  	if (!list_empty(&fs_info->reclaim_bgs))
1992  		queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
1993  	spin_unlock(&fs_info->unused_bgs_lock);
1994  }
1995  
1996  void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg)
1997  {
1998  	struct btrfs_fs_info *fs_info = bg->fs_info;
1999  
2000  	spin_lock(&fs_info->unused_bgs_lock);
2001  	if (list_empty(&bg->bg_list)) {
2002  		btrfs_get_block_group(bg);
2003  		trace_btrfs_add_reclaim_block_group(bg);
2004  		list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs);
2005  	}
2006  	spin_unlock(&fs_info->unused_bgs_lock);
2007  }
2008  
2009  static int read_bg_from_eb(struct btrfs_fs_info *fs_info, const struct btrfs_key *key,
2010  			   const struct btrfs_path *path)
2011  {
2012  	struct btrfs_chunk_map *map;
2013  	struct btrfs_block_group_item bg;
2014  	struct extent_buffer *leaf;
2015  	int slot;
2016  	u64 flags;
2017  	int ret = 0;
2018  
2019  	slot = path->slots[0];
2020  	leaf = path->nodes[0];
2021  
2022  	map = btrfs_find_chunk_map(fs_info, key->objectid, key->offset);
2023  	if (!map) {
2024  		btrfs_err(fs_info,
2025  			  "logical %llu len %llu found bg but no related chunk",
2026  			  key->objectid, key->offset);
2027  		return -ENOENT;
2028  	}
2029  
2030  	if (map->start != key->objectid || map->chunk_len != key->offset) {
2031  		btrfs_err(fs_info,
2032  			"block group %llu len %llu mismatch with chunk %llu len %llu",
2033  			  key->objectid, key->offset, map->start, map->chunk_len);
2034  		ret = -EUCLEAN;
2035  		goto out_free_map;
2036  	}
2037  
2038  	read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
2039  			   sizeof(bg));
2040  	flags = btrfs_stack_block_group_flags(&bg) &
2041  		BTRFS_BLOCK_GROUP_TYPE_MASK;
2042  
2043  	if (flags != (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
2044  		btrfs_err(fs_info,
2045  "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
2046  			  key->objectid, key->offset, flags,
2047  			  (BTRFS_BLOCK_GROUP_TYPE_MASK & map->type));
2048  		ret = -EUCLEAN;
2049  	}
2050  
2051  out_free_map:
2052  	btrfs_free_chunk_map(map);
2053  	return ret;
2054  }
2055  
2056  static int find_first_block_group(struct btrfs_fs_info *fs_info,
2057  				  struct btrfs_path *path,
2058  				  const struct btrfs_key *key)
2059  {
2060  	struct btrfs_root *root = btrfs_block_group_root(fs_info);
2061  	int ret;
2062  	struct btrfs_key found_key;
2063  
2064  	btrfs_for_each_slot(root, key, &found_key, path, ret) {
2065  		if (found_key.objectid >= key->objectid &&
2066  		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
2067  			return read_bg_from_eb(fs_info, &found_key, path);
2068  		}
2069  	}
2070  	return ret;
2071  }
2072  
2073  static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2074  {
2075  	u64 extra_flags = chunk_to_extended(flags) &
2076  				BTRFS_EXTENDED_PROFILE_MASK;
2077  
2078  	write_seqlock(&fs_info->profiles_lock);
2079  	if (flags & BTRFS_BLOCK_GROUP_DATA)
2080  		fs_info->avail_data_alloc_bits |= extra_flags;
2081  	if (flags & BTRFS_BLOCK_GROUP_METADATA)
2082  		fs_info->avail_metadata_alloc_bits |= extra_flags;
2083  	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2084  		fs_info->avail_system_alloc_bits |= extra_flags;
2085  	write_sequnlock(&fs_info->profiles_lock);
2086  }
2087  
2088  /*
2089   * Map a physical disk address to a list of logical addresses.
2090   *
2091   * @fs_info:       the filesystem
2092   * @chunk_start:   logical address of block group
2093   * @physical:	   physical address to map to logical addresses
2094   * @logical:	   return array of logical addresses which map to @physical
2095   * @naddrs:	   length of @logical
2096   * @stripe_len:    size of IO stripe for the given block group
2097   *
2098   * Maps a particular @physical disk address to a list of @logical addresses.
2099   * Used primarily to exclude those portions of a block group that contain super
2100   * block copies.
2101   */
2102  int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
2103  		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
2104  {
2105  	struct btrfs_chunk_map *map;
2106  	u64 *buf;
2107  	u64 bytenr;
2108  	u64 data_stripe_length;
2109  	u64 io_stripe_size;
2110  	int i, nr = 0;
2111  	int ret = 0;
2112  
2113  	map = btrfs_get_chunk_map(fs_info, chunk_start, 1);
2114  	if (IS_ERR(map))
2115  		return -EIO;
2116  
2117  	data_stripe_length = map->stripe_size;
2118  	io_stripe_size = BTRFS_STRIPE_LEN;
2119  	chunk_start = map->start;
2120  
2121  	/* For RAID5/6 adjust to a full IO stripe length */
2122  	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
2123  		io_stripe_size = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
2124  
2125  	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
2126  	if (!buf) {
2127  		ret = -ENOMEM;
2128  		goto out;
2129  	}
2130  
2131  	for (i = 0; i < map->num_stripes; i++) {
2132  		bool already_inserted = false;
2133  		u32 stripe_nr;
2134  		u32 offset;
2135  		int j;
2136  
2137  		if (!in_range(physical, map->stripes[i].physical,
2138  			      data_stripe_length))
2139  			continue;
2140  
2141  		stripe_nr = (physical - map->stripes[i].physical) >>
2142  			    BTRFS_STRIPE_LEN_SHIFT;
2143  		offset = (physical - map->stripes[i].physical) &
2144  			 BTRFS_STRIPE_LEN_MASK;
2145  
2146  		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2147  				 BTRFS_BLOCK_GROUP_RAID10))
2148  			stripe_nr = div_u64(stripe_nr * map->num_stripes + i,
2149  					    map->sub_stripes);
2150  		/*
2151  		 * The remaining case is RAID56, which is already handled:
2152  		 * io_stripe_size was widened above to a full data stripe,
2153  		 * i.e. nr_data_stripes() * BTRFS_STRIPE_LEN.
2154  		 */
2155  		bytenr = chunk_start + stripe_nr * io_stripe_size + offset;
2156  
2157  		/* Ensure we don't add duplicate addresses */
2158  		for (j = 0; j < nr; j++) {
2159  			if (buf[j] == bytenr) {
2160  				already_inserted = true;
2161  				break;
2162  			}
2163  		}
2164  
2165  		if (!already_inserted)
2166  			buf[nr++] = bytenr;
2167  	}
2168  
2169  	*logical = buf;
2170  	*naddrs = nr;
2171  	*stripe_len = io_stripe_size;
2172  out:
2173  	btrfs_free_chunk_map(map);
2174  	return ret;
2175  }
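
/*
 * A sketch of the stripe math above for plain RAID0 (sub_stripes == 1,
 * so the div_u64() reduces to stripe_nr * num_stripes + i), with 64KiB
 * stripes. Example: 2 stripes, physical address 80KiB into stripe i = 1
 * gives device-stripe 1 at offset 16KiB, logical stripe 1 * 2 + 1 = 3,
 * hence chunk_start + 3 * 64KiB + 16KiB.
 */
#if 0
static u64 model_raid0_logical(u64 chunk_start, u64 stripe_phys_start,
			       u64 physical, u32 num_stripes, u32 i)
{
	const u64 rel = physical - stripe_phys_start;
	const u64 stripe_nr = rel >> BTRFS_STRIPE_LEN_SHIFT;
	const u64 offset = rel & BTRFS_STRIPE_LEN_MASK;

	return chunk_start +
	       ((stripe_nr * num_stripes + i) << BTRFS_STRIPE_LEN_SHIFT) +
	       offset;
}
#endif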
2176  
2177  static int exclude_super_stripes(struct btrfs_block_group *cache)
2178  {
2179  	struct btrfs_fs_info *fs_info = cache->fs_info;
2180  	const bool zoned = btrfs_is_zoned(fs_info);
2181  	u64 bytenr;
2182  	u64 *logical;
2183  	int stripe_len;
2184  	int i, nr, ret;
2185  
2186  	if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
2187  		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
2188  		cache->bytes_super += stripe_len;
2189  		ret = set_extent_bit(&fs_info->excluded_extents, cache->start,
2190  				     cache->start + stripe_len - 1,
2191  				     EXTENT_UPTODATE, NULL);
2192  		if (ret)
2193  			return ret;
2194  	}
2195  
2196  	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2197  		bytenr = btrfs_sb_offset(i);
2198  		ret = btrfs_rmap_block(fs_info, cache->start,
2199  				       bytenr, &logical, &nr, &stripe_len);
2200  		if (ret)
2201  			return ret;
2202  
2203  		/* Shouldn't have super stripes in sequential zones */
2204  		if (zoned && nr) {
2205  			kfree(logical);
2206  			btrfs_err(fs_info,
2207  			"zoned: block group %llu must not contain super block",
2208  				  cache->start);
2209  			return -EUCLEAN;
2210  		}
2211  
2212  		while (nr--) {
2213  			u64 len = min_t(u64, stripe_len,
2214  				cache->start + cache->length - logical[nr]);
2215  
2216  			cache->bytes_super += len;
2217  			ret = set_extent_bit(&fs_info->excluded_extents, logical[nr],
2218  					     logical[nr] + len - 1,
2219  					     EXTENT_UPTODATE, NULL);
2220  			if (ret) {
2221  				kfree(logical);
2222  				return ret;
2223  			}
2224  		}
2225  
2226  		kfree(logical);
2227  	}
2228  	return 0;
2229  }
2230  
2231  static struct btrfs_block_group *btrfs_create_block_group_cache(
2232  		struct btrfs_fs_info *fs_info, u64 start)
2233  {
2234  	struct btrfs_block_group *cache;
2235  
2236  	cache = kzalloc(sizeof(*cache), GFP_NOFS);
2237  	if (!cache)
2238  		return NULL;
2239  
2240  	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
2241  					GFP_NOFS);
2242  	if (!cache->free_space_ctl) {
2243  		kfree(cache);
2244  		return NULL;
2245  	}
2246  
2247  	cache->start = start;
2248  
2249  	cache->fs_info = fs_info;
2250  	cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
2251  
2252  	cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
2253  
2254  	refcount_set(&cache->refs, 1);
2255  	spin_lock_init(&cache->lock);
2256  	init_rwsem(&cache->data_rwsem);
2257  	INIT_LIST_HEAD(&cache->list);
2258  	INIT_LIST_HEAD(&cache->cluster_list);
2259  	INIT_LIST_HEAD(&cache->bg_list);
2260  	INIT_LIST_HEAD(&cache->ro_list);
2261  	INIT_LIST_HEAD(&cache->discard_list);
2262  	INIT_LIST_HEAD(&cache->dirty_list);
2263  	INIT_LIST_HEAD(&cache->io_list);
2264  	INIT_LIST_HEAD(&cache->active_bg_list);
2265  	btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
2266  	atomic_set(&cache->frozen, 0);
2267  	mutex_init(&cache->free_space_lock);
2268  
2269  	return cache;
2270  }
2271  
2272  /*
2273   * Iterate all chunks and verify that each of them has the corresponding block
2274   * group
2275   */
2276  static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
2277  {
2278  	u64 start = 0;
2279  	int ret = 0;
2280  
2281  	while (1) {
2282  		struct btrfs_chunk_map *map;
2283  		struct btrfs_block_group *bg;
2284  
2285  		/*
2286  		 * btrfs_find_chunk_map() will return the first chunk map
2287  		 * intersecting the range, so setting @length to 1 is enough to
2288  		 * get the first chunk.
2289  		 */
2290  		map = btrfs_find_chunk_map(fs_info, start, 1);
2291  		if (!map)
2292  			break;
2293  
2294  		bg = btrfs_lookup_block_group(fs_info, map->start);
2295  		if (!bg) {
2296  			btrfs_err(fs_info,
2297  	"chunk start=%llu len=%llu doesn't have corresponding block group",
2298  				     map->start, map->chunk_len);
2299  			ret = -EUCLEAN;
2300  			btrfs_free_chunk_map(map);
2301  			break;
2302  		}
2303  		if (bg->start != map->start || bg->length != map->chunk_len ||
2304  		    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
2305  		    (map->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
2306  			btrfs_err(fs_info,
2307  "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
2308  				map->start, map->chunk_len,
2309  				map->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
2310  				bg->start, bg->length,
2311  				bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
2312  			ret = -EUCLEAN;
2313  			btrfs_free_chunk_map(map);
2314  			btrfs_put_block_group(bg);
2315  			break;
2316  		}
2317  		start = map->start + map->chunk_len;
2318  		btrfs_free_chunk_map(map);
2319  		btrfs_put_block_group(bg);
2320  	}
2321  	return ret;
2322  }
2323  
2324  static int read_one_block_group(struct btrfs_fs_info *info,
2325  				struct btrfs_block_group_item *bgi,
2326  				const struct btrfs_key *key,
2327  				int need_clear)
2328  {
2329  	struct btrfs_block_group *cache;
2330  	const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
2331  	int ret;
2332  
2333  	ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
2334  
2335  	cache = btrfs_create_block_group_cache(info, key->objectid);
2336  	if (!cache)
2337  		return -ENOMEM;
2338  
2339  	cache->length = key->offset;
2340  	cache->used = btrfs_stack_block_group_used(bgi);
2341  	cache->commit_used = cache->used;
2342  	cache->flags = btrfs_stack_block_group_flags(bgi);
2343  	cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi);
2344  
2345  	set_free_space_tree_thresholds(cache);
2346  
2347  	if (need_clear) {
2348  		/*
2349  		 * When we mount with old space cache, we need to
2350  		 * set BTRFS_DC_CLEAR and set the dirty flag.
2351  		 *
2352  		 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
2353  		 *    truncate the old free space cache inode and
2354  		 *    set up a new one.
2355  		 * b) Setting the 'dirty flag' makes sure that we flush
2356  		 *    the new space cache info onto disk.
2357  		 */
2358  		if (btrfs_test_opt(info, SPACE_CACHE))
2359  			cache->disk_cache_state = BTRFS_DC_CLEAR;
2360  	}
2361  	if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
2362  	    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
2363  			btrfs_err(info,
2364  "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
2365  				  cache->start);
2366  			ret = -EINVAL;
2367  			goto error;
2368  	}
2369  
2370  	ret = btrfs_load_block_group_zone_info(cache, false);
2371  	if (ret) {
2372  		btrfs_err(info, "zoned: failed to load zone info of bg %llu",
2373  			  cache->start);
2374  		goto error;
2375  	}
2376  
2377  	/*
2378  	 * We need to exclude the super stripes now so that the space info has
2379  	 * super bytes accounted for, otherwise we'll think we have more space
2380  	 * than we actually do.
2381  	 */
2382  	ret = exclude_super_stripes(cache);
2383  	if (ret) {
2384  		/* We may have excluded something, so call this just in case. */
2385  		btrfs_free_excluded_extents(cache);
2386  		goto error;
2387  	}
2388  
2389  	/*
2390  	 * For a zoned filesystem, space after the allocation offset is the only
2391  	 * free space for a block group. So, we don't need any caching work.
2392  	 * btrfs_calc_zone_unusable() will set the amount of free space and
2393  	 * zone_unusable space.
2394  	 *
2395  	 * For a regular filesystem, check for two cases: either we are full, and
2396  	 * therefore don't need to bother with the caching work since we won't
2397  	 * find any space, or we are empty, and we can just add all the space
2398  	 * in and be done with it.  This saves us _a_lot_ of time, particularly
2399  	 * in the full case.
2400  	 */
2401  	if (btrfs_is_zoned(info)) {
2402  		btrfs_calc_zone_unusable(cache);
2403  		/* Should not have any excluded extents. Just in case, though. */
2404  		btrfs_free_excluded_extents(cache);
2405  	} else if (cache->length == cache->used) {
2406  		cache->cached = BTRFS_CACHE_FINISHED;
2407  		btrfs_free_excluded_extents(cache);
2408  	} else if (cache->used == 0) {
2409  		cache->cached = BTRFS_CACHE_FINISHED;
2410  		ret = btrfs_add_new_free_space(cache, cache->start,
2411  					       cache->start + cache->length, NULL);
2412  		btrfs_free_excluded_extents(cache);
2413  		if (ret)
2414  			goto error;
2415  	}
2416  
2417  	ret = btrfs_add_block_group_cache(info, cache);
2418  	if (ret) {
2419  		btrfs_remove_free_space_cache(cache);
2420  		goto error;
2421  	}
2422  	trace_btrfs_add_block_group(info, cache, 0);
2423  	btrfs_add_bg_to_space_info(info, cache);
2424  
2425  	set_avail_alloc_bits(info, cache->flags);
2426  	if (btrfs_chunk_writeable(info, cache->start)) {
2427  		if (cache->used == 0) {
2428  			ASSERT(list_empty(&cache->bg_list));
2429  			if (btrfs_test_opt(info, DISCARD_ASYNC))
2430  				btrfs_discard_queue_work(&info->discard_ctl, cache);
2431  			else
2432  				btrfs_mark_bg_unused(cache);
2433  		}
2434  	} else {
2435  		inc_block_group_ro(cache, 1);
2436  	}
2437  
2438  	return 0;
2439  error:
2440  	btrfs_put_block_group(cache);
2441  	return ret;
2442  }
2443  
2444  static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
2445  {
2446  	struct rb_node *node;
2447  	int ret = 0;
2448  
2449  	for (node = rb_first_cached(&fs_info->mapping_tree); node; node = rb_next(node)) {
2450  		struct btrfs_chunk_map *map;
2451  		struct btrfs_block_group *bg;
2452  
2453  		map = rb_entry(node, struct btrfs_chunk_map, rb_node);
2454  		bg = btrfs_create_block_group_cache(fs_info, map->start);
2455  		if (!bg) {
2456  			ret = -ENOMEM;
2457  			break;
2458  		}
2459  
2460  		/* Fill dummy cache as FULL */
2461  		bg->length = map->chunk_len;
2462  		bg->flags = map->type;
2463  		bg->cached = BTRFS_CACHE_FINISHED;
2464  		bg->used = map->chunk_len;
2466  		ret = btrfs_add_block_group_cache(fs_info, bg);
2467  		/*
2468  		 * We may have some valid block group cache added already, in
2469  		 * that case we skip to the next one.
2470  		 */
2471  		if (ret == -EEXIST) {
2472  			ret = 0;
2473  			btrfs_put_block_group(bg);
2474  			continue;
2475  		}
2476  
2477  		if (ret) {
2478  			btrfs_remove_free_space_cache(bg);
2479  			btrfs_put_block_group(bg);
2480  			break;
2481  		}
2482  
2483  		btrfs_add_bg_to_space_info(fs_info, bg);
2484  
2485  		set_avail_alloc_bits(fs_info, bg->flags);
2486  	}
2487  	if (!ret)
2488  		btrfs_init_global_block_rsv(fs_info);
2489  	return ret;
2490  }
2491  
2492  int btrfs_read_block_groups(struct btrfs_fs_info *info)
2493  {
2494  	struct btrfs_root *root = btrfs_block_group_root(info);
2495  	struct btrfs_path *path;
2496  	int ret;
2497  	struct btrfs_block_group *cache;
2498  	struct btrfs_space_info *space_info;
2499  	struct btrfs_key key;
2500  	int need_clear = 0;
2501  	u64 cache_gen;
2502  
2503  	/*
2504  	 * Either no extent root (with ibadroots rescue option) or we have
2505  	 * unsupported RO options. The fs can never be mounted read-write, so no
2506  	 * need to waste time searching block group items.
2507  	 *
2508  	 * This also allows new extent tree related changes to be RO compat,
2509  	 * no need for a full incompat flag.
2510  	 */
2511  	if (!root || (btrfs_super_compat_ro_flags(info->super_copy) &
2512  		      ~BTRFS_FEATURE_COMPAT_RO_SUPP))
2513  		return fill_dummy_bgs(info);
2514  
2515  	key.objectid = 0;
2516  	key.offset = 0;
2517  	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2518  	path = btrfs_alloc_path();
2519  	if (!path)
2520  		return -ENOMEM;
2521  
2522  	cache_gen = btrfs_super_cache_generation(info->super_copy);
2523  	if (btrfs_test_opt(info, SPACE_CACHE) &&
2524  	    btrfs_super_generation(info->super_copy) != cache_gen)
2525  		need_clear = 1;
2526  	if (btrfs_test_opt(info, CLEAR_CACHE))
2527  		need_clear = 1;
2528  
2529  	while (1) {
2530  		struct btrfs_block_group_item bgi;
2531  		struct extent_buffer *leaf;
2532  		int slot;
2533  
2534  		ret = find_first_block_group(info, path, &key);
2535  		if (ret > 0)
2536  			break;
2537  		if (ret != 0)
2538  			goto error;
2539  
2540  		leaf = path->nodes[0];
2541  		slot = path->slots[0];
2542  
2543  		read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
2544  				   sizeof(bgi));
2545  
2546  		btrfs_item_key_to_cpu(leaf, &key, slot);
2547  		btrfs_release_path(path);
2548  		ret = read_one_block_group(info, &bgi, &key, need_clear);
2549  		if (ret < 0)
2550  			goto error;
2551  		key.objectid += key.offset;
2552  		key.offset = 0;
2553  	}
2554  	btrfs_release_path(path);
2555  
2556  	list_for_each_entry(space_info, &info->space_info, list) {
2557  		int i;
2558  
2559  		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2560  			if (list_empty(&space_info->block_groups[i]))
2561  				continue;
2562  			cache = list_first_entry(&space_info->block_groups[i],
2563  						 struct btrfs_block_group,
2564  						 list);
2565  			btrfs_sysfs_add_block_group_type(cache);
2566  		}
2567  
2568  		if (!(btrfs_get_alloc_profile(info, space_info->flags) &
2569  		      (BTRFS_BLOCK_GROUP_RAID10 |
2570  		       BTRFS_BLOCK_GROUP_RAID1_MASK |
2571  		       BTRFS_BLOCK_GROUP_RAID56_MASK |
2572  		       BTRFS_BLOCK_GROUP_DUP)))
2573  			continue;
2574  		/*
2575  		 * Avoid allocating from un-mirrored block group if there are
2576  		 * mirrored block groups.
2577  		 */
2578  		list_for_each_entry(cache,
2579  				&space_info->block_groups[BTRFS_RAID_RAID0],
2580  				list)
2581  			inc_block_group_ro(cache, 1);
2582  		list_for_each_entry(cache,
2583  				&space_info->block_groups[BTRFS_RAID_SINGLE],
2584  				list)
2585  			inc_block_group_ro(cache, 1);
2586  	}
2587  
2588  	btrfs_init_global_block_rsv(info);
2589  	ret = check_chunk_block_group_mappings(info);
2590  error:
2591  	btrfs_free_path(path);
2592  	/*
2593  	 * We've hit some error while reading the extent tree, and have the
2594  	 * rescue=ibadroots mount option set.
2595  	 * Try to fill the tree using dummy block groups so that the user can
2596  	 * continue to mount and grab their data.
2597  	 */
2598  	if (ret && btrfs_test_opt(info, IGNOREBADROOTS))
2599  		ret = fill_dummy_bgs(info);
2600  	return ret;
2601  }
2602  
2603  /*
2604   * This function, insert_block_group_item(), belongs to the phase 2 of chunk
2605   * allocation.
2606   *
2607   * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2608   * phases.
2609   */
2610  static int insert_block_group_item(struct btrfs_trans_handle *trans,
2611  				   struct btrfs_block_group *block_group)
2612  {
2613  	struct btrfs_fs_info *fs_info = trans->fs_info;
2614  	struct btrfs_block_group_item bgi;
2615  	struct btrfs_root *root = btrfs_block_group_root(fs_info);
2616  	struct btrfs_key key;
2617  	u64 old_commit_used;
2618  	int ret;
2619  
2620  	spin_lock(&block_group->lock);
2621  	btrfs_set_stack_block_group_used(&bgi, block_group->used);
2622  	btrfs_set_stack_block_group_chunk_objectid(&bgi,
2623  						   block_group->global_root_id);
2624  	btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
2625  	old_commit_used = block_group->commit_used;
2626  	block_group->commit_used = block_group->used;
2627  	key.objectid = block_group->start;
2628  	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2629  	key.offset = block_group->length;
2630  	spin_unlock(&block_group->lock);
2631  
2632  	ret = btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
2633  	if (ret < 0) {
2634  		spin_lock(&block_group->lock);
2635  		block_group->commit_used = old_commit_used;
2636  		spin_unlock(&block_group->lock);
2637  	}
2638  
2639  	return ret;
2640  }
2641  
2642  static int insert_dev_extent(struct btrfs_trans_handle *trans,
2643  			     const struct btrfs_device *device, u64 chunk_offset,
2644  			     u64 start, u64 num_bytes)
2645  {
2646  	struct btrfs_fs_info *fs_info = device->fs_info;
2647  	struct btrfs_root *root = fs_info->dev_root;
2648  	struct btrfs_path *path;
2649  	struct btrfs_dev_extent *extent;
2650  	struct extent_buffer *leaf;
2651  	struct btrfs_key key;
2652  	int ret;
2653  
2654  	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
2655  	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
2656  	path = btrfs_alloc_path();
2657  	if (!path)
2658  		return -ENOMEM;
2659  
2660  	key.objectid = device->devid;
2661  	key.type = BTRFS_DEV_EXTENT_KEY;
2662  	key.offset = start;
2663  	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent));
2664  	if (ret)
2665  		goto out;
2666  
2667  	leaf = path->nodes[0];
2668  	extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
2669  	btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID);
2670  	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
2671  					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
2672  	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
2673  
2674  	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
2675  	btrfs_mark_buffer_dirty(trans, leaf);
2676  out:
2677  	btrfs_free_path(path);
2678  	return ret;
2679  }
2680  
2681  /*
2682   * This function belongs to phase 2.
2683   *
2684   * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2685   * phases.
2686   */
2687  static int insert_dev_extents(struct btrfs_trans_handle *trans,
2688  				   u64 chunk_offset, u64 chunk_size)
2689  {
2690  	struct btrfs_fs_info *fs_info = trans->fs_info;
2691  	struct btrfs_device *device;
2692  	struct btrfs_chunk_map *map;
2693  	u64 dev_offset;
2694  	int i;
2695  	int ret = 0;
2696  
2697  	map = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
2698  	if (IS_ERR(map))
2699  		return PTR_ERR(map);
2700  
2701  	/*
2702  	 * Take the device list mutex to prevent races with the final phase of
2703  	 * a device replace operation that replaces the device object associated
2704  	 * with the map's stripes, because the device object's id can change
2705  	 * at any time during that final phase of the device replace operation
2706  	 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
2707  	 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
2708  	 * resulting in persisting a device extent item with such ID.
2709  	 */
2710  	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2711  	for (i = 0; i < map->num_stripes; i++) {
2712  		device = map->stripes[i].dev;
2713  		dev_offset = map->stripes[i].physical;
2714  
2715  		ret = insert_dev_extent(trans, device, chunk_offset, dev_offset,
2716  					map->stripe_size);
2717  		if (ret)
2718  			break;
2719  	}
2720  	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2721  
2722  	btrfs_free_chunk_map(map);
2723  	return ret;
2724  }
2725  
2726  /*
2727   * This function, btrfs_create_pending_block_groups(), belongs to the phase 2 of
2728   * chunk allocation.
2729   *
2730   * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2731   * phases.
2732   */
2733  void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
2734  {
2735  	struct btrfs_fs_info *fs_info = trans->fs_info;
2736  	struct btrfs_block_group *block_group;
2737  	int ret = 0;
2738  
2739  	while (!list_empty(&trans->new_bgs)) {
2740  		int index;
2741  
2742  		block_group = list_first_entry(&trans->new_bgs,
2743  					       struct btrfs_block_group,
2744  					       bg_list);
2745  		if (ret)
2746  			goto next;
2747  
2748  		index = btrfs_bg_flags_to_raid_index(block_group->flags);
2749  
2750  		ret = insert_block_group_item(trans, block_group);
2751  		if (ret)
2752  			btrfs_abort_transaction(trans, ret);
2753  		if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
2754  			      &block_group->runtime_flags)) {
2755  			mutex_lock(&fs_info->chunk_mutex);
2756  			ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
2757  			mutex_unlock(&fs_info->chunk_mutex);
2758  			if (ret)
2759  				btrfs_abort_transaction(trans, ret);
2760  		}
2761  		ret = insert_dev_extents(trans, block_group->start,
2762  					 block_group->length);
2763  		if (ret)
2764  			btrfs_abort_transaction(trans, ret);
2765  		add_block_group_free_space(trans, block_group);
2766  
2767  		/*
2768  		 * If we restriped during balance, we may have added a new raid
2769  		 * type, so now add the sysfs entries when it is safe to do so.
2770  		 * We don't have to worry about locking here as it's handled in
2771  		 * btrfs_sysfs_add_block_group_type.
2772  		 */
2773  		if (block_group->space_info->block_group_kobjs[index] == NULL)
2774  			btrfs_sysfs_add_block_group_type(block_group);
2775  
2776  		/* Already aborted the transaction if it failed. */
2777  next:
2778  		btrfs_dec_delayed_refs_rsv_bg_inserts(fs_info);
2779  		list_del_init(&block_group->bg_list);
2780  		clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
2781  
2782  		/*
2783  		 * If the block group is still unused, add it to the list of
2784  		 * unused block groups. The block group may have been created in
2785  		 * order to satisfy a space reservation, in which case the
2786  		 * extent allocation only happens later. But often we don't
2787  		 * actually need to allocate space that we previously reserved,
2788  		 * so the block group may become unused for a long time. For
2789  		 * example for metadata we generally reserve space for a worst
2790  		 * possible scenario, but then don't end up allocating all that
2791  		 * space or none at all (due to no need to COW, extent buffers
2792  		 * were already COWed in the current transaction and still
2793  		 * unwritten, tree heights lower than the maximum possible
2794  		 * height, etc). For data we generally reserve the exact amount
2795  		 * of space we are going to allocate later, the exception is
2796  		 * when using compression, as we must reserve space based on the
2797  		 * uncompressed data size, because the compression is only done
2798  		 * when writeback triggered and we don't know how much space we
2799  		 * are actually going to need, so we reserve the uncompressed
2800  		 * size because the data may be uncompressible in the worst case.
2801  		 */
2802  		if (ret == 0) {
2803  			bool used;
2804  
2805  			spin_lock(&block_group->lock);
2806  			used = btrfs_is_block_group_used(block_group);
2807  			spin_unlock(&block_group->lock);
2808  
2809  			if (!used)
2810  				btrfs_mark_bg_unused(block_group);
2811  		}
2812  	}
2813  	btrfs_trans_release_chunk_metadata(trans);
2814  }
2815  
2816  /*
2817   * For extent tree v2 we use the block_group_item->chunk_offset to point at our
2818   * global root id.  For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID.
2819   */
2820  static u64 calculate_global_root_id(const struct btrfs_fs_info *fs_info, u64 offset)
2821  {
2822  	u64 div = SZ_1G;
2823  	u64 index;
2824  
2825  	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
2826  		return BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2827  
2828  	/* If we have a smaller fs (10GiB or less), index based on 128MiB. */
2829  	if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL))
2830  		div = SZ_128M;
2831  
2832  	offset = div64_u64(offset, div);
2833  	div64_u64_rem(offset, fs_info->nr_global_roots, &index);
2834  	return index;
2835  }
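
/*
 * Worked example for the mapping above, assuming extent-tree-v2 with
 * nr_global_roots = 3 on a filesystem larger than 10GiB (div = 1GiB):
 * a block group at offset 5GiB yields (5GiB / 1GiB) % 3 = 2, so it is
 * tied to global root index 2. Condensed model:
 */
#if 0
static u64 model_global_root_id(u64 offset, u64 div, u64 nr_global_roots)
{
	/* Plain '/' and '%' stand in for div64_u64() and div64_u64_rem(). */
	return (offset / div) % nr_global_roots;
}
#endif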
2836  
2837  struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
2838  						 u64 type,
2839  						 u64 chunk_offset, u64 size)
2840  {
2841  	struct btrfs_fs_info *fs_info = trans->fs_info;
2842  	struct btrfs_block_group *cache;
2843  	int ret;
2844  
2845  	btrfs_set_log_full_commit(trans);
2846  
2847  	cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
2848  	if (!cache)
2849  		return ERR_PTR(-ENOMEM);
2850  
2851  	/*
2852  	 * Mark it as new before adding it to the rbtree of block groups or any
2853  	 * list, so that no other task finds it and calls btrfs_mark_bg_unused()
2854  	 * before the new flag is set.
2855  	 */
2856  	set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags);
2857  
2858  	cache->length = size;
2859  	set_free_space_tree_thresholds(cache);
2860  	cache->flags = type;
2861  	cache->cached = BTRFS_CACHE_FINISHED;
2862  	cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
2863  
2864  	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
2865  		set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
2866  
2867  	ret = btrfs_load_block_group_zone_info(cache, true);
2868  	if (ret) {
2869  		btrfs_put_block_group(cache);
2870  		return ERR_PTR(ret);
2871  	}
2872  
2873  	ret = exclude_super_stripes(cache);
2874  	if (ret) {
2875  		/* We may have excluded something, so call this just in case */
2876  		btrfs_free_excluded_extents(cache);
2877  		btrfs_put_block_group(cache);
2878  		return ERR_PTR(ret);
2879  	}
2880  
2881  	ret = btrfs_add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
2882  	btrfs_free_excluded_extents(cache);
2883  	if (ret) {
2884  		btrfs_put_block_group(cache);
2885  		return ERR_PTR(ret);
2886  	}
2887  
2888  	/*
2889  	 * Ensure the corresponding space_info object is created and
2890  	 * assigned to our block group. We want our bg to be added to the rbtree
2891  	 * with its ->space_info set.
2892  	 */
2893  	cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
2894  	ASSERT(cache->space_info);
2895  
2896  	ret = btrfs_add_block_group_cache(fs_info, cache);
2897  	if (ret) {
2898  		btrfs_remove_free_space_cache(cache);
2899  		btrfs_put_block_group(cache);
2900  		return ERR_PTR(ret);
2901  	}
2902  
2903  	/*
2904  	 * Now that our block group has its ->space_info set and is inserted in
2905  	 * the rbtree, update the space info's counters.
2906  	 */
2907  	trace_btrfs_add_block_group(fs_info, cache, 1);
2908  	btrfs_add_bg_to_space_info(fs_info, cache);
2909  	btrfs_update_global_block_rsv(fs_info);
2910  
2911  #ifdef CONFIG_BTRFS_DEBUG
2912  	if (btrfs_should_fragment_free_space(cache)) {
2913  		cache->space_info->bytes_used += size >> 1;
2914  		fragment_free_space(cache);
2915  	}
2916  #endif
2917  
2918  	list_add_tail(&cache->bg_list, &trans->new_bgs);
2919  	btrfs_inc_delayed_refs_rsv_bg_inserts(fs_info);
2920  
2921  	set_avail_alloc_bits(fs_info, type);
2922  	return cache;
2923  }
2924  
2925  /*
2926   * Mark one block group RO, can be called several times for the same block
2927   * group.
2928   *
2929   * @cache:		the destination block group
2930   * @do_chunk_alloc:	whether we need to do chunk pre-allocation; this is to
2931   * 			ensure we still have some free space after marking this
2932   * 			block group RO.
2933   */
2934  int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
2935  			     bool do_chunk_alloc)
2936  {
2937  	struct btrfs_fs_info *fs_info = cache->fs_info;
2938  	struct btrfs_trans_handle *trans;
2939  	struct btrfs_root *root = btrfs_block_group_root(fs_info);
2940  	u64 alloc_flags;
2941  	int ret;
2942  	bool dirty_bg_running;
2943  
2944  	/*
2945  	 * This can only happen when we are doing a read-only scrub on a
2946  	 * read-only mount.
2947  	 * In that case we should not start a new transaction on read-only fs.
2948  	 * Thus here we skip all chunk allocations.
2949  	 */
2950  	if (sb_rdonly(fs_info->sb)) {
2951  		mutex_lock(&fs_info->ro_block_group_mutex);
2952  		ret = inc_block_group_ro(cache, 0);
2953  		mutex_unlock(&fs_info->ro_block_group_mutex);
2954  		return ret;
2955  	}
2956  
2957  	do {
2958  		trans = btrfs_join_transaction(root);
2959  		if (IS_ERR(trans))
2960  			return PTR_ERR(trans);
2961  
2962  		dirty_bg_running = false;
2963  
2964  		/*
2965  		 * We're not allowed to set block groups readonly after the dirty
2966  		 * block group cache has started writing.  If it already started,
2967  		 * back off and let this transaction commit.
2968  		 */
2969  		mutex_lock(&fs_info->ro_block_group_mutex);
2970  		if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
2971  			u64 transid = trans->transid;
2972  
2973  			mutex_unlock(&fs_info->ro_block_group_mutex);
2974  			btrfs_end_transaction(trans);
2975  
2976  			ret = btrfs_wait_for_commit(fs_info, transid);
2977  			if (ret)
2978  				return ret;
2979  			dirty_bg_running = true;
2980  		}
2981  	} while (dirty_bg_running);
2982  
2983  	if (do_chunk_alloc) {
2984  		/*
2985  		 * If we are changing raid levels, try to allocate a
2986  		 * corresponding block group with the new raid level.
2987  		 */
2988  		alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2989  		if (alloc_flags != cache->flags) {
2990  			ret = btrfs_chunk_alloc(trans, alloc_flags,
2991  						CHUNK_ALLOC_FORCE);
2992  			/*
2993  			 * ENOSPC is allowed here, we may have enough space
2994  			 * already allocated at the new raid level to carry on
2995  			 */
2996  			if (ret == -ENOSPC)
2997  				ret = 0;
2998  			if (ret < 0)
2999  				goto out;
3000  		}
3001  	}
3002  
3003  	ret = inc_block_group_ro(cache, 0);
3004  	if (!ret)
3005  		goto out;
3006  	if (ret == -ETXTBSY)
3007  		goto unlock_out;
3008  
3009  	/*
3010  	 * Skip chunk allocation if the bg is SYSTEM, this is to avoid a system
3011  	 * chunk allocation storm exhausting the system chunk array.  Otherwise
3012  	 * we still want to try our best to mark the block group read-only.
3013  	 */
3014  	if (!do_chunk_alloc && ret == -ENOSPC &&
3015  	    (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
3016  		goto unlock_out;
3017  
3018  	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
3019  	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
3020  	if (ret < 0)
3021  		goto out;
3022  	/*
3023  	 * We have allocated a new chunk. We also need to activate that chunk to
3024  	 * grant metadata tickets for zoned filesystem.
3025  	 */
3026  	ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true);
3027  	if (ret < 0)
3028  		goto out;
3029  
3030  	ret = inc_block_group_ro(cache, 0);
3031  	if (ret == -ETXTBSY)
3032  		goto unlock_out;
3033  out:
3034  	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
3035  		alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
3036  		mutex_lock(&fs_info->chunk_mutex);
3037  		check_system_chunk(trans, alloc_flags);
3038  		mutex_unlock(&fs_info->chunk_mutex);
3039  	}
3040  unlock_out:
3041  	mutex_unlock(&fs_info->ro_block_group_mutex);
3042  
3043  	btrfs_end_transaction(trans);
3044  	return ret;
3045  }
3046  
3047  void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
3048  {
3049  	struct btrfs_space_info *sinfo = cache->space_info;
3050  	u64 num_bytes;
3051  
3052  	BUG_ON(!cache->ro);
3053  
3054  	spin_lock(&sinfo->lock);
3055  	spin_lock(&cache->lock);
3056  	if (!--cache->ro) {
3057  		if (btrfs_is_zoned(cache->fs_info)) {
3058  			/* Migrate zone_unusable bytes back */
3059  			cache->zone_unusable =
3060  				(cache->alloc_offset - cache->used - cache->pinned -
3061  				 cache->reserved) +
3062  				(cache->length - cache->zone_capacity);
3063  			btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
3064  								    cache->zone_unusable);
3065  			sinfo->bytes_readonly -= cache->zone_unusable;
3066  		}
3067  		num_bytes = cache->length - cache->reserved -
3068  			    cache->pinned - cache->bytes_super -
3069  			    cache->zone_unusable - cache->used;
3070  		sinfo->bytes_readonly -= num_bytes;
3071  		list_del_init(&cache->ro_list);
3072  	}
3073  	spin_unlock(&cache->lock);
3074  	spin_unlock(&sinfo->lock);
3075  }
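
/*
 * For intuition on the zoned math in btrfs_dec_block_group_ro() (a sketch
 * with illustrative numbers, not from the original source): a block group of
 * length 1024MiB with a zone_capacity of 900MiB, alloc_offset 500MiB, used
 * 400MiB and nothing pinned or reserved ends up with zone_unusable =
 * (500 - 400 - 0 - 0) + (1024 - 900) = 224MiB: the bytes that were written
 * but since freed (not reusable until a zone reset) plus the capacity
 * shortfall of the zone.
 */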
3076  
3077  static int update_block_group_item(struct btrfs_trans_handle *trans,
3078  				   struct btrfs_path *path,
3079  				   struct btrfs_block_group *cache)
3080  {
3081  	struct btrfs_fs_info *fs_info = trans->fs_info;
3082  	int ret;
3083  	struct btrfs_root *root = btrfs_block_group_root(fs_info);
3084  	unsigned long bi;
3085  	struct extent_buffer *leaf;
3086  	struct btrfs_block_group_item bgi;
3087  	struct btrfs_key key;
3088  	u64 old_commit_used;
3089  	u64 used;
3090  
3091  	/*
3092  	 * Block group item updates can be triggered outside of the transaction
3093  	 * commit critical section, thus we need a consistent view of used bytes.
3094  	 * We cannot use cache->used directly outside of the spin lock, as it
3095  	 * may be changed.
3096  	 */
3097  	spin_lock(&cache->lock);
3098  	old_commit_used = cache->commit_used;
3099  	used = cache->used;
3100  	/* No change in used bytes, can safely skip it. */
3101  	if (cache->commit_used == used) {
3102  		spin_unlock(&cache->lock);
3103  		return 0;
3104  	}
3105  	cache->commit_used = used;
3106  	spin_unlock(&cache->lock);
3107  
3108  	key.objectid = cache->start;
3109  	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
3110  	key.offset = cache->length;
3111  
3112  	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3113  	if (ret) {
3114  		if (ret > 0)
3115  			ret = -ENOENT;
3116  		goto fail;
3117  	}
3118  
3119  	leaf = path->nodes[0];
3120  	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3121  	btrfs_set_stack_block_group_used(&bgi, used);
3122  	btrfs_set_stack_block_group_chunk_objectid(&bgi,
3123  						   cache->global_root_id);
3124  	btrfs_set_stack_block_group_flags(&bgi, cache->flags);
3125  	write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
3126  	btrfs_mark_buffer_dirty(trans, leaf);
3127  fail:
3128  	btrfs_release_path(path);
3129  	/*
3130  	 * We didn't update the block group item, need to revert commit_used
3131  	 * unless the block group item didn't exist yet - this is to prevent a
3132  	 * race with a concurrent insertion of the block group item, with
3133  	 * insert_block_group_item(), that happened just after we attempted to
3134  	 * update. In that case we would reset commit_used to 0 just after the
3135  	 * insertion set it to a value greater than 0 - if the block group later
3136  	 * ends up with 0 used bytes, we would incorrectly skip its update.
3137  	 */
3138  	if (ret < 0 && ret != -ENOENT) {
3139  		spin_lock(&cache->lock);
3140  		cache->commit_used = old_commit_used;
3141  		spin_unlock(&cache->lock);
3142  	}
3143  	return ret;
3145  }
3146  
3147  static int cache_save_setup(struct btrfs_block_group *block_group,
3148  			    struct btrfs_trans_handle *trans,
3149  			    struct btrfs_path *path)
3150  {
3151  	struct btrfs_fs_info *fs_info = block_group->fs_info;
3152  	struct inode *inode = NULL;
3153  	struct extent_changeset *data_reserved = NULL;
3154  	u64 alloc_hint = 0;
3155  	int dcs = BTRFS_DC_ERROR;
3156  	u64 cache_size = 0;
3157  	int retries = 0;
3158  	int ret = 0;
3159  
3160  	if (!btrfs_test_opt(fs_info, SPACE_CACHE))
3161  		return 0;
3162  
3163  	/*
3164  	 * If this block group is smaller than 100 megs don't bother caching the
3165  	 * block group.
3166  	 */
3167  	if (block_group->length < (100 * SZ_1M)) {
3168  		spin_lock(&block_group->lock);
3169  		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3170  		spin_unlock(&block_group->lock);
3171  		return 0;
3172  	}
3173  
3174  	if (TRANS_ABORTED(trans))
3175  		return 0;
3176  again:
3177  	inode = lookup_free_space_inode(block_group, path);
3178  	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3179  		ret = PTR_ERR(inode);
3180  		btrfs_release_path(path);
3181  		goto out;
3182  	}
3183  
3184  	if (IS_ERR(inode)) {
3185  		BUG_ON(retries);
3186  		retries++;
3187  
3188  		if (block_group->ro)
3189  			goto out_free;
3190  
3191  		ret = create_free_space_inode(trans, block_group, path);
3192  		if (ret)
3193  			goto out_free;
3194  		goto again;
3195  	}
3196  
3197  	/*
3198  	 * We want to set the generation to 0, that way if anything goes wrong
3199  	 * from here on out we know not to trust this cache when we load up next
3200  	 * time.
3201  	 */
3202  	BTRFS_I(inode)->generation = 0;
3203  	ret = btrfs_update_inode(trans, BTRFS_I(inode));
3204  	if (ret) {
3205  		/*
3206  		 * So theoretically we could recover from this, simply set the
3207  		 * super cache generation to 0 so we know to invalidate the
3208  		 * cache, but then we'd have to keep track of the block groups
3209  		 * that fail this way so we know we _have_ to reset this cache
3210  		 * before the next commit or risk reading a stale cache.  So to
3211  		 * limit our exposure to horrible edge cases, let's just abort the
3212  		 * transaction - this only happens in really bad situations
3213  		 * anyway.
3214  		 */
3215  		btrfs_abort_transaction(trans, ret);
3216  		goto out_put;
3217  	}
3218  	WARN_ON(ret);
3219  
3220  	/* We've already set up this transaction, go ahead and exit */
3221  	if (block_group->cache_generation == trans->transid &&
3222  	    i_size_read(inode)) {
3223  		dcs = BTRFS_DC_SETUP;
3224  		goto out_put;
3225  	}
3226  
3227  	if (i_size_read(inode) > 0) {
3228  		ret = btrfs_check_trunc_cache_free_space(fs_info,
3229  					&fs_info->global_block_rsv);
3230  		if (ret)
3231  			goto out_put;
3232  
3233  		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
3234  		if (ret)
3235  			goto out_put;
3236  	}
3237  
3238  	spin_lock(&block_group->lock);
3239  	if (block_group->cached != BTRFS_CACHE_FINISHED ||
3240  	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
3241  		/*
3242  		 * Don't bother trying to write stuff out _if_
3243  		 * a) we're not cached,
3244  		 * b) we're mounted with the nospace_cache option,
3245  		 * c) we're using the v2 space_cache (FREE_SPACE_TREE).
3246  		 */
3247  		dcs = BTRFS_DC_WRITTEN;
3248  		spin_unlock(&block_group->lock);
3249  		goto out_put;
3250  	}
3251  	spin_unlock(&block_group->lock);
3252  
3253  	/*
3254  	 * We hit an ENOSPC when setting up the cache in this transaction, just
3255  	 * skip doing the setup, we've already cleared the cache so we're safe.
3256  	 */
3257  	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3258  		ret = -ENOSPC;
3259  		goto out_put;
3260  	}
3261  
3262  	/*
3263  	 * Try to preallocate enough space based on how big the block group is.
3264  	 * Keep in mind this has to include any pinned space which could end up
3265  	 * taking up quite a bit since it's not folded into the other space
3266  	 * cache.
3267  	 */
3268  	cache_size = div_u64(block_group->length, SZ_256M);
3269  	if (!cache_size)
3270  		cache_size = 1;
3271  
3272  	cache_size *= 16;
3273  	cache_size *= fs_info->sectorsize;
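	/*
	 * To make the sizing above concrete (illustrative numbers, not from
	 * the original source): for a 1GiB block group with a 4KiB sectorsize,
	 * div_u64(SZ_1G, SZ_256M) = 4, so we preallocate 4 * 16 * 4096 =
	 * 256KiB for the cache file; any block group of 256MiB or less gets
	 * the minimum of 16 * 4096 = 64KiB.
	 */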
3274  
3275  	ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0,
3276  					  cache_size, false);
3277  	if (ret)
3278  		goto out_put;
3279  
3280  	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size,
3281  					      cache_size, cache_size,
3282  					      &alloc_hint);
3283  	/*
3284  	 * Our cache requires contiguous chunks so that we don't modify a bunch
3285  	 * of metadata or split extents when writing the cache out, which means
3286  	 * we can hit ENOSPC if we are heavily fragmented, in addition to normal
3287  	 * out of space conditions.  So if we hit this just skip setting up any
3288  	 * other block groups for this transaction, maybe we'll unpin enough
3289  	 * space the next time around.
3290  	 */
3291  	if (!ret)
3292  		dcs = BTRFS_DC_SETUP;
3293  	else if (ret == -ENOSPC)
3294  		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3295  
3296  out_put:
3297  	iput(inode);
3298  out_free:
3299  	btrfs_release_path(path);
3300  out:
3301  	spin_lock(&block_group->lock);
3302  	if (!ret && dcs == BTRFS_DC_SETUP)
3303  		block_group->cache_generation = trans->transid;
3304  	block_group->disk_cache_state = dcs;
3305  	spin_unlock(&block_group->lock);
3306  
3307  	extent_changeset_free(data_reserved);
3308  	return ret;
3309  }
3310  
3311  int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
3312  {
3313  	struct btrfs_fs_info *fs_info = trans->fs_info;
3314  	struct btrfs_block_group *cache, *tmp;
3315  	struct btrfs_transaction *cur_trans = trans->transaction;
3316  	struct btrfs_path *path;
3317  
3318  	if (list_empty(&cur_trans->dirty_bgs) ||
3319  	    !btrfs_test_opt(fs_info, SPACE_CACHE))
3320  		return 0;
3321  
3322  	path = btrfs_alloc_path();
3323  	if (!path)
3324  		return -ENOMEM;
3325  
3326  	/* Could add new block groups, use _safe just in case */
3327  	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3328  				 dirty_list) {
3329  		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3330  			cache_save_setup(cache, trans, path);
3331  	}
3332  
3333  	btrfs_free_path(path);
3334  	return 0;
3335  }
3336  
3337  /*
3338   * Transaction commit does final block group cache writeback during a critical
3339   * section where nothing is allowed to change the FS.  This is required in
3340   * order for the cache to actually match the block group, but can introduce a
3341   * lot of latency into the commit.
3342   *
3343   * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
3344   * There's a chance we'll have to redo some of it if the block group changes
3345   * again during the commit, but it greatly reduces the commit latency by
3346   * getting rid of the easy block groups while we're still allowing others to
3347   * join the commit.
3348   */
3349  int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
3350  {
3351  	struct btrfs_fs_info *fs_info = trans->fs_info;
3352  	struct btrfs_block_group *cache;
3353  	struct btrfs_transaction *cur_trans = trans->transaction;
3354  	int ret = 0;
3355  	int should_put;
3356  	struct btrfs_path *path = NULL;
3357  	LIST_HEAD(dirty);
3358  	struct list_head *io = &cur_trans->io_bgs;
3359  	int loops = 0;
3360  
3361  	spin_lock(&cur_trans->dirty_bgs_lock);
3362  	if (list_empty(&cur_trans->dirty_bgs)) {
3363  		spin_unlock(&cur_trans->dirty_bgs_lock);
3364  		return 0;
3365  	}
3366  	list_splice_init(&cur_trans->dirty_bgs, &dirty);
3367  	spin_unlock(&cur_trans->dirty_bgs_lock);
3368  
3369  again:
3370  	/* Make sure all the block groups on our dirty list actually exist */
3371  	btrfs_create_pending_block_groups(trans);
3372  
3373  	if (!path) {
3374  		path = btrfs_alloc_path();
3375  		if (!path) {
3376  			ret = -ENOMEM;
3377  			goto out;
3378  		}
3379  	}
3380  
3381  	/*
3382  	 * cache_write_mutex is here only to save us from balance or automatic
3383  	 * removal of empty block groups deleting this block group while we are
3384  	 * writing out the cache
3385  	 */
3386  	mutex_lock(&trans->transaction->cache_write_mutex);
3387  	while (!list_empty(&dirty)) {
3388  		bool drop_reserve = true;
3389  
3390  		cache = list_first_entry(&dirty, struct btrfs_block_group,
3391  					 dirty_list);
3392  		/*
3393  		 * This can happen if something re-dirties a block group that
3394  		 * is already under IO.  Just wait for it to finish and then do
3395  		 * it all again
3396  		 */
3397  		if (!list_empty(&cache->io_list)) {
3398  			list_del_init(&cache->io_list);
3399  			btrfs_wait_cache_io(trans, cache, path);
3400  			btrfs_put_block_group(cache);
3401  		}
3402  
3404  		/*
3405  		 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
3406  		 * it should update the cache_state.  Don't delete until after
3407  		 * we wait.
3408  		 *
3409  		 * Since we're not running in the commit critical section
3410  		 * we need the dirty_bgs_lock to protect from update_block_group
3411  		 */
3412  		spin_lock(&cur_trans->dirty_bgs_lock);
3413  		list_del_init(&cache->dirty_list);
3414  		spin_unlock(&cur_trans->dirty_bgs_lock);
3415  
3416  		should_put = 1;
3417  
3418  		cache_save_setup(cache, trans, path);
3419  
3420  		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3421  			cache->io_ctl.inode = NULL;
3422  			ret = btrfs_write_out_cache(trans, cache, path);
3423  			if (ret == 0 && cache->io_ctl.inode) {
3424  				should_put = 0;
3425  
3426  				/*
3427  				 * The cache_write_mutex is protecting the
3428  				 * io_list, also refer to the definition of
3429  				 * btrfs_transaction::io_bgs for more details
3430  				 */
3431  				list_add_tail(&cache->io_list, io);
3432  			} else {
3433  				/*
3434  				 * If we failed to write the cache, the
3435  				 * generation will be bad and life goes on
3436  				 */
3437  				ret = 0;
3438  			}
3439  		}
3440  		if (!ret) {
3441  			ret = update_block_group_item(trans, path, cache);
3442  			/*
3443  			 * Our block group might still be attached to the list
3444  			 * of new block groups in the transaction handle of some
3445  			 * other task (struct btrfs_trans_handle->new_bgs). This
3446  			 * means its block group item isn't yet in the extent
3447  			 * tree. If this happens ignore the error, as we will
3448  			 * try again later in the critical section of the
3449  			 * transaction commit.
3450  			 */
3451  			if (ret == -ENOENT) {
3452  				ret = 0;
3453  				spin_lock(&cur_trans->dirty_bgs_lock);
3454  				if (list_empty(&cache->dirty_list)) {
3455  					list_add_tail(&cache->dirty_list,
3456  						      &cur_trans->dirty_bgs);
3457  					btrfs_get_block_group(cache);
3458  					drop_reserve = false;
3459  				}
3460  				spin_unlock(&cur_trans->dirty_bgs_lock);
3461  			} else if (ret) {
3462  				btrfs_abort_transaction(trans, ret);
3463  			}
3464  		}
3465  
3466  		/* If it's not on the io list, we need to put the block group */
3467  		if (should_put)
3468  			btrfs_put_block_group(cache);
3469  		if (drop_reserve)
3470  			btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
3471  		/*
3472  		 * Avoid blocking other tasks for too long. It might even save
3473  		 * us from writing caches for block groups that are going to be
3474  		 * removed.
3475  		 */
3476  		mutex_unlock(&trans->transaction->cache_write_mutex);
3477  		if (ret)
3478  			goto out;
3479  		mutex_lock(&trans->transaction->cache_write_mutex);
3480  	}
3481  	mutex_unlock(&trans->transaction->cache_write_mutex);
3482  
3483  	/*
3484  	 * Go through delayed refs for all the stuff we've just kicked off
3485  	 * and then loop back (just once)
3486  	 */
3487  	if (!ret)
3488  		ret = btrfs_run_delayed_refs(trans, 0);
3489  	if (!ret && loops == 0) {
3490  		loops++;
3491  		spin_lock(&cur_trans->dirty_bgs_lock);
3492  		list_splice_init(&cur_trans->dirty_bgs, &dirty);
3493  		/*
3494  		 * dirty_bgs_lock protects us from concurrent block group
3495  		 * deletes too (not just cache_write_mutex).
3496  		 */
3497  		if (!list_empty(&dirty)) {
3498  			spin_unlock(&cur_trans->dirty_bgs_lock);
3499  			goto again;
3500  		}
3501  		spin_unlock(&cur_trans->dirty_bgs_lock);
3502  	}
3503  out:
3504  	if (ret < 0) {
3505  		spin_lock(&cur_trans->dirty_bgs_lock);
3506  		list_splice_init(&dirty, &cur_trans->dirty_bgs);
3507  		spin_unlock(&cur_trans->dirty_bgs_lock);
3508  		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
3509  	}
3510  
3511  	btrfs_free_path(path);
3512  	return ret;
3513  }
3514  
3515  int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
3516  {
3517  	struct btrfs_fs_info *fs_info = trans->fs_info;
3518  	struct btrfs_block_group *cache;
3519  	struct btrfs_transaction *cur_trans = trans->transaction;
3520  	int ret = 0;
3521  	int should_put;
3522  	struct btrfs_path *path;
3523  	struct list_head *io = &cur_trans->io_bgs;
3524  
3525  	path = btrfs_alloc_path();
3526  	if (!path)
3527  		return -ENOMEM;
3528  
3529  	/*
3530  	 * Even though we are in the critical section of the transaction commit,
3531  	 * we can still have concurrent tasks adding elements to this
3532  	 * transaction's list of dirty block groups. These tasks correspond to
3533  	 * endio free space workers started when writeback finishes for a
3534  	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3535  	 * allocate new block groups as a result of COWing nodes of the root
3536  	 * tree when updating the free space inode. The writeback for the space
3537  	 * caches is triggered by an earlier call to
3538  	 * btrfs_start_dirty_block_groups() and iterations of the following
3539  	 * loop.
3540  	 * Also we want to do the cache_save_setup first and then run the
3541  	 * delayed refs to make sure we have the best chance at doing this all
3542  	 * in one shot.
3543  	 */
3544  	spin_lock(&cur_trans->dirty_bgs_lock);
3545  	while (!list_empty(&cur_trans->dirty_bgs)) {
3546  		cache = list_first_entry(&cur_trans->dirty_bgs,
3547  					 struct btrfs_block_group,
3548  					 dirty_list);
3549  
3550  		/*
3551  		 * This can happen if cache_save_setup re-dirties a block group
3552  		 * that is already under IO.  Just wait for it to finish and
3553  		 * then do it all again
3554  		 */
3555  		if (!list_empty(&cache->io_list)) {
3556  			spin_unlock(&cur_trans->dirty_bgs_lock);
3557  			list_del_init(&cache->io_list);
3558  			btrfs_wait_cache_io(trans, cache, path);
3559  			btrfs_put_block_group(cache);
3560  			spin_lock(&cur_trans->dirty_bgs_lock);
3561  		}
3562  
3563  		/*
3564  		 * Don't remove from the dirty list until after we've waited on
3565  		 * any pending IO
3566  		 */
3567  		list_del_init(&cache->dirty_list);
3568  		spin_unlock(&cur_trans->dirty_bgs_lock);
3569  		should_put = 1;
3570  
3571  		cache_save_setup(cache, trans, path);
3572  
3573  		if (!ret)
3574  			ret = btrfs_run_delayed_refs(trans, U64_MAX);
3575  
3576  		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3577  			cache->io_ctl.inode = NULL;
3578  			ret = btrfs_write_out_cache(trans, cache, path);
3579  			if (ret == 0 && cache->io_ctl.inode) {
3580  				should_put = 0;
3581  				list_add_tail(&cache->io_list, io);
3582  			} else {
3583  				/*
3584  				 * If we failed to write the cache, the
3585  				 * generation will be bad and life goes on
3586  				 */
3587  				ret = 0;
3588  			}
3589  		}
3590  		if (!ret) {
3591  			ret = update_block_group_item(trans, path, cache);
3592  			/*
3593  			 * One of the free space endio workers might have
3594  			 * created a new block group while updating a free space
3595  			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
3596  			 * and hasn't released its transaction handle yet, in
3597  			 * which case the new block group is still attached to
3598  			 * its transaction handle and its creation has not
3599  			 * finished yet (no block group item in the extent tree
3600  			 * yet, etc). If this is the case, wait for all free
3601  			 * space endio workers to finish and retry. This is a
3602  			 * very rare case so no need for a more efficient and
3603  			 * complex approach.
3604  			 */
3605  			if (ret == -ENOENT) {
3606  				wait_event(cur_trans->writer_wait,
3607  				   atomic_read(&cur_trans->num_writers) == 1);
3608  				ret = update_block_group_item(trans, path, cache);
3609  			}
3610  			if (ret)
3611  				btrfs_abort_transaction(trans, ret);
3612  		}
3613  
3614  		/* If it's not on the io list, we need to put the block group */
3615  		if (should_put)
3616  			btrfs_put_block_group(cache);
3617  		btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
3618  		spin_lock(&cur_trans->dirty_bgs_lock);
3619  	}
3620  	spin_unlock(&cur_trans->dirty_bgs_lock);
3621  
3622  	/*
3623  	 * Refer to the definition of the io_bgs member for details on why it's
3624  	 * safe to use it without any locking
3625  	 */
3626  	while (!list_empty(io)) {
3627  		cache = list_first_entry(io, struct btrfs_block_group,
3628  					 io_list);
3629  		list_del_init(&cache->io_list);
3630  		btrfs_wait_cache_io(trans, cache, path);
3631  		btrfs_put_block_group(cache);
3632  	}
3633  
3634  	btrfs_free_path(path);
3635  	return ret;
3636  }
3637  
3638  int btrfs_update_block_group(struct btrfs_trans_handle *trans,
3639  			     u64 bytenr, u64 num_bytes, bool alloc)
3640  {
3641  	struct btrfs_fs_info *info = trans->fs_info;
3642  	struct btrfs_space_info *space_info;
3643  	struct btrfs_block_group *cache;
3644  	u64 old_val;
3645  	bool reclaim = false;
3646  	bool bg_already_dirty = true;
3647  	int factor;
3648  
3649  	/* Block accounting for super block */
3650  	spin_lock(&info->delalloc_root_lock);
3651  	old_val = btrfs_super_bytes_used(info->super_copy);
3652  	if (alloc)
3653  		old_val += num_bytes;
3654  	else
3655  		old_val -= num_bytes;
3656  	btrfs_set_super_bytes_used(info->super_copy, old_val);
3657  	spin_unlock(&info->delalloc_root_lock);
3658  
3659  	cache = btrfs_lookup_block_group(info, bytenr);
3660  	if (!cache)
3661  		return -ENOENT;
3662  
3663  	/* An extent cannot span multiple block groups. */
3664  	ASSERT(bytenr + num_bytes <= cache->start + cache->length);
3665  
3666  	space_info = cache->space_info;
3667  	factor = btrfs_bg_type_to_factor(cache->flags);
3668  
3669  	/*
3670  	 * If this block group has free space cache written out, we need to make
3671  	 * sure to load it if we are removing space.  This is because we need
3672  	 * the unpinning stage to actually add the space back to the block group,
3673  	 * otherwise we will leak space.
3674  	 */
3675  	if (!alloc && !btrfs_block_group_done(cache))
3676  		btrfs_cache_block_group(cache, true);
3677  
3678  	spin_lock(&space_info->lock);
3679  	spin_lock(&cache->lock);
3680  
3681  	if (btrfs_test_opt(info, SPACE_CACHE) &&
3682  	    cache->disk_cache_state < BTRFS_DC_CLEAR)
3683  		cache->disk_cache_state = BTRFS_DC_CLEAR;
3684  
3685  	old_val = cache->used;
3686  	if (alloc) {
3687  		old_val += num_bytes;
3688  		cache->used = old_val;
3689  		cache->reserved -= num_bytes;
3690  		cache->reclaim_mark = 0;
3691  		space_info->bytes_reserved -= num_bytes;
3692  		space_info->bytes_used += num_bytes;
3693  		space_info->disk_used += num_bytes * factor;
3694  		if (READ_ONCE(space_info->periodic_reclaim))
3695  			btrfs_space_info_update_reclaimable(space_info, -num_bytes);
3696  		spin_unlock(&cache->lock);
3697  		spin_unlock(&space_info->lock);
3698  	} else {
3699  		old_val -= num_bytes;
3700  		cache->used = old_val;
3701  		cache->pinned += num_bytes;
3702  		btrfs_space_info_update_bytes_pinned(info, space_info, num_bytes);
3703  		space_info->bytes_used -= num_bytes;
3704  		space_info->disk_used -= num_bytes * factor;
3705  		if (READ_ONCE(space_info->periodic_reclaim))
3706  			btrfs_space_info_update_reclaimable(space_info, num_bytes);
3707  		else
3708  			reclaim = should_reclaim_block_group(cache, num_bytes);
3709  
3710  		spin_unlock(&cache->lock);
3711  		spin_unlock(&space_info->lock);
3712  
3713  		set_extent_bit(&trans->transaction->pinned_extents, bytenr,
3714  			       bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
3715  	}
3716  
3717  	spin_lock(&trans->transaction->dirty_bgs_lock);
3718  	if (list_empty(&cache->dirty_list)) {
3719  		list_add_tail(&cache->dirty_list, &trans->transaction->dirty_bgs);
3720  		bg_already_dirty = false;
3721  		btrfs_get_block_group(cache);
3722  	}
3723  	spin_unlock(&trans->transaction->dirty_bgs_lock);
3724  
3725  	/*
3726  	 * No longer have used bytes in this block group, queue it for deletion.
3727  	 * We do this after adding the block group to the dirty list to avoid
3728  	 * races between cleaner kthread and space cache writeout.
3729  	 */
3730  	if (!alloc && old_val == 0) {
3731  		if (!btrfs_test_opt(info, DISCARD_ASYNC))
3732  			btrfs_mark_bg_unused(cache);
3733  	} else if (!alloc && reclaim) {
3734  		btrfs_mark_bg_to_reclaim(cache);
3735  	}
3736  
3737  	btrfs_put_block_group(cache);
3738  
3739  	/* Modified block groups are accounted for in the delayed_refs_rsv. */
3740  	if (!bg_already_dirty)
3741  		btrfs_inc_delayed_refs_rsv_bg_updates(info);
3742  
3743  	return 0;
3744  }
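
/*
 * Accounting example for the factor logic above (illustrative, not from the
 * original source): allocating a 16KiB metadata extent in a DUP block group
 * (factor 2) moves 16KiB from cache->reserved to cache->used, increases
 * space_info->bytes_used by 16KiB and space_info->disk_used by 32KiB, since
 * DUP keeps two copies of everything on disk.
 */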
3745  
3746  /*
3747   * Update the block_group and space info counters.
3748   *
3749   * @cache:	The cache we are manipulating
3750   * @ram_bytes:  The number of bytes of file content, which will be the same
3751   *              as @num_bytes except on the compression path.
3752   * @num_bytes:	The number of bytes in question
3753   * @delalloc:   The blocks are allocated for the delalloc write
3754   *
3755   * This is called by the allocator when it reserves space. If this is a
3756   * reservation and the block group has become read only we cannot make the
3757   * reservation and return -EAGAIN, otherwise this function always succeeds.
3758   */
3759  int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
3760  			     u64 ram_bytes, u64 num_bytes, int delalloc,
3761  			     bool force_wrong_size_class)
3762  {
3763  	struct btrfs_space_info *space_info = cache->space_info;
3764  	enum btrfs_block_group_size_class size_class;
3765  	int ret = 0;
3766  
3767  	spin_lock(&space_info->lock);
3768  	spin_lock(&cache->lock);
3769  	if (cache->ro) {
3770  		ret = -EAGAIN;
3771  		goto out;
3772  	}
3773  
3774  	if (btrfs_block_group_should_use_size_class(cache)) {
3775  		size_class = btrfs_calc_block_group_size_class(num_bytes);
3776  		ret = btrfs_use_block_group_size_class(cache, size_class, force_wrong_size_class);
3777  		if (ret)
3778  			goto out;
3779  	}
3780  	cache->reserved += num_bytes;
3781  	space_info->bytes_reserved += num_bytes;
3782  	trace_btrfs_space_reservation(cache->fs_info, "space_info",
3783  				      space_info->flags, num_bytes, 1);
3784  	btrfs_space_info_update_bytes_may_use(cache->fs_info,
3785  					      space_info, -ram_bytes);
3786  	if (delalloc)
3787  		cache->delalloc_bytes += num_bytes;
3788  
3789  	/*
3790  	 * Compression can use less space than we reserved, so wake tickets if
3791  	 * that happens.
3792  	 */
3793  	if (num_bytes < ram_bytes)
3794  		btrfs_try_granting_tickets(cache->fs_info, space_info);
3795  out:
3796  	spin_unlock(&cache->lock);
3797  	spin_unlock(&space_info->lock);
3798  	return ret;
3799  }
3800  
3801  /*
3802   * Update the block_group and space info counters.
3803   *
3804   * @cache:      The cache we are manipulating
3805   * @num_bytes:  The number of bytes in question
3806   * @delalloc:   The blocks are allocated for the delalloc write
3807   *
3808   * This is called by somebody who is freeing space that was never actually used
3809   * on disk.  For example if you reserve some space for a new leaf in transaction
3810   * A and before transaction A commits you free that leaf, you call this to
3811   * clear the reservation.
3812   */
3813  void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
3814  			       u64 num_bytes, int delalloc)
3815  {
3816  	struct btrfs_space_info *space_info = cache->space_info;
3817  
3818  	spin_lock(&space_info->lock);
3819  	spin_lock(&cache->lock);
3820  	if (cache->ro)
3821  		space_info->bytes_readonly += num_bytes;
3822  	else if (btrfs_is_zoned(cache->fs_info))
3823  		space_info->bytes_zone_unusable += num_bytes;
3824  	cache->reserved -= num_bytes;
3825  	space_info->bytes_reserved -= num_bytes;
3826  	space_info->max_extent_size = 0;
3827  
3828  	if (delalloc)
3829  		cache->delalloc_bytes -= num_bytes;
3830  	spin_unlock(&cache->lock);
3831  
3832  	btrfs_try_granting_tickets(cache->fs_info, space_info);
3833  	spin_unlock(&space_info->lock);
3834  }
3835  
3836  static void force_metadata_allocation(struct btrfs_fs_info *info)
3837  {
3838  	struct list_head *head = &info->space_info;
3839  	struct btrfs_space_info *found;
3840  
3841  	list_for_each_entry(found, head, list) {
3842  		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3843  			found->force_alloc = CHUNK_ALLOC_FORCE;
3844  	}
3845  }
3846  
3847  static int should_alloc_chunk(const struct btrfs_fs_info *fs_info,
3848  			      const struct btrfs_space_info *sinfo, int force)
3849  {
3850  	u64 bytes_used = btrfs_space_info_used(sinfo, false);
3851  	u64 thresh;
3852  
3853  	if (force == CHUNK_ALLOC_FORCE)
3854  		return 1;
3855  
3856  	/*
3857  	 * in limited mode, we want to have some free space up to
3858  	 * about 1% of the FS size.
3859  	 */
3860  	if (force == CHUNK_ALLOC_LIMITED) {
3861  		thresh = btrfs_super_total_bytes(fs_info->super_copy);
3862  		thresh = max_t(u64, SZ_64M, mult_perc(thresh, 1));
3863  
3864  		if (sinfo->total_bytes - bytes_used < thresh)
3865  			return 1;
3866  	}
3867  
3868  	if (bytes_used + SZ_2M < mult_perc(sinfo->total_bytes, 80))
3869  		return 0;
3870  	return 1;
3871  }
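
/*
 * A worked example of the thresholds above (illustrative numbers): on a
 * 1TiB filesystem, CHUNK_ALLOC_LIMITED computes thresh = max(64MiB, 1% of
 * 1TiB) = ~10GiB and triggers allocation once free space in the space_info
 * drops below that. In the non-forced, non-limited case we only allocate
 * once usage crosses 80% of sinfo->total_bytes; the SZ_2M slack makes the
 * check trip slightly before the 80% mark is actually reached.
 */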
3872  
3873  int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
3874  {
3875  	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
3876  
3877  	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
3878  }
3879  
3880  static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
3881  {
3882  	struct btrfs_block_group *bg;
3883  	int ret;
3884  
3885  	/*
3886  	 * Check if we have enough space in the system space info because we
3887  	 * will need to update device items in the chunk btree and insert a new
3888  	 * chunk item in the chunk btree as well. This will allocate a new
3889  	 * system block group if needed.
3890  	 */
3891  	check_system_chunk(trans, flags);
3892  
3893  	bg = btrfs_create_chunk(trans, flags);
3894  	if (IS_ERR(bg)) {
3895  		ret = PTR_ERR(bg);
3896  		goto out;
3897  	}
3898  
3899  	ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
3900  	/*
3901  	 * Normally we are not expected to fail with -ENOSPC here, since we have
3902  	 * previously reserved space in the system space_info and allocated one
3903  	 * new system chunk if necessary. However there are three exceptions:
3904  	 *
3905  	 * 1) We may have enough free space in the system space_info but all the
3906  	 *    existing system block groups have a profile which can not be used
3907  	 *    for extent allocation.
3908  	 *
3909  	 *    This happens when mounting in degraded mode. For example we have a
3910  	 *    RAID1 filesystem with 2 devices, lose one device and mount the fs
3911  	 *    using the other device in degraded mode. If we then allocate a chunk,
3912  	 *    we may have enough free space in the existing system space_info, but
3913  	 *    none of the block groups can be used for extent allocation since they
3914  	 *    have a RAID1 profile, and because we are in degraded mode with a
3915  	 *    single device, we are forced to allocate a new system chunk with a
3916  	 *    SINGLE profile. Making check_system_chunk() iterate over all system
3917  	 *    block groups and check if they have a usable profile and enough space
3918  	 *    can be slow on very large filesystems, so we tolerate the -ENOSPC and
3919  	 *    try again after forcing allocation of a new system chunk. Like this
3920  	 *    we avoid paying the cost of that search in normal circumstances, when
3921  	 *    we were not mounted in degraded mode;
3922  	 *
3923  	 * 2) We had enough free space in the system space_info, and one suitable
3924  	 *    block group to allocate from when we called check_system_chunk()
3925  	 *    above. However right after we called it, the only system block group
3926  	 *    with enough free space got turned into RO mode by a running scrub,
3927  	 *    and in this case we have to allocate a new one and retry. We only
3928  	 *    need to do this allocation and retry once, since we have a transaction
3929  	 *    handle and scrub uses the commit root to search for block groups;
3930  	 *
3931  	 * 3) We had one system block group with enough free space when we called
3932  	 *    check_system_chunk(), but after that, right before we tried to
3933  	 *    allocate the last extent buffer we needed, a discard operation came
3934  	 *    in and it temporarily removed the last free space entry from the
3935  	 *    block group (discard removes a free space entry, discards it, and
3936  	 *    then adds back the entry to the block group cache).
3937  	 */
3938  	if (ret == -ENOSPC) {
3939  		const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
3940  		struct btrfs_block_group *sys_bg;
3941  
3942  		sys_bg = btrfs_create_chunk(trans, sys_flags);
3943  		if (IS_ERR(sys_bg)) {
3944  			ret = PTR_ERR(sys_bg);
3945  			btrfs_abort_transaction(trans, ret);
3946  			goto out;
3947  		}
3948  
3949  		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3950  		if (ret) {
3951  			btrfs_abort_transaction(trans, ret);
3952  			goto out;
3953  		}
3954  
3955  		ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
3956  		if (ret) {
3957  			btrfs_abort_transaction(trans, ret);
3958  			goto out;
3959  		}
3960  	} else if (ret) {
3961  		btrfs_abort_transaction(trans, ret);
3962  		goto out;
3963  	}
3964  out:
3965  	btrfs_trans_release_chunk_metadata(trans);
3966  
3967  	if (ret)
3968  		return ERR_PTR(ret);
3969  
3970  	btrfs_get_block_group(bg);
3971  	return bg;
3972  }
3973  
3974  /*
3975   * Chunk allocation is done in 2 phases:
3976   *
3977   * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
3978   *    the chunk, the chunk mapping, create its block group and add the items
3979   *    that belong in the chunk btree to it - more specifically, we need to
3980   *    update device items in the chunk btree and add a new chunk item to it.
3981   *
3982   * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
3983   *    group item to the extent btree and the device extent items to the devices
3984   *    btree.
3985   *
3986   * This is done to prevent deadlocks. For example when COWing a node from the
3987   * extent btree we are holding a write lock on the node's parent and if we
3988   * trigger chunk allocation and attempted to insert the new block group item
3989   * trigger chunk allocation and attempt to insert the new block group item
3990   * in the extent btree right away, we could deadlock because the path for the
3991   * to trigger chunk allocation after starting a transaction since tasks should
3992   * reserve enough transaction units (metadata space), however while that is true
3993   * most of the time, chunk allocation may still be triggered for several reasons:
3994   *
3995   * 1) When reserving metadata, we check if there is enough free space in the
3996   *    metadata space_info and therefore don't trigger allocation of a new chunk.
3997   *    However later when the task actually tries to COW an extent buffer from
3998   *    the extent btree or from the device btree for example, it is forced to
3999   *    allocate a new block group (chunk) because the only one that had enough
4000   *    free space was just turned to RO mode by a running scrub for example (or
4001   *    device replace, block group reclaim thread, etc), so we can not use it
4002   *    for allocating an extent and end up being forced to allocate a new one;
4003   *
4004   * 2) Because we only check that the metadata space_info has enough free bytes,
4005   *    we end up not allocating a new metadata chunk in that case. However if
4006   *    the filesystem was mounted in degraded mode, none of the existing block
4007   *    groups might be suitable for extent allocation due to their incompatible
4008   *    profile (e.g. mounting a 2-device filesystem, where all block groups
4009   *    use a RAID1 profile, in degraded mode using a single device). In this case
4010   *    when the task attempts to COW some extent buffer of the extent btree for
4011   *    example, it will trigger allocation of a new metadata block group with a
4012   *    suitable profile (SINGLE profile in the example of the degraded mount of
4013   *    the RAID1 filesystem);
4014   *
4015   * 3) The task has reserved enough transaction units / metadata space, but when
4016   *    it attempts to COW an extent buffer from the extent or device btree for
4017   *    example, it does not find any free extent in any metadata block group,
4018   *    therefore forced to try to allocate a new metadata block group.
4019   *    This is because some other task allocated all available extents in the
4020   *    meantime - this typically happens with tasks that don't reserve space
4021   *    properly, either intentionally or as a bug. One example where this is
4022   *    done intentionally is fsync, as it does not reserve any transaction units
4023   *    and ends up allocating a variable number of metadata extents for log
4024   *    tree extent buffers;
4025   *
4026   * 4) The task has reserved enough transaction units / metadata space, but right
4027   *    before it tries to allocate the last extent buffer it needs, a discard
4028   *    operation comes in and, temporarily, removes the last free space entry from
4029   *    the only metadata block group that had free space (discard starts by
4030   *    removing a free space entry from a block group, then does the discard
4031   *    operation and, once it's done, it adds back the free space entry to the
4032   *    block group).
4033   *
4034   * We also need this 2 phases setup when adding a device to a filesystem with
4035   * a seed device - we must create new metadata and system chunks without adding
4036   * any of the block group items to the chunk, extent and device btrees. If we
4037   * did not do it this way, we would get ENOSPC when attempting to update those
4038   * btrees, since all the chunks from the seed device are read-only.
4039   *
4040   * Phase 1 does the updates and insertions to the chunk btree because if we had
4041   * it done in phase 2 and have a thundering herd of tasks allocating chunks in
4042   * parallel, we risk having too many system chunks allocated by many tasks if
4043   * many tasks reach phase 1 without the previous ones completing phase 2. In the
4044   * extreme case this leads to exhaustion of the system chunk array in the
4045   * superblock. This is easier to trigger if using a btree node/leaf size of 64K
4046   * and with RAID filesystems (so we have more device items in the chunk btree).
4047   * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
4048   * the system chunk array due to concurrent allocations") provides more details.
4049   *
4050   * Allocation of system chunks does not happen through this function. A task that
4051   * needs to update the chunk btree (the only btree that uses system chunks), must
4052   * preallocate chunk space by calling either check_system_chunk() or
4053   * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
4054   * metadata chunk or when removing a chunk, while the latter is used before doing
4055   * a modification to the chunk btree - use cases for the latter are adding,
4056   * removing and resizing a device as well as relocation of a system chunk.
4057   * See the comment below for more details.
4058   *
4059   * The reservation of system space, done through check_system_chunk(), as well
4060   * as all the updates and insertions into the chunk btree must be done while
4061   * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
4062   * an extent buffer from the chunks btree we never trigger allocation of a new
4063   * system chunk, which would result in a deadlock (trying to lock twice an
4064   * extent buffer of the chunk btree, first time before triggering the chunk
4065   * allocation and the second time during chunk allocation while attempting to
4066   * update the chunks btree). The system chunk array is also updated while holding
4067   * that mutex. The same logic applies to removing chunks - we must reserve system
4068   * space, update the chunk btree and the system chunk array in the superblock
4069   * while holding fs_info->chunk_mutex.
4070   *
4071   * This function, btrfs_chunk_alloc(), belongs to phase 1.
4072   *
4073   * If @force is CHUNK_ALLOC_FORCE:
4074   *    - return 1 if it successfully allocates a chunk,
4075   *    - return errors including -ENOSPC otherwise.
4076   * If @force is NOT CHUNK_ALLOC_FORCE:
4077   *    - return 0 if it doesn't need to allocate a new chunk,
4078   *    - return 1 if it successfully allocates a chunk,
4079   *    - return errors including -ENOSPC otherwise.
4080   */
4081  int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
4082  		      enum btrfs_chunk_alloc_enum force)
4083  {
4084  	struct btrfs_fs_info *fs_info = trans->fs_info;
4085  	struct btrfs_space_info *space_info;
4086  	struct btrfs_block_group *ret_bg;
4087  	bool wait_for_alloc = false;
4088  	bool should_alloc = false;
4089  	bool from_extent_allocation = false;
4090  	int ret = 0;
4091  
4092  	if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
4093  		from_extent_allocation = true;
4094  		force = CHUNK_ALLOC_FORCE;
4095  	}
4096  
4097  	/* Don't re-enter if we're already allocating a chunk */
4098  	if (trans->allocating_chunk)
4099  		return -ENOSPC;
4100  	/*
4101  	 * Allocation of system chunks cannot happen through this path, as we
4102  	 * could end up in a deadlock if we are allocating a data or metadata
4103  	 * chunk and there is another task modifying the chunk btree.
4104  	 *
4105  	 * This is because while we are holding the chunk mutex, we will attempt
4106  	 * to add the new chunk item to the chunk btree or update an existing
4107  	 * device item in the chunk btree, while the other task that is modifying
4108  	 * the chunk btree is attempting to COW an extent buffer while holding a
4109  	 * lock on it and on its parent - if the COW operation triggers a system
4110  	 * chunk allocation, then we can deadlock because we are holding the
4111  	 * chunk mutex and we may need to access that extent buffer or its parent
4112  	 * in order to add the chunk item or update a device item.
4113  	 *
4114  	 * Tasks that want to modify the chunk tree should reserve system space
4115  	 * before updating the chunk btree, by calling either
4116  	 * btrfs_reserve_chunk_metadata() or check_system_chunk().
4117  	 * It's possible that after a task reserves the space, it still ends up
4118  	 * here - this happens in the cases described above at do_chunk_alloc().
4119  	 * The task will have to either retry or fail.
4120  	 */
4121  	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4122  		return -ENOSPC;
4123  
4124  	space_info = btrfs_find_space_info(fs_info, flags);
4125  	ASSERT(space_info);
4126  
4127  	do {
4128  		spin_lock(&space_info->lock);
4129  		if (force < space_info->force_alloc)
4130  			force = space_info->force_alloc;
4131  		should_alloc = should_alloc_chunk(fs_info, space_info, force);
4132  		if (space_info->full) {
4133  			/* No more free physical space */
4134  			if (should_alloc)
4135  				ret = -ENOSPC;
4136  			else
4137  				ret = 0;
4138  			spin_unlock(&space_info->lock);
4139  			return ret;
4140  		} else if (!should_alloc) {
4141  			spin_unlock(&space_info->lock);
4142  			return 0;
4143  		} else if (space_info->chunk_alloc) {
4144  			/*
4145  			 * Someone is already allocating, so we need to block
4146  			 * until this someone is finished and then loop to
4147  			 * recheck if we should continue with our allocation
4148  			 * attempt.
4149  			 */
4150  			wait_for_alloc = true;
4151  			force = CHUNK_ALLOC_NO_FORCE;
4152  			spin_unlock(&space_info->lock);
4153  			mutex_lock(&fs_info->chunk_mutex);
4154  			mutex_unlock(&fs_info->chunk_mutex);
4155  		} else {
4156  			/* Proceed with allocation */
4157  			space_info->chunk_alloc = 1;
4158  			wait_for_alloc = false;
4159  			spin_unlock(&space_info->lock);
4160  		}
4161  
4162  		cond_resched();
4163  	} while (wait_for_alloc);
4164  
4165  	mutex_lock(&fs_info->chunk_mutex);
4166  	trans->allocating_chunk = true;
4167  
4168  	/*
4169  	 * If we have mixed data/metadata chunks we want to make sure we keep
4170  	 * allocating mixed chunks instead of individual chunks.
4171  	 */
4172  	if (btrfs_mixed_space_info(space_info))
4173  		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4174  
4175  	/*
4176  	 * if we're doing a data chunk, go ahead and make sure that
4177  	 * we keep a reasonable number of metadata chunks allocated in the
4178  	 * FS as well.
4179  	 */
4180  	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4181  		fs_info->data_chunk_allocations++;
4182  		if (!(fs_info->data_chunk_allocations %
4183  		      fs_info->metadata_ratio))
4184  			force_metadata_allocation(fs_info);
4185  	}
4186  
4187  	ret_bg = do_chunk_alloc(trans, flags);
4188  	trans->allocating_chunk = false;
4189  
4190  	if (IS_ERR(ret_bg)) {
4191  		ret = PTR_ERR(ret_bg);
4192  	} else if (from_extent_allocation && (flags & BTRFS_BLOCK_GROUP_DATA)) {
4193  		/*
4194  		 * New block group is likely to be used soon. Try to activate
4195  		 * it now. Failure is OK for now.
4196  		 */
4197  		btrfs_zone_activate(ret_bg);
4198  	}
4199  
4200  	if (!ret)
4201  		btrfs_put_block_group(ret_bg);
4202  
4203  	spin_lock(&space_info->lock);
4204  	if (ret < 0) {
4205  		if (ret == -ENOSPC)
4206  			space_info->full = 1;
4207  		else
4208  			goto out;
4209  	} else {
4210  		ret = 1;
4211  		space_info->max_extent_size = 0;
4212  	}
4213  
4214  	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4215  out:
4216  	space_info->chunk_alloc = 0;
4217  	spin_unlock(&space_info->lock);
4218  	mutex_unlock(&fs_info->chunk_mutex);
4219  
4220  	return ret;
4221  }
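
/*
 * A minimal caller sketch for the return convention documented above. This
 * is hypothetical code, assuming a transaction handle is already held; it is
 * not taken from any in-tree caller:
 *
 *	u64 alloc_flags = btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
 *	int ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_NO_FORCE);
 *
 *	if (ret < 0)
 *		return ret;	// includes -ENOSPC
 *	// ret == 0: no new chunk was needed; ret == 1: a chunk was allocated.
 *	// Phase 2 (the block group and device extent items) completes later,
 *	// via btrfs_create_pending_block_groups() or at transaction commit.
 */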
4222  
4223  static u64 get_profile_num_devs(const struct btrfs_fs_info *fs_info, u64 type)
4224  {
4225  	u64 num_dev;
4226  
4227  	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
4228  	if (!num_dev)
4229  		num_dev = fs_info->fs_devices->rw_devices;
4230  
4231  	return num_dev;
4232  }
4233  
4234  static void reserve_chunk_space(struct btrfs_trans_handle *trans,
4235  				u64 bytes,
4236  				u64 type)
4237  {
4238  	struct btrfs_fs_info *fs_info = trans->fs_info;
4239  	struct btrfs_space_info *info;
4240  	u64 left;
4241  	int ret = 0;
4242  
4243  	/*
4244  	 * Needed because we can end up allocating a system chunk and need an
4245  	 * atomic and race-free space reservation in the chunk block reserve.
4246  	 */
4247  	lockdep_assert_held(&fs_info->chunk_mutex);
4248  
4249  	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4250  	spin_lock(&info->lock);
4251  	left = info->total_bytes - btrfs_space_info_used(info, true);
4252  	spin_unlock(&info->lock);
4253  
4254  	if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
4255  		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
4256  			   left, bytes, type);
4257  		btrfs_dump_space_info(fs_info, info, 0, 0);
4258  	}
4259  
4260  	if (left < bytes) {
4261  		u64 flags = btrfs_system_alloc_profile(fs_info);
4262  		struct btrfs_block_group *bg;
4263  
4264  		/*
4265  		 * Ignore failure to create system chunk. We might end up not
4266  		 * needing it, as we might not need to COW all nodes/leafs from
4267  		 * the paths we visit in the chunk tree (they were already COWed
4268  		 * or created in the current transaction for example).
4269  		 */
4270  		bg = btrfs_create_chunk(trans, flags);
4271  		if (IS_ERR(bg)) {
4272  			ret = PTR_ERR(bg);
4273  		} else {
4274  			/*
4275  			 * We have a new chunk. We also need to activate it for
4276  			 * zoned filesystem.
4277  			 */
4278  			ret = btrfs_zoned_activate_one_bg(fs_info, info, true);
4279  			if (ret < 0)
4280  				return;
4281  
4282  			/*
4283  			 * If we fail to add the chunk item here, we end up
4284  			 * trying again at phase 2 of chunk allocation, at
4285  			 * btrfs_create_pending_block_groups(). So ignore
4286  			 * any error here. An ENOSPC here could happen, due to
4287  			 * the cases described at do_chunk_alloc() - the system
4288  			 * block group we just created was just turned into RO
4289  			 * mode by a scrub for example, or a running discard
4290  			 * temporarily removed its free space entries, etc.
4291  			 */
4292  			btrfs_chunk_alloc_add_chunk_item(trans, bg);
4293  		}
4294  	}
4295  
4296  	if (!ret) {
4297  		ret = btrfs_block_rsv_add(fs_info,
4298  					  &fs_info->chunk_block_rsv,
4299  					  bytes, BTRFS_RESERVE_NO_FLUSH);
4300  		if (!ret)
4301  			trans->chunk_bytes_reserved += bytes;
4302  	}
4303  }
4304  
4305  /*
4306   * Reserve space in the system space for allocating or removing a chunk.
4307   * The caller must be holding fs_info->chunk_mutex.
4308   */
4309  void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
4310  {
4311  	struct btrfs_fs_info *fs_info = trans->fs_info;
4312  	const u64 num_devs = get_profile_num_devs(fs_info, type);
4313  	u64 bytes;
4314  
4315  	/* num_devs device items to update and 1 chunk item to add or remove. */
4316  	bytes = btrfs_calc_metadata_size(fs_info, num_devs) +
4317  		btrfs_calc_insert_metadata_size(fs_info, 1);
4318  
4319  	reserve_chunk_space(trans, bytes, type);
4320  }
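
/*
 * For a sense of scale (illustrative numbers, assuming the usual helper
 * definitions where btrfs_calc_metadata_size() accounts one tree path of
 * BTRFS_MAX_LEVEL nodes per item and btrfs_calc_insert_metadata_size()
 * twice that for potential splits): with a 16KiB nodesize and
 * BTRFS_MAX_LEVEL == 8, a RAID1 chunk (num_devs == 2) reserves
 * 2 * 16K * 8 + 16K * 8 * 2 = 512KiB of system space before we touch the
 * chunk btree.
 */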
4321  
4322  /*
4323   * Reserve space in the system space, if needed, for doing a modification to the
4324   * chunk btree.
4325   *
4326   * @trans:		A transaction handle.
4327   * @is_item_insertion:	Indicate if the modification is for inserting a new item
4328   *			in the chunk btree or if it's for the deletion or update
4329   *			of an existing item.
4330   *
4331   * This is used in a context where we need to update the chunk btree outside
4332   * block group allocation and removal, to avoid a deadlock with a concurrent
4333   * task that is allocating a metadata or data block group and therefore needs to
4334   * update the chunk btree while holding the chunk mutex. After the update to the
4335   * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called.
4337   */
4338  void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
4339  				  bool is_item_insertion)
4340  {
4341  	struct btrfs_fs_info *fs_info = trans->fs_info;
4342  	u64 bytes;
4343  
4344  	if (is_item_insertion)
4345  		bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
4346  	else
4347  		bytes = btrfs_calc_metadata_size(fs_info, 1);
4348  
4349  	mutex_lock(&fs_info->chunk_mutex);
4350  	reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM);
4351  	mutex_unlock(&fs_info->chunk_mutex);
4352  }
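
/*
 * The expected call pattern, per the comment above (a hedged sketch, not
 * verbatim from any caller):
 *
 *	btrfs_reserve_chunk_metadata(trans, true);
 *	// ... insert or update items in the chunk btree ...
 *	btrfs_trans_release_chunk_metadata(trans);
 *
 * The use cases are the ones listed above: adding, removing and resizing a
 * device, and relocating a system chunk.
 */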
4353  
4354  void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
4355  {
4356  	struct btrfs_block_group *block_group;
4357  
4358  	block_group = btrfs_lookup_first_block_group(info, 0);
4359  	while (block_group) {
4360  		btrfs_wait_block_group_cache_done(block_group);
4361  		spin_lock(&block_group->lock);
4362  		if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF,
4363  				       &block_group->runtime_flags)) {
4364  			struct btrfs_inode *inode = block_group->inode;
4365  
4366  			block_group->inode = NULL;
4367  			spin_unlock(&block_group->lock);
4368  
4369  			ASSERT(block_group->io_ctl.inode == NULL);
4370  			iput(&inode->vfs_inode);
4371  		} else {
4372  			spin_unlock(&block_group->lock);
4373  		}
4374  		block_group = btrfs_next_block_group(block_group);
4375  	}
4376  }
4377  
4378  /*
4379   * Must be called only after stopping all workers, since we could have block
4380   * group caching kthreads running, and therefore they could race with us if we
4381   * freed the block groups before stopping them.
4382   */
4383  int btrfs_free_block_groups(struct btrfs_fs_info *info)
4384  {
4385  	struct btrfs_block_group *block_group;
4386  	struct btrfs_space_info *space_info;
4387  	struct btrfs_caching_control *caching_ctl;
4388  	struct rb_node *n;
4389  
4390  	if (btrfs_is_zoned(info)) {
4391  		if (info->active_meta_bg) {
4392  			btrfs_put_block_group(info->active_meta_bg);
4393  			info->active_meta_bg = NULL;
4394  		}
4395  		if (info->active_system_bg) {
4396  			btrfs_put_block_group(info->active_system_bg);
4397  			info->active_system_bg = NULL;
4398  		}
4399  	}
4400  
4401  	write_lock(&info->block_group_cache_lock);
4402  	while (!list_empty(&info->caching_block_groups)) {
4403  		caching_ctl = list_entry(info->caching_block_groups.next,
4404  					 struct btrfs_caching_control, list);
4405  		list_del(&caching_ctl->list);
4406  		btrfs_put_caching_control(caching_ctl);
4407  	}
4408  	write_unlock(&info->block_group_cache_lock);
4409  
4410  	spin_lock(&info->unused_bgs_lock);
4411  	while (!list_empty(&info->unused_bgs)) {
4412  		block_group = list_first_entry(&info->unused_bgs,
4413  					       struct btrfs_block_group,
4414  					       bg_list);
4415  		list_del_init(&block_group->bg_list);
4416  		btrfs_put_block_group(block_group);
4417  	}
4418  
4419  	while (!list_empty(&info->reclaim_bgs)) {
4420  		block_group = list_first_entry(&info->reclaim_bgs,
4421  					       struct btrfs_block_group,
4422  					       bg_list);
4423  		list_del_init(&block_group->bg_list);
4424  		btrfs_put_block_group(block_group);
4425  	}
4426  	spin_unlock(&info->unused_bgs_lock);
4427  
4428  	spin_lock(&info->zone_active_bgs_lock);
4429  	while (!list_empty(&info->zone_active_bgs)) {
4430  		block_group = list_first_entry(&info->zone_active_bgs,
4431  					       struct btrfs_block_group,
4432  					       active_bg_list);
4433  		list_del_init(&block_group->active_bg_list);
4434  		btrfs_put_block_group(block_group);
4435  	}
4436  	spin_unlock(&info->zone_active_bgs_lock);
4437  
4438  	write_lock(&info->block_group_cache_lock);
4439  	while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
4440  		block_group = rb_entry(n, struct btrfs_block_group,
4441  				       cache_node);
4442  		rb_erase_cached(&block_group->cache_node,
4443  				&info->block_group_cache_tree);
4444  		RB_CLEAR_NODE(&block_group->cache_node);
4445  		write_unlock(&info->block_group_cache_lock);
4446  
4447  		down_write(&block_group->space_info->groups_sem);
4448  		list_del(&block_group->list);
4449  		up_write(&block_group->space_info->groups_sem);
4450  
4451  		/*
4452  		 * We haven't cached this block group, which means we could
4453  		 * possibly have excluded extents on this block group.
4454  		 */
4455  		if (block_group->cached == BTRFS_CACHE_NO ||
4456  		    block_group->cached == BTRFS_CACHE_ERROR)
4457  			btrfs_free_excluded_extents(block_group);
4458  
4459  		btrfs_remove_free_space_cache(block_group);
4460  		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
4461  		ASSERT(list_empty(&block_group->dirty_list));
4462  		ASSERT(list_empty(&block_group->io_list));
4463  		ASSERT(list_empty(&block_group->bg_list));
4464  		ASSERT(refcount_read(&block_group->refs) == 1);
4465  		ASSERT(block_group->swap_extents == 0);
4466  		btrfs_put_block_group(block_group);
4467  
4468  		write_lock(&info->block_group_cache_lock);
4469  	}
4470  	write_unlock(&info->block_group_cache_lock);
4471  
4472  	btrfs_release_global_block_rsv(info);
4473  
4474  	while (!list_empty(&info->space_info)) {
4475  		space_info = list_entry(info->space_info.next,
4476  					struct btrfs_space_info,
4477  					list);
4478  
		/*
		 * Do not hide this behind enospc_debug; this is actually
		 * important and indicates a real bug if it happens.
		 */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_may_use > 0))
			btrfs_dump_space_info(info, space_info, 0, 0);

		/*
		 * If we failed to clean up a log tree, very likely because of
		 * an IO failure while writing back one or more of its extent
		 * buffers, we could not do proper (and cheap) unaccounting of
		 * their reserved space, so don't warn on bytes_reserved > 0
		 * in that case.
		 */
		if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
		    !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
			if (WARN_ON(space_info->bytes_reserved > 0))
				btrfs_dump_space_info(info, space_info, 0, 0);
		}

		WARN_ON(space_info->reclaim_size > 0);
		list_del(&space_info->list);
		btrfs_sysfs_remove_space_info(space_info);
	}
	return 0;
}
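
/*
 * Illustrative note (not from the original source): this teardown is meant to
 * run once all block group users are gone, e.g. late in the unmount path
 * (close_ctree()) after the worker threads have been stopped, so nothing can
 * take new references while the lists above are drained:
 *
 *	btrfs_stop_all_workers(fs_info);
 *	...
 *	btrfs_free_block_groups(fs_info);
 */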

void btrfs_freeze_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->frozen);
}
void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
		   test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
	spin_unlock(&block_group->lock);

	if (cleanup) {
		struct btrfs_chunk_map *map;

		map = btrfs_find_chunk_map(fs_info, block_group->start, 1);
		/* Logic error, can't happen. */
		ASSERT(map);

		btrfs_remove_chunk_map(fs_info, map);

		/* Once for our lookup reference. */
		btrfs_free_chunk_map(map);

		/*
		 * We may have left one free space entry, and any other tasks
		 * trimming this block group may have left one entry each.
		 * Free them if any.
		 */
		btrfs_remove_free_space_cache(block_group);
	}
}
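
/*
 * Illustrative usage sketch (not from the original source): freezing pins the
 * underlying chunk map so it outlives a concurrent block group removal; the
 * final unfreeze of a removed block group performs the deferred cleanup above.
 * A caller that needs the logical->physical mapping to stay valid would pair
 * the calls like this:
 *
 *	btrfs_freeze_block_group(bg);
 *	... use the chunk map, e.g. for trim or scrub style work ...
 *	btrfs_unfreeze_block_group(bg);
 */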

bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
{
	bool ret = true;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		bg->swap_extents++;
	spin_unlock(&bg->lock);

	return ret;
}

void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
{
	spin_lock(&bg->lock);
	ASSERT(!bg->ro);
	ASSERT(bg->swap_extents >= amount);
	bg->swap_extents -= amount;
	spin_unlock(&bg->lock);
}
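
/*
 * Illustrative usage sketch (not from the original source): swapfile
 * activation bumps the counter once per extent it pins, which in turn keeps
 * the block group from being set read-only; deactivation drops the whole
 * batch at once:
 *
 *	if (!btrfs_inc_block_group_swap_extents(bg))
 *		return -EINVAL;	 (bg went read-only, refuse the swapfile)
 *	nr_extents++;
 *	...
 *	btrfs_dec_block_group_swap_extents(bg, nr_extents);
 */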

enum btrfs_block_group_size_class btrfs_calc_block_group_size_class(u64 size)
{
	if (size <= SZ_128K)
		return BTRFS_BG_SZ_SMALL;
	if (size <= SZ_8M)
		return BTRFS_BG_SZ_MEDIUM;
	return BTRFS_BG_SZ_LARGE;
}
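
/*
 * Illustrative examples (not from the original source) of the thresholds
 * above:
 *
 *	btrfs_calc_block_group_size_class(SZ_64K);	returns BTRFS_BG_SZ_SMALL
 *	btrfs_calc_block_group_size_class(SZ_1M);	returns BTRFS_BG_SZ_MEDIUM
 *	btrfs_calc_block_group_size_class(SZ_16M);	returns BTRFS_BG_SZ_LARGE
 */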

/*
 * Handle a block group allocating an extent in a size class
 *
 * @bg:				The block group we allocated in.
 * @size_class:			The size class of the allocation.
 * @force_wrong_size_class:	Whether we are desperate enough to allow
 *				mismatched size classes.
 *
 * Returns: 0 if the size class was valid for this block_group, -EAGAIN in the
 * case of a race that leads to the wrong size class without
 * force_wrong_size_class set.
 *
 * find_free_extent will skip block groups with a mismatched size class until
 * it really needs to avoid ENOSPC. In that case it will set
 * force_wrong_size_class. However, if a block group is newly allocated and
 * doesn't yet have a size class, then it is possible for two allocations of
 * different sizes to race and both try to use it. The loser is caught here and
 * has to retry.
 */
int btrfs_use_block_group_size_class(struct btrfs_block_group *bg,
				     enum btrfs_block_group_size_class size_class,
				     bool force_wrong_size_class)
{
	ASSERT(size_class != BTRFS_BG_SZ_NONE);

	/* The new allocation is in the right size class, do nothing */
	if (bg->size_class == size_class)
		return 0;
	/*
	 * The new allocation is in a mismatched size class.
	 * This means one of two things:
	 *
	 * 1. Two tasks in find_free_extent for different size_classes raced
	 *    and hit the same empty block_group. Make the loser try again.
	 * 2. A call to find_free_extent got desperate enough to set
	 *    'force_wrong_size_class'. Don't change the size_class, but allow
	 *    the allocation.
	 */
	if (bg->size_class != BTRFS_BG_SZ_NONE) {
		if (force_wrong_size_class)
			return 0;
		return -EAGAIN;
	}
	/*
	 * The happy new block group case: the new allocation is the first
	 * one in the block_group so we set size_class.
	 */
	bg->size_class = size_class;

	return 0;
}
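
/*
 * Illustrative caller sketch (not from the original source): the allocator is
 * expected to treat -EAGAIN as "pick another block group and retry", not as a
 * hard failure:
 *
 *	ret = btrfs_use_block_group_size_class(bg, size_class, false);
 *	if (ret == -EAGAIN)
 *		goto loop;	 (lost the race for an empty bg, try the next one)
 */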

bool btrfs_block_group_should_use_size_class(const struct btrfs_block_group *bg)
{
	if (btrfs_is_zoned(bg->fs_info))
		return false;
	if (!btrfs_is_block_group_data_only(bg))
		return false;
	return true;
}
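
/*
 * Illustrative flow sketch (not from the original source) of how the helpers
 * above fit together on the allocation path: size classes only apply to
 * data-only block groups on non-zoned filesystems.
 *
 *	enum btrfs_block_group_size_class size_class = BTRFS_BG_SZ_NONE;
 *
 *	if (btrfs_block_group_should_use_size_class(bg))
 *		size_class = btrfs_calc_block_group_size_class(num_bytes);
 *	if (size_class != BTRFS_BG_SZ_NONE)
 *		ret = btrfs_use_block_group_size_class(bg, size_class, forced);
 */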