/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_INODE_H
#define BTRFS_INODE_H

#include <linux/hash.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/compiler.h>
#include <linux/fscrypt.h>
#include <linux/lockdep.h>
#include <uapi/linux/btrfs_tree.h>
#include <trace/events/btrfs.h>
#include "block-rsv.h"
#include "extent_map.h"
#include "extent_io.h"
#include "extent-io-tree.h"
#include "ordered-data.h"
#include "delayed-inode.h"

struct extent_state;
struct posix_acl;
struct iov_iter;
struct writeback_control;
struct btrfs_root;
struct btrfs_fs_info;
struct btrfs_trans_handle;

/*
 * Since we search a directory based on f_pos (struct dir_context::pos), and
 * '.' and '..' have f_pos of 0 and 1 respectively, everything else has to
 * start at 2 (see btrfs_real_readdir() and dir_emit_dots()).
 */
#define BTRFS_DIR_START_INDEX 2

/*
 * BTRFS_INODE_FLUSH_ON_CLOSE is set by truncate when a file that used to
 * have good data has been truncated to zero.  When it is set the btrfs file
 * release call will add this inode to the ordered operations list so that
 * we make sure to flush out any new data the application may have written
 * before commit.
 */
enum {
	BTRFS_INODE_FLUSH_ON_CLOSE,
	BTRFS_INODE_DUMMY,
	BTRFS_INODE_IN_DEFRAG,
	BTRFS_INODE_HAS_ASYNC_EXTENT,
	/*
	 * Always set under the VFS' inode lock, otherwise it can cause races
	 * during fsync (we start as a fast fsync and then end up in a full
	 * fsync racing with ordered extent completion).
	 */
	BTRFS_INODE_NEEDS_FULL_SYNC,
	BTRFS_INODE_COPY_EVERYTHING,
	BTRFS_INODE_HAS_PROPS,
	BTRFS_INODE_SNAPSHOT_FLUSH,
	/*
	 * Set and used when logging an inode and it serves to signal that an
	 * inode does not have xattrs, so subsequent fsyncs can avoid searching
	 * for xattrs to log. This bit must be cleared whenever a xattr is added
	 * to an inode.
	 */
	BTRFS_INODE_NO_XATTRS,
	/*
	 * Set when we are in a context where we need to start a transaction and
	 * have dirty pages with the respective file range locked. This is to
	 * ensure that when reserving space for the transaction, if we are low
	 * on available space and need to flush delalloc, we will not flush
	 * delalloc for this inode, because that could result in a deadlock (on
	 * the file range, inode's io_tree).
	 */
	BTRFS_INODE_NO_DELALLOC_FLUSH,
	/*
	 * Set when we are working on enabling verity for a file. Computing and
	 * writing the whole Merkle tree can take a while so we want to prevent
	 * races where two separate tasks attempt to simultaneously start verity
	 * on the same file.
	 */
	BTRFS_INODE_VERITY_IN_PROGRESS,
	/* Set when this inode is a free space inode. */
	BTRFS_INODE_FREE_SPACE_INODE,
	/* Set when there are no capability xattrs for the inode. */
	BTRFS_INODE_NO_CAP_XATTR,
	/*
	 * Set if an error happened when doing a COW write before submitting a
	 * bio or during writeback. Used for both buffered writes and direct IO
	 * writes. This is to signal a fast fsync that it has to wait for
	 * ordered extents to complete and therefore not log extent maps that
	 * point to unwritten extents (when an ordered extent completes and it
	 * has the BTRFS_ORDERED_IOERR flag set, it drops extent maps in its
	 * range).
	 */
	BTRFS_INODE_COW_WRITE_ERROR,
	/*
	 * Indicate this is a directory that points to a subvolume for which
	 * there is no root reference item. That's a case like the following:
	 *
	 *   $ btrfs subvolume create /mnt/parent
	 *   $ btrfs subvolume create /mnt/parent/child
	 *   $ btrfs subvolume snapshot /mnt/parent /mnt/snap
	 *
	 * If subvolume "parent" is root 256, subvolume "child" is root 257 and
	 * snapshot "snap" is root 258, then there's no root reference item (key
	 * BTRFS_ROOT_REF_KEY in the root tree) for the subvolume "child"
	 * associated with root 258 (the snapshot) - there's only one for the
	 * "parent" subvolume (root 256). In the root tree we have a
	 * (256 BTRFS_ROOT_REF_KEY 257) key but we don't have a
	 * (258 BTRFS_ROOT_REF_KEY 257) key - the same goes for backrefs, we
	 * have a (257 BTRFS_ROOT_BACKREF_KEY 256) key but we don't have a
	 * (257 BTRFS_ROOT_BACKREF_KEY 258) key.
	 *
	 * So when opening the "child" dentry from the snapshot's directory,
	 * we don't find a root ref item and we create a stub inode. This is
	 * done at new_simple_dir(), called from btrfs_lookup_dentry().
	 */
	BTRFS_INODE_ROOT_STUB,
};
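
/*
 * The enum values above are bit numbers for the runtime_flags field of
 * struct btrfs_inode below and are manipulated with the regular atomic
 * bitops, for example (illustrative only):
 *
 *	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
 *	if (test_bit(BTRFS_INODE_FREE_SPACE_INODE, &inode->runtime_flags))
 *		...
 */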

/* In-memory btrfs inode */
struct btrfs_inode {
	/* which subvolume this inode belongs to */
	struct btrfs_root *root;

#if BITS_PER_LONG == 32
	/*
	 * The objectid of the corresponding BTRFS_INODE_ITEM_KEY.
	 * On 64 bits platforms we can get it from vfs_inode.i_ino, which is an
	 * unsigned long and therefore 64 bits on such platforms.
	 */
	u64 objectid;
#endif

	/* Cached value of inode property 'compression'. */
	u8 prop_compress;

	/*
	 * Force compression on the file using the defrag ioctl, could be
	 * different from prop_compress and takes precedence if set.
	 */
	u8 defrag_compress;

	/*
	 * Lock for counters and all fields used to determine if the inode is in
	 * the log or not (last_trans, last_sub_trans, last_log_commit,
	 * logged_trans), to access/update delalloc_bytes, new_delalloc_bytes,
	 * defrag_bytes, disk_i_size, outstanding_extents, csum_bytes and to
	 * update the VFS' inode number of bytes used.
	 * Also protects setting struct file::private_data.
	 */
	spinlock_t lock;

	/* the extent_tree has caches of all the extent mappings to disk */
	struct extent_map_tree extent_tree;

	/* the io_tree does range state (DIRTY, LOCKED etc) */
	struct extent_io_tree io_tree;

	/*
	 * Keep track of where the inode has extent items mapped in order to
	 * make sure the i_size adjustments are accurate. Not required when the
	 * filesystem is NO_HOLES, the status can't be set while mounted as
	 * it's a mkfs-time feature.
	 */
	struct extent_io_tree *file_extent_tree;

	/* held while logging the inode in tree-log.c */
	struct mutex log_mutex;

	/*
	 * Counter to keep track of the number of extent items we may use due
	 * to delalloc and such. outstanding_extents is the number of extent
	 * items we think we'll end up using. Protected by 'lock'.
	 */
	unsigned outstanding_extents;

	/* used to order data wrt metadata */
	spinlock_t ordered_tree_lock;
	struct rb_root ordered_tree;
	struct rb_node *ordered_tree_last;

	/*
	 * List of all the delalloc inodes in the FS. There are times we need
	 * to write all the delalloc pages to disk, and this list is used
	 * to walk them all.
	 */
	struct list_head delalloc_inodes;

	unsigned long runtime_flags;

	/*
	 * Full 64 bit generation number, the VFS inode doesn't have a big
	 * enough field for this.
	 */
	u64 generation;

	/*
	 * ID of the transaction handle that last modified this inode.
	 * Protected by 'lock'.
	 */
	u64 last_trans;

	/*
	 * ID of the transaction that last logged this inode.
	 * Protected by 'lock'.
	 */
	u64 logged_trans;

	/*
	 * Log transaction ID when this inode was last modified.
	 * Protected by 'lock'.
	 */
	int last_sub_trans;

	/* A local copy of root's last_log_commit. Protected by 'lock'. */
	int last_log_commit;

	union {
		/*
		 * Total number of bytes pending delalloc, used by stat to
		 * calculate the real block usage of the file. This is used
		 * only for files. Protected by 'lock'.
		 */
		u64 delalloc_bytes;
		/*
		 * The lowest possible index of the next dir index key which
		 * points to an inode that needs to be logged.
		 * This is used only for directories.
		 * Use the helpers btrfs_get_first_dir_index_to_log() and
		 * btrfs_set_first_dir_index_to_log() to access this field.
		 */
		u64 first_dir_index_to_log;
	};

	union {
		/*
		 * Total number of bytes pending delalloc that fall within a file
		 * range that is either a hole or beyond EOF (and no prealloc extent
		 * exists in the range). This is always <= delalloc_bytes and this
		 * is used only for files. Protected by 'lock'.
		 */
		u64 new_delalloc_bytes;
		/*
		 * The offset of the last dir index key that was logged.
		 * This is used only for directories.
		 */
		u64 last_dir_index_offset;
	};

	union {
		/*
		 * Total number of bytes pending defrag, used by stat to check whether
		 * it needs COW. Protected by 'lock'.
		 * Used by inodes other than the data relocation inode.
		 */
		u64 defrag_bytes;

		/*
		 * Logical address of the block group being relocated.
		 * Used only by the data relocation inode.
		 */
		u64 reloc_block_group_start;
	};

	/*
	 * The size of the file stored in the metadata on disk.  data=ordered
	 * means the in-memory i_size might be larger than the size on disk
	 * because not all the blocks are written yet. Protected by 'lock'.
	 */
	u64 disk_i_size;

	union {
		/*
		 * If this is a directory then index_cnt is the counter for the
		 * index number for new files that are created. For an empty
		 * directory, this must be initialized to BTRFS_DIR_START_INDEX.
		 */
		u64 index_cnt;

		/*
		 * If this is not a directory, this is the number of bytes
		 * outstanding that are going to need csums. This is used in
		 * ENOSPC accounting. Protected by 'lock'.
		 */
		u64 csum_bytes;
	};

	/* Cache the directory index number to speed up dir/file removal */
	u64 dir_index;

	/*
	 * The fsync log has some corner cases that mean we have to check
	 * directories to see if any unlinks have been done before
	 * the directory was logged.  See tree-log.c for all the details.
	 */
	u64 last_unlink_trans;

	union {
		/*
		 * The id/generation of the last transaction where this inode
		 * was either the source or the destination of a clone/dedupe
		 * operation. Used when logging an inode to know if there are
		 * shared extents that need special care when logging checksum
		 * items, to avoid duplicate checksum items in a log (which can
		 * lead to a corruption where we end up with missing checksum
		 * ranges after log replay). Protected by the VFS inode lock.
		 * Used for regular files only.
		 */
		u64 last_reflink_trans;

		/*
		 * In case this is a root stub inode (BTRFS_INODE_ROOT_STUB flag set),
		 * the ID of that root.
		 */
		u64 ref_root_id;
	};

	/* Backwards incompatible flags, lower half of inode_item::flags */
	u32 flags;
	/* Read-only compatibility flags, upper half of inode_item::flags */
	u32 ro_flags;

	struct btrfs_block_rsv block_rsv;

	struct btrfs_delayed_node *delayed_node;

	/* File creation time. */
	u64 i_otime_sec;
	u32 i_otime_nsec;

	/* Hook into fs_info->delayed_iputs */
	struct list_head delayed_iput;

	struct rw_semaphore i_mmap_lock;
	struct inode vfs_inode;
};

static inline u64 btrfs_get_first_dir_index_to_log(const struct btrfs_inode *inode)
{
	return READ_ONCE(inode->first_dir_index_to_log);
}

static inline void btrfs_set_first_dir_index_to_log(struct btrfs_inode *inode,
						    u64 index)
{
	WRITE_ONCE(inode->first_dir_index_to_log, index);
}

/* Type checked and const-preserving VFS inode -> btrfs inode. */
#define BTRFS_I(_inode)								\
	_Generic(_inode,							\
		 struct inode *: container_of(_inode, struct btrfs_inode, vfs_inode),	\
		 const struct inode *: (const struct btrfs_inode *)container_of(	\
					_inode, const struct btrfs_inode, vfs_inode))
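
/*
 * Illustrative only: thanks to _Generic(), BTRFS_I() preserves the constness
 * of its argument:
 *
 *	struct inode *inode;        BTRFS_I(inode) is a struct btrfs_inode *
 *	const struct inode *inode;  BTRFS_I(inode) is a const struct btrfs_inode *
 */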

static inline unsigned long btrfs_inode_hash(u64 objectid,
					     const struct btrfs_root *root)
{
	u64 h = objectid ^ (root->root_key.objectid * GOLDEN_RATIO_PRIME);

#if BITS_PER_LONG == 32
	h = (h >> 32) ^ (h & 0xffffffff);
#endif

	return (unsigned long)h;
}

#if BITS_PER_LONG == 32

/*
 * On 32 bit systems the i_ino of struct inode is 32 bits (unsigned long), so
 * we use the inode's location objectid which is a u64 to avoid truncation.
 */
static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
	u64 ino = inode->objectid;

	if (test_bit(BTRFS_INODE_ROOT_STUB, &inode->runtime_flags))
		ino = inode->vfs_inode.i_ino;
	return ino;
}

#else

static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
	return inode->vfs_inode.i_ino;
}

#endif

static inline void btrfs_get_inode_key(const struct btrfs_inode *inode,
				       struct btrfs_key *key)
{
	key->objectid = btrfs_ino(inode);
	key->type = BTRFS_INODE_ITEM_KEY;
	key->offset = 0;
}

static inline void btrfs_set_inode_number(struct btrfs_inode *inode, u64 ino)
{
#if BITS_PER_LONG == 32
	inode->objectid = ino;
#endif
	inode->vfs_inode.i_ino = ino;
}

static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
{
	i_size_write(&inode->vfs_inode, size);
	inode->disk_i_size = size;
}

static inline bool btrfs_is_free_space_inode(const struct btrfs_inode *inode)
{
	return test_bit(BTRFS_INODE_FREE_SPACE_INODE, &inode->runtime_flags);
}

static inline bool is_data_inode(const struct btrfs_inode *inode)
{
	return btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID;
}

static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
						 int mod)
{
	lockdep_assert_held(&inode->lock);
	inode->outstanding_extents += mod;
	if (btrfs_is_free_space_inode(inode))
		return;
	trace_btrfs_inode_mod_outstanding_extents(inode->root, btrfs_ino(inode),
						  mod, inode->outstanding_extents);
}
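
/*
 * Illustrative only (hypothetical caller): btrfs_mod_outstanding_extents()
 * asserts that the inode's spinlock is held, so callers follow a pattern
 * like the following, with 'num_extents' standing in for whatever delta the
 * caller computed:
 *
 *	spin_lock(&inode->lock);
 *	btrfs_mod_outstanding_extents(inode, num_extents);
 *	spin_unlock(&inode->lock);
 */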

/*
 * Called every time after doing a buffered, direct IO or memory mapped write.
 *
 * This is to ensure that if we write to a file that was previously fsynced in
 * the current transaction, then try to fsync it again in the same transaction,
 * we will know that there were changes in the file and that it needs to be
 * logged.
 */
static inline void btrfs_set_inode_last_sub_trans(struct btrfs_inode *inode)
{
	spin_lock(&inode->lock);
	inode->last_sub_trans = inode->root->log_transid;
	spin_unlock(&inode->lock);
}

/*
 * Should be called while holding the inode's VFS lock in exclusive mode, or
 * while holding the inode's mmap lock (struct btrfs_inode::i_mmap_lock) in
 * either shared or exclusive mode, or in a context where no one else can access
 * the inode concurrently (during inode creation or when loading an inode from
 * disk).
 */
static inline void btrfs_set_inode_full_sync(struct btrfs_inode *inode)
{
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags);
	/*
	 * The inode may have been part of a reflink operation in the last
	 * transaction that modified it, and then a fsync has reset the
	 * last_reflink_trans to avoid subsequent fsyncs in the same
	 * transaction from doing unnecessary work. So update last_reflink_trans
	 * to the last_trans value (we have to be pessimistic and assume a
	 * reflink happened).
	 *
	 * The ->last_trans is protected by the inode's spinlock and we can
	 * have a concurrent ordered extent completion update it. Also set
	 * last_reflink_trans to ->last_trans only if the former is less than
	 * the latter, because we can be called in a context where
	 * last_reflink_trans was set to the current transaction generation
	 * while ->last_trans was not yet updated in the current transaction,
	 * and therefore has a lower value.
	 */
	spin_lock(&inode->lock);
	if (inode->last_reflink_trans < inode->last_trans)
		inode->last_reflink_trans = inode->last_trans;
	spin_unlock(&inode->lock);
}

static inline bool btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
	bool ret = false;

	spin_lock(&inode->lock);
	if (inode->logged_trans == generation &&
	    inode->last_sub_trans <= inode->last_log_commit &&
	    inode->last_sub_trans <= btrfs_get_root_last_log_commit(inode->root))
		ret = true;
	spin_unlock(&inode->lock);
	return ret;
}

/*
 * Check if the inode has flags compatible with compression.
 */
static inline bool btrfs_inode_can_compress(const struct btrfs_inode *inode)
{
	if (inode->flags & BTRFS_INODE_NODATACOW ||
	    inode->flags & BTRFS_INODE_NODATASUM)
		return false;
	return true;
}

static inline void btrfs_assert_inode_locked(struct btrfs_inode *inode)
{
	/* Immediately trigger a crash if the inode is not locked. */
	ASSERT(inode_is_locked(&inode->vfs_inode));
	/* Trigger a splat in dmesg if this task is not holding the lock. */
	lockdep_assert_held(&inode->vfs_inode.i_rwsem);
}

/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define CSUM_FMT				"0x%*phN"
#define CSUM_FMT_VALUE(size, bytes)		size, bytes
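
/*
 * Illustrative use only (message text and variable names are made up):
 * CSUM_FMT consumes two format arguments, which CSUM_FMT_VALUE expands to:
 *
 *	btrfs_warn_rl(fs_info,
 *		      "csum mismatch, have " CSUM_FMT " expected " CSUM_FMT,
 *		      CSUM_FMT_VALUE(csum_size, csum),
 *		      CSUM_FMT_VALUE(csum_size, csum_expected));
 */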

int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
			    u32 pgoff, u8 *csum, const u8 * const csum_expected);
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
			u32 bio_offset, struct bio_vec *bv);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      struct btrfs_file_extent *file_extent,
			      bool nowait, bool strict);

void btrfs_del_delalloc_inode(struct btrfs_inode *inode);
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry);
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index);
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const struct fscrypt_str *name);
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const struct fscrypt_str *name, int add_backref, u64 index);
int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry);
int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
			 int front);

int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
			       bool in_reclaim_context);
int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state);

struct btrfs_new_inode_args {
	/* Input */
	struct inode *dir;
	struct dentry *dentry;
	struct inode *inode;
	bool orphan;
	bool subvol;

	/* Output from btrfs_new_inode_prepare(), input to btrfs_create_new_inode(). */
	struct posix_acl *default_acl;
	struct posix_acl *acl;
	struct fscrypt_name fname;
};
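
/*
 * Rough call sequence for the helpers below, as a sketch only (error handling
 * omitted; 'args', 'trans_num_items', 'trans' and 'ret' are the caller's
 * locals):
 *
 *	ret = btrfs_new_inode_prepare(&args, &trans_num_items);
 *	trans = btrfs_start_transaction(root, trans_num_items);
 *	ret = btrfs_create_new_inode(trans, &args);
 *	btrfs_new_inode_args_destroy(&args);
 */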

int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
			    unsigned int *trans_num_items);
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_new_inode_args *args);
void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args);
struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
				     struct inode *dir);
void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
			       u32 bits);
void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *state, u32 bits);
void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
				 struct extent_state *other);
void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *orig, u64 split);
void btrfs_set_range_writeback(struct btrfs_inode *inode, u64 start, u64 end);
void btrfs_evict_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
void btrfs_free_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int __init btrfs_init_cachep(void);
void __cold btrfs_destroy_cachep(void);
struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
			      struct btrfs_path *path);
struct inode *btrfs_iget(u64 ino, struct btrfs_root *root);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct folio *folio, u64 start, u64 len);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct btrfs_inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size);
void btrfs_add_delayed_iput(struct btrfs_inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint);
int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint);
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
			     u64 start, u64 end, struct writeback_control *wbc);
int btrfs_writepage_cow_fixup(struct folio *folio);
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
					     int compress_type);
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
					  u64 file_offset, u64 disk_bytenr,
					  u64 disk_io_size,
					  struct page **pages);
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
			   struct btrfs_ioctl_encoded_io_args *encoded);
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
			       const struct btrfs_ioctl_encoded_io_args *encoded);

struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino);

extern const struct dentry_operations btrfs_dentry_operations;

/* Inode locking type flags, by default the exclusive lock is taken. */
enum btrfs_ilock_type {
	ENUM_BIT(BTRFS_ILOCK_SHARED),
	ENUM_BIT(BTRFS_ILOCK_TRY),
	ENUM_BIT(BTRFS_ILOCK_MMAP),
};

int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags);
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags);
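
/*
 * Illustrative locking pattern (sketch only): pass the same ilock_flags to
 * btrfs_inode_lock() and btrfs_inode_unlock(); with BTRFS_ILOCK_TRY the lock
 * attempt can fail if the lock is contended:
 *
 *	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 *	if (ret)
 *		return ret;
 *	...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 */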
void btrfs_update_inode_bytes(struct btrfs_inode *inode, const u64 add_bytes,
			      const u64 del_bytes);
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end);
u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				     u64 num_bytes);
struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
				      const struct btrfs_file_extent *file_extent,
				      int type);

#endif