// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

extern struct kmem_cache *xfs_buf_cache;

/*
 *	Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1u << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1u << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1u << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1u << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1u << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1u << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1u << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES	 (1u << 16)/* inode buffer */
#define _XBF_DQUOTS	 (1u << 17)/* dquot buffer */
#define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1u << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1u << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1u << 22)/* buffer on a delwri queue */

/* flags used only as arguments to access routines */
/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBF_LIVESCAN	 (1u << 28)
#define XBF_INCORE	 (1u << 29)/* lookup only, return if found in cache */
#define XBF_TRYLOCK	 (1u << 30)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1u << 31)/* do not map the buffer */
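
/*
 * The flags in the group above are only ever passed to the access routines
 * and are never stored in b_flags.  A minimal usage sketch, assuming the
 * caller has already set up a single-extent "map" and wants a non-blocking
 * cache probe:
 *
 *	error = xfs_buf_get_map(target, &map, 1,
 *				XBF_INCORE | XBF_TRYLOCK, &bp);
 */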

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_INODES,		"INODES" }, \
	{ _XBF_DQUOTS,		"DQUOTS" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_LIVESCAN,		"LIVESCAN" }, \
	{ XBF_INCORE,		"INCORE" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" }, \
	{ XBF_UNMAPPED,		"UNMAPPED" }
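
/*
 * XFS_BUF_FLAGS is a symbolic flag table for the tracing code; for
 * example, fs/xfs/xfs_trace.h renders b_flags in trace output with:
 *
 *	__print_flags(__entry->flags, "|", XFS_BUF_FLAGS)
 */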

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

struct xfs_buf_cache {
	spinlock_t		bc_lock;
	struct rhashtable	bc_hash;
};

int xfs_buf_cache_init(struct xfs_buf_cache *bch);
void xfs_buf_cache_destroy(struct xfs_buf_cache *bch);

/*
 * The xfs_buftarg contains two notions of "sector size":
 *
 * 1) The metadata sector size, which is the minimum unit and alignment of
 *    I/O performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time and is stored on disk in the
 * superblock's sb_sectsize.
 *
 * The second is derived from the underlying device and controls direct I/O
 * alignment constraints.
 */
struct xfs_buftarg {
	dev_t			bt_dev;
	struct file		*bt_bdev_file;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct file		*bt_file;
	u64			bt_dax_part_off;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		*bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;

	/* built-in cache, if we're not using the perag one */
	struct xfs_buf_cache	bt_cache[];
};
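
/*
 * A sketch of how the second sector size is used: direct I/O alignment
 * checks (as in fs/xfs/xfs_file.c) reject requests that are not aligned to
 * the device's logical sector size, e.g.:
 *
 *	if ((pos | count) & target->bt_logical_sectormask)
 *		return -EINVAL;
 */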

#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
	unsigned int		bm_flags;
};

/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBM_LIVESCAN		(1U << 0)

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
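
/*
 * Discontiguous buffers are described by an array of maps.  A sketch of a
 * two-extent compound buffer lookup; blkno1/blkno2 and len1/len2 are
 * hypothetical values supplied by the caller:
 *
 *	struct xfs_buf_map map[2] = {
 *		{ .bm_bn = blkno1, .bm_len = len1 },
 *		{ .bm_bn = blkno2, .bm_len = len2 },
 *	};
 *	struct xfs_buf *bp;
 *	int error = xfs_buf_get_map(target, map, 2, 0, &bp);
 */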

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
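
/*
 * A minimal verifier table sketch.  The names and magic values below are
 * hypothetical, but the shape follows the verifiers in fs/xfs/libxfs:
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name = "xfs_foo",
 *		.magic = { cpu_to_be32(XFS_FOO_MAGIC),
 *			   cpu_to_be32(XFS_FOO3_MAGIC) },
 *		.verify_read = xfs_foo_read_verify,
 *		.verify_write = xfs_foo_write_verify,
 *	};
 */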

struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */

	xfs_daddr_t		b_rhash_key;	/* buffer cache index */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	struct xfs_buftarg	*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset of b_addr,
						   only for _XBF_KMEM buffers */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure; once it exceeds the configured maximum without an
	 * intervening success, the write is considered to have failed
	 * permanently and the iodone handler will take appropriate action.
	 * (See the sketch after this structure.)
	 *
	 * For retry timeouts, we record the jiffy of the first failure. This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * b_last_error is used to ensure that we are getting repeated errors,
	 * not different errors. e.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
	struct rcu_head		b_rcu;
};
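
/*
 * A simplified sketch of the retry policy described in the comment above
 * b_retries; the authoritative logic lives in the I/O error handling in
 * fs/xfs/xfs_buf.c, and max_retries, timeout and permanently_fail_write()
 * are placeholders for the configured error limits and failure action:
 *
 *	if (bp->b_last_error != bp->b_error) {
 *		bp->b_retries = 0;
 *		bp->b_first_retry_time = jiffies;
 *	}
 *	bp->b_last_error = bp->b_error;
 *	if (++bp->b_retries > max_retries ||
 *	    time_after(jiffies, bp->b_first_retry_time + timeout))
 *		permanently_fail_write(bp);
 */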

/* Finding and Reading Buffers */
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       const struct xfs_buf_ops *ops);

static inline int
xfs_buf_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
}

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
		xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}
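
/*
 * A typical buffer lifecycle sketch: read a block through a verifier, use
 * the contents, then unlock and release in one call.  daddr, numblks and
 * ops are supplied by the caller:
 *
 *	struct xfs_buf *bp;
 *	int error = xfs_buf_read(target, daddr, numblks, 0, &bp, ops);
 *	if (error)
 *		return error;
 *	... examine bp->b_addr ...
 *	xfs_buf_relse(bp);
 */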

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
void xfs_buf_delwri_queue_here(struct xfs_buf *bp, struct list_head *bl);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
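
/*
 * Delayed write usage sketch: buffers are queued onto a caller-owned list
 * and later submitted as one batch:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	...queue any further buffers...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */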

static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
{
	return bp->b_maps[0].bm_bn;
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
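
/*
 * The checksum helpers are normally called from buffer verifiers.  A
 * hypothetical read verifier sketch, where XFS_FOO_CRC_OFF stands in for
 * the on-disk structure's CRC field offset:
 *
 *	static void
 *	xfs_foo_read_verify(struct xfs_buf *bp)
 *	{
 *		if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
 *			xfs_buf_ioerror(bp, -EFSBADCRC);
 *	}
 */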

/*
 *	Handling of buftargs.
 */
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
		struct file *bdev_file);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

/* for xfs_buf_mem.c only: */
int xfs_init_buftarg(struct xfs_buftarg *btp, size_t logical_sectorsize,
		const char *descr);
void xfs_destroy_buftarg(struct xfs_buftarg *btp);

#endif	/* __XFS_BUF_H__ */