/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2021-2024 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef __XFS_SCRUB_ISCAN_H__
#define __XFS_SCRUB_ISCAN_H__

struct xchk_iscan {
	struct xfs_scrub	*sc;

	/* Lock to protect the scan cursor. */
	struct mutex		lock;

	/*
	 * This is the first inode in the inumber address space that we
	 * examined.  When the scan wraps around back to here, the scan is
	 * finished.
	 */
	xfs_ino_t		scan_start_ino;

	/* This is the inode that will be examined next. */
	xfs_ino_t		cursor_ino;
	/* If nonzero, skip this inode when scanning. */
	xfs_ino_t		skip_ino;

	/*
	 * This is the last inode that we've successfully scanned, either
	 * because the caller scanned it, or we moved the cursor past an empty
	 * part of the inode address space.  Scan callers should only use the
	 * xchk_iscan_visit function to modify this.
	 */
	xfs_ino_t		__visited_ino;

	/* Operational state of the livescan. */
	unsigned long		__opstate;

	/* Give up on iterating @cursor_ino if we can't iget it by this time. */
	unsigned long		__iget_deadline;

	/* Amount of time (in ms) that we will try to iget an inode. */
	unsigned int		iget_timeout;

	/* Wait this many ms to retry an iget. */
	unsigned int		iget_retry_delay;

	/*
	 * The scan grabs batches of inodes and stashes them here before
	 * handing them out with _iter.  Unallocated inodes are set in the
	 * mask so that all updates to that inode are selected for live
	 * update propagation.
	 */
	xfs_ino_t		__batch_ino;
	xfs_inofree_t		__skipped_inomask;
	struct xfs_inode	*__inodes[XFS_INODES_PER_CHUNK];
};
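/*
 * Illustrative sketch of how the batch fields relate (an assumption drawn
 * from the comments above, not a guarantee of this interface): inode @ino
 * occupies slot (@ino - __batch_ino) in __inodes[], and the matching bit in
 * __skipped_inomask marks inodes that were unallocated when the batch was
 * grabbed:
 *
 *	unsigned int	idx = ino - iscan->__batch_ino;
 *	bool		skipped;
 *
 *	skipped = iscan->__skipped_inomask & XFS_INOBT_MASK(idx);
 */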

/* Set if the scan has been aborted due to some event in the fs. */
#define XCHK_ISCAN_OPSTATE_ABORTED	(1)

/* Use trylock to acquire the AGI */
#define XCHK_ISCAN_OPSTATE_TRYLOCK_AGI	(2)

static inline bool
xchk_iscan_aborted(const struct xchk_iscan *iscan)
{
	return test_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate);
}

static inline void
xchk_iscan_abort(struct xchk_iscan *iscan)
{
	set_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate);
}

static inline bool
xchk_iscan_agi_needs_trylock(const struct xchk_iscan *iscan)
{
	return test_bit(XCHK_ISCAN_OPSTATE_TRYLOCK_AGI, &iscan->__opstate);
}

static inline void
xchk_iscan_set_agi_trylock(struct xchk_iscan *iscan)
{
	set_bit(XCHK_ISCAN_OPSTATE_TRYLOCK_AGI, &iscan->__opstate);
}
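/*
 * Sketch of the abort protocol these helpers imply (hedged; the -ECANCELED
 * return is an assumption, not something this header specifies): any thread
 * that sees an event invalidating the scan calls xchk_iscan_abort(), and the
 * scanner polls for the flag before trusting its results:
 *
 *	if (xchk_iscan_aborted(iscan))
 *		return -ECANCELED;
 */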

void xchk_iscan_start(struct xfs_scrub *sc, unsigned int iget_timeout,
		unsigned int iget_retry_delay, struct xchk_iscan *iscan);
void xchk_iscan_finish_early(struct xchk_iscan *iscan);
void xchk_iscan_teardown(struct xchk_iscan *iscan);

int xchk_iscan_iter(struct xchk_iscan *iscan, struct xfs_inode **ipp);
void xchk_iscan_iter_finish(struct xchk_iscan *iscan);
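/*
 * Sketch of a full scan loop (illustrative only; the iteration return
 * convention assumed here -- 1 when an inode is handed out, 0 when the scan
 * completes, negative errno on failure -- plus the timeout values and the
 * examine_inode() helper are assumptions, not guarantees of this header).
 * xchk_iscan_mark_visited() is declared just below:
 *
 *	struct xchk_iscan	iscan;
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	xchk_iscan_start(sc, 30000, 100, &iscan);
 *	while ((error = xchk_iscan_iter(&iscan, &ip)) == 1) {
 *		error = examine_inode(sc, ip);
 *		xchk_iscan_mark_visited(&iscan, ip);
 *		xfs_irele(ip);
 *		if (error)
 *			break;
 *	}
 *	xchk_iscan_iter_finish(&iscan);
 *	xchk_iscan_teardown(&iscan);
 */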

void xchk_iscan_mark_visited(struct xchk_iscan *iscan, struct xfs_inode *ip);
bool xchk_iscan_want_live_update(struct xchk_iscan *iscan, xfs_ino_t ino);
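/*
 * Sketch of live update filtering (illustrative; the hook context and
 * xchk_apply_update() are hypothetical): a change notification that fires
 * while the scan runs should only be folded into the scan's dataset when the
 * inode has already been visited or otherwise falls within the scanned range:
 *
 *	if (xchk_iscan_want_live_update(&iscan, ip->i_ino))
 *		error = xchk_apply_update(&iscan, ip);
 */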

#endif /* __XFS_SCRUB_ISCAN_H__ */