/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * NILFS block mapping.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#ifndef _NILFS_BMAP_H
#define _NILFS_BMAP_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/nilfs2_ondisk.h>	/* nilfs_binfo, nilfs_inode, etc */
#include "alloc.h"
#include "dat.h"

#define NILFS_BMAP_INVALID_PTR	0

#define nilfs_bmap_keydiff_abs(diff)	((diff) < 0 ? -(diff) : (diff))


struct nilfs_bmap;

/**
 * union nilfs_bmap_ptr_req - request for a bmap pointer
 * @bpr_ptr: bmap pointer
 * @bpr_req: request for the persistent (DAT) allocator
 */
union nilfs_bmap_ptr_req {
	__u64 bpr_ptr;
	struct nilfs_palloc_req bpr_req;
};

/**
 * struct nilfs_bmap_stats - bmap statistics
 * @bs_nblocks: number of blocks created or deleted
 */
struct nilfs_bmap_stats {
	unsigned int bs_nblocks;
};

/**
 * struct nilfs_bmap_operations - bmap operation table
 * @bop_lookup:               single block search operation
 * @bop_lookup_contig:        consecutive block search operation
 * @bop_insert:               block insertion operation
 * @bop_delete:               block deletion operation
 * @bop_clear:                block mapping resource release operation
 * @bop_propagate:            operation to propagate dirty state towards the
 *                            mapping root
 * @bop_lookup_dirty_buffers: operation to collect dirty block buffers
 * @bop_assign:               disk block address assignment operation
 * @bop_mark:                 operation to mark in-use blocks as dirty for
 *                            relocation by GC
 * @bop_seek_key:             find valid block key operation
 * @bop_last_key:             find last valid block key operation
 */
struct nilfs_bmap_operations {
	int (*bop_lookup)(const struct nilfs_bmap *, __u64, int, __u64 *);
	int (*bop_lookup_contig)(const struct nilfs_bmap *, __u64, __u64 *,
				 unsigned int);
	int (*bop_insert)(struct nilfs_bmap *, __u64, __u64);
	int (*bop_delete)(struct nilfs_bmap *, __u64);
	void (*bop_clear)(struct nilfs_bmap *);

	int (*bop_propagate)(struct nilfs_bmap *, struct buffer_head *);
	void (*bop_lookup_dirty_buffers)(struct nilfs_bmap *,
					 struct list_head *);

	int (*bop_assign)(struct nilfs_bmap *,
			  struct buffer_head **,
			  sector_t,
			  union nilfs_binfo *);
	int (*bop_mark)(struct nilfs_bmap *, __u64, int);

	int (*bop_seek_key)(const struct nilfs_bmap *, __u64, __u64 *);
	int (*bop_last_key)(const struct nilfs_bmap *, __u64 *);

	/* private: internal use only */
	int (*bop_check_insert)(const struct nilfs_bmap *, __u64);
	int (*bop_check_delete)(struct nilfs_bmap *, __u64);
	int (*bop_gather_data)(struct nilfs_bmap *, __u64 *, __u64 *, int);
};


#define NILFS_BMAP_SIZE		(NILFS_INODE_BMAP_SIZE * sizeof(__le64))
#define NILFS_BMAP_KEY_BIT	BITS_PER_LONG
#define NILFS_BMAP_NEW_PTR_INIT	(1UL << (BITS_PER_LONG - 1))

static inline int nilfs_bmap_is_new_ptr(unsigned long ptr)
{
	return !!(ptr & NILFS_BMAP_NEW_PTR_INIT);
}


/**
 * struct nilfs_bmap - bmap structure
 * @b_u: raw data (block mapping root, as stored in the on-disk inode)
 * @b_sem: semaphore protecting the block mapping
 * @b_inode: owner of bmap
 * @b_ops: bmap operation table
 * @b_last_allocated_key: last allocated key for data block
 * @b_last_allocated_ptr: last allocated ptr for data block
 * @b_ptr_type: pointer type (NILFS_BMAP_PTR_*)
 * @b_state: state flags (NILFS_BMAP_DIRTY)
 * @b_nchildren_per_block: maximum number of child nodes for non-root nodes
 */
struct nilfs_bmap {
	union {
		__u8 u_flags;
		__le64 u_data[NILFS_BMAP_SIZE / sizeof(__le64)];
	} b_u;
	struct rw_semaphore b_sem;
	struct inode *b_inode;
	const struct nilfs_bmap_operations *b_ops;
	__u64 b_last_allocated_key;
	__u64 b_last_allocated_ptr;
	int b_ptr_type;
	int b_state;
	__u16 b_nchildren_per_block;
};
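
/*
 * The operations in @b_ops are normally reached through the wrappers
 * declared later in this header.  The helper below is an illustrative
 * sketch only (a hypothetical "example" function, not part of nilfs2):
 * it shows the general dispatch pattern of taking @b_sem and forwarding
 * to the concrete mapping implementation; the real dispatch lives in
 * bmap.c and may differ in details.
 */
static inline int nilfs_bmap_example_lookup_dispatch(struct nilfs_bmap *bmap,
						     __u64 key, __u64 *ptrp)
{
	int ret;

	down_read(&bmap->b_sem);
	/* level 1, as used by nilfs_bmap_lookup() below, addresses a data block */
	ret = bmap->b_ops->bop_lookup(bmap, key, 1, ptrp);
	up_read(&bmap->b_sem);
	return ret;
}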

/* pointer type */
#define NILFS_BMAP_PTR_P	0	/* physical block number (i.e. LBN) */
#define NILFS_BMAP_PTR_VS	1	/*
					 * virtual block number (single
					 * version)
					 */
#define NILFS_BMAP_PTR_VM	2	/*
					 * virtual block number (has multiple
					 * versions)
					 */
#define NILFS_BMAP_PTR_U	(-1)	/* never perform pointer operations */

#define NILFS_BMAP_USE_VBN(bmap)	((bmap)->b_ptr_type > 0)
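
/*
 * Hedged usage note: whether a bmap stores virtual block numbers decides
 * whether the DAT inode has to be consulted.  A typical (illustrative)
 * selection looks like
 *
 *	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
 *
 * with nilfs_bmap_get_dat() declared further below; the pointer-management
 * helpers near the end of this header then take that @dat (or NULL).
 */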

/* state */
#define NILFS_BMAP_DIRTY	0x00000001

/**
 * struct nilfs_bmap_store - shadow copy of bmap state
 * @data: cached raw block mapping of on-disk inode
 * @last_allocated_key: cached value of last allocated key for data block
 * @last_allocated_ptr: cached value of last allocated ptr for data block
 * @state: cached value of state field of bmap structure
 */
struct nilfs_bmap_store {
	__le64 data[NILFS_BMAP_SIZE / sizeof(__le64)];
	__u64 last_allocated_key;
	__u64 last_allocated_ptr;
	int state;
};

int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *);
int nilfs_bmap_read(struct nilfs_bmap *, struct nilfs_inode *);
void nilfs_bmap_write(struct nilfs_bmap *, struct nilfs_inode *);
int nilfs_bmap_lookup_contig(struct nilfs_bmap *, __u64, __u64 *, unsigned int);
int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec);
int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key);
int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp);
int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp);
int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key);
void nilfs_bmap_clear(struct nilfs_bmap *);
int nilfs_bmap_propagate(struct nilfs_bmap *, struct buffer_head *);
void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *, struct list_head *);
int nilfs_bmap_assign(struct nilfs_bmap *, struct buffer_head **,
		      unsigned long, union nilfs_binfo *);
int nilfs_bmap_lookup_at_level(struct nilfs_bmap *, __u64, int, __u64 *);
int nilfs_bmap_mark(struct nilfs_bmap *, __u64, int);

void nilfs_bmap_init_gc(struct nilfs_bmap *);

void nilfs_bmap_save(const struct nilfs_bmap *, struct nilfs_bmap_store *);
void nilfs_bmap_restore(struct nilfs_bmap *, const struct nilfs_bmap_store *);
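
/*
 * Usage sketch (hypothetical helper, illustrative only): take a shadow
 * copy of the bmap state before a speculative update and roll it back if
 * the update fails.  "do_update" stands in for whatever modification the
 * caller attempts; real users keep the store in their own context.
 */
static inline int nilfs_bmap_example_try_update(struct nilfs_bmap *bmap,
						struct nilfs_bmap_store *store,
						int (*do_update)(struct nilfs_bmap *))
{
	int ret;

	nilfs_bmap_save(bmap, store);
	ret = do_update(bmap);
	if (ret < 0)
		nilfs_bmap_restore(bmap, store);
	return ret;
}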

static inline int nilfs_bmap_lookup(struct nilfs_bmap *bmap, __u64 key,
				    __u64 *ptr)
{
	return nilfs_bmap_lookup_at_level(bmap, key, 1, ptr);
}
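
/*
 * Usage sketch (hypothetical helper, illustrative only): resolve a file
 * block offset to its current block address.  Interpreting -ENOENT as an
 * unmapped block (a hole) follows the way callers commonly handle lookup
 * failures; treat that reading as an assumption.
 */
static inline int nilfs_bmap_example_map_block(struct nilfs_bmap *bmap,
					       __u64 blkoff, __u64 *blocknrp)
{
	int ret = nilfs_bmap_lookup(bmap, blkoff, blocknrp);

	if (ret == -ENOENT)
		*blocknrp = NILFS_BMAP_INVALID_PTR;	/* no mapping (hole) */
	return ret;
}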

/*
 * Internal use only
 */
struct inode *nilfs_bmap_get_dat(const struct nilfs_bmap *);

static inline int nilfs_bmap_prepare_alloc_ptr(struct nilfs_bmap *bmap,
					       union nilfs_bmap_ptr_req *req,
					       struct inode *dat)
{
	if (dat)
		return nilfs_dat_prepare_alloc(dat, &req->bpr_req);
	/* ignore target ptr */
	req->bpr_ptr = bmap->b_last_allocated_ptr++;
	return 0;
}

static inline void nilfs_bmap_commit_alloc_ptr(struct nilfs_bmap *bmap,
					       union nilfs_bmap_ptr_req *req,
					       struct inode *dat)
{
	if (dat)
		nilfs_dat_commit_alloc(dat, &req->bpr_req);
}

static inline void nilfs_bmap_abort_alloc_ptr(struct nilfs_bmap *bmap,
					      union nilfs_bmap_ptr_req *req,
					      struct inode *dat)
{
	if (dat)
		nilfs_dat_abort_alloc(dat, &req->bpr_req);
	else
		bmap->b_last_allocated_ptr--;
}
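
/*
 * Illustrative sketch of the prepare/commit/abort pattern formed by the
 * three helpers above (hypothetical function, not part of nilfs2): a new
 * pointer is reserved first, the caller then tries to wire it into the
 * mapping, and the reservation is either committed or rolled back
 * depending on the outcome.  @dat is the DAT inode for virtual block
 * numbers, or NULL for a bmap that uses raw block addresses (see the
 * NILFS_BMAP_USE_VBN() note earlier).
 */
static inline int nilfs_bmap_example_alloc_ptr(struct nilfs_bmap *bmap,
					       struct inode *dat,
					       int insert_err)
{
	union nilfs_bmap_ptr_req req;
	int ret;

	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (ret < 0)
		return ret;	/* nothing reserved, nothing to undo */

	/*
	 * The caller would insert req.bpr_ptr into the mapping here;
	 * @insert_err stands in for the result of that step.
	 */
	if (insert_err) {
		nilfs_bmap_abort_alloc_ptr(bmap, &req, dat);
		return insert_err;
	}

	nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
	return 0;
}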

static inline int nilfs_bmap_prepare_end_ptr(struct nilfs_bmap *bmap,
					     union nilfs_bmap_ptr_req *req,
					     struct inode *dat)
{
	return dat ? nilfs_dat_prepare_end(dat, &req->bpr_req) : 0;
}

static inline void nilfs_bmap_commit_end_ptr(struct nilfs_bmap *bmap,
					     union nilfs_bmap_ptr_req *req,
					     struct inode *dat)
{
	if (dat)
		nilfs_dat_commit_end(dat, &req->bpr_req,
				     bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
}

static inline void nilfs_bmap_abort_end_ptr(struct nilfs_bmap *bmap,
					    union nilfs_bmap_ptr_req *req,
					    struct inode *dat)
{
	if (dat)
		nilfs_dat_abort_end(dat, &req->bpr_req);
}

static inline void nilfs_bmap_set_target_v(struct nilfs_bmap *bmap, __u64 key,
					   __u64 ptr)
{
	bmap->b_last_allocated_key = key;
	bmap->b_last_allocated_ptr = ptr;
}

__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *,
			      const struct buffer_head *);

__u64 nilfs_bmap_find_target_seq(const struct nilfs_bmap *, __u64);
__u64 nilfs_bmap_find_target_in_group(const struct nilfs_bmap *);


/* Assume that bmap semaphore is locked. */
static inline int nilfs_bmap_dirty(const struct nilfs_bmap *bmap)
{
	return !!(bmap->b_state & NILFS_BMAP_DIRTY);
}

/* Assume that bmap semaphore is locked. */
static inline void nilfs_bmap_set_dirty(struct nilfs_bmap *bmap)
{
	bmap->b_state |= NILFS_BMAP_DIRTY;
}

/* Assume that bmap semaphore is locked. */
static inline void nilfs_bmap_clear_dirty(struct nilfs_bmap *bmap)
{
	bmap->b_state &= ~NILFS_BMAP_DIRTY;
}
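
/*
 * Usage sketch (hypothetical helper): the three dirty-flag helpers above
 * expect @b_sem to be held, so a standalone caller would bracket them
 * itself, roughly like this.
 */
static inline void nilfs_bmap_example_mark_dirty(struct nilfs_bmap *bmap)
{
	down_write(&bmap->b_sem);
	nilfs_bmap_set_dirty(bmap);
	up_write(&bmap->b_sem);
}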


#define NILFS_BMAP_LARGE	0x1

#define NILFS_BMAP_SMALL_LOW	NILFS_DIRECT_KEY_MIN
#define NILFS_BMAP_SMALL_HIGH	NILFS_DIRECT_KEY_MAX
#define NILFS_BMAP_LARGE_LOW	NILFS_BTREE_ROOT_NCHILDREN_MAX
#define NILFS_BMAP_LARGE_HIGH	NILFS_BTREE_KEY_MAX
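
/*
 * Illustrative predicate (hypothetical helper): keys within the "small"
 * range [NILFS_BMAP_SMALL_LOW, NILFS_BMAP_SMALL_HIGH] can be served by the
 * direct mapping, while anything beyond it needs the "large" (B-tree)
 * form; the actual conversion policy is implemented in the bmap/btree
 * code, not here.
 */
static inline int nilfs_bmap_example_key_fits_small(__u64 key)
{
	return key <= NILFS_BMAP_SMALL_HIGH;
}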

#endif	/* _NILFS_BMAP_H */