// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS direct block pointer.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/errno.h>
#include "nilfs.h"
#include "page.h"
#include "direct.h"
#include "alloc.h"
#include "dat.h"

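/*
 * The direct bmap keeps its block pointers in a small on-memory array
 * that immediately follows the struct nilfs_direct_node header stored
 * in bmap->b_u.u_data.  The helpers below locate that array and read
 * or write a single little-endian pointer slot by key.
 */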
static inline __le64 *nilfs_direct_dptrs(const struct nilfs_bmap *direct)
{
	return (__le64 *)
		((struct nilfs_direct_node *)direct->b_u.u_data + 1);
}

static inline __u64
nilfs_direct_get_ptr(const struct nilfs_bmap *direct, __u64 key)
{
	return le64_to_cpu(*(nilfs_direct_dptrs(direct) + key));
}

static inline void nilfs_direct_set_ptr(struct nilfs_bmap *direct,
					__u64 key, __u64 ptr)
{
	*(nilfs_direct_dptrs(direct) + key) = cpu_to_le64(ptr);
}

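/*
 * nilfs_direct_lookup - look up the block pointer mapped to a key
 *
 * Returns 0 and stores the pointer in @ptrp, or -ENOENT if @key is
 * beyond the direct range, @level is not 1, or no block is mapped at
 * @key.
 */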
static int nilfs_direct_lookup(const struct nilfs_bmap *direct,
			       __u64 key, int level, __u64 *ptrp)
{
	__u64 ptr;

	if (key > NILFS_DIRECT_KEY_MAX || level != 1)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	*ptrp = ptr;
	return 0;
}

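/*
 * nilfs_direct_lookup_contig - look up a run of contiguous blocks
 *
 * Resolves the pointer at @key and then counts how many of the
 * following keys, up to @maxblocks, map to consecutive block numbers.
 * When the bmap uses virtual block numbers, pointers are translated
 * through the DAT first.  Returns the number of contiguous blocks
 * found (at least 1) with the starting block number stored in @ptrp,
 * or a negative error code.  A missing DAT entry is reported as
 * -EINVAL to tell the bmap layer that the metadata is corrupted.
 */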
static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct,
				      __u64 key, __u64 *ptrp,
				      unsigned int maxblocks)
{
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int ret, cnt;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	ptr = nilfs_direct_get_ptr(direct, key);
	if (ptr == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	if (NILFS_BMAP_USE_VBN(direct)) {
		dat = nilfs_bmap_get_dat(direct);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			goto dat_error;
		ptr = blocknr;
	}

	maxblocks = min_t(unsigned int, maxblocks,
			  NILFS_DIRECT_KEY_MAX - key + 1);
	for (cnt = 1; cnt < maxblocks &&
		     (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) !=
		     NILFS_BMAP_INVALID_PTR;
	     cnt++) {
		if (dat) {
			ret = nilfs_dat_translate(dat, ptr2, &blocknr);
			if (ret < 0)
				goto dat_error;
			ptr2 = blocknr;
		}
		if (ptr2 != ptr + cnt)
			break;
	}
	*ptrp = ptr;
	return cnt;

 dat_error:
	if (ret == -ENOENT)
		ret = -EINVAL;  /* Notify bmap layer of metadata corruption */
	return ret;
}

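/*
 * nilfs_direct_find_target_v - pick a hint for virtual block allocation
 *
 * Prefers a pointer that continues the last sequential access and
 * falls back to a block-group based hint otherwise.
 */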
static __u64
nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(direct, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;

	/* block group */
	return nilfs_bmap_find_target_in_group(direct);
}

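/*
 * nilfs_direct_insert - insert a block at the given key
 *
 * @ptr holds the address of the buffer head of the new block, cast to
 * __u64.  A block pointer (a virtual block number when the bmap uses
 * VBNs) is allocated, the buffer is marked volatile, the pointer is
 * stored in the slot for @key, and the bmap dirty flag and inode block
 * count are updated.  Returns -ENOENT if @key is out of range and
 * -EEXIST if a block is already mapped at @key.
 */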
static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat = NULL;
	struct buffer_head *bh;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX)
		return -ENOENT;
	if (nilfs_direct_get_ptr(bmap, key) != NILFS_BMAP_INVALID_PTR)
		return -EEXIST;

	if (NILFS_BMAP_USE_VBN(bmap)) {
		req.bpr_ptr = nilfs_direct_find_target_v(bmap, key);
		dat = nilfs_bmap_get_dat(bmap);
	}
	ret = nilfs_bmap_prepare_alloc_ptr(bmap, &req, dat);
	if (!ret) {
		/* ptr must be a pointer to a buffer head. */
		bh = (struct buffer_head *)((unsigned long)ptr);
		set_buffer_nilfs_volatile(bh);

		nilfs_bmap_commit_alloc_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, req.bpr_ptr);

		if (!nilfs_bmap_dirty(bmap))
			nilfs_bmap_set_dirty(bmap);

		if (NILFS_BMAP_USE_VBN(bmap))
			nilfs_bmap_set_target_v(bmap, key, req.bpr_ptr);

		nilfs_inode_add_blocks(bmap->b_inode, 1);
	}
	return ret;
}

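/*
 * nilfs_direct_delete - delete the block mapped to the given key
 *
 * Ends the lifetime of the block pointer (through the DAT when virtual
 * block numbers are used), invalidates the slot for @key, and
 * decrements the inode block count.  Returns -ENOENT if no block is
 * mapped at @key.
 */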
static int nilfs_direct_delete(struct nilfs_bmap *bmap, __u64 key)
{
	union nilfs_bmap_ptr_req req;
	struct inode *dat;
	int ret;

	if (key > NILFS_DIRECT_KEY_MAX ||
	    nilfs_direct_get_ptr(bmap, key) == NILFS_BMAP_INVALID_PTR)
		return -ENOENT;

	dat = NILFS_BMAP_USE_VBN(bmap) ? nilfs_bmap_get_dat(bmap) : NULL;
	req.bpr_ptr = nilfs_direct_get_ptr(bmap, key);

	ret = nilfs_bmap_prepare_end_ptr(bmap, &req, dat);
	if (!ret) {
		nilfs_bmap_commit_end_ptr(bmap, &req, dat);
		nilfs_direct_set_ptr(bmap, key, NILFS_BMAP_INVALID_PTR);
		nilfs_inode_sub_blocks(bmap->b_inode, 1);
	}
	return ret;
}

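/* Find the first mapped key at or after @start; -ENOENT if none. */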
static int nilfs_direct_seek_key(const struct nilfs_bmap *direct, __u64 start,
				 __u64 *keyp)
{
	__u64 key;

	for (key = start; key <= NILFS_DIRECT_KEY_MAX; key++) {
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR) {
			*keyp = key;
			return 0;
		}
	}
	return -ENOENT;
}

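/* Return the largest mapped key in *keyp; -ENOENT if nothing is mapped. */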
static int nilfs_direct_last_key(const struct nilfs_bmap *direct, __u64 *keyp)
{
	__u64 key, lastkey;

	lastkey = NILFS_DIRECT_KEY_MAX + 1;
	for (key = NILFS_DIRECT_KEY_MIN; key <= NILFS_DIRECT_KEY_MAX; key++)
		if (nilfs_direct_get_ptr(direct, key) !=
		    NILFS_BMAP_INVALID_PTR)
			lastkey = key;

	if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
		return -ENOENT;

	*keyp = lastkey;

	return 0;
}

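/*
 * Nonzero means @key does not fit in the direct range, so the bmap has
 * to be converted before the insertion can proceed.
 */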
static int nilfs_direct_check_insert(const struct nilfs_bmap *bmap, __u64 key)
{
	return key > NILFS_DIRECT_KEY_MAX;
}

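/*
 * Copy up to @nitems mapped key/pointer pairs into @keys and @ptrs and
 * return how many were gathered.
 */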
static int nilfs_direct_gather_data(struct nilfs_bmap *direct,
				    __u64 *keys, __u64 *ptrs, int nitems)
{
	__u64 key;
	__u64 ptr;
	int n;

	if (nitems > NILFS_DIRECT_NBLOCKS)
		nitems = NILFS_DIRECT_NBLOCKS;
	n = 0;
	for (key = 0; key < nitems; key++) {
		ptr = nilfs_direct_get_ptr(direct, key);
		if (ptr != NILFS_BMAP_INVALID_PTR) {
			keys[n] = key;
			ptrs[n] = ptr;
			n++;
		}
	}
	return n;
}

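/*
 * nilfs_direct_delete_and_convert - delete a block and switch the bmap
 * to a direct mapping
 *
 * The block at @key is deleted with the bmap's current operations and
 * the remaining resources are released.  The surviving key/pointer
 * pairs passed in @keys and @ptrs (@n entries, gathered from the old
 * mapping) are then written into the direct pointer array, except for
 * the slot of @key itself, and the bmap is re-initialized as a direct
 * mapping.
 */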
int nilfs_direct_delete_and_convert(struct nilfs_bmap *bmap,
				    __u64 key, __u64 *keys, __u64 *ptrs, int n)
{
	__le64 *dptrs;
	int ret, i, j;

	/* no need to allocate any resource for conversion */

	/* delete */
	ret = bmap->b_ops->bop_delete(bmap, key);
	if (ret < 0)
		return ret;

	/* free resources */
	if (bmap->b_ops->bop_clear != NULL)
		bmap->b_ops->bop_clear(bmap);

	/* convert */
	dptrs = nilfs_direct_dptrs(bmap);
	for (i = 0, j = 0; i < NILFS_DIRECT_NBLOCKS; i++) {
		if ((j < n) && (i == keys[j])) {
			dptrs[i] = (i != key) ?
				cpu_to_le64(ptrs[j]) :
				NILFS_BMAP_INVALID_PTR;
			j++;
		} else
			dptrs[i] = NILFS_BMAP_INVALID_PTR;
	}

	nilfs_direct_init(bmap);
	return 0;
}

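/*
 * nilfs_direct_propagate - propagate dirtiness of a data buffer
 *
 * For bmaps using virtual block numbers: the first time @bh is dirtied
 * its block is moved to a fresh DAT entry, the buffer is marked
 * volatile, and the new virtual block number is stored in the pointer
 * slot; on later calls the existing DAT entry is merely marked dirty.
 * Bmaps with physical pointers need no propagation and return 0.
 */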
static int nilfs_direct_propagate(struct nilfs_bmap *bmap,
				  struct buffer_head *bh)
{
	struct nilfs_palloc_req oldreq, newreq;
	struct inode *dat;
	__u64 key;
	__u64 ptr;
	int ret;

	if (!NILFS_BMAP_USE_VBN(bmap))
		return 0;

	dat = nilfs_bmap_get_dat(bmap);
	key = nilfs_bmap_data_get_key(bmap, bh);
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (!buffer_nilfs_volatile(bh)) {
		oldreq.pr_entry_nr = ptr;
		newreq.pr_entry_nr = ptr;
		ret = nilfs_dat_prepare_update(dat, &oldreq, &newreq);
		if (ret < 0)
			return ret;
		nilfs_dat_commit_update(dat, &oldreq, &newreq,
					bmap->b_ptr_type == NILFS_BMAP_PTR_VS);
		set_buffer_nilfs_volatile(bh);
		nilfs_direct_set_ptr(bmap, key, newreq.pr_entry_nr);
	} else
		ret = nilfs_dat_mark_dirty(dat, ptr);

	return ret;
}

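/*
 * nilfs_direct_assign_v - assign a disk block through the DAT
 *
 * Registers @blocknr as the on-disk location of virtual block @ptr in
 * the DAT and records the vblocknr/blkoff pair in @binfo.
 */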
static int nilfs_direct_assign_v(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	struct inode *dat = nilfs_bmap_get_dat(direct);
	union nilfs_bmap_ptr_req req;
	int ret;

	req.bpr_ptr = ptr;
	ret = nilfs_dat_prepare_start(dat, &req.bpr_req);
	if (!ret) {
		nilfs_dat_commit_start(dat, &req.bpr_req, blocknr);
		binfo->bi_v.bi_vblocknr = cpu_to_le64(ptr);
		binfo->bi_v.bi_blkoff = cpu_to_le64(key);
	}
	return ret;
}

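/*
 * nilfs_direct_assign_p - assign a physical disk block number directly
 *
 * Stores @blocknr in the pointer slot for @key and fills the
 * nilfs_binfo_dat part of @binfo with the block offset and level.
 */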
static int nilfs_direct_assign_p(struct nilfs_bmap *direct,
				 __u64 key, __u64 ptr,
				 struct buffer_head **bh,
				 sector_t blocknr,
				 union nilfs_binfo *binfo)
{
	nilfs_direct_set_ptr(direct, key, blocknr);

	binfo->bi_dat.bi_blkoff = cpu_to_le64(key);
	binfo->bi_dat.bi_level = 0;
	memset(binfo->bi_dat.bi_pad, 0, sizeof(binfo->bi_dat.bi_pad));

	return 0;
}

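/*
 * nilfs_direct_assign - assign a disk block number to a buffer's block
 *
 * Validates the key and pointer of the block behind *@bh and
 * dispatches to the virtual or physical variant above.  Returns
 * -EINVAL after a critical message if the mapping is inconsistent.
 */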
static int nilfs_direct_assign(struct nilfs_bmap *bmap,
			       struct buffer_head **bh,
			       sector_t blocknr,
			       union nilfs_binfo *binfo)
{
	__u64 key;
	__u64 ptr;

	key = nilfs_bmap_data_get_key(bmap, *bh);
	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid key: %llu",
			   __func__,
			   bmap->b_inode->i_ino, (unsigned long long)key);
		return -EINVAL;
	}
	ptr = nilfs_direct_get_ptr(bmap, key);
	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
		nilfs_crit(bmap->b_inode->i_sb,
			   "%s (ino=%lu): invalid pointer: %llu",
			   __func__,
			   bmap->b_inode->i_ino, (unsigned long long)ptr);
		return -EINVAL;
	}

	return NILFS_BMAP_USE_VBN(bmap) ?
		nilfs_direct_assign_v(bmap, key, ptr, bh, blocknr, binfo) :
		nilfs_direct_assign_p(bmap, key, ptr, bh, blocknr, binfo);
}

static const struct nilfs_bmap_operations nilfs_direct_ops = {
	.bop_lookup		=	nilfs_direct_lookup,
	.bop_lookup_contig	=	nilfs_direct_lookup_contig,
	.bop_insert		=	nilfs_direct_insert,
	.bop_delete		=	nilfs_direct_delete,
	.bop_clear		=	NULL,

	.bop_propagate		=	nilfs_direct_propagate,

	.bop_lookup_dirty_buffers	=	NULL,

	.bop_assign		=	nilfs_direct_assign,
	.bop_mark		=	NULL,

	.bop_seek_key		=	nilfs_direct_seek_key,
	.bop_last_key		=	nilfs_direct_last_key,

	.bop_check_insert	=	nilfs_direct_check_insert,
	.bop_check_delete	=	NULL,
	.bop_gather_data	=	nilfs_direct_gather_data,
};

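/*
 * nilfs_direct_init - initialize a bmap as a direct mapping
 *
 * Installs the direct mapping operations; always returns 0.
 */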
int nilfs_direct_init(struct nilfs_bmap *bmap)
{
	bmap->b_ops = &nilfs_direct_ops;
	return 0;
}