// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_remote.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_health.h"

#define ATTR_RMTVALUE_MAPSIZE	1	/* # of map entries at once */

/*
 * Remote Attribute Values
 * =======================
 *
 * Remote extended attribute values are conceptually simple -- they're written
 * to data blocks mapped by an inode's attribute fork, and they have an upper
 * size limit of 64k.  Setting a value does not involve the XFS log.
 *
 * However, on a v5 filesystem, maximally sized remote attr values require one
 * block more than 64k worth of space to hold both the remote attribute value
 * and the per-block value headers (64 bytes each).  On a 4k block filesystem
 * this results in a 68k buffer; on a 64k block filesystem, this would be a
 * 128k buffer.  Note that the log format can only handle a dirty buffer of
 * XFS_MAX_BLOCKSIZE length (64k).  Therefore, we /must/ ensure that remote
 * attribute value buffers never touch the logging system and therefore never
 * have a log item.
 */

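/*
 * To illustrate the numbers above (assuming the 64 byte per-block header on
 * a CRC filesystem): a 4k block holds 4096 - 64 = 4032 value bytes, so a
 * maximally sized 64k (65536 byte) value needs howmany(65536, 4032) = 17
 * blocks, i.e. a 17 * 4096 = 68k buffer.  With 64k blocks, two blocks are
 * needed, giving the 128k buffer mentioned above.
 */
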
/* How many bytes can be stored in a remote value buffer? */
inline unsigned int
xfs_attr3_rmt_buf_space(
	struct xfs_mount	*mp)
{
	unsigned int		blocksize = mp->m_attr_geo->blksize;

	if (xfs_has_crc(mp))
		return blocksize - sizeof(struct xfs_attr3_rmt_hdr);

	return blocksize;
}

/* Compute number of fsblocks needed to store a remote attr value */
unsigned int
xfs_attr3_rmt_blocks(
	struct xfs_mount	*mp,
	unsigned int		attrlen)
{
	/*
	 * Each contiguous block has a header, so it is not just a simple
	 * attribute length to FSB conversion.
	 */
	if (xfs_has_crc(mp))
		return howmany(attrlen, xfs_attr3_rmt_buf_space(mp));

	return XFS_B_TO_FSB(mp, attrlen);
}

/*
 * Checking of the remote attribute header is split into two parts. The
 * verifier does CRC, location and bounds checking; the unpacking function
 * checks the attribute parameters and owner.
 */
static xfs_failaddr_t
xfs_attr3_rmt_hdr_ok(
	void			*ptr,
	xfs_ino_t		ino,
	uint32_t		offset,
	uint32_t		size,
	xfs_daddr_t		bno)
{
	struct xfs_attr3_rmt_hdr *rmt = ptr;

	if (bno != be64_to_cpu(rmt->rm_blkno))
		return __this_address;
	if (offset != be32_to_cpu(rmt->rm_offset))
		return __this_address;
	if (size != be32_to_cpu(rmt->rm_bytes))
		return __this_address;
	if (ino != be64_to_cpu(rmt->rm_owner))
		return __this_address;

	/* ok */
	return NULL;
}

static xfs_failaddr_t
xfs_attr3_rmt_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	void			*ptr,
	xfs_daddr_t		bno)
{
	struct xfs_attr3_rmt_hdr *rmt = ptr;

	if (!xfs_verify_magic(bp, rmt->rm_magic))
		return __this_address;
	if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	if (be64_to_cpu(rmt->rm_blkno) != bno)
		return __this_address;
	if (be32_to_cpu(rmt->rm_bytes) > mp->m_attr_geo->blksize - sizeof(*rmt))
		return __this_address;
	if (be32_to_cpu(rmt->rm_offset) +
				be32_to_cpu(rmt->rm_bytes) > XFS_XATTR_SIZE_MAX)
		return __this_address;
	if (rmt->rm_owner == 0)
		return __this_address;

	return NULL;
}

static int
__xfs_attr3_rmt_read_verify(
	struct xfs_buf	*bp,
	bool		check_crc,
	xfs_failaddr_t	*failaddr)
{
	struct xfs_mount *mp = bp->b_mount;
	char		*ptr;
	unsigned int	len;
	xfs_daddr_t	bno;
	unsigned int	blksize = mp->m_attr_geo->blksize;

	/* no verification of non-crc buffers */
	if (!xfs_has_crc(mp))
		return 0;

	ptr = bp->b_addr;
	bno = xfs_buf_daddr(bp);
	len = BBTOB(bp->b_length);
	ASSERT(len >= blksize);

	while (len > 0) {
		if (check_crc &&
		    !xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
			*failaddr = __this_address;
			return -EFSBADCRC;
		}
		*failaddr = xfs_attr3_rmt_verify(mp, bp, ptr, bno);
		if (*failaddr)
			return -EFSCORRUPTED;
		len -= blksize;
		ptr += blksize;
		bno += BTOBB(blksize);
	}

	if (len != 0) {
		*failaddr = __this_address;
		return -EFSCORRUPTED;
	}

	return 0;
}

static void
xfs_attr3_rmt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;
	int		error;

	error = __xfs_attr3_rmt_read_verify(bp, true, &fa);
	if (error)
		xfs_verifier_error(bp, error, fa);
}

static xfs_failaddr_t
xfs_attr3_rmt_verify_struct(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;
	int		error;

	error = __xfs_attr3_rmt_read_verify(bp, false, &fa);
	return error ? fa : NULL;
}

static void
xfs_attr3_rmt_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_failaddr_t	fa;
	unsigned int	blksize = mp->m_attr_geo->blksize;
	char		*ptr;
	int		len;
	xfs_daddr_t	bno;

	/* no verification of non-crc buffers */
	if (!xfs_has_crc(mp))
		return;

	ptr = bp->b_addr;
	bno = xfs_buf_daddr(bp);
	len = BBTOB(bp->b_length);
	ASSERT(len >= blksize);

	while (len > 0) {
		struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;

		fa = xfs_attr3_rmt_verify(mp, bp, ptr, bno);
		if (fa) {
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
			return;
		}

		/*
		 * Ensure we aren't writing bogus LSNs to disk. See
		 * xfs_attr3_rmt_hdr_set() for the explanation.
		 */
		if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
			xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
			return;
		}
		xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);

		len -= blksize;
		ptr += blksize;
		bno += BTOBB(blksize);
	}

	if (len != 0)
		xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
}

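/*
 * Note on the magic table below: xfs_verify_magic() indexes it by
 * xfs_has_crc(), so slot 0 (pre-CRC filesystems) is left zero because v4
 * remote attribute blocks carry no header and no magic.  The verifiers above
 * bail out early on !xfs_has_crc() mounts anyway.
 */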
const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
	.name = "xfs_attr3_rmt",
	.magic = { 0, cpu_to_be32(XFS_ATTR3_RMT_MAGIC) },
	.verify_read = xfs_attr3_rmt_read_verify,
	.verify_write = xfs_attr3_rmt_write_verify,
	.verify_struct = xfs_attr3_rmt_verify_struct,
};

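/*
 * Initialise the remote attribute block header at @ptr.  The return value is
 * the number of header bytes the caller must skip before copying value data
 * into the block; it is zero on !CRC (v4) filesystems, where remote blocks
 * hold raw value bytes with no header at all.
 */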
STATIC int
xfs_attr3_rmt_hdr_set(
	struct xfs_mount	*mp,
	void			*ptr,
	xfs_ino_t		ino,
	uint32_t		offset,
	uint32_t		size,
	xfs_daddr_t		bno)
{
	struct xfs_attr3_rmt_hdr *rmt = ptr;

	if (!xfs_has_crc(mp))
		return 0;

	rmt->rm_magic = cpu_to_be32(XFS_ATTR3_RMT_MAGIC);
	rmt->rm_offset = cpu_to_be32(offset);
	rmt->rm_bytes = cpu_to_be32(size);
	uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid);
	rmt->rm_owner = cpu_to_be64(ino);
	rmt->rm_blkno = cpu_to_be64(bno);

	/*
	 * Remote attribute blocks are written synchronously, so we don't
	 * have an LSN that we can stamp in them that makes any sense to log
	 * recovery. To ensure that log recovery handles overwrites of these
	 * blocks sanely (i.e. once they've been freed and reallocated as some
	 * other type of metadata) we need to ensure that the LSN has a value
	 * that tells log recovery to ignore the LSN and overwrite the buffer
	 * with whatever is in its log. To do this, we use the magic
	 * NULLCOMMITLSN to indicate that the LSN is invalid.
	 */
	rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);

	return sizeof(struct xfs_attr3_rmt_hdr);
}

/*
 * Helper functions to copy attribute data in and out of the on-disk extents.
 */
STATIC int
xfs_attr_rmtval_copyout(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct xfs_inode	*dp,
	xfs_ino_t		owner,
	unsigned int		*offset,
	unsigned int		*valuelen,
	uint8_t			**dst)
{
	char			*src = bp->b_addr;
	xfs_daddr_t		bno = xfs_buf_daddr(bp);
	unsigned int		len = BBTOB(bp->b_length);
	unsigned int		blksize = mp->m_attr_geo->blksize;

	ASSERT(len >= blksize);

	while (len > 0 && *valuelen > 0) {
		unsigned int hdr_size = 0;
		unsigned int byte_cnt = xfs_attr3_rmt_buf_space(mp);

		byte_cnt = min(*valuelen, byte_cnt);

		if (xfs_has_crc(mp)) {
			if (xfs_attr3_rmt_hdr_ok(src, owner, *offset,
						  byte_cnt, bno)) {
				xfs_alert(mp,
"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/0x%x/0x%llx)",
					bno, *offset, byte_cnt, owner);
				xfs_dirattr_mark_sick(dp, XFS_ATTR_FORK);
				return -EFSCORRUPTED;
			}
			hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
		}

		memcpy(*dst, src + hdr_size, byte_cnt);

		/* roll buffer forwards */
		len -= blksize;
		src += blksize;
		bno += BTOBB(blksize);

		/* roll attribute data forwards */
		*valuelen -= byte_cnt;
		*dst += byte_cnt;
		*offset += byte_cnt;
	}
	return 0;
}

STATIC void
xfs_attr_rmtval_copyin(
	struct xfs_mount *mp,
	struct xfs_buf	*bp,
	xfs_ino_t	ino,
	unsigned int	*offset,
	unsigned int	*valuelen,
	uint8_t		**src)
{
	char		*dst = bp->b_addr;
	xfs_daddr_t	bno = xfs_buf_daddr(bp);
	unsigned int	len = BBTOB(bp->b_length);
	unsigned int	blksize = mp->m_attr_geo->blksize;

	ASSERT(len >= blksize);

	while (len > 0 && *valuelen > 0) {
		unsigned int hdr_size;
		unsigned int byte_cnt = xfs_attr3_rmt_buf_space(mp);

		byte_cnt = min(*valuelen, byte_cnt);
		hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
						 byte_cnt, bno);

		memcpy(dst + hdr_size, *src, byte_cnt);

		/*
		 * If this is the last block, zero the remainder of it.
		 * Check that we are actually the last block, too.
		 */
		if (byte_cnt + hdr_size < blksize) {
			ASSERT(*valuelen - byte_cnt == 0);
			ASSERT(len == blksize);
			memset(dst + hdr_size + byte_cnt, 0,
					blksize - hdr_size - byte_cnt);
		}

		/* roll buffer forwards */
		len -= blksize;
		dst += blksize;
		bno += BTOBB(blksize);

		/* roll attribute data forwards */
		*valuelen -= byte_cnt;
		*src += byte_cnt;
		*offset += byte_cnt;
	}
}

/*
 * Read the value associated with an attribute from the out-of-line buffer
 * that we stored it in.
 *
 * Returns 0 on successful retrieval, otherwise an error.
 */
int
xfs_attr_rmtval_get(
	struct xfs_da_args	*args)
{
	struct xfs_bmbt_irec	map[ATTR_RMTVALUE_MAPSIZE];
	struct xfs_mount	*mp = args->dp->i_mount;
	struct xfs_buf		*bp;
	xfs_dablk_t		lblkno = args->rmtblkno;
	uint8_t			*dst = args->value;
	unsigned int		valuelen;
	int			nmap;
	int			error;
	unsigned int		blkcnt = args->rmtblkcnt;
	int			i;
	unsigned int		offset = 0;

	trace_xfs_attr_rmtval_get(args);

	ASSERT(args->valuelen != 0);
	ASSERT(args->rmtvaluelen == args->valuelen);

	valuelen = args->rmtvaluelen;
	while (valuelen > 0) {
		nmap = ATTR_RMTVALUE_MAPSIZE;
		error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
				       blkcnt, map, &nmap,
				       XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
		ASSERT(nmap >= 1);

		for (i = 0; (i < nmap) && (valuelen > 0); i++) {
			xfs_daddr_t	dblkno;
			int		dblkcnt;

			ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
			       (map[i].br_startblock != HOLESTARTBLOCK));
			dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
			dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			error = xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt,
					0, &bp, &xfs_attr3_rmt_buf_ops);
			if (xfs_metadata_is_sick(error))
				xfs_dirattr_mark_sick(args->dp, XFS_ATTR_FORK);
			if (error)
				return error;

			error = xfs_attr_rmtval_copyout(mp, bp, args->dp,
					args->owner, &offset, &valuelen, &dst);
			xfs_buf_relse(bp);
			if (error)
				return error;

			/* roll attribute extent map forwards */
			lblkno += map[i].br_blockcount;
			blkcnt -= map[i].br_blockcount;
		}
	}
	ASSERT(valuelen == 0);
	return 0;
}

/*
 * Find a "hole" in the attribute address space large enough for us to drop
 * the new attribute's value into.
 */
int
xfs_attr_rmt_find_hole(
	struct xfs_da_args	*args)
{
	struct xfs_inode	*dp = args->dp;
	struct xfs_mount	*mp = dp->i_mount;
	int			error;
	unsigned int		blkcnt;
	xfs_fileoff_t		lfileoff = 0;

	/*
	 * Because CRC-enabled attributes have headers, we can't just do a
	 * straight byte to FSB conversion and have to take the header space
	 * into account.
	 */
	blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen);
	error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
						   XFS_ATTR_FORK);
	if (error)
		return error;

	args->rmtblkno = (xfs_dablk_t)lfileoff;
	args->rmtblkcnt = blkcnt;

	return 0;
}

int
xfs_attr_rmtval_set_value(
	struct xfs_da_args	*args)
{
	struct xfs_inode	*dp = args->dp;
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_bmbt_irec	map;
	xfs_dablk_t		lblkno;
	uint8_t			*src = args->value;
	unsigned int		blkcnt;
	unsigned int		valuelen;
	int			nmap;
	int			error;
	unsigned int		offset = 0;

	/*
	 * Roll through the "value", copying the attribute value to the
	 * already-allocated blocks.  Blocks are written synchronously
	 * so that we can know they are all on disk before we turn off
	 * the INCOMPLETE flag.
	 */
	lblkno = args->rmtblkno;
	blkcnt = args->rmtblkcnt;
	valuelen = args->rmtvaluelen;
	while (valuelen > 0) {
		struct xfs_buf	*bp;
		xfs_daddr_t	dblkno;
		int		dblkcnt;

		ASSERT(blkcnt > 0);

		nmap = 1;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
				       blkcnt, &map, &nmap,
				       XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
		ASSERT(nmap == 1);
		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
		       (map.br_startblock != HOLESTARTBLOCK));

		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
		dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);

		error = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, &bp);
		if (error)
			return error;
		bp->b_ops = &xfs_attr3_rmt_buf_ops;

		xfs_attr_rmtval_copyin(mp, bp, args->owner, &offset, &valuelen,
				&src);

		error = xfs_bwrite(bp);	/* GROT: NOTE: synchronous write */
		xfs_buf_relse(bp);
		if (error)
			return error;

		/* roll attribute extent map forwards */
		lblkno += map.br_blockcount;
		blkcnt -= map.br_blockcount;
	}
	ASSERT(valuelen == 0);
	return 0;
}

/* Mark stale any incore buffers for the remote value. */
int
xfs_attr_rmtval_stale(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*map,
	xfs_buf_flags_t		incore_flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buf		*bp;
	int			error;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	if (XFS_IS_CORRUPT(mp, map->br_startblock == DELAYSTARTBLOCK) ||
	    XFS_IS_CORRUPT(mp, map->br_startblock == HOLESTARTBLOCK)) {
		xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
		return -EFSCORRUPTED;
	}

	error = xfs_buf_incore(mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, map->br_startblock),
			XFS_FSB_TO_BB(mp, map->br_blockcount),
			incore_flags, &bp);
	if (error) {
		if (error == -ENOENT)
			return 0;
		return error;
	}

	xfs_buf_stale(bp);
	xfs_buf_relse(bp);
	return 0;
}

/*
 * Find a hole for the attr and store it in the delayed attr context.  This
 * initializes the context to roll through allocating an attr extent for a
 * delayed attr operation
 */
int
xfs_attr_rmtval_find_space(
	struct xfs_attr_intent		*attr)
{
	struct xfs_da_args		*args = attr->xattri_da_args;
	struct xfs_bmbt_irec		*map = &attr->xattri_map;
	int				error;

	attr->xattri_lblkno = 0;
	attr->xattri_blkcnt = 0;
	args->rmtblkcnt = 0;
	args->rmtblkno = 0;
	memset(map, 0, sizeof(struct xfs_bmbt_irec));

	error = xfs_attr_rmt_find_hole(args);
	if (error)
		return error;

	attr->xattri_blkcnt = args->rmtblkcnt;
	attr->xattri_lblkno = args->rmtblkno;

	return 0;
}

/*
 * Write one block of the value associated with an attribute into the
 * out-of-line buffer that we have defined for it. This is similar to a subset
 * of xfs_attr_rmtval_set, but records the current block to the delayed attr
 * context, and leaves transaction handling to the caller.
 */
int
xfs_attr_rmtval_set_blk(
	struct xfs_attr_intent		*attr)
{
	struct xfs_da_args		*args = attr->xattri_da_args;
	struct xfs_inode		*dp = args->dp;
	struct xfs_bmbt_irec		*map = &attr->xattri_map;
	int nmap;
	int error;

	nmap = 1;
	error = xfs_bmapi_write(args->trans, dp,
			(xfs_fileoff_t)attr->xattri_lblkno,
			attr->xattri_blkcnt, XFS_BMAPI_ATTRFORK, args->total,
			map, &nmap);
	if (error)
		return error;

	ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
	       (map->br_startblock != HOLESTARTBLOCK));

	/* roll attribute extent map forwards */
	attr->xattri_lblkno += map->br_blockcount;
	attr->xattri_blkcnt -= map->br_blockcount;

	return 0;
}

/*
 * Remove the value associated with an attribute by deleting the
 * out-of-line buffer that it is stored on.
 */
int
xfs_attr_rmtval_invalidate(
	struct xfs_da_args	*args)
{
	xfs_dablk_t		lblkno;
	unsigned int		blkcnt;
	int			error;

	/*
	 * Roll through the "value", invalidating the attribute value's blocks.
	 */
	lblkno = args->rmtblkno;
	blkcnt = args->rmtblkcnt;
	while (blkcnt > 0) {
		struct xfs_bmbt_irec	map;
		int			nmap;

		/*
		 * Try to remember where we decided to put the value.
		 */
		nmap = 1;
		error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
				       blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(args->dp->i_mount, nmap != 1)) {
			xfs_bmap_mark_sick(args->dp, XFS_ATTR_FORK);
			return -EFSCORRUPTED;
		}
		error = xfs_attr_rmtval_stale(args->dp, &map, XBF_TRYLOCK);
		if (error)
			return error;

		lblkno += map.br_blockcount;
		blkcnt -= map.br_blockcount;
	}
	return 0;
}

/*
 * Remove the value associated with an attribute by deleting the out-of-line
 * buffer that it is stored on. Returns -EAGAIN for the caller to refresh the
 * transaction and re-call the function.  Callers should keep calling this
 * routine until it returns something other than -EAGAIN.
 */
int
xfs_attr_rmtval_remove(
	struct xfs_attr_intent		*attr)
{
	struct xfs_da_args		*args = attr->xattri_da_args;
	int				error, done;

	/*
	 * Unmap value blocks for this attr.
	 */
	error = xfs_bunmapi(args->trans, args->dp, args->rmtblkno,
			    args->rmtblkcnt, XFS_BMAPI_ATTRFORK, 1, &done);
	if (error)
		return error;

	/*
	 * We don't need an explicit state here to pick up where we left off.
	 * We can figure it out using the !done return code. The actual value
	 * of attr->xattri_dela_state may be some value reminiscent of the
	 * calling function, but its value is irrelevant within the context of
	 * this function. Once we are done here, the next state is set as
	 * needed by the parent.
	 */
	if (!done) {
		trace_xfs_attr_rmtval_remove_return(attr->xattri_dela_state,
						    args->dp);
		return -EAGAIN;
	}

	args->rmtblkno = 0;
	args->rmtblkcnt = 0;
	return 0;
}