// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_rtbitmap.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	if (XFS_IS_REALTIME_INODE(ip))
		return XFS_FSB_TO_BB(ip->i_mount, fsb);
	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}
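
/*
 * Illustrative usage sketch (not part of the original file, kept compiled
 * out): translating the start block of a mapped extent to a disk address,
 * e.g. for tracing.  The helper hides the realtime vs. data device
 * distinction.  example_extent_daddr() is a hypothetical name.
 */
#if 0
static xfs_daddr_t
example_extent_daddr(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec)
{
	/* Only meaningful for real extents, not holes or delalloc. */
	ASSERT(xfs_bmap_is_real_extent(irec));
	return xfs_fsb_to_db(ip, irec->br_startblock);
}
#endif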

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(target->bt_bdev,
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_KERNEL, 0);
}
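
/*
 * Worked example of the unit conversion above (illustrative, compiled out):
 * with 4096-byte filesystem blocks, s_blocksize_bits is 12, so the shift is
 * 12 - 9 = 3 and each filesystem block covers 1 << 3 = 8 of the 512-byte
 * sectors handed to blkdev_issue_zeroout().  example_fsb_to_sectors() is a
 * hypothetical helper.
 */
#if 0
static sector_t
example_fsb_to_sectors(
	struct xfs_mount	*mp,
	xfs_filblks_t		nr_fsb)
{
	return (sector_t)nr_fsb << (mp->m_super->s_blocksize_bits - 9);
}
#endif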

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_filblks_t		*count)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		numrecs = 0;

	for_each_xfs_iext(ifp, &icur, &got) {
		if (!isnullstartblock(got.br_startblock)) {
			*count += got.br_blockcount;
			numrecs++;
		}
	}

	return numrecs;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_btree_cur	*cur;
	xfs_extlen_t		btblocks = 0;
	int			error;

	*nextents = 0;
	*count = 0;

	if (!ifp)
		return 0;

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;

		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		error = xfs_btree_count_blocks(cur, &btblocks);
		xfs_btree_del_cursor(cur, error);
		if (error)
			return error;

		/*
		 * xfs_btree_count_blocks includes the root block contained in
		 * the inode fork in @btblocks, so subtract one because we're
		 * only interested in allocated disk blocks.
		 */
		*count += btblocks - 1;

		fallthrough;
	case XFS_DINODE_FMT_EXTENTS:
		*nextents = xfs_bmap_count_leaves(ifp, count);
		break;
	}

	return 0;
}
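
/*
 * Usage sketch (illustrative, compiled out): counting the allocated blocks
 * and extent records of a locked inode's data fork.  This mirrors how
 * xfs_swap_extent_forks() below sizes the attr forks before a swap;
 * example_count_data_fork() is a hypothetical name.
 */
#if 0
static int
example_count_data_fork(
	struct xfs_inode	*ip,
	xfs_filblks_t		*blocks)
{
	xfs_extnum_t		nextents;

	/*
	 * A NULL transaction is acceptable for a read-only count;
	 * xfs_iread_extents() tolerates it, as seen in xfs_getbmap() below.
	 */
	return xfs_bmap_count_blocks(NULL, ip, XFS_DATA_FORK, &nextents,
			blocks);
}
#endif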

static int
xfs_getbmap_report_one(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	struct xfs_bmbt_irec	*got)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;
	bool			shared = false;
	int			error;

	error = xfs_reflink_trim_around_shared(ip, got, &shared);
	if (error)
		return error;

	if (isnullstartblock(got->br_startblock) ||
	    got->br_startblock == DELAYSTARTBLOCK) {
		/*
		 * Take the flush completion as being a point-in-time snapshot
		 * where there are no delalloc extents, and if any new ones
		 * have been created racily, just skip them as being 'after'
		 * the flush and so don't get reported.
		 */
		if (!(bmv->bmv_iflags & BMV_IF_DELALLOC))
			return 0;

		p->bmv_oflags |= BMV_OF_DELALLOC;
		p->bmv_block = -2;
	} else {
		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
	}

	if (got->br_state == XFS_EXT_UNWRITTEN &&
	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
		p->bmv_oflags |= BMV_OF_PREALLOC;

	if (shared)
		p->bmv_oflags |= BMV_OF_SHARED;

	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
	return 0;
}

static void
xfs_getbmap_report_hole(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,
	struct kgetbmap		*out,
	int64_t			bmv_end,
	xfs_fileoff_t		bno,
	xfs_fileoff_t		end)
{
	struct kgetbmap		*p = out + bmv->bmv_entries;

	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
		return;

	p->bmv_block = -1;
	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
	bmv->bmv_entries++;
}

static inline bool
xfs_getbmap_full(
	struct getbmapx		*bmv)
{
	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}

static bool
xfs_getbmap_next_rec(
	struct xfs_bmbt_irec	*rec,
	xfs_fileoff_t		total_end)
{
	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;

	if (end == total_end)
		return false;

	rec->br_startoff += rec->br_blockcount;
	if (!isnullstartblock(rec->br_startblock) &&
	    rec->br_startblock != DELAYSTARTBLOCK)
		rec->br_startblock += rec->br_blockcount;
	rec->br_blockcount = total_end - end;
	return true;
}
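
/*
 * Worked example for xfs_getbmap_next_rec() (illustrative): suppose a record
 * of startoff 10, startblock 100, blockcount 8 is trimmed to blockcount 3 by
 * the shared-extent check in xfs_getbmap_report_one() and reported.  With
 * total_end 18, advancing the record yields startoff 13, startblock 103,
 * blockcount 5: the still-unreported remainder of the original mapping.
 */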

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	struct kgetbmap		*out)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			iflags = bmv->bmv_iflags;
	int			whichfork, lock, error = 0;
	int64_t			bmv_end, max_len;
	xfs_fileoff_t		bno, first_bno;
	struct xfs_ifork	*ifp;
	struct xfs_bmbt_irec	got, rec;
	xfs_filblks_t		len;
	struct xfs_iext_cursor	icur;

	if (bmv->bmv_iflags & ~BMV_IF_VALID)
		return -EINVAL;
#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (bmv->bmv_length < -1)
		return -EINVAL;
	bmv->bmv_entries = 0;
	if (bmv->bmv_length == 0)
		return 0;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_ATTR_FORK:
		lock = xfs_ilock_attr_map_shared(ip);
		if (!xfs_inode_has_attr_fork(ip))
			goto out_unlock_ilock;

		max_len = 1LL << 32;
		break;
	case XFS_COW_FORK:
		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);

		/* No CoW fork? Just return */
		if (!xfs_ifork_ptr(ip, whichfork))
			goto out_unlock_ilock;

		if (xfs_get_cowextsz_hint(ip))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);
		break;
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		if (xfs_get_extsz_hint(ip) ||
		    (ip->i_diflags & XFS_DIFLAG_PREALLOC))
			max_len = mp->m_super->s_maxbytes;
		else
			max_len = XFS_ISIZE(ip);

		lock = xfs_ilock_data_map_shared(ip);
		break;
	}

	ifp = xfs_ifork_ptr(ip, whichfork);

	switch (ifp->if_format) {
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		break;
	case XFS_DINODE_FMT_LOCAL:
		/* Local format inode forks report no extents. */
		goto out_unlock_ilock;
	default:
		error = -EINVAL;
		goto out_unlock_ilock;
	}

	if (bmv->bmv_length == -1) {
		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
	}

	bmv_end = bmv->bmv_offset + bmv->bmv_length;

	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

	error = xfs_iread_extents(NULL, ip, whichfork);
	if (error)
		goto out_unlock_ilock;

	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
		/*
		 * Report a whole-file hole if the delalloc flag is set to
		 * stay compatible with the old implementation.
		 */
		if (iflags & BMV_IF_DELALLOC)
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		goto out_unlock_ilock;
	}

	while (!xfs_getbmap_full(bmv)) {
		xfs_trim_extent(&got, first_bno, len);

		/*
		 * Report an entry for a hole if this extent doesn't directly
		 * follow the previous one.
		 */
		if (got.br_startoff > bno) {
			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
					got.br_startoff);
			if (xfs_getbmap_full(bmv))
				break;
		}

		/*
		 * In order to report shared extents accurately, we report each
		 * distinct shared / unshared part of a single bmbt record with
		 * an individual getbmapx record.
		 */
		bno = got.br_startoff + got.br_blockcount;
		rec = got;
		do {
			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
					&rec);
			if (error || xfs_getbmap_full(bmv))
				goto out_unlock_ilock;
		} while (xfs_getbmap_next_rec(&rec, bno));

		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

			if (bmv->bmv_entries > 0)
				out[bmv->bmv_entries - 1].bmv_oflags |=
								BMV_OF_LAST;

			if (whichfork != XFS_ATTR_FORK && bno < end &&
			    !xfs_getbmap_full(bmv)) {
				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
						bno, end);
			}
			break;
		}

		if (bno >= first_bno + len)
			break;
	}

out_unlock_ilock:
	xfs_iunlock(ip, lock);
out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return error;
}
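
/*
 * Userspace view of xfs_getbmap() (illustrative sketch, compiled out; this
 * is ioctl client code, not kernel code): XFS_IOC_GETBMAPX takes an array
 * whose first element is the request header and whose remaining
 * bmv_count - 1 slots receive the formatted records.  The header path and
 * example_getbmapx() name are assumptions.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* assumed location of struct getbmapx */

static int
example_getbmapx(int fd)
{
	struct getbmapx	map[33];	/* header + up to 32 records */
	int		i;

	memset(map, 0, sizeof(map));
	map[0].bmv_length = -1;		/* whole file, from offset 0 */
	map[0].bmv_count = 33;

	if (ioctl(fd, XFS_IOC_GETBMAPX, map))
		return -1;
	for (i = 1; i <= map[0].bmv_entries; i++)
		printf("offset %lld block %lld len %lld oflags 0x%x\n",
				(long long)map[i].bmv_offset,
				(long long)map[i].bmv_block,
				(long long)map[i].bmv_length,
				(unsigned int)map[i].bmv_oflags);
	return 0;
}
#endif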

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
void
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_off_t		start_byte,
	xfs_off_t		end_byte)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
	struct xfs_bmbt_irec	got, del;
	struct xfs_iext_cursor	icur;

	ASSERT(!xfs_need_iread_extents(ifp));

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		goto out_unlock;

	while (got.br_startoff + got.br_blockcount > start_fsb) {
		del = got;
		xfs_trim_extent(&del, start_fsb, end_fsb - start_fsb);

		/*
		 * A delete can push the cursor forward. Step back to the
		 * previous extent on non-delalloc or extents outside the
		 * target range.
		 */
		if (!del.br_blockcount ||
		    !isnullstartblock(del.br_startblock)) {
			if (!xfs_iext_prev_extent(ifp, &icur, &got))
				break;
			continue;
		}

		xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got, &del);
		if (!xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

	if (whichfork == XFS_COW_FORK && !ifp->if_bytes)
		xfs_inode_clear_cowblocks_tag(ip);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks.
 */
bool
xfs_can_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			found_blocks = false;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;

	/*
	 * Caller must either hold the exclusive io lock or be inactivating
	 * the inode, which guarantees there are no other users of the inode.
	 */
	if (!(VFS_I(ip)->i_state & I_FREEING))
		xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);

	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (xfs_need_iread_extents(&ip->i_df))
		return false;

	/*
	 * Do not free real extents in preallocated files unless the file has
	 * delalloc blocks and we are forced to remove them.
	 */
	if ((ip->i_diflags & XFS_DIFLAG_PREALLOC) && !ip->i_delayed_blks)
		return false;

	/*
	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
	 * range supported by the page cache, because the truncation will loop
	 * forever.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	if (xfs_inode_has_bigrtalloc(ip))
		end_fsb = xfs_rtb_roundup_rtx(mp, end_fsb);
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return false;

	/*
	 * Check if there is a post-EOF extent to free.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_iext_lookup_extent(ip, &ip->i_df, end_fsb, &icur, &imap))
		found_blocks = true;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return found_blocks;
}

/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/* Attach the dquots to the inode up front. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/* Wait on dio to ensure i_size has settled. */
	inode_dio_wait(VFS_I(ip));

	/*
	 * For preallocated files only free delayed allocations.
	 *
	 * Note that this means we also leave speculative preallocations in
	 * place for preallocated files.
	 */
	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) {
		if (ip->i_delayed_blks) {
			xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK,
				round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
				LLONG_MAX);
		}
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Do not update the on-disk file size.  If we update the on-disk file
	 * size and then the system crashes before the contents of the file are
	 * flushed to disk then the files may be full of holes (ie NULL files
	 * bug).
	 */
	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
	if (error)
		goto err_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	xfs_inode_clear_eofblocks_tag(ip);
	goto out_unlock;

err_cancel:
	/*
	 * If we get an error at this point we simply don't
	 * bother truncating the file.
	 */
	xfs_trans_cancel(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
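
/*
 * Typical caller pattern (illustrative, compiled out): gate the transaction
 * work in xfs_free_eofblocks() behind the cheap checks in
 * xfs_can_free_eofblocks(), exactly as xfs_prepare_shift() does below.
 * example_trim_eofblocks() is a hypothetical name.
 */
#if 0
static int
example_trim_eofblocks(
	struct xfs_inode	*ip)
{
	if (!xfs_can_free_eofblocks(ip))
		return 0;
	return xfs_free_eofblocks(ip);
}
#endif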

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			rt;
	struct xfs_trans	*tp;
	struct xfs_bmbt_irec	imaps[1], *imapp;
	int			error;

	if (xfs_is_always_cow_inode(ip))
		return 0;

	trace_xfs_alloc_file_space(ip);

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
	allocatesize_fsb = endoffset_fsb - startoffset_fsb;

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;
		unsigned int	dblocks, rblocks, resblks;
		int		nimaps = 1;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			div_u64_rem(startoffset_fsb, extsz, &temp);
			if (temp)
				e += temp;
			div_u64_rem(e, extsz, &temp);
			if (temp)
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to XFS_BMBT_MAX_EXTLEN (21 bits), so use that to enforce the
		 * limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s),
				(XFS_MAX_BMBT_EXTLEN * nimaps));
		if (unlikely(rt)) {
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			rblocks = resblks;
		} else {
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			rblocks = 0;
		}

		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
				dblocks, rblocks, false, &tp);
		if (error)
			break;

		error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
				XFS_IEXT_ADD_NOSPLIT_CNT);
		if (error)
			goto error;

		/*
		 * If the allocator cannot find a single free extent large
		 * enough to cover the start block of the requested range,
		 * xfs_bmapi_write will return -ENOSR.
		 *
		 * In that case we simply need to keep looping with the same
		 * startoffset_fsb so that one of the following allocations
		 * will eventually reach the requested range.
		 */
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
				allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
				&nimaps);
		if (error) {
			if (error != -ENOSR)
				goto error;
			error = 0;
		} else {
			startoffset_fsb += imapp->br_blockcount;
			allocatesize_fsb -= imapp->br_blockcount;
		}

		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	return error;

error:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
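
/*
 * Worked example of the extent size hint rounding above (illustrative): with
 * extsz = 16, startoffset_fsb = 21 and allocatesize_fsb = 10, s rounds down
 * to 16, while e starts at 31, picks up the 5-block start remainder (36) and
 * is then rounded up to 48, so resblks is sized from e - s = 32 blocks even
 * though only 10 were requested.
 */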

static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
			false, &tp);
	if (error)
		return error;

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
			XFS_IEXT_PUNCH_HOLE_CNT);
	if (error)
		goto out_trans_cancel;

	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/*
	 * Make sure we extend the flush out to extent alignment
	 * boundaries so any extent range overlapping the start/end
	 * of the modification we are about to do is clean and idle.
	 */
	rounding = max_t(xfs_off_t, xfs_inode_alloc_unitsize(ip), PAGE_SIZE);
	start = rounddown_64(offset, rounding);
	end = roundup_64(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	/*
	 * Now AIO and DIO have drained we flush and (if necessary) invalidate
	 * the cached range over the first operation we are about to run.
	 */
	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* We can only free complete realtime extents. */
	if (xfs_inode_has_bigrtalloc(ip)) {
		startoffset_fsb = xfs_rtb_roundup_rtx(mp, startoffset_fsb);
		endoffset_fsb = xfs_rtb_rounddown_rtx(mp, endoffset_fsb);
	}

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 */
	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end.  xfs_zero_range is smart
	 * enough to skip any holes, including those we just created, but we
	 * must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;
	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	error = xfs_zero_range(ip, offset, len, NULL);
	if (error)
		return error;

	/*
	 * If we zeroed right up to EOF and EOF straddles a page boundary we
	 * must make sure that the post-EOF area is also zeroed because the
	 * page could be mmap'd and xfs_zero_range doesn't do that for us.
	 * Writeback of the eof page will do this, albeit clumsily.
	 */
	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
	}

	return error;
}

static int
xfs_prepare_shift(
	struct xfs_inode	*ip,
	loff_t			offset)
{
	unsigned int		rounding;
	int			error;

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Shift operations must stabilize the start block offset boundary along
	 * with the full range of the operation. If we don't, a COW writeback
	 * completion could race with an insert, front merge with the start
	 * extent (after split) during the shift and corrupt the file. Start
	 * with the allocation unit just prior to the start to stabilize the
	 * boundary.
	 */
	rounding = xfs_inode_alloc_unitsize(ip);
	offset = rounddown_64(offset, rounding);
	if (offset)
		offset -= rounding;

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
	if (error)
		return error;

	/*
	 * Clean out anything hanging around in the cow fork now that
	 * we've flushed all the dirty data out to disk to avoid having
	 * CoW extents at the wrong offsets.
	 */
	if (xfs_inode_has_cow_data(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
				true);
		if (error)
			return error;
	}

	return 0;
}

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is free data blocks in the specified range
 *	by calling xfs_free_file_space(). That also syncs dirty data
 *	and invalidates the page cache over the region on which the
 *	collapse range is working. Extent records are then shifted to
 *	the left to cover the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);

	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	while (!done) {
		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
				&done);
		if (error)
			goto out_trans_cancel;
		if (done)
			break;

		/* finish any deferred frees and roll the transaction */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
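
/*
 * Userspace trigger for the above (illustrative sketch, compiled out; client
 * code, not kernel code): a collapse is requested through fallocate(2) with
 * offset and length aligned to the file's allocation unit size.
 * example_collapse() is a hypothetical name.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int
example_collapse(int fd, off_t offset, off_t len)
{
	return fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, len);
}
#endif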

/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given file.
 *	The first thing we do is sync dirty data and invalidate the page cache
 *	over the region on which the insert range is working. We then split an
 *	extent in two at the given offset by calling xfs_bmap_split_extent,
 *	and shift all extent records lying between [offset, last allocated
 *	extent] to the right to make room for the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
	bool			done = false;

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);

	trace_xfs_insert_file_space(ip);

	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
	if (error)
		return error;

	error = xfs_prepare_shift(ip, offset);
	if (error)
		return error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
			XFS_IEXT_PUNCH_HOLE_CNT);
	if (error)
		goto out_trans_cancel;

	/*
	 * The extent shifting code works on extent granularity. So, if stop_fsb
	 * is not the starting block of extent, we need to split the extent at
	 * stop_fsb.
	 */
	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
	if (error)
		goto out_trans_cancel;

	do {
		error = xfs_defer_finish(&tp);
		if (error)
			goto out_trans_cancel;

		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb);
		if (error)
			goto out_trans_cancel;
	} while (!done);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
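
/*
 * The userspace entry point mirrors collapse (illustrative): fallocate(fd,
 * FALLOC_FL_INSERT_RANGE, offset, len), again with both values aligned to
 * the allocation unit size.
 */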

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip)	/* tmp inode */
{
	struct xfs_ifork	*ifp = &ip->i_df;
	struct xfs_ifork	*tifp = &tip->i_df;

	/* User/group/project quota ids must match if quotas are enforced. */
	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
	     ip->i_projid != tip->i_projid))
		return -EINVAL;

	/* Should never get a local format */
	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode,
	 * why did userspace call us?
	 */
	if (ifp->if_nextents < tifp->if_nextents)
		return -EINVAL;

	/*
	 * If we have to use the (expensive) rmap swap method, we can
	 * handle any number of extents and any format.
	 */
	if (xfs_has_rmapbt(ip->i_mount))
		return 0;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format,
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    tifp->if_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
		if (xfs_inode_has_attr_fork(ip) &&
		    xfs_bmap_bmdr_space(tifp->if_broot) > xfs_inode_fork_boff(ip))
			return -EINVAL;
		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		if (xfs_inode_has_attr_fork(tip) &&
		    xfs_bmap_bmdr_space(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
			return -EINVAL;
		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}

static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}

/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
	struct xfs_trans		**tpp,
	struct xfs_inode		*ip,
	struct xfs_inode		*tip)
{
	struct xfs_trans		*tp = *tpp;
	struct xfs_bmbt_irec		irec;
	struct xfs_bmbt_irec		uirec;
	struct xfs_bmbt_irec		tirec;
	xfs_fileoff_t			offset_fsb;
	xfs_fileoff_t			end_fsb;
	xfs_filblks_t			count_fsb;
	int				error;
	xfs_filblks_t			ilen;
	xfs_filblks_t			rlen;
	int				nimaps;
	uint64_t			tip_flags2;

	/*
	 * If the source file has shared blocks, we must flag the donor
	 * file as having shared blocks so that we get the shared-block
	 * rmap functions when we go to fix up the rmaps.  The flags
	 * will be switched for real later.
	 */
	tip_flags2 = tip->i_diflags2;
	if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
		tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;

	offset_fsb = 0;
	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			ASSERT(tp->t_highest_agno == NULLAGNUMBER);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			if (xfs_bmap_is_real_extent(&uirec)) {
				error = xfs_iext_count_extend(tp, ip,
						XFS_DATA_FORK,
						XFS_IEXT_SWAP_RMAP_CNT);
				if (error)
					goto out;
			}

			if (xfs_bmap_is_real_extent(&irec)) {
				error = xfs_iext_count_extend(tp, tip,
						XFS_DATA_FORK,
						XFS_IEXT_SWAP_RMAP_CNT);
				if (error)
					goto out;
			}

			/* Remove the mapping from the donor file. */
			xfs_bmap_unmap_extent(tp, tip, XFS_DATA_FORK, &uirec);

			/* Remove the mapping from the source file. */
			xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &irec);

			/* Map the donor file's blocks into the source file. */
			xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, &uirec);

			/* Map the source file's blocks into the donor file. */
			xfs_bmap_map_extent(tp, tip, XFS_DATA_FORK, &irec);

			error = xfs_defer_finish(tpp);
			tp = *tpp;
			if (error)
				goto out;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}

	tip->i_diflags2 = tip_flags2;
	return 0;

out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_diflags2 = tip_flags2;
	return error;
}

/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	xfs_filblks_t		aforkblks = 0;
	xfs_filblks_t		taforkblks = 0;
	xfs_extnum_t		junk;
	uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks
	 */
	if (xfs_inode_has_attr_fork(ip) && ip->i_af.if_nextents > 0 &&
	    ip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
				&aforkblks);
		if (error)
			return error;
	}
	if (xfs_inode_has_attr_fork(tip) && tip->i_af.if_nextents > 0 &&
	    tip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Btree format (v3) inodes have the inode number stamped in the bmbt
	 * block headers. We can't start changing the bmbt blocks until the
	 * inode owner change is logged so recovery does the right thing in the
	 * event of a crash. Set the owner change log flags now and leave the
	 * bmbt scan as the last step.
	 */
	if (xfs_has_v3inodes(ip->i_mount)) {
		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
			(*target_log_flags) |= XFS_ILOG_DOWNER;
		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
			(*src_log_flags) |= XFS_ILOG_DOWNER;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	swap(ip->i_df, tip->i_df);

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (uint64_t)ip->i_nblocks;
	ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
	tip->i_nblocks = tmp + taforkblks - aforkblks;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_df.if_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}

/*
 * Fix up the owners of the bmbt blocks to refer to the current inode. The
 * change owner scan attempts to order all modified buffers in the current
 * transaction. In the event of ordered buffer failure, the offending buffer is
 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
 * the transaction in this case to replenish the fallback log reservation and
 * restart the scan. This process repeats until the scan completes.
 */
static int
xfs_swap_change_owner(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tmpip)
{
	int			error;
	struct xfs_trans	*tp = *tpp;

	do {
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
					      NULL);
		/* success or fatal error */
		if (error != -EAGAIN)
			break;

		error = xfs_trans_roll(tpp);
		if (error)
			break;
		tp = *tpp;

		/*
		 * Redirty both inodes so they can relog and keep the log tail
		 * moving forward.
		 */
		xfs_trans_ijoin(tp, ip, 0);
		xfs_trans_ijoin(tp, tmpip, 0);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
	} while (true);

	return error;
}

int
xfs_swap_extents(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip,	/* tmp inode */
	struct xfs_swapext	*sxp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bstat	*sbp = &sxp->sx_stat;
	int			src_log_flags, target_log_flags;
	int			error = 0;
	uint64_t		f;
	int			resblks = 0;
	unsigned int		flags = 0;
	struct timespec64	ctime, mtime;

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with.  Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
				    VFS_I(tip)->i_mapping);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		goto out_unlock;

	error = xfs_qm_dqattach(tip);
	if (error)
		goto out_unlock;

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	if (xfs_inode_has_cow_data(tip)) {
		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
		if (error)
			goto out_unlock;
	}

	/*
	 * Extent "swapping" with rmap requires a permanent reservation and
	 * a block reservation because it's really just a remap operation
	 * performed with log redo items!
	 */
	if (xfs_has_rmapbt(mp)) {
		int		w = XFS_DATA_FORK;
		uint32_t	ipnext = ip->i_df.if_nextents;
		uint32_t	tipnext = tip->i_df.if_nextents;

		/*
		 * Conceptually this shouldn't affect the shape of either bmbt,
		 * but since we atomically move extents one by one, we reserve
		 * enough space to rebuild both trees.
		 */
		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);

		/*
		 * If either inode straddles a bmapbt block allocation boundary,
		 * the rmapbt algorithm triggers repeated allocs and frees as
		 * extents are remapped. This can exhaust the block reservation
		 * prematurely and cause shutdown. Return freed blocks to the
		 * transaction reservation to counter this behavior.
		 */
		flags |= XFS_TRANS_RES_FDBLKS;
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
				&tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction
	 * commit or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_ijoin(tp, tip, 0);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_disk_size ||
	    sxp->sx_length != tip->i_disk_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in.  If they differ, we abort this swap.
	 * This is the mechanism used to ensure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	ctime = inode_get_ctime(VFS_I(ip));
	mtime = inode_get_mtime(VFS_I(ip));
	if ((sbp->bs_ctime.tv_sec != ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (xfs_has_rmapbt(mp))
		error = xfs_swap_extent_rmap(&tp, ip, tip);
	else
		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
				&target_log_flags);
	if (error)
		goto out_trans_cancel;

	/* Do we have to swap reflink flags? */
	if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
	    (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
		f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
		ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
		tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
		tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
	}

	/* Swap the cow forks. */
	if (xfs_has_reflink(mp)) {
		ASSERT(!ip->i_cowfp ||
		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
		ASSERT(!tip->i_cowfp ||
		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);

		swap(ip->i_cowfp, tip->i_cowfp);

		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(ip);
		else
			xfs_inode_clear_cowblocks_tag(ip);
		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(tip);
		else
			xfs_inode_clear_cowblocks_tag(tip);
	}

	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
	 * have inode number owner values in the bmbt blocks that still refer to
	 * the old inode. Scan each bmbt to fix up the owner values with the
	 * inode number of the current inode.
	 */
	if (src_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, ip, tip);
		if (error)
			goto out_trans_cancel;
	}
	if (target_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, tip, ip);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out_unlock_ilock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_iunlock(tip, XFS_ILOCK_EXCL);
out_unlock:
	filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
				      VFS_I(tip)->i_mapping);
	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock_ilock;
}
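
/*
 * Userspace view (illustrative sketch, compiled out; client code, not kernel
 * code): xfs_fsr reaches this function through XFS_IOC_SWAPEXT, passing a
 * bulkstat snapshot of the target so the ctime/mtime comparison above can
 * detect concurrent modification.  The header path and example_swapext()
 * name are assumptions; the field names follow the published ioctl ABI.
 */
#if 0
#include <sys/ioctl.h>
#include <xfs/xfs.h>	/* assumed location of struct xfs_swapext */

static int
example_swapext(int target_fd, int tmp_fd, struct xfs_bstat *target_bstat)
{
	struct xfs_swapext	sx = {
		.sx_version	= XFS_SX_VERSION,
		.sx_fdtarget	= target_fd,
		.sx_fdtmp	= tmp_fd,
		.sx_offset	= 0,
		.sx_length	= target_bstat->bs_size,
		.sx_stat	= *target_bstat,	/* from bulkstat */
	};

	return ioctl(target_fd, XFS_IOC_SWAPEXT, &sx);
}
#endif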