// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2023 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"

#define IOEND_BATCH_SIZE	4096

/*
 * Structure allocated for each folio to track per-block uptodate, dirty state
 * and I/O completions.
 */
struct iomap_folio_state {
	spinlock_t		state_lock;
	unsigned int		read_bytes_pending;
	atomic_t		write_bytes_pending;

	/*
	 * Each block has two bits in this bitmap:
	 * Bits [0..blocks_per_folio) hold the uptodate status.
	 * Bits [b_p_f...(2*b_p_f))   hold the dirty status.
	 */
	unsigned long		state[];
};

static struct bio_set iomap_ioend_bioset;

static inline bool ifs_is_fully_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs)
{
	struct inode *inode = folio->mapping->host;

	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}

static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
		unsigned int block)
{
	return test_bit(block, ifs->state);
}

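/*
 * Mark the per-block uptodate bits for the byte range @off/@len in @ifs and
 * report whether the folio is now fully uptodate.  Called with
 * ifs->state_lock held.
 */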
static bool ifs_set_range_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;

	bitmap_set(ifs->state, first_blk, nr_blks);
	return ifs_is_fully_uptodate(folio, ifs);
}

static void iomap_set_range_uptodate(struct folio *folio, size_t off,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned long flags;
	bool uptodate = true;

	if (ifs) {
		spin_lock_irqsave(&ifs->state_lock, flags);
		uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (uptodate)
		folio_mark_uptodate(folio);
}

static inline bool ifs_block_is_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, int block)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);

	return test_bit(block + blks_per_folio, ifs->state);
}

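/*
 * Find the first run of contiguous dirty blocks in [*range_start, range_end).
 * On success *range_start is updated to the file position of that run and its
 * length in bytes is returned; returns 0 if no dirty block is found.
 */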
static unsigned ifs_find_dirty_range(struct folio *folio,
		struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
{
	struct inode *inode = folio->mapping->host;
	unsigned start_blk =
		offset_in_folio(folio, *range_start) >> inode->i_blkbits;
	unsigned end_blk = min_not_zero(
		offset_in_folio(folio, range_end) >> inode->i_blkbits,
		i_blocks_per_folio(inode, folio));
	unsigned nblks = 1;

	while (!ifs_block_is_dirty(folio, ifs, start_blk))
		if (++start_blk == end_blk)
			return 0;

	while (start_blk + nblks < end_blk) {
		if (!ifs_block_is_dirty(folio, ifs, start_blk + nblks))
			break;
		nblks++;
	}

	*range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
	return nblks << inode->i_blkbits;
}

static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
		u64 range_end)
{
	struct iomap_folio_state *ifs = folio->private;

	if (*range_start >= range_end)
		return 0;

	if (ifs)
		return ifs_find_dirty_range(folio, ifs, range_start, range_end);
	return range_end - *range_start;
}

static void ifs_clear_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_clear_range_dirty(folio, ifs, off, len);
}

static void ifs_set_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}

static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_dirty(folio, ifs, off, len);
}

static struct iomap_folio_state *ifs_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (ifs || nr_blocks <= 1)
		return ifs;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	/*
	 * ifs->state tracks two sets of state flags when the
	 * filesystem block size is smaller than the folio size.
	 * The first state tracks per-block uptodate and the
	 * second tracks per-block dirty state.
	 */
	ifs = kzalloc(struct_size(ifs, state,
		      BITS_TO_LONGS(2 * nr_blocks)), gfp);
	if (!ifs)
		return ifs;

	spin_lock_init(&ifs->state_lock);
	if (folio_test_uptodate(folio))
		bitmap_set(ifs->state, 0, nr_blocks);
	if (folio_test_dirty(folio))
		bitmap_set(ifs->state, nr_blocks, nr_blocks);
	folio_attach_private(folio, ifs);

	return ifs;
}

static void ifs_free(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio_detach_private(folio);

	if (!ifs)
		return;
	WARN_ON_ONCE(ifs->read_bytes_pending != 0);
	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
	WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
			folio_test_uptodate(folio));
	kfree(ifs);
}

/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_folio_state *ifs = folio->private;
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	size_t orig_plen = plen;
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (ifs) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!ifs_block_is_uptodate(ifs, i))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (ifs_block_is_uptodate(ifs, i)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + orig_plen > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}

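/*
 * Per-folio read completion: mark the byte range uptodate on success and end
 * the folio read once all outstanding read bytes for the folio have completed.
 */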
static void iomap_finish_folio_read(struct folio *folio, size_t off,
		size_t len, int error)
{
	struct iomap_folio_state *ifs = folio->private;
	bool uptodate = !error;
	bool finished = true;

	if (ifs) {
		unsigned long flags;

		spin_lock_irqsave(&ifs->state_lock, flags);
		if (!error)
			uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		ifs->read_bytes_pending -= len;
		finished = !ifs->read_bytes_pending;
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (finished)
		folio_end_read(folio, uptodate);
}

static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};

/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t offset = offset_in_folio(folio, iomap->offset);

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	folio_fill_tail(folio, offset, iomap->inline_data, size);
	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
	return 0;
}

static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}

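/*
 * Process one mapping for the buffered read path: copy inline data, zero
 * holes, unwritten extents and post-EOF blocks directly in the page cache,
 * and queue everything else in a read bio.  Returns the number of bytes
 * handled, or a non-positive value to stop the iteration.
 */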
static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_folio_state *ifs;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (ifs) {
		spin_lock_irq(&ifs->state_lock);
		ifs->read_bytes_pending += plen;
		spin_unlock_irq(&ifs->state_lock);
	}

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}

static loff_t iomap_read_folio_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	struct folio *folio = ctx->cur_folio;
	size_t offset = offset_in_folio(folio, iter->pos);
	loff_t length = min_t(loff_t, folio_size(folio) - offset,
			      iomap_length(iter));
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_read_folio_iter(&iter, &ctx);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);

static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}

/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);

/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!ifs)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!ifs_block_is_uptodate(ifs, i))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 * @len: Suggested size of folio to create.
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;
	fgp |= fgf_set_order(len);

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);

bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * If the folio is dirty, we refuse to release our metadata because
	 * it may be partially dirty.  Once we track per-block dirty state,
	 * we can release the metadata if every block is dirty.
	 */
	if (folio_test_dirty(folio))
		return false;
	ifs_free(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
					folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		ifs_free(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);

bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	size_t len = folio_size(folio);

	ifs_alloc(inode, folio, 0);
	iomap_set_range_dirty(folio, 0, len);
	return filemap_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL_GPL(iomap_dirty_folio);

static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}

static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio_nofail(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}

static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_folio_state *ifs;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	/*
	 * If the write or zeroing completely overlaps the current folio, then
	 * the entire folio will be dirtied so there is no need for
	 * per-block state tracking structures to be attached to this folio.
	 * For the unshare case, we must read in the ondisk contents because we
	 * are not changing pagecache contents.
	 */
	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
	    pos + len >= folio_pos(folio) + folio_size(folio))
		return 0;

	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
		return -EAGAIN;

	if (folio_test_uptodate(folio))
		return 0;

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}

static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
		size_t len)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->get_folio)
		return folio_ops->get_folio(iter, pos, len);
	else
		return iomap_get_folio(iter, pos, len);
}

static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
		struct folio *folio)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->put_folio) {
		folio_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}

static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}

static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	folio = __iomap_get_folio(iter, pos, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale. The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (folio_ops && folio_ops->iomap_valid) {
		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
							 &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	__iomap_put_folio(iter, pos, 0, folio);

	return status;
}

static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return false;
	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
	filemap_dirty_folio(inode->i_mapping, folio);
	return true;
}

static void iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
}

/*
 * Returns true if all copied bytes have been written to the pagecache,
 * otherwise return false.
 */
static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	if (srcmap->type == IOMAP_INLINE) {
		iomap_write_end_inline(iter, folio, pos, copied);
		return true;
	}

	if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		size_t bh_written;

		bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
					len, copied, folio, NULL);
		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
		return bh_written == copied;
	}

	return __iomap_write_end(iter->inode, pos, len, copied, folio);
}

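/*
 * Copy data from @i into the pagecache one folio at a time: get and prepare
 * a folio with iomap_write_begin(), copy from the iov_iter, then mark the
 * copied range dirty with iomap_write_end().  A short copy retries with a
 * smaller chunk size.
 */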
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t total_written = 0;
	long status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	size_t chunk = mapping_max_folio_size(mapping);
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		loff_t old_size;
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */
		size_t written;		/* Bytes have been written */

		bytes = iov_iter_count(i);
retry:
		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in. This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status)) {
			iomap_write_failed(iter->inode, pos, bytes);
			break;
		}
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		written = iomap_write_end(iter, pos, bytes, copied, folio) ?
			  copied : 0;

		/*
		 * Update the in-memory inode size after copying the data into
		 * the page cache.  It's up to the file system to write the
		 * updated size to disk, preferably after I/O completion so that
		 * no stale data is exposed.  Only once that's done can we
		 * unlock and release the folio.
		 */
		old_size = iter->inode->i_size;
		if (pos + written > old_size) {
			i_size_write(iter->inode, pos + written);
			iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
		}
		__iomap_put_folio(iter, pos, written, folio);

		if (old_size < pos)
			pagecache_isize_extended(iter->inode, old_size, pos);

		cond_resched();
		if (unlikely(written == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			iomap_write_failed(iter->inode, pos, bytes);
			iov_iter_revert(i, copied);

			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}
		} else {
			pos += written;
			total_written += written;
			length -= written;
		}
	} while (iov_iter_count(i) && length);

	if (status == -EAGAIN) {
		iov_iter_revert(i, total_written);
		return -EAGAIN;
	}
	return total_written ? total_written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops, void *private)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
		.private	= private,
	};
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);

	if (unlikely(iter.pos == iocb->ki_pos))
		return ret;
	ret = iter.pos - iocb->ki_pos;
	iocb->ki_pos = iter.pos;
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

static void iomap_write_delalloc_ifs_punch(struct inode *inode,
		struct folio *folio, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	unsigned int first_blk, last_blk, i;
	loff_t last_byte;
	u8 blkbits = inode->i_blkbits;
	struct iomap_folio_state *ifs;

	/*
	 * When we have per-block dirty tracking, there can be
	 * blocks within a folio which are marked uptodate
	 * but not dirty. In that case it is necessary to punch
	 * out such blocks to avoid leaking any delalloc blocks.
	 */
	ifs = folio->private;
	if (!ifs)
		return;

	last_byte = min_t(loff_t, end_byte - 1,
			folio_pos(folio) + folio_size(folio) - 1);
	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
	for (i = first_blk; i <= last_blk; i++) {
		if (!ifs_block_is_dirty(folio, ifs, i))
			punch(inode, folio_pos(folio) + (i << blkbits),
				    1 << blkbits, iomap);
	}
}

static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	if (!folio_test_dirty(folio))
		return;

	/* if dirty, punch up to offset */
	if (start_byte > *punch_start_byte) {
		punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
				iomap);
	}

	/* Punch non-dirty blocks within folio */
	iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
			iomap, punch);

	/*
	 * Make sure the next punch start is correctly bound to
	 * the end of this data range, not the end of the folio.
	 */
	*punch_start_byte = min_t(loff_t, end_byte,
				folio_pos(folio) + folio_size(folio));
}

/*
 * Scan the data range passed to us for dirty page cache folios. If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they were read in via read faults, in which
 * case they contain zeroes and we can remove the delalloc backing range and
 * any new writes to those pages will do the normal hole filling operation...
 *
 * This makes the logic simple: we only need to keep the delalloc extents over
 * the dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static void iomap_write_delalloc_scan(struct inode *inode,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	while (start_byte < end_byte) {
		struct folio	*folio;

		/* grab locked page */
		folio = filemap_lock_folio(inode->i_mapping,
				start_byte >> PAGE_SHIFT);
		if (IS_ERR(folio)) {
			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
					PAGE_SIZE;
			continue;
		}

		iomap_write_delalloc_punch(inode, folio, punch_start_byte,
				start_byte, end_byte, iomap, punch);

		/* move offset to start of next folio in range */
		start_byte = folio_next_index(folio) << PAGE_SHIFT;
		folio_unlock(folio);
		folio_put(folio);
	}
}

/*
 * When a short write occurs, the filesystem might need to use ->iomap_end
 * to remove space reservations created in ->iomap_begin.
 *
 * For filesystems that use delayed allocation, there can be dirty pages over
 * the delalloc extent outside the range of a short write but still within the
 * delalloc extent allocated for this iomap if the write raced with page
 * faults.
 *
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * The punch() callback *must* only punch delalloc extents in the range passed
 * to it. It must skip over all other types of extents in the range and leave
 * them completely unchanged. It must do this punch atomically with respect to
 * other extent modifications.
 *
 * The punch() callback may be called with a folio locked to prevent writeback
 * extent allocation racing at the edge of the range we are currently punching.
 * The locked folio may or may not cover the range being punched, so it is not
 * safe for the punch() callback to lock folios itself.
 *
 * Lock order is:
 *
 * inode->i_rwsem (shared or exclusive)
 *   inode->i_mapping->invalidate_lock (exclusive)
 *     folio_lock()
 *       ->punch
 *         internal filesystem allocation lock
 *
 * As we are scanning the page cache for data, we don't need to reimplement the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes. This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, whether they lie within the same folio,
 * or even whether there are multiple discontiguous data ranges within the
 * folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date. A write page fault can then mark it dirty. If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out. Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty. This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
 * returns the end of the data range (data_end). Using closed intervals would
 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
 * the code to subtle off-by-one bugs....
 */
void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
		loff_t end_byte, unsigned flags, struct iomap *iomap,
		iomap_punch_t punch)
{
	loff_t punch_start_byte = start_byte;
	loff_t scan_end_byte = min(i_size_read(inode), end_byte);

	/*
	 * The caller must hold invalidate_lock to avoid races with page faults
	 * re-instantiating folios and dirtying them via ->page_mkwrite whilst
	 * we walk the cache and perform delalloc extent removal.  Failing to do
	 * this can leave dirty pages with no space reservation in the cache.
	 */
	lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);

	while (start_byte < scan_end_byte) {
		loff_t		data_end;

		start_byte = mapping_seek_hole_data(inode->i_mapping,
				start_byte, scan_end_byte, SEEK_DATA);
		/*
		 * If there is no more data to scan, all that is left is to
		 * punch out the remaining range.
		 *
		 * Note that mapping_seek_hole_data is only supposed to return
		 * either an offset or -ENXIO, so WARN on any other error as
		 * that would be an API change without updating the callers.
		 */
		if (start_byte == -ENXIO || start_byte == scan_end_byte)
			break;
		if (WARN_ON_ONCE(start_byte < 0))
			return;
		WARN_ON_ONCE(start_byte < punch_start_byte);
		WARN_ON_ONCE(start_byte > scan_end_byte);

		/*
		 * We find the end of this contiguous cached data range by
		 * seeking from start_byte to the beginning of the next hole.
		 */
		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
				scan_end_byte, SEEK_HOLE);
		if (WARN_ON_ONCE(data_end < 0))
			return;

		/*
		 * If we race with post-direct I/O invalidation of the page cache,
		 * there might be no data left at start_byte.
		 */
		if (data_end == start_byte)
			continue;

		WARN_ON_ONCE(data_end < start_byte);
		WARN_ON_ONCE(data_end > scan_end_byte);

		iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
				data_end, iomap, punch);

		/* The next data search starts at the end of this one. */
		start_byte = data_end;
	}

	if (punch_start_byte < end_byte)
		punch(inode, punch_start_byte, end_byte - punch_start_byte,
				iomap);
}
EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);

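/*
 * Dirty the folios over a shared range without modifying their contents so
 * that a subsequent writeback breaks the extent sharing.
 */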
iomap_unshare_iter(struct iomap_iter * iter)1273  static loff_t iomap_unshare_iter(struct iomap_iter *iter)
1274  {
1275  	struct iomap *iomap = &iter->iomap;
1276  	loff_t pos = iter->pos;
1277  	loff_t length = iomap_length(iter);
1278  	loff_t written = 0;
1279  
1280  	if (!iomap_want_unshare_iter(iter))
1281  		return length;
1282  
1283  	do {
1284  		struct folio *folio;
1285  		int status;
1286  		size_t offset;
1287  		size_t bytes = min_t(u64, SIZE_MAX, length);
1288  		bool ret;
1289  
1290  		status = iomap_write_begin(iter, pos, bytes, &folio);
1291  		if (unlikely(status))
1292  			return status;
1293  		if (iomap->flags & IOMAP_F_STALE)
1294  			break;
1295  
1296  		offset = offset_in_folio(folio, pos);
1297  		if (bytes > folio_size(folio) - offset)
1298  			bytes = folio_size(folio) - offset;
1299  
1300  		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
1301  		__iomap_put_folio(iter, pos, bytes, folio);
1302  		if (WARN_ON_ONCE(!ret))
1303  			return -EIO;
1304  
1305  		cond_resched();
1306  
1307  		pos += bytes;
1308  		written += bytes;
1309  		length -= bytes;
1310  
1311  		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
1312  	} while (length > 0);
1313  
1314  	return written;
1315  }
1316  
1317  int
iomap_file_unshare(struct inode * inode,loff_t pos,loff_t len,const struct iomap_ops * ops)1318  iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1319  		const struct iomap_ops *ops)
1320  {
1321  	struct iomap_iter iter = {
1322  		.inode		= inode,
1323  		.pos		= pos,
1324  		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
1325  	};
1326  	loff_t size = i_size_read(inode);
1327  	int ret;
1328  
1329  	if (pos < 0 || pos >= size)
1330  		return 0;
1331  
1332  	iter.len = min(len, size - pos);
1333  	while ((ret = iomap_iter(&iter, ops)) > 0)
1334  		iter.processed = iomap_unshare_iter(&iter);
1335  	return ret;
1336  }
1337  EXPORT_SYMBOL_GPL(iomap_file_unshare);
1338  
1339  /*
1340   * Flush the remaining range of the iter and mark the current mapping stale.
1341   * This is used when zero range sees an unwritten mapping that may have had
1342   * dirty pagecache over it.
1343   */
iomap_zero_iter_flush_and_stale(struct iomap_iter * i)1344  static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
1345  {
1346  	struct address_space *mapping = i->inode->i_mapping;
1347  	loff_t end = i->pos + i->len - 1;
1348  
1349  	i->iomap.flags |= IOMAP_F_STALE;
1350  	return filemap_write_and_wait_range(mapping, i->pos, end);
1351  }
1352  
iomap_zero_iter(struct iomap_iter * iter,bool * did_zero,bool * range_dirty)1353  static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero,
1354  		bool *range_dirty)
1355  {
1356  	const struct iomap *srcmap = iomap_iter_srcmap(iter);
1357  	loff_t pos = iter->pos;
1358  	loff_t length = iomap_length(iter);
1359  	loff_t written = 0;
1360  
1361  	/*
1362  	 * We must zero subranges of unwritten mappings that might be dirty in
1363  	 * pagecache from previous writes. We only know whether the entire range
1364  	 * was clean or not, however, and dirty folios may have been written
1365  	 * back or reclaimed at any point after mapping lookup.
1366  	 *
1367  	 * The easiest way to deal with this is to flush pagecache to trigger
1368  	 * any pending unwritten conversions and then grab the updated extents
1369  	 * from the fs. The flush may change the current mapping, so mark it
1370  	 * stale for the iterator to remap it for the next pass to handle
1371  	 * properly.
1372  	 *
1373  	 * Note that holes are treated the same as unwritten because zero range
1374  	 * is (ab)used for partial folio zeroing in some cases. Hole backed
1375  	 * post-eof ranges can be dirtied via mapped write and the flush
1376  	 * triggers writeback time post-eof zeroing.
1377  	 */
1378  	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) {
1379  		if (*range_dirty) {
1380  			*range_dirty = false;
1381  			return iomap_zero_iter_flush_and_stale(iter);
1382  		}
1383  		/* range is clean and already zeroed, nothing to do */
1384  		return length;
1385  	}
1386  
1387  	do {
1388  		struct folio *folio;
1389  		int status;
1390  		size_t offset;
1391  		size_t bytes = min_t(u64, SIZE_MAX, length);
1392  		bool ret;
1393  
1394  		status = iomap_write_begin(iter, pos, bytes, &folio);
1395  		if (status)
1396  			return status;
1397  		if (iter->iomap.flags & IOMAP_F_STALE)
1398  			break;
1399  
1400  		offset = offset_in_folio(folio, pos);
1401  		if (bytes > folio_size(folio) - offset)
1402  			bytes = folio_size(folio) - offset;
1403  
1404  		folio_zero_range(folio, offset, bytes);
1405  		folio_mark_accessed(folio);
1406  
1407  		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
1408  		__iomap_put_folio(iter, pos, bytes, folio);
1409  		if (WARN_ON_ONCE(!ret))
1410  			return -EIO;
1411  
1412  		pos += bytes;
1413  		length -= bytes;
1414  		written += bytes;
1415  	} while (length > 0);
1416  
1417  	if (did_zero)
1418  		*did_zero = true;
1419  	return written;
1420  }
1421  
1422  int
iomap_zero_range(struct inode * inode,loff_t pos,loff_t len,bool * did_zero,const struct iomap_ops * ops)1423  iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1424  		const struct iomap_ops *ops)
1425  {
1426  	struct iomap_iter iter = {
1427  		.inode		= inode,
1428  		.pos		= pos,
1429  		.len		= len,
1430  		.flags		= IOMAP_ZERO,
1431  	};
1432  	int ret;
1433  	bool range_dirty;
1434  
1435  	/*
1436  	 * Zero range wants to skip pre-zeroed (i.e. unwritten) mappings, but
1437  	 * pagecache must be flushed to ensure stale data from previous
1438  	 * buffered writes is not exposed. A flush is only required for certain
1439  	 * types of mappings, but checking pagecache after mapping lookup is
1440  	 * racy with writeback and reclaim.
1441  	 *
1442  	 * Therefore, check the entire range first and pass along whether any
1443  	 * part of it is dirty. If so and an underlying mapping warrants it,
1444  	 * flush the cache at that point. This trades off the occasional false
1445  	 * positive (and spurious flush, if the dirty data and mapping don't
1446  	 * happen to overlap) for simplicity in handling a relatively uncommon
1447  	 * situation.
1448  	 */
1449  	range_dirty = filemap_range_needs_writeback(inode->i_mapping,
1450  					pos, pos + len - 1);
1451  
1452  	while ((ret = iomap_iter(&iter, ops)) > 0)
1453  		iter.processed = iomap_zero_iter(&iter, did_zero, &range_dirty);
1454  	return ret;
1455  }
1456  EXPORT_SYMBOL_GPL(iomap_zero_range);
1457  
1458  int
iomap_truncate_page(struct inode * inode,loff_t pos,bool * did_zero,const struct iomap_ops * ops)1459  iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1460  		const struct iomap_ops *ops)
1461  {
1462  	unsigned int blocksize = i_blocksize(inode);
1463  	unsigned int off = pos & (blocksize - 1);
1464  
1465  	/* Block boundary? Nothing to do */
1466  	if (!off)
1467  		return 0;
1468  	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1469  }
1470  EXPORT_SYMBOL_GPL(iomap_truncate_page);
1471  
iomap_folio_mkwrite_iter(struct iomap_iter * iter,struct folio * folio)1472  static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
1473  		struct folio *folio)
1474  {
1475  	loff_t length = iomap_length(iter);
1476  	int ret;
1477  
1478  	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
1479  		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1480  					      &iter->iomap);
1481  		if (ret)
1482  			return ret;
1483  		block_commit_write(&folio->page, 0, length);
1484  	} else {
1485  		WARN_ON_ONCE(!folio_test_uptodate(folio));
1486  		folio_mark_dirty(folio);
1487  	}
1488  
1489  	return length;
1490  }
1491  
iomap_page_mkwrite(struct vm_fault * vmf,const struct iomap_ops * ops)1492  vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1493  {
1494  	struct iomap_iter iter = {
1495  		.inode		= file_inode(vmf->vma->vm_file),
1496  		.flags		= IOMAP_WRITE | IOMAP_FAULT,
1497  	};
1498  	struct folio *folio = page_folio(vmf->page);
1499  	ssize_t ret;
1500  
1501  	folio_lock(folio);
1502  	ret = folio_mkwrite_check_truncate(folio, iter.inode);
1503  	if (ret < 0)
1504  		goto out_unlock;
1505  	iter.pos = folio_pos(folio);
1506  	iter.len = ret;
1507  	while ((ret = iomap_iter(&iter, ops)) > 0)
1508  		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
1509  
1510  	if (ret < 0)
1511  		goto out_unlock;
1512  	folio_wait_stable(folio);
1513  	return VM_FAULT_LOCKED;
1514  out_unlock:
1515  	folio_unlock(folio);
1516  	return vmf_fs_error(ret);
1517  }
1518  EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
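
/*
 * Example (sketch only): a filesystem's ->page_mkwrite handler can be a thin
 * wrapper around iomap_page_mkwrite(); "example_iomap_ops" is a placeholder
 * and any filesystem-specific locking is omitted:
 *
 *	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		vm_fault_t ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &example_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 */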
1519  
1520  static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1521  		size_t len)
1522  {
1523  	struct iomap_folio_state *ifs = folio->private;
1524  
1525  	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1526  	WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
1527  
1528  	if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
1529  		folio_end_writeback(folio);
1530  }
1531  
1532  /*
1533   * We're now finished for good with this ioend structure.  Update the page
1534   * state, release holds on bios, and finally free up memory.  Do not use the
1535   * ioend after this.
1536   */
1537  static u32
1538  iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1539  {
1540  	struct inode *inode = ioend->io_inode;
1541  	struct bio *bio = &ioend->io_bio;
1542  	struct folio_iter fi;
1543  	u32 folio_count = 0;
1544  
1545  	if (error) {
1546  		mapping_set_error(inode->i_mapping, error);
1547  		if (!bio_flagged(bio, BIO_QUIET)) {
1548  			pr_err_ratelimited(
1549  "%s: writeback error on inode %lu, offset %lld, sector %llu",
1550  				inode->i_sb->s_id, inode->i_ino,
1551  				ioend->io_offset, ioend->io_sector);
1552  		}
1553  	}
1554  
1555  	/* walk all folios in bio, ending page IO on them */
1556  	bio_for_each_folio_all(fi, bio) {
1557  		iomap_finish_folio_write(inode, fi.folio, fi.length);
1558  		folio_count++;
1559  	}
1560  
1561  	bio_put(bio);	/* frees the ioend */
1562  	return folio_count;
1563  }
1564  
1565  /*
1566   * Ioend completion routine for merged bios. This can only be called from task
1567   * contexts as merged ioends can be of unbounded length. Hence we have to break up
1568   * the writeback completions into manageable chunks to avoid long scheduler
1569   * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
1570   * good batch processing throughput without creating adverse scheduler latency
1571   * conditions.
1572   */
1573  void
1574  iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1575  {
1576  	struct list_head tmp;
1577  	u32 completions;
1578  
1579  	might_sleep();
1580  
1581  	list_replace_init(&ioend->io_list, &tmp);
1582  	completions = iomap_finish_ioend(ioend, error);
1583  
1584  	while (!list_empty(&tmp)) {
1585  		if (completions > IOEND_BATCH_SIZE * 8) {
1586  			cond_resched();
1587  			completions = 0;
1588  		}
1589  		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1590  		list_del_init(&ioend->io_list);
1591  		completions += iomap_finish_ioend(ioend, error);
1592  	}
1593  }
1594  EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1595  
1596  /*
1597   * We can merge two adjacent ioends if they have the same set of work to do.
1598   */
1599  static bool
1600  iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1601  {
1602  	if (ioend->io_bio.bi_status != next->io_bio.bi_status)
1603  		return false;
1604  	if ((ioend->io_flags & IOMAP_F_SHARED) ^
1605  	    (next->io_flags & IOMAP_F_SHARED))
1606  		return false;
1607  	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1608  	    (next->io_type == IOMAP_UNWRITTEN))
1609  		return false;
1610  	if (ioend->io_offset + ioend->io_size != next->io_offset)
1611  		return false;
1612  	/*
1613  	 * Do not merge physically discontiguous ioends. The filesystem
1614  	 * completion functions will have to iterate the physical
1615  	 * discontiguities even if we merge the ioends at a logical level, so
1616  	 * we don't gain anything by merging physical discontiguities here.
1617  	 *
1618  	 * We cannot use bio->bi_iter.bi_sector here as it is modified during
1619  	 * submission so does not point to the start sector of the bio at
1620  	 * completion.
1621  	 */
1622  	if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
1623  		return false;
1624  	return true;
1625  }
1626  
1627  void
1628  iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1629  {
1630  	struct iomap_ioend *next;
1631  
1632  	INIT_LIST_HEAD(&ioend->io_list);
1633  
1634  	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1635  			io_list))) {
1636  		if (!iomap_ioend_can_merge(ioend, next))
1637  			break;
1638  		list_move_tail(&next->io_list, &ioend->io_list);
1639  		ioend->io_size += next->io_size;
1640  	}
1641  }
1642  EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1643  
1644  static int
1645  iomap_ioend_compare(void *priv, const struct list_head *a,
1646  		const struct list_head *b)
1647  {
1648  	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1649  	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1650  
1651  	if (ia->io_offset < ib->io_offset)
1652  		return -1;
1653  	if (ia->io_offset > ib->io_offset)
1654  		return 1;
1655  	return 0;
1656  }
1657  
1658  void
1659  iomap_sort_ioends(struct list_head *ioend_list)
1660  {
1661  	list_sort(NULL, ioend_list, iomap_ioend_compare);
1662  }
1663  EXPORT_SYMBOL_GPL(iomap_sort_ioends);
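
/*
 * Example (sketch only): a filesystem that defers ioend completion to a
 * workqueue might drain its list of completed ioends roughly like this,
 * sorting by file offset and merging adjacent ioends before finishing them;
 * "completed" and "error" are placeholders for per-filesystem state:
 *
 *	struct iomap_ioend *ioend;
 *
 *	iomap_sort_ioends(completed);
 *	while ((ioend = list_first_entry_or_null(completed,
 *			struct iomap_ioend, io_list))) {
 *		list_del_init(&ioend->io_list);
 *		iomap_ioend_try_merge(ioend, completed);
 *		iomap_finish_ioends(ioend, error);
 *	}
 */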
1664  
1665  static void iomap_writepage_end_bio(struct bio *bio)
1666  {
1667  	iomap_finish_ioend(iomap_ioend_from_bio(bio),
1668  			blk_status_to_errno(bio->bi_status));
1669  }
1670  
1671  /*
1672   * Submit the final bio for an ioend.
1673   *
1674   * If @error is non-zero, it means that we have a situation where some part of
1675   * the submission process has failed after we've marked pages for writeback.
1676   * We cannot cancel the ioend directly in that case, so call the bio end I/O
1677   * handler with the error status here to run the normal I/O completion handler
1678   * to clear the writeback bit and let the file system process the errors.
1679   */
1680  static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error)
1681  {
1682  	if (!wpc->ioend)
1683  		return error;
1684  
1685  	/*
1686  	 * Let the file systems prepare the I/O submission and hook in an I/O
1687  	 * completion handler.  This also needs to happen after a failure so that
1688  	 * the file system end I/O handler gets called to clean up.
1690  	 */
1691  	if (wpc->ops->prepare_ioend)
1692  		error = wpc->ops->prepare_ioend(wpc->ioend, error);
1693  
1694  	if (error) {
1695  		wpc->ioend->io_bio.bi_status = errno_to_blk_status(error);
1696  		bio_endio(&wpc->ioend->io_bio);
1697  	} else {
1698  		submit_bio(&wpc->ioend->io_bio);
1699  	}
1700  
1701  	wpc->ioend = NULL;
1702  	return error;
1703  }
1704  
1705  static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
1706  		struct writeback_control *wbc, struct inode *inode, loff_t pos)
1707  {
1708  	struct iomap_ioend *ioend;
1709  	struct bio *bio;
1710  
1711  	bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1712  			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
1713  			       GFP_NOFS, &iomap_ioend_bioset);
1714  	bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
1715  	bio->bi_end_io = iomap_writepage_end_bio;
1716  	wbc_init_bio(wbc, bio);
1717  	bio->bi_write_hint = inode->i_write_hint;
1718  
1719  	ioend = iomap_ioend_from_bio(bio);
1720  	INIT_LIST_HEAD(&ioend->io_list);
1721  	ioend->io_type = wpc->iomap.type;
1722  	ioend->io_flags = wpc->iomap.flags;
1723  	ioend->io_inode = inode;
1724  	ioend->io_size = 0;
1725  	ioend->io_offset = pos;
1726  	ioend->io_sector = bio->bi_iter.bi_sector;
1727  
1728  	wpc->nr_folios = 0;
1729  	return ioend;
1730  }
1731  
1732  static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos)
1733  {
1734  	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1735  	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
1736  		return false;
1737  	if (wpc->iomap.type != wpc->ioend->io_type)
1738  		return false;
1739  	if (pos != wpc->ioend->io_offset + wpc->ioend->io_size)
1740  		return false;
1741  	if (iomap_sector(&wpc->iomap, pos) !=
1742  	    bio_end_sector(&wpc->ioend->io_bio))
1743  		return false;
1744  	/*
1745  	 * Limit ioend bio chain lengths to minimise IO completion latency. This
1746  	 * also prevents long tight loops ending page writeback on all the
1747  	 * folios in the ioend.
1748  	 */
1749  	if (wpc->nr_folios >= IOEND_BATCH_SIZE)
1750  		return false;
1751  	return true;
1752  }
1753  
1754  /*
1755   * Test to see if we have an existing ioend structure that we could append to
1756   * first; otherwise finish off the current ioend and start another.
1757   *
1758   * If a new ioend is created and cached, the old ioend is submitted to the block
1759   * layer instantly.  Batching optimisations are provided by higher level block
1760   * plugging.
1761   *
1762   * At the end of a writeback pass, there will be a cached ioend remaining on the
1763   * writepage context that the caller will need to submit.
1764   */
1765  static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
1766  		struct writeback_control *wbc, struct folio *folio,
1767  		struct inode *inode, loff_t pos, unsigned len)
1768  {
1769  	struct iomap_folio_state *ifs = folio->private;
1770  	size_t poff = offset_in_folio(folio, pos);
1771  	int error;
1772  
1773  	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos)) {
1774  new_ioend:
1775  		error = iomap_submit_ioend(wpc, 0);
1776  		if (error)
1777  			return error;
1778  		wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos);
1779  	}
1780  
1781  	if (!bio_add_folio(&wpc->ioend->io_bio, folio, len, poff))
1782  		goto new_ioend;
1783  
1784  	if (ifs)
1785  		atomic_add(len, &ifs->write_bytes_pending);
1786  	wpc->ioend->io_size += len;
1787  	wbc_account_cgroup_owner(wbc, &folio->page, len);
1788  	return 0;
1789  }
1790  
1791  static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
1792  		struct writeback_control *wbc, struct folio *folio,
1793  		struct inode *inode, u64 pos, unsigned dirty_len,
1794  		unsigned *count)
1795  {
1796  	int error;
1797  
1798  	do {
1799  		unsigned map_len;
1800  
1801  		error = wpc->ops->map_blocks(wpc, inode, pos, dirty_len);
1802  		if (error)
1803  			break;
1804  		trace_iomap_writepage_map(inode, pos, dirty_len, &wpc->iomap);
1805  
1806  		map_len = min_t(u64, dirty_len,
1807  			wpc->iomap.offset + wpc->iomap.length - pos);
1808  		WARN_ON_ONCE(!folio->private && map_len < dirty_len);
1809  
1810  		switch (wpc->iomap.type) {
1811  		case IOMAP_INLINE:
1812  			WARN_ON_ONCE(1);
1813  			error = -EIO;
1814  			break;
1815  		case IOMAP_HOLE:
1816  			break;
1817  		default:
1818  			error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
1819  					map_len);
1820  			if (!error)
1821  				(*count)++;
1822  			break;
1823  		}
1824  		dirty_len -= map_len;
1825  		pos += map_len;
1826  	} while (dirty_len && !error);
1827  
1828  	/*
1829  	 * We cannot cancel the ioend directly here on error.  We may have
1830  	 * already set other pages under writeback and hence we have to run I/O
1831  	 * completion to mark the error state of the pages under writeback
1832  	 * appropriately.
1833  	 *
1834  	 * Just let the file system know what portion of the folio failed to
1835  	 * map.
1836  	 */
1837  	if (error && wpc->ops->discard_folio)
1838  		wpc->ops->discard_folio(folio, pos);
1839  	return error;
1840  }
1841  
1842  /*
1843   * Check interaction of the folio with the file end.
1844   *
1845   * If the folio is entirely beyond i_size, return false.  If it straddles
1846   * i_size, adjust end_pos and zero all data beyond i_size.
1847   */
1848  static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
1849  		u64 *end_pos)
1850  {
1851  	u64 isize = i_size_read(inode);
1852  
1853  	if (*end_pos > isize) {
1854  		size_t poff = offset_in_folio(folio, isize);
1855  		pgoff_t end_index = isize >> PAGE_SHIFT;
1856  
1857  		/*
1858  		 * If the folio is entirely outside of i_size, skip it.
1859  		 *
1860  		 * This can happen due to a truncate operation that is in
1861  		 * progress and in that case truncate will finish it off once
1862  		 * we've dropped the folio lock.
1863  		 *
1864  		 * Note that the pgoff_t used for end_index is an unsigned long.
1865  		 * If the given offset is greater than 16TB on a 32-bit system,
1866  		 * then if we checked if the folio is fully outside i_size with
1867  		 * "if (folio->index >= end_index + 1)", "end_index + 1" would
1868  		 * overflow and evaluate to 0.  Hence this folio would be
1869  		 * redirtied and written out repeatedly, which would result in
1870  		 * an infinite loop; the user program performing this operation
1871  		 * would hang.  Instead, we can detect this situation by
1872  		 * checking if the folio is totally beyond i_size or if its
1873  		 * offset is just equal to the EOF.
1874  		 */
1875  		if (folio->index > end_index ||
1876  		    (folio->index == end_index && poff == 0))
1877  			return false;
1878  
1879  		/*
1880  		 * The folio straddles i_size.
1881  		 *
1882  		 * It must be zeroed out on each and every writepage invocation
1883  		 * because it may be mmapped:
1884  		 *
1885  		 *    A file is mapped in multiples of the page size.  For a
1886  		 *    file that is not a multiple of the page size, the
1887  		 *    remaining memory is zeroed when mapped, and writes to that
1888  		 *    region are not written out to the file.
1889  		 *
1890  		 * Also adjust the writeback range to skip all blocks entirely
1891  		 * beyond i_size.
1892  		 */
1893  		folio_zero_segment(folio, poff, folio_size(folio));
1894  		*end_pos = round_up(isize, i_blocksize(inode));
1895  	}
1896  
1897  	return true;
1898  }
1899  
1900  static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1901  		struct writeback_control *wbc, struct folio *folio)
1902  {
1903  	struct iomap_folio_state *ifs = folio->private;
1904  	struct inode *inode = folio->mapping->host;
1905  	u64 pos = folio_pos(folio);
1906  	u64 end_pos = pos + folio_size(folio);
1907  	unsigned count = 0;
1908  	int error = 0;
1909  	u32 rlen;
1910  
1911  	WARN_ON_ONCE(!folio_test_locked(folio));
1912  	WARN_ON_ONCE(folio_test_dirty(folio));
1913  	WARN_ON_ONCE(folio_test_writeback(folio));
1914  
1915  	trace_iomap_writepage(inode, pos, folio_size(folio));
1916  
1917  	if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
1918  		folio_unlock(folio);
1919  		return 0;
1920  	}
1921  	WARN_ON_ONCE(end_pos <= pos);
1922  
1923  	if (i_blocks_per_folio(inode, folio) > 1) {
1924  		if (!ifs) {
1925  			ifs = ifs_alloc(inode, folio, 0);
1926  			iomap_set_range_dirty(folio, 0, end_pos - pos);
1927  		}
1928  
1929  		/*
1930  		 * Keep the I/O completion handler from clearing the writeback
1931  		 * bit until we have submitted all blocks by adding a bias to
1932  		 * ifs->write_bytes_pending, which is dropped after submitting
1933  		 * all blocks.
1934  		 */
1935  		WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
1936  		atomic_inc(&ifs->write_bytes_pending);
1937  	}
1938  
1939  	/*
1940  	 * Set the writeback bit ASAP, as the I/O completion for the single
1941  	 * block per folio case can run as soon as we're submitting the bio.
1942  	 */
1943  	folio_start_writeback(folio);
1944  
1945  	/*
1946  	 * Walk through the folio to find dirty areas to write back.
1947  	 */
1948  	while ((rlen = iomap_find_dirty_range(folio, &pos, end_pos))) {
1949  		error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
1950  				pos, rlen, &count);
1951  		if (error)
1952  			break;
1953  		pos += rlen;
1954  	}
1955  
1956  	if (count)
1957  		wpc->nr_folios++;
1958  
1959  	/*
1960  	 * We can have dirty bits set past the end of file in the page_mkwrite path
1961  	 * while mapping the last partial folio. Hence it's better to clear
1962  	 * all the dirty bits in the folio here.
1963  	 */
1964  	iomap_clear_range_dirty(folio, 0, folio_size(folio));
1965  
1966  	/*
1967  	 * Usually the writeback bit is cleared by the I/O completion handler.
1968  	 * But we may end up either not actually writing any blocks, or (when
1969  	 * there are multiple blocks in a folio) all I/O might have finished
1970  	 * already at this point.  In that case we need to clear the writeback
1971  	 * bit ourselves right after unlocking the folio.
1972  	 */
1973  	folio_unlock(folio);
1974  	if (ifs) {
1975  		if (atomic_dec_and_test(&ifs->write_bytes_pending))
1976  			folio_end_writeback(folio);
1977  	} else {
1978  		if (!count)
1979  			folio_end_writeback(folio);
1980  	}
1981  	mapping_set_error(inode->i_mapping, error);
1982  	return error;
1983  }
1984  
1985  int
1986  iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
1987  		struct iomap_writepage_ctx *wpc,
1988  		const struct iomap_writeback_ops *ops)
1989  {
1990  	struct folio *folio = NULL;
1991  	int error;
1992  
1993  	/*
1994  	 * Writeback from reclaim context should never happen except in the case
1995  	 * of a VM regression, so warn about it and refuse to write the data.
1996  	 */
1997  	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
1998  			PF_MEMALLOC))
1999  		return -EIO;
2000  
2001  	wpc->ops = ops;
2002  	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
2003  		error = iomap_writepage_map(wpc, wbc, folio);
2004  	return iomap_submit_ioend(wpc, error);
2005  }
2006  EXPORT_SYMBOL_GPL(iomap_writepages);
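
/*
 * Example (sketch only): a filesystem's ->writepages method typically embeds
 * struct iomap_writepage_ctx in its own context and supplies a
 * struct iomap_writeback_ops with at least a ->map_blocks callback;
 * "example_writeback_ops" and the context layout below are placeholders:
 *
 *	struct example_writepage_ctx {
 *		struct iomap_writepage_ctx ctx;
 *		... filesystem-private state, e.g. a cached allocation cookie ...
 *	};
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct example_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc.ctx,
 *				&example_writeback_ops);
 *	}
 */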
2007  
2008  static int __init iomap_buffered_init(void)
2009  {
2010  	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
2011  			   offsetof(struct iomap_ioend, io_bio),
2012  			   BIOSET_NEED_BVECS);
2013  }
2014  fs_initcall(iomap_buffered_init);
2015