// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

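/*
 * Context for post-read processing of a bio's data (fscrypt decryption
 * and/or fsverity verification).  It is allocated from
 * bio_post_read_ctx_pool and attached to the bio via ->bi_private.
 */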
struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

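/*
 * Finish read I/O on all folios in the bio: mark each folio uptodate if and
 * only if the bio completed without error, unlock it, free any attached
 * post-read context, and release the bio.
 */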
static void __read_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		folio_end_read(fi.folio, bio->bi_status == 0);
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

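/*
 * Decrypt the bio's data from a workqueue.  On success, continue with the
 * next post-read step; on failure, finish the read immediately.
 */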
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}

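/*
 * Verify the bio's data against the file's Merkle tree from a workqueue,
 * then finish the read.
 */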
static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

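/*
 * Run the next enabled post-read step (decryption, then verity) by enqueuing
 * it on the appropriate workqueue, or finish the read once no steps remain.
 */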
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

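/*
 * Post-read processing is needed only if the bio completed without error and
 * has a bio_post_read_ctx attached, i.e. decryption and/or verity is enabled
 * for the data it read.
 */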
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

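/*
 * Verity verification only applies to data pages within i_size.  Pages past
 * EOF hold the verity metadata itself (ext4 stores the Merkle tree beyond
 * i_size) and must not be verified.
 */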
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

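/*
 * If the bio's data will need fs-layer decryption and/or verity verification
 * after the read completes, allocate a bio_post_read_ctx recording the
 * required steps and attach it via ->bi_private.
 */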
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

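/*
 * For verity files, allow reads past i_size so that the verity metadata
 * stored beyond EOF can be read through the page cache; for everything else,
 * limit reads to i_size.
 */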
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

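/*
 * Read folios into the page cache: either the batch described by @rac
 * (readahead) or the single @folio (read_folio).  Folios whose blocks are
 * contiguous on disk are packed into large bios; anything this fast path
 * can't handle falls back to block_read_full_folio().
 */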
int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct folio *folio)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t first_block;
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac)
			folio = readahead_folio(rac);
		prefetchw(&folio->flags);

		if (folio_buffers(folio))
			goto confused;

		block_in_file = next_block =
			(sector_t)folio->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			first_block = map.m_pblk + map_offset;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this folio.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					folio_zero_segment(folio, 0,
							  folio_size(folio));
					folio_unlock(folio);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (!page_block)
				first_block = map.m_pblk;
			else if (first_block + page_block != map.m_pblk)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			folio_zero_segment(folio, first_hole << blkbits,
					  folio_size(folio));
			if (first_hole == 0) {
				if (ext4_need_verity(inode, folio->index) &&
				    !fsverity_verify_folio(folio))
					goto set_error_page;
				folio_end_read(folio, true);
				continue;
			}
		} else if (fully_mapped) {
			folio_set_mappedtodisk(folio);
		}

		/*
		 * This folio will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != first_block - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, folio->index);
			bio->bi_iter.bi_sector = first_block << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}

		length = first_hole << blkbits;
		if (!bio_add_folio(bio, folio, length, 0))
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = first_block + blocks_per_page - 1;
		continue;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!folio_test_uptodate(folio))
			block_read_full_folio(folio, ext4_get_block);
		else
			folio_unlock(folio);
next_page:
		; /* A label shall be followed by a statement until C23 */
	}
	if (bio)
		submit_bio(bio);
	return 0;
}

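/*
 * Create the slab cache and mempool used for bio_post_read_ctx allocations.
 */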
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);

	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

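/* Tear down the post-read mempool and slab cache. */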
void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}