1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * fs/f2fs/inline.c
4   * Copyright (c) 2013, Intel Corporation
5   * Authors: Huajun Li <huajun.li@intel.com>
6   *          Haicheng Li <haicheng.li@intel.com>
7   */
8  
9  #include <linux/fs.h>
10  #include <linux/f2fs_fs.h>
11  #include <linux/fiemap.h>
12  
13  #include "f2fs.h"
14  #include "node.h"
15  #include <trace/events/f2fs.h>
16  
support_inline_data(struct inode * inode)17  static bool support_inline_data(struct inode *inode)
18  {
19  	if (f2fs_used_in_atomic_write(inode))
20  		return false;
21  	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
22  		return false;
23  	if (i_size_read(inode) > MAX_INLINE_DATA(inode))
24  		return false;
25  	return true;
26  }
27  
f2fs_may_inline_data(struct inode * inode)28  bool f2fs_may_inline_data(struct inode *inode)
29  {
30  	if (!support_inline_data(inode))
31  		return false;
32  
33  	return !f2fs_post_read_required(inode);
34  }
35  
inode_has_blocks(struct inode * inode,struct page * ipage)36  static bool inode_has_blocks(struct inode *inode, struct page *ipage)
37  {
38  	struct f2fs_inode *ri = F2FS_INODE(ipage);
39  	int i;
40  
41  	if (F2FS_HAS_BLOCKS(inode))
42  		return true;
43  
44  	for (i = 0; i < DEF_NIDS_PER_INODE; i++) {
45  		if (ri->i_nid[i])
46  			return true;
47  	}
48  	return false;
49  }
50  
/*
 * Return true when the on-disk inline-data state of @inode is
 * inconsistent (fsck material); false when it looks sane.
 */
bool f2fs_sanity_check_inline_data(struct inode *inode, struct page *ipage)
{
	/* no inline flag -> nothing to be inconsistent about */
	if (!f2fs_has_inline_data(inode))
		return false;

	/* an inline inode must not own data or node blocks */
	if (inode_has_blocks(inode, ipage))
		return false;

	/* inline flag set on an inode that cannot hold inline data */
	if (!support_inline_data(inode))
		return true;

	/*
	 * used by sanity_check_inode(), when disk layout fields has not
	 * been synchronized to inmem fields.
	 */
	return (S_ISREG(inode->i_mode) &&
		(file_is_encrypt(inode) || file_is_verity(inode) ||
		(F2FS_I(inode)->i_flags & F2FS_COMPR_FL)));
}
70  
f2fs_may_inline_dentry(struct inode * inode)71  bool f2fs_may_inline_dentry(struct inode *inode)
72  {
73  	if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
74  		return false;
75  
76  	if (!S_ISDIR(inode->i_mode))
77  		return false;
78  
79  	return true;
80  }
81  
/*
 * Populate @folio (which must map file offset 0) with the inline data
 * stored in the inode page @ipage, zeroing the remainder of the folio,
 * and mark it uptodate.  No-op if the folio is already uptodate.
 */
void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage)
{
	struct inode *inode = folio_file_mapping(folio)->host;

	if (folio_test_uptodate(folio))
		return;

	/* inline data can only live at file index 0 */
	f2fs_bug_on(F2FS_I_SB(inode), folio_index(folio));

	/* zero everything past the inline area first */
	folio_zero_segment(folio, MAX_INLINE_DATA(inode), folio_size(folio));

	/* Copy the whole inline data block */
	memcpy_to_folio(folio, 0, inline_data_addr(inode, ipage),
		       MAX_INLINE_DATA(inode));
	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
}
99  
/*
 * Zero the inline data area of @inode (held in @ipage) from byte
 * @from to the end, dirtying the inode page.  Truncating to zero
 * also clears FI_DATA_EXIST.  Caller holds @ipage locked.
 */
void f2fs_truncate_inline_inode(struct inode *inode,
					struct page *ipage, u64 from)
{
	void *inline_data;

	/* nothing to clear beyond the inline area */
	if (from >= MAX_INLINE_DATA(inode))
		return;

	inline_data = inline_data_addr(inode, ipage);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memset(inline_data + from, 0, MAX_INLINE_DATA(inode) - from);
	set_page_dirty(ipage);

	if (!from)
		clear_inode_flag(inode, FI_DATA_EXIST);
}
117  
/*
 * Serve a read of @folio from the inode's inline data.  Returns 0 on
 * success, -EAGAIN when the inline flag was cleared by a racing
 * conversion (caller retries the normal read path), or a negative
 * errno.  @folio is unlocked in all cases.
 */
int f2fs_read_inline_data(struct inode *inode, struct folio *folio)
{
	struct page *ipage;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		folio_unlock(folio);
		return PTR_ERR(ipage);
	}

	/* inline data may have been converted out under us */
	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	/* only index 0 carries inline data; other indexes read as zeros */
	if (folio_index(folio))
		folio_zero_segment(folio, 0, folio_size(folio));
	else
		f2fs_do_read_inline_data(folio, ipage);

	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
	f2fs_put_page(ipage, 1);
	folio_unlock(folio);
	return 0;
}
144  
/*
 * Move dn->inode's inline data out into the regular data block backing
 * @page, write it synchronously, then clear the inline state.  The
 * caller supplies a locked, grabbed @page at index 0 and a dnode set
 * up on the inode page; the dnode is consumed (put) on all paths.
 */
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.ino = dn->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
		.io_type = FS_DATA_IO,
	};
	struct node_info ni;
	int dirty, err;

	/* no payload to move: just tear down the inline flag */
	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false);
	if (err) {
		f2fs_truncate_data_blocks_range(dn, 1);
		f2fs_put_dnode(dn);
		return err;
	}

	fio.version = ni.version;

	/* the block just reserved must still be unallocated on disk */
	if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
		f2fs_put_dnode(dn);
		set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
		f2fs_warn(fio.sbi, "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
			  __func__, dn->inode->i_ino, dn->data_blkaddr);
		f2fs_handle_error(fio.sbi, ERROR_INVALID_BLKADDR);
		return -EFSCORRUPTED;
	}

	f2fs_bug_on(F2FS_P_SB(page), folio_test_writeback(page_folio(page)));

	/* copy the inline payload into the page cache page */
	f2fs_do_read_inline_data(page_folio(page), dn->inode_page);
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.old_blkaddr = dn->data_blkaddr;
	set_inode_flag(dn->inode, FI_HOT_DATA);
	f2fs_outplace_write_data(dn, &fio);
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	if (dirty) {
		inode_dec_dirty_pages(dn->inode);
		f2fs_remove_dirty_inode(dn->inode);
	}

	/* this converted inline_data should be recovered. */
	set_inode_flag(dn->inode, FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	f2fs_truncate_inline_inode(dn->inode, dn->inode_page, 0);
	clear_page_private_inline(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	clear_inode_flag(dn->inode, FI_INLINE_DATA);
	f2fs_put_dnode(dn);
	return 0;
}
216  
/*
 * Public entry point to convert @inode's inline data into a regular
 * data block.  Grabs the page cache page at index 0 and the inode
 * page, then delegates to f2fs_convert_inline_page() under
 * f2fs_lock_op().  Returns 0 on success (or if the inode has no
 * inline data) or a negative errno.
 */
int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	/* conversion dirties metadata: refuse on read-only media/mounts */
	if (f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
		return -EROFS;

	if (!f2fs_has_inline_data(inode))
		return 0;

	err = f2fs_dquot_initialize(inode);
	if (err)
		return err;

	page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	/* recheck under lock_op: a racing conversion may have won */
	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);

	/* dn is initialized on every path where err == 0 here */
	if (!err)
		f2fs_balance_fs(sbi, dn.node_changed);

	return err;
}
262  
/*
 * Write back @folio (index 0) into the inode's inline data area
 * instead of issuing block I/O.  Returns -EAGAIN when the inline
 * flag was cleared meanwhile, so the caller falls back to the
 * regular data write path.
 */
int f2fs_write_inline_data(struct inode *inode, struct folio *folio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	/* a racing conversion moved the data out; use the normal path */
	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	/* inline data is only ever at file index 0 */
	f2fs_bug_on(F2FS_I_SB(inode), folio->index);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy_from_folio(inline_data_addr(inode, ipage),
			 folio, 0, MAX_INLINE_DATA(inode));
	set_page_dirty(ipage);

	/* the data now lives in the node page; drop the data-dirty tag */
	f2fs_clear_page_cache_dirty_tag(folio);

	set_inode_flag(inode, FI_APPEND_WRITE);
	set_inode_flag(inode, FI_DATA_EXIST);

	clear_page_private_inline(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
293  
/*
 * Roll-forward recovery of inline data from the recovered node page
 * @npage.  Returns 1 when inline data was recovered, 0 when nothing
 * inline needed recovering, or a negative errno.
 */
int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove data blocks, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	/* case o/o: both old and new state are inline -> copy payload */
	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = f2fs_get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage))
			return PTR_ERR(ipage);

		f2fs_wait_on_page_writeback(ipage, NODE, true, true);

		src_addr = inline_data_addr(inode, npage);
		dst_addr = inline_data_addr(inode, ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA(inode));

		set_inode_flag(inode, FI_INLINE_DATA);
		set_inode_flag(inode, FI_DATA_EXIST);

		set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		return 1;
	}

	/* case o/x: drop the stale inline data before block recovery */
	if (f2fs_has_inline_data(inode)) {
		ipage = f2fs_get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage))
			return PTR_ERR(ipage);
		f2fs_truncate_inline_inode(inode, ipage, 0);
		stat_dec_inline_inode(inode);
		clear_inode_flag(inode, FI_INLINE_DATA);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		/* case x/o: drop data blocks, then recover the inline copy */
		int ret;

		ret = f2fs_truncate_blocks(inode, 0, false);
		if (ret)
			return ret;
		stat_inc_inline_inode(inode);
		goto process_inline;
	}
	return 0;
}
352  
/*
 * Look up @fname in the inline dentry area of directory @dir.
 * On a hit, returns the entry and hands the (unlocked, referenced)
 * inode page back via *res_page for the caller to release; on a
 * miss returns NULL; on failure returns NULL with *res_page set to
 * an ERR_PTR.
 */
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
					const struct f2fs_filename *fname,
					struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;
	void *inline_dentry;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage)) {
		*res_page = ipage;
		return NULL;
	}

	inline_dentry = inline_data_addr(dir, ipage);

	make_dentry_ptr_inline(dir, &d, inline_dentry);
	de = f2fs_find_target_dentry(&d, fname, NULL);
	unlock_page(ipage);
	/* f2fs_find_target_dentry() may report an error (e.g. corruption) */
	if (IS_ERR(de)) {
		*res_page = ERR_CAST(de);
		de = NULL;
	}
	/* keep the page reference only when we return an entry in it */
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	return de;
}
385  
f2fs_make_empty_inline_dir(struct inode * inode,struct inode * parent,struct page * ipage)386  int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
387  							struct page *ipage)
388  {
389  	struct f2fs_dentry_ptr d;
390  	void *inline_dentry;
391  
392  	inline_dentry = inline_data_addr(inode, ipage);
393  
394  	make_dentry_ptr_inline(inode, &d, inline_dentry);
395  	f2fs_do_make_empty_dir(inode, parent, &d);
396  
397  	set_page_dirty(ipage);
398  
399  	/* update i_size to MAX_INLINE_DATA */
400  	if (i_size_read(inode) < MAX_INLINE_DATA(inode))
401  		f2fs_i_size_write(inode, MAX_INLINE_DATA(inode));
402  	return 0;
403  }
404  
405  /*
406   * NOTE: ipage is grabbed by caller, but if any error occurs, we should
407   * release ipage in this function.
408   */
/*
 * Convert an inline directory by copying its dentries verbatim into a
 * newly reserved data block at index 0, then clearing the inline
 * state.  Per the contract above, @ipage is released here on error
 * (f2fs_reserve_block()/f2fs_put_dnode() drop it via the dnode).
 */
static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
							void *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	struct f2fs_dentry_ptr src, dst;
	int err;

	page = f2fs_grab_cache_page(dir->i_mapping, 0, true);
	if (!page) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	/* the freshly reserved block must still be NEW_ADDR on disk */
	if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
		f2fs_put_dnode(&dn);
		set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
		f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
			  __func__, dir->i_ino, dn.data_blkaddr);
		f2fs_handle_error(F2FS_P_SB(page), ERROR_INVALID_BLKADDR);
		err = -EFSCORRUPTED;
		goto out;
	}

	f2fs_wait_on_page_writeback(page, DATA, true, true);

	dentry_blk = page_address(page);

	/*
	 * Start by zeroing the full block, to ensure that all unused space is
	 * zeroed and no uninitialized memory is leaked to disk.
	 */
	memset(dentry_blk, 0, F2FS_BLKSIZE);

	make_dentry_ptr_inline(dir, &src, inline_dentry);
	make_dentry_ptr_block(dir, &dst, dentry_blk);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dst.bitmap, src.bitmap, src.nr_bitmap);
	memcpy(dst.dentry, src.dentry, SIZE_OF_DIR_ENTRY * src.max);
	memcpy(dst.filename, src.filename, src.max * F2FS_SLOT_LEN);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	f2fs_truncate_inline_inode(dir, ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);

	/*
	 * should retrieve reserved space which was used to keep
	 * inline_dentry's structure for backward compatibility.
	 */
	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
			!f2fs_has_inline_xattr(dir))
		F2FS_I(dir)->i_inline_xattr_size = 0;

	/* the directory now has one level holding a single block */
	f2fs_i_depth_write(dir, 1);
	if (i_size_read(dir) < PAGE_SIZE)
		f2fs_i_size_write(dir, PAGE_SIZE);
out:
	f2fs_put_page(page, 1);
	return err;
}
482  
/*
 * Re-insert every dentry found in @inline_dentry into @dir through the
 * regular (hashed, multi-block) directory path.  On failure all dentry
 * pages created so far are punched out so the caller can restore the
 * inline copy.
 */
static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
{
	struct f2fs_dentry_ptr d;
	unsigned long bit_pos = 0;
	int err = 0;

	make_dentry_ptr_inline(dir, &d, inline_dentry);

	while (bit_pos < d.max) {
		struct f2fs_dir_entry *de;
		struct f2fs_filename fname;
		nid_t ino;
		umode_t fake_mode;

		if (!test_bit_le(bit_pos, d.bitmap)) {
			bit_pos++;
			continue;
		}

		de = &d.dentry[bit_pos];

		/* skip corrupted zero-length entries */
		if (unlikely(!de->name_len)) {
			bit_pos++;
			continue;
		}

		/*
		 * We only need the disk_name and hash to move the dentry.
		 * We don't need the original or casefolded filenames.
		 */
		memset(&fname, 0, sizeof(fname));
		fname.disk_name.name = d.filename[bit_pos];
		fname.disk_name.len = le16_to_cpu(de->name_len);
		fname.hash = de->hash_code;

		ino = le32_to_cpu(de->ino);
		/* reconstruct a mode just good enough to set the file type */
		fake_mode = fs_ftype_to_dtype(de->file_type) << S_DT_SHIFT;

		err = f2fs_add_regular_entry(dir, &fname, NULL, ino, fake_mode);
		if (err)
			goto punch_dentry_pages;

		/* long names occupy several consecutive slots */
		bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
	}
	return 0;
punch_dentry_pages:
	/* undo partial conversion: drop all newly created dentry blocks */
	truncate_inode_pages(&dir->i_data, 0);
	f2fs_truncate_blocks(dir, 0, false);
	f2fs_remove_dirty_inode(dir);
	return err;
}
534  
/*
 * Convert an inline directory whose dir_level requires re-hashing:
 * snapshot the inline dentries into a temporary buffer, clear the
 * inline area, then re-add each entry through the regular directory
 * path.  On failure the inline copy is restored from the backup.
 * Per the contract above, @ipage is released here on error.
 */
static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
							void *inline_dentry)
{
	void *backup_dentry;
	int err;

	backup_dentry = f2fs_kmalloc(F2FS_I_SB(dir),
				MAX_INLINE_DATA(dir), GFP_F2FS_ZERO);
	if (!backup_dentry) {
		f2fs_put_page(ipage, 1);
		return -ENOMEM;
	}

	memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA(dir));
	f2fs_truncate_inline_inode(dir, ipage, 0);

	/* drop the page lock: f2fs_add_inline_entries() takes it again */
	unlock_page(ipage);

	err = f2fs_add_inline_entries(dir, backup_dentry);
	if (err)
		goto recover;

	lock_page(ipage);

	stat_dec_inline_dir(dir);
	clear_inode_flag(dir, FI_INLINE_DENTRY);

	/*
	 * should retrieve reserved space which was used to keep
	 * inline_dentry's structure for backward compatibility.
	 */
	if (!f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(dir)) &&
			!f2fs_has_inline_xattr(dir))
		F2FS_I(dir)->i_inline_xattr_size = 0;

	kfree(backup_dentry);
	return 0;
recover:
	/* restore the inline dentries we wiped above */
	lock_page(ipage);
	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
	f2fs_i_depth_write(dir, 0);
	f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);

	kfree(backup_dentry);
	return err;
}
584  
do_convert_inline_dir(struct inode * dir,struct page * ipage,void * inline_dentry)585  static int do_convert_inline_dir(struct inode *dir, struct page *ipage,
586  							void *inline_dentry)
587  {
588  	if (!F2FS_I(dir)->i_dir_level)
589  		return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
590  	else
591  		return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
592  }
593  
/*
 * Convert inline directory @dir to regular form if inserting @dentry
 * would not fit in the inline area.  Returns 0 when no conversion is
 * needed or it succeeded, or a negative errno.
 */
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	struct f2fs_filename fname;
	void *inline_dentry = NULL;
	int err = 0;

	if (!f2fs_has_inline_dentry(dir))
		return 0;

	f2fs_lock_op(sbi);

	err = f2fs_setup_filename(dir, &dentry->d_name, 0, &fname);
	if (err)
		goto out;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out_fname;
	}

	/* the new name still fits inline: nothing to convert */
	if (f2fs_has_enough_room(dir, ipage, &fname)) {
		f2fs_put_page(ipage, 1);
		goto out_fname;
	}

	inline_dentry = inline_data_addr(dir, ipage);

	/* on error do_convert_inline_dir() releases ipage itself */
	err = do_convert_inline_dir(dir, ipage, inline_dentry);
	if (!err)
		f2fs_put_page(ipage, 1);
out_fname:
	f2fs_free_filename(&fname);
out:
	f2fs_unlock_op(sbi);
	return err;
}
633  
/*
 * Insert @fname (pointing at @ino with type bits from @mode) into the
 * inline dentry area of @dir, initializing @inode's metadata when a
 * new inode is being linked.  Returns -EAGAIN after converting the
 * directory when the inline area is full, so the caller retries via
 * the regular directory path.
 */
int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
			  struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	void *inline_dentry = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
	struct page *page = NULL;
	int err = 0;

	ipage = f2fs_get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(dir, ipage);
	make_dentry_ptr_inline(dir, &d, inline_dentry);

	bit_pos = f2fs_room_for_filename(d.bitmap, slots, d.max);
	if (bit_pos >= d.max) {
		/* no room: convert, then ask the caller to retry */
		/* on error do_convert_inline_dir() releases ipage itself */
		err = do_convert_inline_dir(dir, ipage, inline_dentry);
		if (err)
			return err;
		err = -EAGAIN;
		goto out;
	}

	if (inode) {
		/* nested because the caller may already hold dir's i_sem */
		f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
						SINGLE_DEPTH_NESTING);
		page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);

	f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
			   bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		f2fs_i_pino_write(inode, dir->i_ino);

		/* synchronize inode page's data from inode cache */
		if (is_inode_flag_set(inode, FI_NEW_INODE))
			f2fs_update_inode(inode, page);

		f2fs_put_page(page, 1);
	}

	f2fs_update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		f2fs_up_write(&F2FS_I(inode)->i_sem);
out:
	f2fs_put_page(ipage, 1);
	return err;
}
698  
/*
 * Remove @dentry (located inside inode page @page of directory @dir)
 * from the inline dentry area by clearing its bitmap slots, update
 * the directory timestamps, and drop @inode's link if one is given.
 * Consumes the caller's reference on @page.
 */
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_dentry_ptr d;
	void *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE, true, true);

	inline_dentry = inline_data_addr(dir, page);
	make_dentry_ptr_inline(dir, &d, inline_dentry);

	/* clear every slot the (possibly multi-slot) name occupied */
	bit_pos = dentry - d.dentry;
	for (i = 0; i < slots; i++)
		__clear_bit_le(bit_pos + i, d.bitmap);

	set_page_dirty(page);
	f2fs_put_page(page, 1);

	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	f2fs_mark_inode_dirty_sync(dir, false);

	if (inode)
		f2fs_drop_nlink(dir, inode);
}
727  
f2fs_empty_inline_dir(struct inode * dir)728  bool f2fs_empty_inline_dir(struct inode *dir)
729  {
730  	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
731  	struct page *ipage;
732  	unsigned int bit_pos = 2;
733  	void *inline_dentry;
734  	struct f2fs_dentry_ptr d;
735  
736  	ipage = f2fs_get_node_page(sbi, dir->i_ino);
737  	if (IS_ERR(ipage))
738  		return false;
739  
740  	inline_dentry = inline_data_addr(dir, ipage);
741  	make_dentry_ptr_inline(dir, &d, inline_dentry);
742  
743  	bit_pos = find_next_bit_le(d.bitmap, d.max, bit_pos);
744  
745  	f2fs_put_page(ipage, 1);
746  
747  	if (bit_pos < d.max)
748  		return false;
749  
750  	return true;
751  }
752  
/*
 * readdir implementation for inline directories: emit all inline
 * dentries into @ctx and advance ctx->pos to d.max when done.
 * Returns 0 on success/already-done, or a negative errno.
 */
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
				struct fscrypt_str *fstr)
{
	struct inode *inode = file_inode(file);
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;
	void *inline_dentry = NULL;
	int err;

	/* first call only computes d.max to test whether we are done */
	make_dentry_ptr_inline(inode, &d, inline_dentry);

	if (ctx->pos == d.max)
		return 0;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	/*
	 * f2fs_readdir was protected by inode.i_rwsem, it is safe to access
	 * ipage without page's lock held.
	 */
	unlock_page(ipage);

	inline_dentry = inline_data_addr(inode, ipage);

	make_dentry_ptr_inline(inode, &d, inline_dentry);

	err = f2fs_fill_dentries(ctx, &d, 0, fstr);
	if (!err)
		ctx->pos = d.max;

	f2fs_put_page(ipage, 0);
	/* f2fs_fill_dentries() may return a positive "stop" value */
	return err < 0 ? err : 0;
}
788  
/*
 * FIEMAP for inline inodes: report the inline payload as a single
 * inline/unaligned/last extent whose byte address points into the
 * on-disk inode block.  Returns -EAGAIN when the inode is no longer
 * inline so the caller falls back to the regular fiemap path.
 */
int f2fs_inline_data_fiemap(struct inode *inode,
		struct fiemap_extent_info *fieinfo, __u64 start, __u64 len)
{
	__u64 byteaddr, ilen;
	__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
		FIEMAP_EXTENT_LAST;
	struct node_info ni;
	struct page *ipage;
	int err = 0;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	/* file/symlink lost its inline data: retry via normal fiemap */
	if ((S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
				!f2fs_has_inline_data(inode)) {
		err = -EAGAIN;
		goto out;
	}

	if (S_ISDIR(inode->i_mode) && !f2fs_has_inline_dentry(inode)) {
		err = -EAGAIN;
		goto out;
	}

	/* clamp the reported extent to [start, min(i_size, inline area)) */
	ilen = min_t(size_t, MAX_INLINE_DATA(inode), i_size_read(inode));
	if (start >= ilen)
		goto out;
	if (start + len < ilen)
		ilen = start + len;
	ilen -= start;

	err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni, false);
	if (err)
		goto out;

	/* physical byte address = node block address + inline-area offset */
	byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
	byteaddr += (char *)inline_data_addr(inode, ipage) -
					(char *)F2FS_INODE(ipage);
	err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags);
	trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err);
out:
	f2fs_put_page(ipage, 1);
	return err;
}
834