Lines Matching +full:page +full:- +full:offset
1 // SPDX-License-Identifier: GPL-2.0
23 #define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)
35 if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) { in f2fs_check_nid_range()
37 f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.", in f2fs_check_nid_range()
40 return -EFSCORRUPTED; in f2fs_check_nid_range()
48 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; in f2fs_available_free_memory()
60 avail_ram = val.totalram - val.totalhigh; in f2fs_available_free_memory()
66 mem_size = (nm_i->nid_cnt[FREE_NID] * in f2fs_available_free_memory()
68 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); in f2fs_available_free_memory()
70 mem_size = (nm_i->nat_cnt[TOTAL_NAT] * in f2fs_available_free_memory()
72 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); in f2fs_available_free_memory()
76 if (sbi->sb->s_bdi->wb.dirty_exceeded) in f2fs_available_free_memory()
79 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); in f2fs_available_free_memory()
84 mem_size += sbi->im[i].ino_num * in f2fs_available_free_memory()
87 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); in f2fs_available_free_memory()
91 struct extent_tree_info *eti = &sbi->extent_tree[etype]; in f2fs_available_free_memory()
93 mem_size = (atomic_read(&eti->total_ext_tree) * in f2fs_available_free_memory()
95 atomic_read(&eti->total_ext_node) * in f2fs_available_free_memory()
97 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); in f2fs_available_free_memory()
99 mem_size = (atomic_read(&dcc->discard_cmd_cnt) * in f2fs_available_free_memory()
101 res = mem_size < (avail_ram * nm_i->ram_thresh / 100); in f2fs_available_free_memory()
107		 * free memory is lower than the watermark or the cached page count in f2fs_available_free_memory()
108		 * exceeds the threshold, so deny caching the compressed page. in f2fs_available_free_memory()
110 res = (free_ram > avail_ram * sbi->compress_watermark / 100) && in f2fs_available_free_memory()
111 (COMPRESS_MAPPING(sbi)->nrpages < in f2fs_available_free_memory()
112 free_ram * sbi->compress_percent / 100); in f2fs_available_free_memory()
117 if (!sbi->sb->s_bdi->wb.dirty_exceeded) in f2fs_available_free_memory()
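
Every check in f2fs_available_free_memory() above follows the same pattern: a cache may keep growing only while its footprint stays under a fixed fraction of available RAM (ram_thresh percent, scaled down further by a per-cache shift). A minimal userspace sketch of that arithmetic, assuming 4 KiB pages; none of the names or numbers below are f2fs's own values:

/* Sketch of the threshold pattern above:
 *   mem_size < (avail_ram * ram_thresh / 100) >> shift
 * Page size, entry sizes and counts are assumptions, not f2fs defaults. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

static bool cache_may_grow(unsigned long long entries,
			   unsigned long long entry_bytes,
			   unsigned long long avail_ram_pages,
			   unsigned int ram_thresh_pct,
			   unsigned int shift)
{
	unsigned long long mem_pages = (entries * entry_bytes) >> PAGE_SHIFT;
	unsigned long long budget = (avail_ram_pages * ram_thresh_pct / 100) >> shift;

	return mem_pages < budget;
}

int main(void)
{
	unsigned long long avail = (4ULL << 30) >> PAGE_SHIFT;	/* ~4 GiB of pages */

	/* NAT-cache-style check: at most a quarter of the 10% budget */
	printf("nat cache may grow: %d\n",
	       cache_may_grow(1000000, 64, avail, 10, 2));
	return 0;
}
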
123 static void clear_node_page_dirty(struct page *page) in clear_node_page_dirty() argument
125 if (PageDirty(page)) { in clear_node_page_dirty()
126 f2fs_clear_page_cache_dirty_tag(page_folio(page)); in clear_node_page_dirty()
127 clear_page_dirty_for_io(page); in clear_node_page_dirty()
128 dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES); in clear_node_page_dirty()
130 ClearPageUptodate(page); in clear_node_page_dirty()
133 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_current_nat_page()
138 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) in get_next_nat_page()
140 struct page *src_page; in get_next_nat_page()
141 struct page *dst_page; in get_next_nat_page()
149 /* get current nat block page with lock */ in get_next_nat_page()
191 f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne); in __init_nat_entry()
192 else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne)) in __init_nat_entry()
196 node_info_from_raw_nat(&ne->ni, raw_ne); in __init_nat_entry()
198 spin_lock(&nm_i->nat_list_lock); in __init_nat_entry()
199 list_add_tail(&ne->list, &nm_i->nat_entries); in __init_nat_entry()
200 spin_unlock(&nm_i->nat_list_lock); in __init_nat_entry()
202 nm_i->nat_cnt[TOTAL_NAT]++; in __init_nat_entry()
203 nm_i->nat_cnt[RECLAIMABLE_NAT]++; in __init_nat_entry()
211 ne = radix_tree_lookup(&nm_i->nat_root, n); in __lookup_nat_cache()
215 spin_lock(&nm_i->nat_list_lock); in __lookup_nat_cache()
216 if (!list_empty(&ne->list)) in __lookup_nat_cache()
217 list_move_tail(&ne->list, &nm_i->nat_entries); in __lookup_nat_cache()
218 spin_unlock(&nm_i->nat_list_lock); in __lookup_nat_cache()
227 return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr); in __gang_lookup_nat_cache()
232 radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); in __del_from_nat_cache()
233 nm_i->nat_cnt[TOTAL_NAT]--; in __del_from_nat_cache()
234 nm_i->nat_cnt[RECLAIMABLE_NAT]--; in __del_from_nat_cache()
241 nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid); in __grab_nat_entry_set()
244 head = radix_tree_lookup(&nm_i->nat_set_root, set); in __grab_nat_entry_set()
249 INIT_LIST_HEAD(&head->entry_list); in __grab_nat_entry_set()
250 INIT_LIST_HEAD(&head->set_list); in __grab_nat_entry_set()
251 head->set = set; in __grab_nat_entry_set()
252 head->entry_cnt = 0; in __grab_nat_entry_set()
253 f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head); in __grab_nat_entry_set()
274 head->entry_cnt++; in __set_nat_cache_dirty()
281 nm_i->nat_cnt[DIRTY_NAT]++; in __set_nat_cache_dirty()
282 nm_i->nat_cnt[RECLAIMABLE_NAT]--; in __set_nat_cache_dirty()
285 spin_lock(&nm_i->nat_list_lock); in __set_nat_cache_dirty()
287 list_del_init(&ne->list); in __set_nat_cache_dirty()
289 list_move_tail(&ne->list, &head->entry_list); in __set_nat_cache_dirty()
290 spin_unlock(&nm_i->nat_list_lock); in __set_nat_cache_dirty()
296 spin_lock(&nm_i->nat_list_lock); in __clear_nat_cache_dirty()
297 list_move_tail(&ne->list, &nm_i->nat_entries); in __clear_nat_cache_dirty()
298 spin_unlock(&nm_i->nat_list_lock); in __clear_nat_cache_dirty()
301 set->entry_cnt--; in __clear_nat_cache_dirty()
302 nm_i->nat_cnt[DIRTY_NAT]--; in __clear_nat_cache_dirty()
303 nm_i->nat_cnt[RECLAIMABLE_NAT]++; in __clear_nat_cache_dirty()
309 return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep, in __gang_lookup_nat_set()
313 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page) in f2fs_in_warm_node_list() argument
315 return NODE_MAPPING(sbi) == page->mapping && in f2fs_in_warm_node_list()
316 IS_DNODE(page) && is_cold_node(page); in f2fs_in_warm_node_list()
321 spin_lock_init(&sbi->fsync_node_lock); in f2fs_init_fsync_node_info()
322 INIT_LIST_HEAD(&sbi->fsync_node_list); in f2fs_init_fsync_node_info()
323 sbi->fsync_seg_id = 0; in f2fs_init_fsync_node_info()
324 sbi->fsync_node_num = 0; in f2fs_init_fsync_node_info()
328 struct page *page) in f2fs_add_fsync_node_entry() argument
337 get_page(page); in f2fs_add_fsync_node_entry()
338 fn->page = page; in f2fs_add_fsync_node_entry()
339 INIT_LIST_HEAD(&fn->list); in f2fs_add_fsync_node_entry()
341 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_add_fsync_node_entry()
342 list_add_tail(&fn->list, &sbi->fsync_node_list); in f2fs_add_fsync_node_entry()
343 fn->seq_id = sbi->fsync_seg_id++; in f2fs_add_fsync_node_entry()
344 seq_id = fn->seq_id; in f2fs_add_fsync_node_entry()
345 sbi->fsync_node_num++; in f2fs_add_fsync_node_entry()
346 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_add_fsync_node_entry()
351 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page) in f2fs_del_fsync_node_entry() argument
356 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_del_fsync_node_entry()
357 list_for_each_entry(fn, &sbi->fsync_node_list, list) { in f2fs_del_fsync_node_entry()
358 if (fn->page == page) { in f2fs_del_fsync_node_entry()
359 list_del(&fn->list); in f2fs_del_fsync_node_entry()
360 sbi->fsync_node_num--; in f2fs_del_fsync_node_entry()
361 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_del_fsync_node_entry()
363 put_page(page); in f2fs_del_fsync_node_entry()
367 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_del_fsync_node_entry()
375 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_reset_fsync_node_info()
376 sbi->fsync_seg_id = 0; in f2fs_reset_fsync_node_info()
377 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_reset_fsync_node_info()
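
The fsync-node bookkeeping above hands every tracked node page a monotonically increasing seq_id; a waiter later only has to drain entries whose id is at or below the one it was given. A single-threaded sketch of that idea with illustrative names and no locking (the kernel version uses a spinlock and list_head):

/* Append entries with increasing seq ids, then drain everything up to a given id. */
#include <stdio.h>
#include <stdlib.h>

struct fsync_entry {
	unsigned int seq_id;
	struct fsync_entry *next;
};

static struct fsync_entry *head;
static struct fsync_entry **tail = &head;
static unsigned int next_seq;

static unsigned int track(void)	/* f2fs_add_fsync_node_entry()-style append */
{
	struct fsync_entry *e = calloc(1, sizeof(*e));

	e->seq_id = next_seq++;
	*tail = e;
	tail = &e->next;
	return e->seq_id;		/* the caller will wait on ids <= this */
}

static void wait_up_to(unsigned int seq_id)	/* writeback-wait-style drain */
{
	while (head && head->seq_id <= seq_id) {
		struct fsync_entry *e = head;

		printf("waiting on entry %u\n", e->seq_id);
		head = e->next;
		free(e);
	}
	if (!head)
		tail = &head;
}

int main(void)
{
	track();
	track();
	wait_up_to(track());		/* drains entries 0, 1 and 2 in order */
	return 0;
}
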
386 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_need_dentry_mark()
393 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_need_dentry_mark()
403 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_is_checkpointed_node()
407 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_is_checkpointed_node()
417 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_need_inode_block_update()
423 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_need_inode_block_update()
435 if (f2fs_rwsem_is_locked(&sbi->cp_global_sem)) in cache_nat_entry()
442 f2fs_down_write(&nm_i->nat_tree_lock); in cache_nat_entry()
447 f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) || in cache_nat_entry()
449 le32_to_cpu(ne->block_addr) || in cache_nat_entry()
450 nat_get_version(e) != ne->version); in cache_nat_entry()
451 f2fs_up_write(&nm_i->nat_tree_lock); in cache_nat_entry()
461 struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true); in set_node_addr()
463 f2fs_down_write(&nm_i->nat_tree_lock); in set_node_addr()
464 e = __lookup_nat_cache(nm_i, ni->nid); in set_node_addr()
467 copy_node_info(&e->ni, ni); in set_node_addr()
468 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); in set_node_addr()
475 copy_node_info(&e->ni, ni); in set_node_addr()
476 f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR); in set_node_addr()
483 f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr); in set_node_addr()
505 if (ni->nid != ni->ino) in set_node_addr()
506 e = __lookup_nat_cache(nm_i, ni->ino); in set_node_addr()
508 if (fsync_done && ni->nid == ni->ino) in set_node_addr()
512 f2fs_up_write(&nm_i->nat_tree_lock); in set_node_addr()
520 if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock)) in f2fs_try_to_free_nats()
523 spin_lock(&nm_i->nat_list_lock); in f2fs_try_to_free_nats()
527 if (list_empty(&nm_i->nat_entries)) in f2fs_try_to_free_nats()
530 ne = list_first_entry(&nm_i->nat_entries, in f2fs_try_to_free_nats()
532 list_del(&ne->list); in f2fs_try_to_free_nats()
533 spin_unlock(&nm_i->nat_list_lock); in f2fs_try_to_free_nats()
536 nr_shrink--; in f2fs_try_to_free_nats()
538 spin_lock(&nm_i->nat_list_lock); in f2fs_try_to_free_nats()
540 spin_unlock(&nm_i->nat_list_lock); in f2fs_try_to_free_nats()
542 f2fs_up_write(&nm_i->nat_tree_lock); in f2fs_try_to_free_nats()
543 return nr - nr_shrink; in f2fs_try_to_free_nats()
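
Clean NAT entries sit on an LRU list: __lookup_nat_cache() moves an entry to the tail on every hit, and f2fs_try_to_free_nats() evicts from the head. A self-contained sketch of that move-to-tail / evict-from-head pattern, using a tiny hand-rolled intrusive list rather than the kernel's list_head; all names here are illustrative:

/* Move-to-tail on hit, evict-from-head on shrink. Single-threaded sketch. */
#include <stdio.h>

struct lru_node {
	int key;
	struct lru_node *prev, *next;
};

static struct lru_node lru = { .prev = &lru, .next = &lru };	/* empty list head */

static void lru_del(struct lru_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void lru_add_tail(struct lru_node *n)
{
	n->prev = lru.prev;
	n->next = &lru;
	lru.prev->next = n;
	lru.prev = n;
}

static void lru_touch(struct lru_node *n)	/* __lookup_nat_cache()-style hit */
{
	lru_del(n);
	lru_add_tail(n);
}

static struct lru_node *lru_evict(void)		/* shrink-style eviction */
{
	struct lru_node *victim = lru.next;

	if (victim == &lru)
		return NULL;
	lru_del(victim);
	return victim;
}

int main(void)
{
	struct lru_node a = { .key = 1 }, b = { .key = 2 }, c = { .key = 3 };

	lru_add_tail(&a);
	lru_add_tail(&b);
	lru_add_tail(&c);
	lru_touch(&a);				/* a is now most recently used */
	printf("evicted key %d\n", lru_evict()->key);	/* prints 2 */
	return 0;
}
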
551 struct f2fs_journal *journal = curseg->journal; in f2fs_get_node_info()
554 struct page *page = NULL; in f2fs_get_node_info() local
561 ni->nid = nid; in f2fs_get_node_info()
564 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_get_node_info()
567 ni->ino = nat_get_ino(e); in f2fs_get_node_info()
568 ni->blk_addr = nat_get_blkaddr(e); in f2fs_get_node_info()
569 ni->version = nat_get_version(e); in f2fs_get_node_info()
570 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_get_node_info()
580 if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) { in f2fs_get_node_info()
581 down_read(&curseg->journal_rwsem); in f2fs_get_node_info()
582 } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) || in f2fs_get_node_info()
583 !down_read_trylock(&curseg->journal_rwsem)) { in f2fs_get_node_info()
584 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_get_node_info()
593 up_read(&curseg->journal_rwsem); in f2fs_get_node_info()
595 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_get_node_info()
599 /* Fill node_info from nat page */ in f2fs_get_node_info()
601 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_get_node_info()
603 page = f2fs_get_meta_page(sbi, index); in f2fs_get_node_info()
604 if (IS_ERR(page)) in f2fs_get_node_info()
605 return PTR_ERR(page); in f2fs_get_node_info()
607 nat_blk = (struct f2fs_nat_block *)page_address(page); in f2fs_get_node_info()
608 ne = nat_blk->entries[nid - start_nid]; in f2fs_get_node_info()
610 f2fs_put_page(page, 1); in f2fs_get_node_info()
615 return -EFAULT; in f2fs_get_node_info()
625 static void f2fs_ra_node_pages(struct page *parent, int start, int n) in f2fs_ra_node_pages()
647 const long direct_index = ADDRS_PER_INODE(dn->inode); in f2fs_get_next_page_offset()
648 const long direct_blks = ADDRS_PER_BLOCK(dn->inode); in f2fs_get_next_page_offset()
649 const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK; in f2fs_get_next_page_offset()
650 unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode); in f2fs_get_next_page_offset()
651 int cur_level = dn->cur_level; in f2fs_get_next_page_offset()
652 int max_level = dn->max_level; in f2fs_get_next_page_offset()
655 if (!dn->max_level) in f2fs_get_next_page_offset()
658 while (max_level-- > cur_level) in f2fs_get_next_page_offset()
661 switch (dn->max_level) { in f2fs_get_next_page_offset()
672 f2fs_bug_on(F2FS_I_SB(dn->inode), 1); in f2fs_get_next_page_offset()
675 return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base; in f2fs_get_next_page_offset()
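
The return expression of f2fs_get_next_page_offset() rounds a failed offset up to the start of the next subtree: chunks of skipped_unit blocks counted from base. A tiny sketch of that arithmetic; 923 and 1018 are the usual default pointer counts for an inode and a direct node, used here purely as example numbers:

/* Round a failed offset up to the start of the next unit-sized chunk past base. */
#include <stdio.h>

static unsigned long long next_chunk_start(unsigned long long pgofs,
					   unsigned long long base,
					   unsigned long long unit)
{
	return ((pgofs - base) / unit + 1) * unit + base;
}

int main(void)
{
	/* base 923, unit 1018: a hole hit at block 1500 means the next offset
	 * worth probing is 923 + 1018 = 1941 */
	printf("%llu\n", next_chunk_start(1500, 923, 1018));
	return 0;
}
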
680 * Offset[0] will have raw inode offset.
683 int offset[4], unsigned int noffset[4]) in get_node_path()
696 offset[n] = block; in get_node_path()
699 block -= direct_index; in get_node_path()
701 offset[n++] = NODE_DIR1_BLOCK; in get_node_path()
703 offset[n] = block; in get_node_path()
707 block -= direct_blks; in get_node_path()
709 offset[n++] = NODE_DIR2_BLOCK; in get_node_path()
711 offset[n] = block; in get_node_path()
715 block -= direct_blks; in get_node_path()
717 offset[n++] = NODE_IND1_BLOCK; in get_node_path()
719 offset[n++] = block / direct_blks; in get_node_path()
720 noffset[n] = 4 + offset[n - 1]; in get_node_path()
721 offset[n] = block % direct_blks; in get_node_path()
725 block -= indirect_blks; in get_node_path()
727 offset[n++] = NODE_IND2_BLOCK; in get_node_path()
729 offset[n++] = block / direct_blks; in get_node_path()
730 noffset[n] = 5 + dptrs_per_blk + offset[n - 1]; in get_node_path()
731 offset[n] = block % direct_blks; in get_node_path()
735 block -= indirect_blks; in get_node_path()
737 offset[n++] = NODE_DIND_BLOCK; in get_node_path()
739 offset[n++] = block / indirect_blks; in get_node_path()
741 offset[n - 1] * (dptrs_per_blk + 1); in get_node_path()
742 offset[n++] = (block / direct_blks) % dptrs_per_blk; in get_node_path()
744 offset[n - 2] * (dptrs_per_blk + 1) + in get_node_path()
745 offset[n - 1]; in get_node_path()
746 offset[n] = block % direct_blks; in get_node_path()
750 return -E2BIG; in get_node_path()
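
get_node_path() maps a logical block index onto a path of offsets through the inode's direct pointers, two direct node blocks, two indirect blocks, and one double-indirect block. The standalone sketch below reproduces that arithmetic with the common default sizes (923 pointers in the inode, 1018 per node block); the constants, the helper name, and the flat slot numbering in offset[0] are illustrative assumptions, not the NODE_DIR1_BLOCK.. constants the real code uses:

/* Example-number reimplementation of the branch arithmetic above. */
#include <stdio.h>

#define DIRECT_INDEX	923UL			/* data pointers held in the inode */
#define DIRECT_BLKS	1018UL			/* data pointers per direct node */
#define NIDS_PER_BLK	1018UL			/* node ids per indirect node */
#define INDIRECT_BLKS	(DIRECT_BLKS * NIDS_PER_BLK)

/* Returns the tree depth (0..3, or -1 if out of range) and fills offset[]:
 * offset[0] selects one of the inode's five node slots (0-1 direct, 2-3
 * indirect, 4 double indirect), deeper entries index into each node block. */
static int node_path(unsigned long block, unsigned long offset[4])
{
	int n = 0;

	if (block < DIRECT_INDEX) {
		offset[n] = block;			/* data pointer in the inode */
		return 0;
	}
	block -= DIRECT_INDEX;
	if (block < 2 * DIRECT_BLKS) {			/* two direct node blocks */
		offset[n++] = block / DIRECT_BLKS;
		offset[n] = block % DIRECT_BLKS;
		return 1;
	}
	block -= 2 * DIRECT_BLKS;
	if (block < 2 * INDIRECT_BLKS) {		/* two indirect node blocks */
		offset[n++] = 2 + block / INDIRECT_BLKS;
		offset[n++] = (block % INDIRECT_BLKS) / DIRECT_BLKS;
		offset[n] = block % DIRECT_BLKS;
		return 2;
	}
	block -= 2 * INDIRECT_BLKS;
	if (block < INDIRECT_BLKS * NIDS_PER_BLK) {	/* one double-indirect block */
		offset[n++] = 4;
		offset[n++] = block / INDIRECT_BLKS;
		offset[n++] = (block / DIRECT_BLKS) % NIDS_PER_BLK;
		offset[n] = block % DIRECT_BLKS;
		return 3;
	}
	return -1;					/* beyond the maximum file size */
}

int main(void)
{
	unsigned long off[4] = { 0 };
	int level = node_path(5000000UL, off);

	printf("level %d, path %lu/%lu/%lu/%lu\n",
	       level, off[0], off[1], off[2], off[3]);
	return 0;
}
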
763 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_get_dnode_of_data()
764 struct page *npage[4]; in f2fs_get_dnode_of_data()
765 struct page *parent = NULL; in f2fs_get_dnode_of_data()
766 int offset[4]; in f2fs_get_dnode_of_data() local
772 level = get_node_path(dn->inode, index, offset, noffset); in f2fs_get_dnode_of_data()
776 nids[0] = dn->inode->i_ino; in f2fs_get_dnode_of_data()
777 npage[0] = dn->inode_page; in f2fs_get_dnode_of_data()
786 if (f2fs_has_inline_data(dn->inode) && index) { in f2fs_get_dnode_of_data()
787 err = -ENOENT; in f2fs_get_dnode_of_data()
794 nids[1] = get_nid(parent, offset[0], true); in f2fs_get_dnode_of_data()
795 dn->inode_page = npage[0]; in f2fs_get_dnode_of_data()
796 dn->inode_page_locked = true; in f2fs_get_dnode_of_data()
805 err = -ENOSPC; in f2fs_get_dnode_of_data()
809 dn->nid = nids[i]; in f2fs_get_dnode_of_data()
817 set_nid(parent, offset[i - 1], nids[i], i == 1); in f2fs_get_dnode_of_data()
821 npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]); in f2fs_get_dnode_of_data()
829 dn->inode_page_locked = false; in f2fs_get_dnode_of_data()
845 nids[i + 1] = get_nid(parent, offset[i], false); in f2fs_get_dnode_of_data()
848 dn->nid = nids[level]; in f2fs_get_dnode_of_data()
849 dn->ofs_in_node = offset[level]; in f2fs_get_dnode_of_data()
850 dn->node_page = npage[level]; in f2fs_get_dnode_of_data()
851 dn->data_blkaddr = f2fs_data_blkaddr(dn); in f2fs_get_dnode_of_data()
853 if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) && in f2fs_get_dnode_of_data()
855 unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size; in f2fs_get_dnode_of_data()
856 unsigned int ofs_in_node = dn->ofs_in_node; in f2fs_get_dnode_of_data()
871 blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node); in f2fs_get_dnode_of_data()
873 blkaddr = data_blkaddr(dn->inode, dn->node_page, in f2fs_get_dnode_of_data()
876 f2fs_update_read_extent_tree_range_compressed(dn->inode, in f2fs_get_dnode_of_data()
887 dn->inode_page = NULL; in f2fs_get_dnode_of_data()
888 dn->node_page = NULL; in f2fs_get_dnode_of_data()
889 if (err == -ENOENT) { in f2fs_get_dnode_of_data()
890 dn->cur_level = i; in f2fs_get_dnode_of_data()
891 dn->max_level = level; in f2fs_get_dnode_of_data()
892 dn->ofs_in_node = offset[level]; in f2fs_get_dnode_of_data()
899 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in truncate_node()
904 err = f2fs_get_node_info(sbi, dn->nid, &ni, false); in truncate_node()
910 dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino); in truncate_node()
913 if (dn->nid == dn->inode->i_ino) { in truncate_node()
914 f2fs_remove_orphan_inode(sbi, dn->nid); in truncate_node()
916 f2fs_inode_synced(dn->inode); in truncate_node()
919 clear_node_page_dirty(dn->node_page); in truncate_node()
922 index = page_folio(dn->node_page)->index; in truncate_node()
923 f2fs_put_page(dn->node_page, 1); in truncate_node()
928 dn->node_page = NULL; in truncate_node()
929 trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr); in truncate_node()
936 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in truncate_dnode()
937 struct page *page; in truncate_dnode() local
940 if (dn->nid == 0) in truncate_dnode()
944 page = f2fs_get_node_page(sbi, dn->nid); in truncate_dnode()
945 if (PTR_ERR(page) == -ENOENT) in truncate_dnode()
947 else if (IS_ERR(page)) in truncate_dnode()
948 return PTR_ERR(page); in truncate_dnode()
950 if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) { in truncate_dnode()
952 dn->inode->i_ino, dn->nid, ino_of_node(page)); in truncate_dnode()
955 f2fs_put_page(page, 1); in truncate_dnode()
956 return -EFSCORRUPTED; in truncate_dnode()
960 dn->node_page = page; in truncate_dnode()
961 dn->ofs_in_node = 0; in truncate_dnode()
962 f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode)); in truncate_dnode()
965 f2fs_put_page(page, 1); in truncate_dnode()
976 struct page *page; in truncate_nodes() local
983 if (dn->nid == 0) in truncate_nodes()
986 trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr); in truncate_nodes()
988 page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid); in truncate_nodes()
989 if (IS_ERR(page)) { in truncate_nodes()
990 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page)); in truncate_nodes()
991 return PTR_ERR(page); in truncate_nodes()
994 f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK); in truncate_nodes()
996 rn = F2FS_NODE(page); in truncate_nodes()
999 child_nid = le32_to_cpu(rn->in.nid[i]); in truncate_nodes()
1006 if (set_nid(page, i, 0, false)) in truncate_nodes()
1007 dn->node_changed = true; in truncate_nodes()
1012 child_nid = le32_to_cpu(rn->in.nid[i]); in truncate_nodes()
1018 ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1); in truncate_nodes()
1020 if (set_nid(page, i, 0, false)) in truncate_nodes()
1021 dn->node_changed = true; in truncate_nodes()
1023 } else if (ret < 0 && ret != -ENOENT) { in truncate_nodes()
1032 dn->node_page = page; in truncate_nodes()
1038 f2fs_put_page(page, 1); in truncate_nodes()
1040 trace_f2fs_truncate_nodes_exit(dn->inode, freed); in truncate_nodes()
1044 f2fs_put_page(page, 1); in truncate_nodes()
1045 trace_f2fs_truncate_nodes_exit(dn->inode, ret); in truncate_nodes()
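
truncate_dnode()/truncate_nodes() above free a subtree depth-first: a depth-1 node drops its data and is freed, deeper nodes recurse into every child slot first, and each level reports how many node blocks it released. A toy recursion over an in-memory tree showing that shape; the slot count and names are made up for the demo:

/* Depth-first truncation of a toy node tree; returns how many nodes were freed. */
#include <stdio.h>
#include <stdlib.h>

#define SLOTS 4				/* stand-in for NIDS_PER_BLOCK, tiny for the demo */

struct tnode {
	struct tnode *child[SLOTS];	/* NULL plays the role of nid 0 (a hole) */
};

static int truncate_tree(struct tnode *n, int depth)
{
	int freed = 0;

	if (!n)
		return 0;		/* nothing allocated under this slot */
	if (depth > 1)
		for (int i = 0; i < SLOTS; i++)
			freed += truncate_tree(n->child[i], depth - 1);
	free(n);			/* children are gone, drop this node too */
	return freed + 1;
}

int main(void)
{
	struct tnode *ind = calloc(1, sizeof(*ind));

	ind->child[0] = calloc(1, sizeof(*ind));
	ind->child[2] = calloc(1, sizeof(*ind));
	printf("freed %d node blocks\n", truncate_tree(ind, 2));	/* prints 3 */
	return 0;
}
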
1050 struct f2fs_inode *ri, int *offset, int depth) in truncate_partial_nodes() argument
1052 struct page *pages[2]; in truncate_partial_nodes()
1057 int idx = depth - 2; in truncate_partial_nodes()
1059 nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); in truncate_partial_nodes()
1066 pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]); in truncate_partial_nodes()
1069 idx = i - 1; in truncate_partial_nodes()
1072 nid[i + 1] = get_nid(pages[i], offset[i + 1], false); in truncate_partial_nodes()
1075 f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK); in truncate_partial_nodes()
1078 for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) { in truncate_partial_nodes()
1082 dn->nid = child_nid; in truncate_partial_nodes()
1087 dn->node_changed = true; in truncate_partial_nodes()
1090 if (offset[idx + 1] == 0) { in truncate_partial_nodes()
1091 dn->node_page = pages[idx]; in truncate_partial_nodes()
1092 dn->nid = nid[idx]; in truncate_partial_nodes()
1099 offset[idx]++; in truncate_partial_nodes()
1100 offset[idx + 1] = 0; in truncate_partial_nodes()
1101 idx--; in truncate_partial_nodes()
1103 for (i = idx; i >= 0; i--) in truncate_partial_nodes()
1106 trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err); in truncate_partial_nodes()
1118 int level, offset[4], noffset[4]; in f2fs_truncate_inode_blocks() local
1122 struct page *page; in f2fs_truncate_inode_blocks() local
1126 level = get_node_path(inode, from, offset, noffset); in f2fs_truncate_inode_blocks()
1132 page = f2fs_get_node_page(sbi, inode->i_ino); in f2fs_truncate_inode_blocks()
1133 if (IS_ERR(page)) { in f2fs_truncate_inode_blocks()
1134 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page)); in f2fs_truncate_inode_blocks()
1135 return PTR_ERR(page); in f2fs_truncate_inode_blocks()
1138 set_new_dnode(&dn, inode, page, NULL, 0); in f2fs_truncate_inode_blocks()
1139 unlock_page(page); in f2fs_truncate_inode_blocks()
1141 ri = F2FS_INODE(page); in f2fs_truncate_inode_blocks()
1149 if (!offset[level - 1]) in f2fs_truncate_inode_blocks()
1151 err = truncate_partial_nodes(&dn, ri, offset, level); in f2fs_truncate_inode_blocks()
1152 if (err < 0 && err != -ENOENT) in f2fs_truncate_inode_blocks()
1158 if (!offset[level - 1]) in f2fs_truncate_inode_blocks()
1160 err = truncate_partial_nodes(&dn, ri, offset, level); in f2fs_truncate_inode_blocks()
1161 if (err < 0 && err != -ENOENT) in f2fs_truncate_inode_blocks()
1170 dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]); in f2fs_truncate_inode_blocks()
1171 switch (offset[0]) { in f2fs_truncate_inode_blocks()
1179 err = truncate_nodes(&dn, nofs, offset[1], 2); in f2fs_truncate_inode_blocks()
1183 err = truncate_nodes(&dn, nofs, offset[1], 3); in f2fs_truncate_inode_blocks()
1190 if (err == -ENOENT) { in f2fs_truncate_inode_blocks()
1191 set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK); in f2fs_truncate_inode_blocks()
1195 "offset[0]:%d, offset[1]:%d, nofs:%d", in f2fs_truncate_inode_blocks()
1196 inode->i_ino, dn.nid, offset[0], in f2fs_truncate_inode_blocks()
1197 offset[1], nofs); in f2fs_truncate_inode_blocks()
1202 if (offset[1] == 0 && in f2fs_truncate_inode_blocks()
1203 ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) { in f2fs_truncate_inode_blocks()
1204 lock_page(page); in f2fs_truncate_inode_blocks()
1205 BUG_ON(page->mapping != NODE_MAPPING(sbi)); in f2fs_truncate_inode_blocks()
1206 f2fs_wait_on_page_writeback(page, NODE, true, true); in f2fs_truncate_inode_blocks()
1207 ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0; in f2fs_truncate_inode_blocks()
1208 set_page_dirty(page); in f2fs_truncate_inode_blocks()
1209 unlock_page(page); in f2fs_truncate_inode_blocks()
1211 offset[1] = 0; in f2fs_truncate_inode_blocks()
1212 offset[0]++; in f2fs_truncate_inode_blocks()
1216 f2fs_put_page(page, 0); in f2fs_truncate_inode_blocks()
1221 /* caller must lock inode page */
1225 nid_t nid = F2FS_I(inode)->i_xattr_nid; in f2fs_truncate_xattr_node()
1227 struct page *npage; in f2fs_truncate_xattr_node()
1258 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); in f2fs_remove_inode_page()
1270 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || in f2fs_remove_inode_page()
1271 S_ISLNK(inode->i_mode)) in f2fs_remove_inode_page()
1277 return -EIO; in f2fs_remove_inode_page()
1280 if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) { in f2fs_remove_inode_page()
1283 inode->i_ino, (unsigned long long)inode->i_blocks); in f2fs_remove_inode_page()
1296 struct page *f2fs_new_inode_page(struct inode *inode) in f2fs_new_inode_page()
1300 /* allocate inode page for new inode */ in f2fs_new_inode_page()
1301 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); in f2fs_new_inode_page()
1303 /* caller should f2fs_put_page(page, 1); */ in f2fs_new_inode_page()
1307 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs) in f2fs_new_node_page()
1309 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); in f2fs_new_node_page()
1311 struct page *page; in f2fs_new_node_page() local
1314 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) in f2fs_new_node_page()
1315 return ERR_PTR(-EPERM); in f2fs_new_node_page()
1317 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false); in f2fs_new_node_page()
1318 if (!page) in f2fs_new_node_page()
1319 return ERR_PTR(-ENOMEM); in f2fs_new_node_page()
1321 if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs)))) in f2fs_new_node_page()
1325 err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false); in f2fs_new_node_page()
1327 dec_valid_node_count(sbi, dn->inode, !ofs); in f2fs_new_node_page()
1331 err = -EFSCORRUPTED; in f2fs_new_node_page()
1332 dec_valid_node_count(sbi, dn->inode, !ofs); in f2fs_new_node_page()
1338 new_ni.nid = dn->nid; in f2fs_new_node_page()
1339 new_ni.ino = dn->inode->i_ino; in f2fs_new_node_page()
1345 f2fs_wait_on_page_writeback(page, NODE, true, true); in f2fs_new_node_page()
1346 fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true); in f2fs_new_node_page()
1347 set_cold_node(page, S_ISDIR(dn->inode->i_mode)); in f2fs_new_node_page()
1348 if (!PageUptodate(page)) in f2fs_new_node_page()
1349 SetPageUptodate(page); in f2fs_new_node_page()
1350 if (set_page_dirty(page)) in f2fs_new_node_page()
1351 dn->node_changed = true; in f2fs_new_node_page()
1354 f2fs_i_xnid_write(dn->inode, dn->nid); in f2fs_new_node_page()
1358 return page; in f2fs_new_node_page()
1360 clear_node_page_dirty(page); in f2fs_new_node_page()
1361 f2fs_put_page(page, 1); in f2fs_new_node_page()
1367 * 0: f2fs_put_page(page, 0)
1368 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1370 static int read_node_page(struct page *page, blk_opf_t op_flags) in read_node_page() argument
1372 struct folio *folio = page_folio(page); in read_node_page()
1373 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in read_node_page()
1380 .page = page, in read_node_page()
1386 if (!f2fs_inode_chksum_verify(sbi, page)) { in read_node_page()
1388 return -EFSBADCRC; in read_node_page()
1393 err = f2fs_get_node_info(sbi, folio->index, &ni, false); in read_node_page()
1400 return -ENOENT; in read_node_page()
1414 * Readahead a node page
1418 struct page *apage; in f2fs_ra_node_page()
1426 apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid); in f2fs_ra_node_page()
1438 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, in __get_node_page()
1439 struct page *parent, int start) in __get_node_page()
1441 struct page *page; in __get_node_page() local
1445 return ERR_PTR(-ENOENT); in __get_node_page()
1447 return ERR_PTR(-EINVAL); in __get_node_page()
1449 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); in __get_node_page()
1450 if (!page) in __get_node_page()
1451 return ERR_PTR(-ENOMEM); in __get_node_page()
1453 err = read_node_page(page, 0); in __get_node_page()
1464 lock_page(page); in __get_node_page()
1466 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { in __get_node_page()
1467 f2fs_put_page(page, 1); in __get_node_page()
1471 if (unlikely(!PageUptodate(page))) { in __get_node_page()
1472 err = -EIO; in __get_node_page()
1476 if (!f2fs_inode_chksum_verify(sbi, page)) { in __get_node_page()
1477 err = -EFSBADCRC; in __get_node_page()
1481 if (likely(nid == nid_of_node(page))) in __get_node_page()
1482 return page; in __get_node_page()
1485 nid, nid_of_node(page), ino_of_node(page), in __get_node_page()
1486 ofs_of_node(page), cpver_of_node(page), in __get_node_page()
1487 next_blkaddr_of_node(page)); in __get_node_page()
1490 err = -EFSCORRUPTED; in __get_node_page()
1492 ClearPageUptodate(page); in __get_node_page()
1495 if (err != -ENOENT) in __get_node_page()
1496 f2fs_handle_page_eio(sbi, page_folio(page), NODE); in __get_node_page()
1497 f2fs_put_page(page, 1); in __get_node_page()
1501 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid) in f2fs_get_node_page()
1506 struct page *f2fs_get_node_page_ra(struct page *parent, int start) in f2fs_get_node_page_ra()
1517 struct page *page; in flush_inline_data() local
1521 inode = ilookup(sbi->sb, ino); in flush_inline_data()
1525 page = f2fs_pagecache_get_page(inode->i_mapping, 0, in flush_inline_data()
1527 if (!page) in flush_inline_data()
1530 if (!PageUptodate(page)) in flush_inline_data()
1533 if (!PageDirty(page)) in flush_inline_data()
1536 if (!clear_page_dirty_for_io(page)) in flush_inline_data()
1539 ret = f2fs_write_inline_data(inode, page_folio(page)); in flush_inline_data()
1543 set_page_dirty(page); in flush_inline_data()
1545 f2fs_put_page(page, 1); in flush_inline_data()
1550 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino) in last_fsync_dnode()
1554 struct page *last_page = NULL; in last_fsync_dnode()
1561 (pgoff_t)-1, PAGECACHE_TAG_DIRTY, in last_fsync_dnode()
1566 struct page *page = &fbatch.folios[i]->page; in last_fsync_dnode() local
1571 return ERR_PTR(-EIO); in last_fsync_dnode()
1574 if (!IS_DNODE(page) || !is_cold_node(page)) in last_fsync_dnode()
1576 if (ino_of_node(page) != ino) in last_fsync_dnode()
1579 lock_page(page); in last_fsync_dnode()
1581 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { in last_fsync_dnode()
1583 unlock_page(page); in last_fsync_dnode()
1586 if (ino_of_node(page) != ino) in last_fsync_dnode()
1589 if (!PageDirty(page)) { in last_fsync_dnode()
1597 get_page(page); in last_fsync_dnode()
1598 last_page = page; in last_fsync_dnode()
1599 unlock_page(page); in last_fsync_dnode()
1607 static int __write_node_page(struct page *page, bool atomic, bool *submitted, in __write_node_page() argument
1611 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in __write_node_page()
1612 struct folio *folio = page_folio(page); in __write_node_page()
1617 .ino = ino_of_node(page), in __write_node_page()
1621 .page = page, in __write_node_page()
1632 /* keep node pages in remount-ro mode */ in __write_node_page()
1645 wbc->sync_mode == WB_SYNC_NONE && in __write_node_page()
1646 IS_DNODE(page) && is_cold_node(page)) in __write_node_page()
1649 /* get old block addr of this node page */ in __write_node_page()
1650 nid = nid_of_node(page); in __write_node_page()
1651 f2fs_bug_on(sbi, folio->index != nid); in __write_node_page()
1656 if (wbc->for_reclaim) { in __write_node_page()
1657 if (!f2fs_down_read_trylock(&sbi->node_write)) in __write_node_page()
1660 f2fs_down_read(&sbi->node_write); in __write_node_page()
1663 /* This page is already truncated */ in __write_node_page()
1667 f2fs_up_read(&sbi->node_write); in __write_node_page()
1675 f2fs_up_read(&sbi->node_write); in __write_node_page()
1683 if (f2fs_in_warm_node_list(sbi, page)) { in __write_node_page()
1684 seq = f2fs_add_fsync_node_entry(sbi, page); in __write_node_page()
1693 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); in __write_node_page()
1695 f2fs_up_read(&sbi->node_write); in __write_node_page()
1697 if (wbc->for_reclaim) { in __write_node_page()
1698 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE); in __write_node_page()
1720 int f2fs_move_node_page(struct page *node_page, int gc_type) in f2fs_move_node_page()
1736 err = -EAGAIN; in f2fs_move_node_page()
1742 err = -EAGAIN; in f2fs_move_node_page()
1747 /* set page dirty and write it */ in f2fs_move_node_page()
1758 static int f2fs_write_node_page(struct page *page, in f2fs_write_node_page() argument
1761 return __write_node_page(page, false, NULL, wbc, false, in f2fs_write_node_page()
1772 struct page *last_page = NULL; in f2fs_fsync_node_pages()
1774 nid_t ino = inode->i_ino; in f2fs_fsync_node_pages()
1788 (pgoff_t)-1, PAGECACHE_TAG_DIRTY, in f2fs_fsync_node_pages()
1793 struct page *page = &fbatch.folios[i]->page; in f2fs_fsync_node_pages() local
1799 ret = -EIO; in f2fs_fsync_node_pages()
1803 if (!IS_DNODE(page) || !is_cold_node(page)) in f2fs_fsync_node_pages()
1805 if (ino_of_node(page) != ino) in f2fs_fsync_node_pages()
1808 lock_page(page); in f2fs_fsync_node_pages()
1810 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { in f2fs_fsync_node_pages()
1812 unlock_page(page); in f2fs_fsync_node_pages()
1815 if (ino_of_node(page) != ino) in f2fs_fsync_node_pages()
1818 if (!PageDirty(page) && page != last_page) { in f2fs_fsync_node_pages()
1823 f2fs_wait_on_page_writeback(page, NODE, true, true); in f2fs_fsync_node_pages()
1825 set_fsync_mark(page, 0); in f2fs_fsync_node_pages()
1826 set_dentry_mark(page, 0); in f2fs_fsync_node_pages()
1828 if (!atomic || page == last_page) { in f2fs_fsync_node_pages()
1829 set_fsync_mark(page, 1); in f2fs_fsync_node_pages()
1830 percpu_counter_inc(&sbi->rf_node_block_count); in f2fs_fsync_node_pages()
1831 if (IS_INODE(page)) { in f2fs_fsync_node_pages()
1834 f2fs_update_inode(inode, page); in f2fs_fsync_node_pages()
1835 set_dentry_mark(page, in f2fs_fsync_node_pages()
1839 if (!PageDirty(page)) in f2fs_fsync_node_pages()
1840 set_page_dirty(page); in f2fs_fsync_node_pages()
1843 if (!clear_page_dirty_for_io(page)) in f2fs_fsync_node_pages()
1846 ret = __write_node_page(page, atomic && in f2fs_fsync_node_pages()
1847 page == last_page, in f2fs_fsync_node_pages()
1851 unlock_page(page); in f2fs_fsync_node_pages()
1858 if (page == last_page) { in f2fs_fsync_node_pages()
1859 f2fs_put_page(page, 0); in f2fs_fsync_node_pages()
1872 ino, page_folio(last_page)->index); in f2fs_fsync_node_pages()
1882 return ret ? -EIO : 0; in f2fs_fsync_node_pages()
1890 if (inode->i_ino != ino) in f2fs_match_ino()
1896 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_match_ino()
1897 clean = list_empty(&F2FS_I(inode)->gdirty_list); in f2fs_match_ino()
1898 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_match_ino()
1909 static bool flush_dirty_inode(struct page *page) in flush_dirty_inode() argument
1911 struct f2fs_sb_info *sbi = F2FS_P_SB(page); in flush_dirty_inode()
1913 nid_t ino = ino_of_node(page); in flush_dirty_inode()
1915 inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL); in flush_dirty_inode()
1919 f2fs_update_inode(inode, page); in flush_dirty_inode()
1920 unlock_page(page); in flush_dirty_inode()
1935 (pgoff_t)-1, PAGECACHE_TAG_DIRTY, in f2fs_flush_inline_data()
1940 struct page *page = &fbatch.folios[i]->page; in f2fs_flush_inline_data() local
1942 if (!IS_INODE(page)) in f2fs_flush_inline_data()
1945 lock_page(page); in f2fs_flush_inline_data()
1947 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { in f2fs_flush_inline_data()
1949 unlock_page(page); in f2fs_flush_inline_data()
1953 if (!PageDirty(page)) { in f2fs_flush_inline_data()
1959 if (page_private_inline(page)) { in f2fs_flush_inline_data()
1960 clear_page_private_inline(page); in f2fs_flush_inline_data()
1961 unlock_page(page); in f2fs_flush_inline_data()
1962 flush_inline_data(sbi, ino_of_node(page)); in f2fs_flush_inline_data()
1965 unlock_page(page); in f2fs_flush_inline_data()
1989 &index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY, in f2fs_sync_node_pages()
1994 struct page *page = &fbatch.folios[i]->page; in f2fs_sync_node_pages() local
1998 if (atomic_read(&sbi->wb_sync_req[NODE]) && in f2fs_sync_node_pages()
1999 wbc->sync_mode == WB_SYNC_NONE) { in f2fs_sync_node_pages()
2010 if (step == 0 && IS_DNODE(page)) in f2fs_sync_node_pages()
2012 if (step == 1 && (!IS_DNODE(page) || in f2fs_sync_node_pages()
2013 is_cold_node(page))) in f2fs_sync_node_pages()
2015 if (step == 2 && (!IS_DNODE(page) || in f2fs_sync_node_pages()
2016 !is_cold_node(page))) in f2fs_sync_node_pages()
2019 if (wbc->sync_mode == WB_SYNC_ALL) in f2fs_sync_node_pages()
2020 lock_page(page); in f2fs_sync_node_pages()
2021 else if (!trylock_page(page)) in f2fs_sync_node_pages()
2024 if (unlikely(page->mapping != NODE_MAPPING(sbi))) { in f2fs_sync_node_pages()
2026 unlock_page(page); in f2fs_sync_node_pages()
2030 if (!PageDirty(page)) { in f2fs_sync_node_pages()
2040 if (page_private_inline(page)) { in f2fs_sync_node_pages()
2041 clear_page_private_inline(page); in f2fs_sync_node_pages()
2042 unlock_page(page); in f2fs_sync_node_pages()
2043 flush_inline_data(sbi, ino_of_node(page)); in f2fs_sync_node_pages()
2048 if (IS_INODE(page) && flush_dirty_inode(page)) in f2fs_sync_node_pages()
2051 f2fs_wait_on_page_writeback(page, NODE, true, true); in f2fs_sync_node_pages()
2053 if (!clear_page_dirty_for_io(page)) in f2fs_sync_node_pages()
2056 set_fsync_mark(page, 0); in f2fs_sync_node_pages()
2057 set_dentry_mark(page, 0); in f2fs_sync_node_pages()
2059 ret = __write_node_page(page, false, &submitted, in f2fs_sync_node_pages()
2062 unlock_page(page); in f2fs_sync_node_pages()
2066 if (--wbc->nr_to_write == 0) in f2fs_sync_node_pages()
2072 if (wbc->nr_to_write == 0) { in f2fs_sync_node_pages()
2080 wbc->sync_mode == WB_SYNC_NONE && step == 1) in f2fs_sync_node_pages()
2090 return -EIO; in f2fs_sync_node_pages()
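
The step checks in f2fs_sync_node_pages() split one walk over dirty node pages into three passes: indirect node pages first, then dnodes that are not cold (dentry dnodes), then cold dnodes (regular-file dnodes). A small predicate sketch of that classification, with illustrative field names standing in for IS_DNODE()/is_cold_node():

/* Which pass a dirty node page belongs to, per the step checks above. */
#include <stdbool.h>
#include <stdio.h>

struct node_pg {
	bool is_dnode;		/* inode/direct node, as opposed to an indirect node */
	bool is_cold;		/* cold dnode == regular file, hot dnode == dentry */
};

static bool belongs_to_step(const struct node_pg *p, int step)
{
	switch (step) {
	case 0: return !p->is_dnode;			/* indirect nodes first */
	case 1: return p->is_dnode && !p->is_cold;	/* then dentry dnodes */
	case 2: return p->is_dnode && p->is_cold;	/* file dnodes last */
	}
	return false;
}

int main(void)
{
	struct node_pg file_dnode = { .is_dnode = true, .is_cold = true };

	for (int step = 0; step < 3; step++)
		printf("step %d writes the file dnode: %d\n",
		       step, belongs_to_step(&file_dnode, step));
	return 0;
}
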
2098 struct page *page; in f2fs_wait_on_node_pages_writeback() local
2099 struct list_head *head = &sbi->fsync_node_list; in f2fs_wait_on_node_pages_writeback()
2104 spin_lock_irqsave(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2106 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2110 if (fn->seq_id > seq_id) { in f2fs_wait_on_node_pages_writeback()
2111 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2114 cur_seq_id = fn->seq_id; in f2fs_wait_on_node_pages_writeback()
2115 page = fn->page; in f2fs_wait_on_node_pages_writeback()
2116 get_page(page); in f2fs_wait_on_node_pages_writeback()
2117 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags); in f2fs_wait_on_node_pages_writeback()
2119 f2fs_wait_on_page_writeback(page, NODE, true, false); in f2fs_wait_on_node_pages_writeback()
2121 put_page(page); in f2fs_wait_on_node_pages_writeback()
2141 if (wbc->sync_mode != WB_SYNC_ALL && in f2fs_write_node_pages()
2146 if (wbc->sync_mode == WB_SYNC_ALL) in f2fs_write_node_pages()
2147 atomic_inc(&sbi->wb_sync_req[NODE]); in f2fs_write_node_pages()
2148 else if (atomic_read(&sbi->wb_sync_req[NODE])) { in f2fs_write_node_pages()
2150 if (current->plug) in f2fs_write_node_pages()
2151 blk_finish_plug(current->plug); in f2fs_write_node_pages()
2155 trace_f2fs_writepages(mapping->host, wbc, NODE); in f2fs_write_node_pages()
2161 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff); in f2fs_write_node_pages()
2163 if (wbc->sync_mode == WB_SYNC_ALL) in f2fs_write_node_pages()
2164 atomic_dec(&sbi->wb_sync_req[NODE]); in f2fs_write_node_pages()
2168 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES); in f2fs_write_node_pages()
2169 trace_f2fs_writepages(mapping->host, wbc, NODE); in f2fs_write_node_pages()
2181 if (IS_INODE(&folio->page)) in f2fs_dirty_node_folio()
2182 f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page); in f2fs_dirty_node_folio()
2186 set_page_private_reference(&folio->page); in f2fs_dirty_node_folio()
2207 return radix_tree_lookup(&nm_i->free_nid_root, n); in __lookup_free_nid_list()
2214 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); in __insert_free_nid()
2219 nm_i->nid_cnt[FREE_NID]++; in __insert_free_nid()
2220 list_add_tail(&i->list, &nm_i->free_nid_list); in __insert_free_nid()
2229 f2fs_bug_on(sbi, state != i->state); in __remove_free_nid()
2230 nm_i->nid_cnt[state]--; in __remove_free_nid()
2232 list_del(&i->list); in __remove_free_nid()
2233 radix_tree_delete(&nm_i->free_nid_root, i->nid); in __remove_free_nid()
2241 f2fs_bug_on(sbi, org_state != i->state); in __move_free_nid()
2242 i->state = dst_state; in __move_free_nid()
2243 nm_i->nid_cnt[org_state]--; in __move_free_nid()
2244 nm_i->nid_cnt[dst_state]++; in __move_free_nid()
2248 list_del(&i->list); in __move_free_nid()
2251 list_add_tail(&i->list, &nm_i->free_nid_list); in __move_free_nid()
2264 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_nat_bitmap_enabled()
2265 for (i = 0; i < nm_i->nat_blocks; i++) { in f2fs_nat_bitmap_enabled()
2266 if (!test_bit_le(i, nm_i->nat_block_bitmap)) { in f2fs_nat_bitmap_enabled()
2271 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_nat_bitmap_enabled()
2281 unsigned int nid_ofs = nid - START_NID(nid); in update_free_nid_bitmap()
2283 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) in update_free_nid_bitmap()
2287 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) in update_free_nid_bitmap()
2289 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); in update_free_nid_bitmap()
2290 nm_i->free_nid_count[nat_ofs]++; in update_free_nid_bitmap()
2292 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) in update_free_nid_bitmap()
2294 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); in update_free_nid_bitmap()
2296 nm_i->free_nid_count[nat_ofs]--; in update_free_nid_bitmap()
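
update_free_nid_bitmap() keeps one bit per nid inside each NAT block plus a per-block free count, and only touches them when the bit actually changes. A userspace sketch of that bookkeeping; the 455-entries-per-block figure is the usual value for 4 KiB blocks and, like the rest of the names here, is an illustrative assumption:

/* One bit per nid in a NAT block plus a running free count. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NAT_ENTRIES_PER_BLK 455		/* assumed entries per 4 KiB NAT block */

struct nat_block_bits {
	unsigned long bits[(NAT_ENTRIES_PER_BLK + 63) / 64];
	unsigned int free_count;
};

static bool nid_bit_set(const struct nat_block_bits *b, unsigned int ofs)
{
	return (b->bits[ofs / 64] >> (ofs % 64)) & 1;
}

static void mark_nid(struct nat_block_bits *b, unsigned int ofs, bool set_free)
{
	if (set_free) {
		if (nid_bit_set(b, ofs))
			return;			/* already recorded as free */
		b->bits[ofs / 64] |= 1UL << (ofs % 64);
		b->free_count++;
	} else {
		if (!nid_bit_set(b, ofs))
			return;			/* already recorded as in use */
		b->bits[ofs / 64] &= ~(1UL << (ofs % 64));
		b->free_count--;
	}
}

int main(void)
{
	struct nat_block_bits blk;

	memset(&blk, 0, sizeof(blk));
	mark_nid(&blk, 7, true);
	mark_nid(&blk, 7, true);	/* redundant, count stays at 1 */
	mark_nid(&blk, 7, false);
	printf("free nids in block: %u\n", blk.free_count);	/* prints 0 */
	return 0;
}
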
2307 int err = -EINVAL; in add_free_nid()
2318 i->nid = nid; in add_free_nid()
2319 i->state = FREE_NID; in add_free_nid()
2323 spin_lock(&nm_i->nid_list_lock); in add_free_nid()
2328 * - f2fs_create in add_free_nid()
2329 * - f2fs_new_inode in add_free_nid()
2330 * - f2fs_alloc_nid in add_free_nid()
2331 * - __insert_nid_to_list(PREALLOC_NID) in add_free_nid()
2332 * - f2fs_balance_fs_bg in add_free_nid()
2333 * - f2fs_build_free_nids in add_free_nid()
2334 * - __f2fs_build_free_nids in add_free_nid()
2335 * - scan_nat_page in add_free_nid()
2336 * - add_free_nid in add_free_nid()
2337 * - __lookup_nat_cache in add_free_nid()
2338 * - f2fs_add_link in add_free_nid()
2339 * - f2fs_init_inode_metadata in add_free_nid()
2340 * - f2fs_new_inode_page in add_free_nid()
2341 * - f2fs_new_node_page in add_free_nid()
2342 * - set_node_addr in add_free_nid()
2343 * - f2fs_alloc_nid_done in add_free_nid()
2344 * - __remove_nid_from_list(PREALLOC_NID) in add_free_nid()
2345 * - __insert_nid_to_list(FREE_NID) in add_free_nid()
2354 if (e->state == FREE_NID) in add_free_nid()
2365 nm_i->available_nids++; in add_free_nid()
2367 spin_unlock(&nm_i->nid_list_lock); in add_free_nid()
2381 spin_lock(&nm_i->nid_list_lock); in remove_free_nid()
2383 if (i && i->state == FREE_NID) { in remove_free_nid()
2387 spin_unlock(&nm_i->nid_list_lock); in remove_free_nid()
2394 struct page *nat_page, nid_t start_nid) in scan_nat_page()
2402 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap); in scan_nat_page()
2407 if (unlikely(start_nid >= nm_i->max_nid)) in scan_nat_page()
2410 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); in scan_nat_page()
2413 return -EFSCORRUPTED; in scan_nat_page()
2418 spin_lock(&NM_I(sbi)->nid_list_lock); in scan_nat_page()
2420 spin_unlock(&NM_I(sbi)->nid_list_lock); in scan_nat_page()
2430 struct f2fs_journal *journal = curseg->journal; in scan_curseg_cache()
2433 down_read(&curseg->journal_rwsem); in scan_curseg_cache()
2445 up_read(&curseg->journal_rwsem); in scan_curseg_cache()
2454 f2fs_down_read(&nm_i->nat_tree_lock); in scan_free_nid_bits()
2456 for (i = 0; i < nm_i->nat_blocks; i++) { in scan_free_nid_bits()
2457 if (!test_bit_le(i, nm_i->nat_block_bitmap)) in scan_free_nid_bits()
2459 if (!nm_i->free_nid_count[i]) in scan_free_nid_bits()
2462 idx = find_next_bit_le(nm_i->free_nid_bitmap[i], in scan_free_nid_bits()
2470 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS) in scan_free_nid_bits()
2477 f2fs_up_read(&nm_i->nat_tree_lock); in scan_free_nid_bits()
2485 nid_t nid = nm_i->next_scan_nid; in __f2fs_build_free_nids()
2487 if (unlikely(nid >= nm_i->max_nid)) in __f2fs_build_free_nids()
2494 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) in __f2fs_build_free_nids()
2504 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) in __f2fs_build_free_nids()
2512 f2fs_down_read(&nm_i->nat_tree_lock); in __f2fs_build_free_nids()
2516 nm_i->nat_block_bitmap)) { in __f2fs_build_free_nids()
2517 struct page *page = get_current_nat_page(sbi, nid); in __f2fs_build_free_nids() local
2519 if (IS_ERR(page)) { in __f2fs_build_free_nids()
2520 ret = PTR_ERR(page); in __f2fs_build_free_nids()
2522 ret = scan_nat_page(sbi, page, nid); in __f2fs_build_free_nids()
2523 f2fs_put_page(page, 1); in __f2fs_build_free_nids()
2527 f2fs_up_read(&nm_i->nat_tree_lock); in __f2fs_build_free_nids()
2529 if (ret == -EFSCORRUPTED) { in __f2fs_build_free_nids()
2540 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK)); in __f2fs_build_free_nids()
2541 if (unlikely(nid >= nm_i->max_nid)) in __f2fs_build_free_nids()
2549 nm_i->next_scan_nid = nid; in __f2fs_build_free_nids()
2554 f2fs_up_read(&nm_i->nat_tree_lock); in __f2fs_build_free_nids()
2556 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), in __f2fs_build_free_nids()
2557 nm_i->ra_nid_pages, META_NAT, false); in __f2fs_build_free_nids()
2566 mutex_lock(&NM_I(sbi)->build_lock); in f2fs_build_free_nids()
2568 mutex_unlock(&NM_I(sbi)->build_lock); in f2fs_build_free_nids()
2586 spin_lock(&nm_i->nid_list_lock); in f2fs_alloc_nid()
2588 if (unlikely(nm_i->available_nids == 0)) { in f2fs_alloc_nid()
2589 spin_unlock(&nm_i->nid_list_lock); in f2fs_alloc_nid()
2594 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) { in f2fs_alloc_nid()
2595 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list)); in f2fs_alloc_nid()
2596 i = list_first_entry(&nm_i->free_nid_list, in f2fs_alloc_nid()
2598 *nid = i->nid; in f2fs_alloc_nid()
2601 nm_i->available_nids--; in f2fs_alloc_nid()
2605 spin_unlock(&nm_i->nid_list_lock); in f2fs_alloc_nid()
2608 spin_unlock(&nm_i->nid_list_lock); in f2fs_alloc_nid()
2624 spin_lock(&nm_i->nid_list_lock); in f2fs_alloc_nid_done()
2628 spin_unlock(&nm_i->nid_list_lock); in f2fs_alloc_nid_done()
2645 spin_lock(&nm_i->nid_list_lock); in f2fs_alloc_nid_failed()
2656 nm_i->available_nids++; in f2fs_alloc_nid_failed()
2660 spin_unlock(&nm_i->nid_list_lock); in f2fs_alloc_nid_failed()
2671 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) in f2fs_try_to_free_nids()
2674 if (!mutex_trylock(&nm_i->build_lock)) in f2fs_try_to_free_nids()
2677 while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) { in f2fs_try_to_free_nids()
2681 spin_lock(&nm_i->nid_list_lock); in f2fs_try_to_free_nids()
2682 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { in f2fs_try_to_free_nids()
2684 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) in f2fs_try_to_free_nids()
2688 nr_shrink--; in f2fs_try_to_free_nids()
2689 batch--; in f2fs_try_to_free_nids()
2691 spin_unlock(&nm_i->nid_list_lock); in f2fs_try_to_free_nids()
2694 mutex_unlock(&nm_i->build_lock); in f2fs_try_to_free_nids()
2696 return nr - nr_shrink; in f2fs_try_to_free_nids()
2699 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page) in f2fs_recover_inline_xattr() argument
2703 struct page *ipage; in f2fs_recover_inline_xattr()
2706 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino); in f2fs_recover_inline_xattr()
2710 ri = F2FS_INODE(page); in f2fs_recover_inline_xattr()
2711 if (ri->i_inline & F2FS_INLINE_XATTR) { in f2fs_recover_inline_xattr()
2725 src_addr = inline_xattr_addr(inode, page); in f2fs_recover_inline_xattr()
2736 int f2fs_recover_xattr_data(struct inode *inode, struct page *page) in f2fs_recover_xattr_data() argument
2739 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid; in f2fs_recover_xattr_data()
2743 struct page *xpage; in f2fs_recover_xattr_data()
2761 return -ENOSPC; in f2fs_recover_xattr_data()
2773 /* 3: update and set xattr node page dirty */ in f2fs_recover_xattr_data()
2774 if (page) { in f2fs_recover_xattr_data()
2775 memcpy(F2FS_NODE(xpage), F2FS_NODE(page), in f2fs_recover_xattr_data()
2784 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) in f2fs_recover_inode_page() argument
2787 nid_t ino = ino_of_node(page); in f2fs_recover_inode_page()
2789 struct page *ipage; in f2fs_recover_inode_page()
2797 return -EINVAL; in f2fs_recover_inode_page()
2813 src = F2FS_INODE(page); in f2fs_recover_inode_page()
2817 dst->i_size = 0; in f2fs_recover_inode_page()
2818 dst->i_blocks = cpu_to_le64(1); in f2fs_recover_inode_page()
2819 dst->i_links = cpu_to_le32(1); in f2fs_recover_inode_page()
2820 dst->i_xattr_nid = 0; in f2fs_recover_inode_page()
2821 dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR); in f2fs_recover_inode_page()
2822 if (dst->i_inline & F2FS_EXTRA_ATTR) { in f2fs_recover_inode_page()
2823 dst->i_extra_isize = src->i_extra_isize; in f2fs_recover_inode_page()
2826 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), in f2fs_recover_inode_page()
2828 dst->i_inline_xattr_size = src->i_inline_xattr_size; in f2fs_recover_inode_page()
2831 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), in f2fs_recover_inode_page()
2833 dst->i_projid = src->i_projid; in f2fs_recover_inode_page()
2836 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize), in f2fs_recover_inode_page()
2838 dst->i_crtime = src->i_crtime; in f2fs_recover_inode_page()
2839 dst->i_crtime_nsec = src->i_crtime_nsec; in f2fs_recover_inode_page()
2866 sum_entry = &sum->entries[0]; in f2fs_restore_node_summary()
2869 nrpages = bio_max_segs(last_offset - i); in f2fs_restore_node_summary()
2875 struct page *page = f2fs_get_tmp_page(sbi, idx); in f2fs_restore_node_summary() local
2877 if (IS_ERR(page)) in f2fs_restore_node_summary()
2878 return PTR_ERR(page); in f2fs_restore_node_summary()
2880 rn = F2FS_NODE(page); in f2fs_restore_node_summary()
2881 sum_entry->nid = rn->footer.nid; in f2fs_restore_node_summary()
2882 sum_entry->version = 0; in f2fs_restore_node_summary()
2883 sum_entry->ofs_in_node = 0; in f2fs_restore_node_summary()
2885 f2fs_put_page(page, 1); in f2fs_restore_node_summary()
2898 struct f2fs_journal *journal = curseg->journal; in remove_nats_in_journal()
2901 down_write(&curseg->journal_rwsem); in remove_nats_in_journal()
2925 spin_lock(&nm_i->nid_list_lock); in remove_nats_in_journal()
2926 nm_i->available_nids--; in remove_nats_in_journal()
2927 spin_unlock(&nm_i->nid_list_lock); in remove_nats_in_journal()
2932 update_nats_in_cursum(journal, -i); in remove_nats_in_journal()
2933 up_write(&curseg->journal_rwsem); in remove_nats_in_journal()
2941 if (nes->entry_cnt >= max) in __adjust_nat_entry_set()
2945 if (cur->entry_cnt >= nes->entry_cnt) { in __adjust_nat_entry_set()
2946 list_add(&nes->set_list, cur->set_list.prev); in __adjust_nat_entry_set()
2951 list_add_tail(&nes->set_list, head); in __adjust_nat_entry_set()
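
__adjust_nat_entry_set() keeps dirty NAT sets ordered by entry count, so small sets (which may still fit in the curseg journal) are flushed first, while sets at or above the per-block maximum go straight to the tail. An array-based sketch of that sorted insertion; the sizes and names are made up:

/* Ascending insertion by entry count; "full" sets are simply appended. */
#include <stdio.h>

#define MAX_SETS 8

static unsigned int sets[MAX_SETS];	/* each value: entry_cnt of one dirty set */
static int nr_sets;

static void insert_sorted(unsigned int entry_cnt, unsigned int per_block_max)
{
	int i = nr_sets;

	if (entry_cnt < per_block_max) {	/* not full: keep ascending order */
		while (i > 0 && sets[i - 1] >= entry_cnt) {
			sets[i] = sets[i - 1];
			i--;
		}
	}
	sets[i] = entry_cnt;			/* full sets land at the tail */
	nr_sets++;
}

int main(void)
{
	insert_sorted(10, 455);
	insert_sorted(455, 455);		/* a full set */
	insert_sorted(3, 455);
	for (int i = 0; i < nr_sets; i++)
		printf("%u ", sets[i]);		/* prints: 3 10 455 */
	printf("\n");
	return 0;
}
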
2958 __set_bit_le(nat_ofs, nm_i->empty_nat_bits); in __update_nat_bits()
2959 __clear_bit_le(nat_ofs, nm_i->full_nat_bits); in __update_nat_bits()
2963 __clear_bit_le(nat_ofs, nm_i->empty_nat_bits); in __update_nat_bits()
2965 __set_bit_le(nat_ofs, nm_i->full_nat_bits); in __update_nat_bits()
2967 __clear_bit_le(nat_ofs, nm_i->full_nat_bits); in __update_nat_bits()
2971 struct page *page) in update_nat_bits() argument
2975 struct f2fs_nat_block *nat_blk = page_address(page); in update_nat_bits()
2987 if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR) in update_nat_bits()
2999 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_enable_nat_bits()
3001 for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) { in f2fs_enable_nat_bits()
3012 nm_i->free_nid_bitmap[nat_ofs])) in f2fs_enable_nat_bits()
3019 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_enable_nat_bits()
3026 struct f2fs_journal *journal = curseg->journal; in __flush_nat_entry_set()
3027 nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK; in __flush_nat_entry_set()
3031 struct page *page = NULL; in __flush_nat_entry_set() local
3036 * #2, flush nat entries to nat page. in __flush_nat_entry_set()
3038 if ((cpc->reason & CP_UMOUNT) || in __flush_nat_entry_set()
3039 !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL)) in __flush_nat_entry_set()
3043 down_write(&curseg->journal_rwsem); in __flush_nat_entry_set()
3045 page = get_next_nat_page(sbi, start_nid); in __flush_nat_entry_set()
3046 if (IS_ERR(page)) in __flush_nat_entry_set()
3047 return PTR_ERR(page); in __flush_nat_entry_set()
3049 nat_blk = page_address(page); in __flush_nat_entry_set()
3054 list_for_each_entry_safe(ne, cur, &set->entry_list, list) { in __flush_nat_entry_set()
3057 int offset; in __flush_nat_entry_set() local
3062 offset = f2fs_lookup_journal_in_cursum(journal, in __flush_nat_entry_set()
3064 f2fs_bug_on(sbi, offset < 0); in __flush_nat_entry_set()
3065 raw_ne = &nat_in_journal(journal, offset); in __flush_nat_entry_set()
3066 nid_in_journal(journal, offset) = cpu_to_le32(nid); in __flush_nat_entry_set()
3068 raw_ne = &nat_blk->entries[nid - start_nid]; in __flush_nat_entry_set()
3070 raw_nat_from_node_info(raw_ne, &ne->ni); in __flush_nat_entry_set()
3076 spin_lock(&NM_I(sbi)->nid_list_lock); in __flush_nat_entry_set()
3078 spin_unlock(&NM_I(sbi)->nid_list_lock); in __flush_nat_entry_set()
3083 up_write(&curseg->journal_rwsem); in __flush_nat_entry_set()
3085 update_nat_bits(sbi, start_nid, page); in __flush_nat_entry_set()
3086 f2fs_put_page(page, 1); in __flush_nat_entry_set()
3090 if (!set->entry_cnt) { in __flush_nat_entry_set()
3091 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set); in __flush_nat_entry_set()
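
__flush_nat_entry_set() picks its destination up front: during umount, or when the curseg journal has no room for the whole set, entries are written back into the on-disk NAT block; otherwise they are packed into the journal. A one-function sketch of that decision, with illustrative parameter names:

/* Journal if it fits and we are not unmounting, NAT block otherwise. */
#include <stdbool.h>
#include <stdio.h>

static bool flush_to_journal(bool umount, unsigned int journal_free_slots,
			     unsigned int set_entry_cnt)
{
	if (umount)
		return false;		/* umount rewrites the NAT blocks themselves */
	return journal_free_slots >= set_entry_cnt;
}

int main(void)
{
	printf("%s\n", flush_to_journal(false, 50, 12) ? "journal" : "nat block");
	printf("%s\n", flush_to_journal(true, 50, 12) ? "journal" : "nat block");
	return 0;
}
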
3104 struct f2fs_journal *journal = curseg->journal; in f2fs_flush_nat_entries()
3116 if (cpc->reason & CP_UMOUNT) { in f2fs_flush_nat_entries()
3117 f2fs_down_write(&nm_i->nat_tree_lock); in f2fs_flush_nat_entries()
3119 f2fs_up_write(&nm_i->nat_tree_lock); in f2fs_flush_nat_entries()
3122 if (!nm_i->nat_cnt[DIRTY_NAT]) in f2fs_flush_nat_entries()
3125 f2fs_down_write(&nm_i->nat_tree_lock); in f2fs_flush_nat_entries()
3132 if (cpc->reason & CP_UMOUNT || in f2fs_flush_nat_entries()
3134 nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL)) in f2fs_flush_nat_entries()
3141 set_idx = setvec[found - 1]->set + 1; in f2fs_flush_nat_entries()
3154 f2fs_up_write(&nm_i->nat_tree_lock); in f2fs_flush_nat_entries()
3164 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE; in __get_nat_bitmaps()
3169 nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); in __get_nat_bitmaps()
3170 nm_i->nat_bits = f2fs_kvzalloc(sbi, in __get_nat_bitmaps()
3171 F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL); in __get_nat_bitmaps()
3172 if (!nm_i->nat_bits) in __get_nat_bitmaps()
3173 return -ENOMEM; in __get_nat_bitmaps()
3175 nm_i->full_nat_bits = nm_i->nat_bits + 8; in __get_nat_bitmaps()
3176 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; in __get_nat_bitmaps()
3181 nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) - in __get_nat_bitmaps()
3182 nm_i->nat_bits_blocks; in __get_nat_bitmaps()
3183 for (i = 0; i < nm_i->nat_bits_blocks; i++) { in __get_nat_bitmaps()
3184 struct page *page; in __get_nat_bitmaps() local
3186 page = f2fs_get_meta_page(sbi, nat_bits_addr++); in __get_nat_bitmaps()
3187 if (IS_ERR(page)) in __get_nat_bitmaps()
3188 return PTR_ERR(page); in __get_nat_bitmaps()
3190 memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i), in __get_nat_bitmaps()
3191 page_address(page), F2FS_BLKSIZE); in __get_nat_bitmaps()
3192 f2fs_put_page(page, 1); in __get_nat_bitmaps()
3196 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) { in __get_nat_bitmaps()
3199 cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits)); in __get_nat_bitmaps()
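
The nat_bits area loaded by __get_nat_bitmaps() is laid out as an 8-byte checkpoint version followed by one "full" bit and one "empty" bit per NAT block, rounded up to whole filesystem blocks. A short sketch of that layout arithmetic, assuming 4 KiB blocks and an example NAT size:

/* Size and offsets of the packed nat_bits area. */
#include <stdio.h>

#define BLKSIZE 4096UL		/* assumed filesystem block size */

int main(void)
{
	unsigned long nat_blocks = 8192;			/* example NAT geometry */
	unsigned long nat_bits_bytes = nat_blocks / 8;		/* one bit per NAT block */
	unsigned long total_bytes = 2 * nat_bits_bytes + 8;	/* + 8-byte cp version */
	unsigned long nat_bits_blocks = (total_bytes + BLKSIZE - 1) / BLKSIZE;

	printf("full_nat_bits at byte 8, empty_nat_bits at byte %lu, %lu block(s)\n",
	       8 + nat_bits_bytes, nat_bits_blocks);
	return 0;
}
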
3216 for (i = 0; i < nm_i->nat_blocks; i++) { in load_free_nid_bitmap()
3217 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); in load_free_nid_bitmap()
3218 if (i >= nm_i->nat_blocks) in load_free_nid_bitmap()
3221 __set_bit_le(i, nm_i->nat_block_bitmap); in load_free_nid_bitmap()
3226 spin_lock(&NM_I(sbi)->nid_list_lock); in load_free_nid_bitmap()
3229 spin_unlock(&NM_I(sbi)->nid_list_lock); in load_free_nid_bitmap()
3232 for (i = 0; i < nm_i->nat_blocks; i++) { in load_free_nid_bitmap()
3233 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); in load_free_nid_bitmap()
3234 if (i >= nm_i->nat_blocks) in load_free_nid_bitmap()
3237 __set_bit_le(i, nm_i->nat_block_bitmap); in load_free_nid_bitmap()
3249 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); in init_node_manager()
3252 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1; in init_node_manager()
3253 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); in init_node_manager()
3254 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks; in init_node_manager()
3257 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - in init_node_manager()
3259 nm_i->nid_cnt[FREE_NID] = 0; in init_node_manager()
3260 nm_i->nid_cnt[PREALLOC_NID] = 0; in init_node_manager()
3261 nm_i->ram_thresh = DEF_RAM_THRESHOLD; in init_node_manager()
3262 nm_i->ra_nid_pages = DEF_RA_NID_PAGES; in init_node_manager()
3263 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; in init_node_manager()
3264 nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS; in init_node_manager()
3266 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC); in init_node_manager()
3267 INIT_LIST_HEAD(&nm_i->free_nid_list); in init_node_manager()
3268 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); in init_node_manager()
3269 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); in init_node_manager()
3270 INIT_LIST_HEAD(&nm_i->nat_entries); in init_node_manager()
3271 spin_lock_init(&nm_i->nat_list_lock); in init_node_manager()
3273 mutex_init(&nm_i->build_lock); in init_node_manager()
3274 spin_lock_init(&nm_i->nid_list_lock); in init_node_manager()
3275 init_f2fs_rwsem(&nm_i->nat_tree_lock); in init_node_manager()
3277 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); in init_node_manager()
3278 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); in init_node_manager()
3280 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size, in init_node_manager()
3282 if (!nm_i->nat_bitmap) in init_node_manager()
3283 return -ENOMEM; in init_node_manager()
3290 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size, in init_node_manager()
3292 if (!nm_i->nat_bitmap_mir) in init_node_manager()
3293 return -ENOMEM; in init_node_manager()
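
init_node_manager() derives the nid space from the superblock geometry: half of the NAT segments are the backup copy, each remaining segment contributes 2^log_blocks_per_seg NAT blocks, every NAT block describes a fixed number of nids, and a few nids stay reserved. The sketch below runs that arithmetic on example numbers; the 455-entries-per-block and 3-reserved-nids figures are assumptions for illustration:

/* NAT geometry to nid counts, on example numbers. */
#include <stdio.h>

int main(void)
{
	unsigned int segment_count_nat = 58;	/* from the superblock (example) */
	unsigned int log_blocks_per_seg = 9;	/* 512 blocks per segment */
	unsigned int nat_entries_per_blk = 455;	/* assumed per 4 KiB NAT block */
	unsigned int reserved_nids = 3;		/* assumed reserved node count */
	unsigned int valid_node_count = 1;	/* nodes already allocated (example) */

	unsigned int nat_segs = segment_count_nat >> 1;		/* drop the mirror copy */
	unsigned int nat_blocks = nat_segs << log_blocks_per_seg;
	unsigned int max_nid = nat_entries_per_blk * nat_blocks;
	unsigned int available_nids = max_nid - valid_node_count - reserved_nids;

	printf("nat_blocks=%u max_nid=%u available_nids=%u\n",
	       nat_blocks, max_nid, available_nids);
	return 0;
}
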
3304 nm_i->free_nid_bitmap = in init_free_nid_cache()
3306 nm_i->nat_blocks), in init_free_nid_cache()
3308 if (!nm_i->free_nid_bitmap) in init_free_nid_cache()
3309 return -ENOMEM; in init_free_nid_cache()
3311 for (i = 0; i < nm_i->nat_blocks; i++) { in init_free_nid_cache()
3312 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi, in init_free_nid_cache()
3314 if (!nm_i->free_nid_bitmap[i]) in init_free_nid_cache()
3315 return -ENOMEM; in init_free_nid_cache()
3318 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8, in init_free_nid_cache()
3320 if (!nm_i->nat_block_bitmap) in init_free_nid_cache()
3321 return -ENOMEM; in init_free_nid_cache()
3323 nm_i->free_nid_count = in init_free_nid_cache()
3325 nm_i->nat_blocks), in init_free_nid_cache()
3327 if (!nm_i->free_nid_count) in init_free_nid_cache()
3328 return -ENOMEM; in init_free_nid_cache()
3336 sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info), in f2fs_build_node_manager()
3338 if (!sbi->nm_info) in f2fs_build_node_manager()
3339 return -ENOMEM; in f2fs_build_node_manager()
3369 spin_lock(&nm_i->nid_list_lock); in f2fs_destroy_node_manager()
3370 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) { in f2fs_destroy_node_manager()
3372 spin_unlock(&nm_i->nid_list_lock); in f2fs_destroy_node_manager()
3374 spin_lock(&nm_i->nid_list_lock); in f2fs_destroy_node_manager()
3376 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]); in f2fs_destroy_node_manager()
3377 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]); in f2fs_destroy_node_manager()
3378 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list)); in f2fs_destroy_node_manager()
3379 spin_unlock(&nm_i->nid_list_lock); in f2fs_destroy_node_manager()
3382 f2fs_down_write(&nm_i->nat_tree_lock); in f2fs_destroy_node_manager()
3387 nid = nat_get_nid(natvec[found - 1]) + 1; in f2fs_destroy_node_manager()
3389 spin_lock(&nm_i->nat_list_lock); in f2fs_destroy_node_manager()
3390 list_del(&natvec[idx]->list); in f2fs_destroy_node_manager()
3391 spin_unlock(&nm_i->nat_list_lock); in f2fs_destroy_node_manager()
3396 f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]); in f2fs_destroy_node_manager()
3405 nid = setvec[found - 1]->set + 1; in f2fs_destroy_node_manager()
3408 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list)); in f2fs_destroy_node_manager()
3409 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set); in f2fs_destroy_node_manager()
3413 f2fs_up_write(&nm_i->nat_tree_lock); in f2fs_destroy_node_manager()
3415 kvfree(nm_i->nat_block_bitmap); in f2fs_destroy_node_manager()
3416 if (nm_i->free_nid_bitmap) { in f2fs_destroy_node_manager()
3419 for (i = 0; i < nm_i->nat_blocks; i++) in f2fs_destroy_node_manager()
3420 kvfree(nm_i->free_nid_bitmap[i]); in f2fs_destroy_node_manager()
3421 kvfree(nm_i->free_nid_bitmap); in f2fs_destroy_node_manager()
3423 kvfree(nm_i->free_nid_count); in f2fs_destroy_node_manager()
3425 kvfree(nm_i->nat_bitmap); in f2fs_destroy_node_manager()
3426 kvfree(nm_i->nat_bits); in f2fs_destroy_node_manager()
3428 kvfree(nm_i->nat_bitmap_mir); in f2fs_destroy_node_manager()
3430 sbi->nm_info = NULL; in f2fs_destroy_node_manager()
3464 return -ENOMEM; in f2fs_create_node_manager_caches()