Lines Matching +full:1 +full:eb

41 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)  in btrfs_leak_debug_add_eb()  argument
43 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_add_eb()
47 list_add(&eb->leak_list, &fs_info->allocated_ebs); in btrfs_leak_debug_add_eb()
51 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb) in btrfs_leak_debug_del_eb() argument
53 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_del_eb()
57 list_del(&eb->leak_list); in btrfs_leak_debug_del_eb()
63 struct extent_buffer *eb; in btrfs_extent_buffer_leak_debug_check() local
76 eb = list_first_entry(&fs_info->allocated_ebs, in btrfs_extent_buffer_leak_debug_check()
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
81 btrfs_header_owner(eb)); in btrfs_extent_buffer_leak_debug_check()
82 list_del(&eb->leak_list); in btrfs_extent_buffer_leak_debug_check()
83 WARN_ON_ONCE(1); in btrfs_extent_buffer_leak_debug_check()
84 kmem_cache_free(extent_buffer_cache, eb); in btrfs_extent_buffer_leak_debug_check()
89 #define btrfs_leak_debug_add_eb(eb) do {} while (0) argument
90 #define btrfs_leak_debug_del_eb(eb) do {} while (0) argument
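The block above is the CONFIG_BTRFS_DEBUG leak tracker: every eb is linked into fs_info->allocated_ebs on allocation (line 47), unlinked on free (line 57), and btrfs_extent_buffer_leak_debug_check() dumps and reclaims whatever is still on the list; with debugging off, the helpers collapse to the empty macros on lines 89-90. A minimal sketch of the pattern, assuming the list is serialized by a spinlock (the eb_leak_lock name below is an assumption, not shown in these matches):

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        unsigned long flags;

        /* Assumed lock; only the list_add itself appears in the matches. */
        spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
        list_add(&eb->leak_list, &fs_info->allocated_ebs);
        spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}
#else
/* Compiles away entirely when debugging is disabled. */
#define btrfs_leak_debug_add_eb(eb) do {} while (0)
#endif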
180 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX); in process_one_folio()
181 len = end + 1 - start; in process_one_folio()
278 end + 1) - range_start; in lock_delalloc_folios()
281 processed_end = range_start + range_len - 1; in lock_delalloc_folios()
344 /* @delalloc_end can be -1, never go beyond @orig_end */ in find_lock_delalloc_range()
361 if (delalloc_end + 1 - delalloc_start > max_bytes) in find_lock_delalloc_range()
362 delalloc_end = delalloc_start + max_bytes - 1; in find_lock_delalloc_range()
376 loops = 1; in find_lock_delalloc_range()
553 end = start + fi.length - 1; in end_bbio_data_read()
572 u32 zero_len = offset_in_folio(folio, end) + 1 - in end_bbio_data_read()
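Most of the arithmetic matched in this file is btrfs's inclusive-end convention: a range [start, end] has length end + 1 - start, and the following range begins at end + 1 (see lines 180-181, 553, and 572 above). A standalone demonstration of the identities, with made-up values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t start = 4096, end = 8191;   /* inclusive range [4096, 8191] */
        uint64_t len = end + 1 - start;      /* 4096 bytes: one 4K page */

        assert(len == 4096);
        assert(start + len - 1 == end);      /* length back to inclusive end */
        assert(end + 1 == start + len);      /* next range starts at end + 1 */
        return 0;
}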
653 static int alloc_eb_folio_array(struct extent_buffer *eb, bool nofail) in alloc_eb_folio_array() argument
656 int num_pages = num_extent_pages(eb); in alloc_eb_folio_array()
664 eb->folios[i] = page_folio(page_array[i]); in alloc_eb_folio_array()
665 eb->folio_size = PAGE_SIZE; in alloc_eb_folio_array()
666 eb->folio_shift = PAGE_SHIFT; in alloc_eb_folio_array()
690 * 1) The folios belong to the same inode in btrfs_bio_is_contig()
826 static int attach_extent_buffer_folio(struct extent_buffer *eb, in attach_extent_buffer_folio() argument
830 struct btrfs_fs_info *fs_info = eb->fs_info; in attach_extent_buffer_folio()
844 folio_attach_private(folio, eb); in attach_extent_buffer_folio()
846 WARN_ON(folio_get_private(folio) != eb); in attach_extent_buffer_folio()
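attach_extent_buffer_folio() hangs the eb off the generic folio private pointer (lines 844-846). A reduced sketch of the attach-or-verify step for the regular (non-subpage) case; the subpage handling and locking asserts of the real function are omitted:

/* Sketch: regular-sectorsize path only, caller holds the folio lock. */
static int attach_extent_buffer_folio_sketch(struct extent_buffer *eb,
                                             struct folio *folio)
{
        if (!folio_test_private(folio)) {
                /* First eb for this folio: publish it as folio private. */
                folio_attach_private(folio, eb);
                return 0;
        }
        /* Already attached: it must point back at this very eb. */
        WARN_ON(folio_get_private(folio) != eb);
        return 0;
}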
925 btrfs_lock_and_flush_ordered_range(BTRFS_I(inode), start, start + len - 1, &cached_state); in __get_extent_map()
932 unlock_extent(&BTRFS_I(inode)->io_tree, start, start + len - 1, &cached_state); in __get_extent_map()
949 const u64 end = start + PAGE_SIZE - 1; in btrfs_do_readpage()
988 em = __get_extent_map(inode, folio, cur, end - cur + 1, in btrfs_do_readpage()
991 end_folio_read(folio, false, cur, end + 1 - cur); in btrfs_do_readpage()
1000 iosize = min(extent_map_end(em) - cur, end - cur + 1); in btrfs_do_readpage()
1045 prev_em_start && *prev_em_start != (u64)-1 && in btrfs_do_readpage()
1108 * This returns 1 if the btrfs_run_delalloc_range function did all the work required
1123 const u64 page_end = page_start + folio_size(folio) - 1; in writepage_delalloc()
1137 ASSERT(fs_info->sectors_per_page > 1); in writepage_delalloc()
1140 bio_ctrl->submit_bitmap = 1; in writepage_delalloc()
1148 delalloc_start = delalloc_end + 1; in writepage_delalloc()
1152 min(delalloc_end, page_end) + 1 - in writepage_delalloc()
1155 delalloc_start = delalloc_end + 1; in writepage_delalloc()
1175 found_len = last_delalloc_end + 1 - found_start; in writepage_delalloc()
1189 found_len = last_delalloc_end + 1 - found_start; in writepage_delalloc()
1195 found_start + found_len - 1, in writepage_delalloc()
1203 found_start + found_len - 1, NULL); in writepage_delalloc()
1206 found_start + found_len - 1); in writepage_delalloc()
1218 unsigned int end_bit = (min(page_end + 1, found_start + found_len) - in writepage_delalloc()
1226 if (found_start + found_len >= last_delalloc_end + 1) in writepage_delalloc()
1243 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE); in writepage_delalloc()
1251 return 1; in writepage_delalloc()
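The writepage_delalloc() matches sketch its control flow: probe for a delalloc range inside the page, and whether or not one is found, advance the cursor one past the range's inclusive end (lines 1148 and 1155). The loop shape, condensed (helper arguments simplified; not the full function):

        while (delalloc_start < page_end) {
                delalloc_end = page_end;
                if (!find_lock_delalloc_range(&inode->vfs_inode, folio,
                                              &delalloc_start, &delalloc_end)) {
                        /* Nothing here: skip past the probed range. */
                        delalloc_start = delalloc_end + 1;
                        continue;
                }
                /* ... record/run the found range ... */
                delalloc_start = delalloc_end + 1;   /* inclusive end, so +1 */
        }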
1317 btrfs_set_range_writeback(inode, filepos, filepos + sectorsize - 1); in submit_one_sector()
1335 * We return 1 if the IO is started and the page is unlocked,
1361 return 1; in extent_writepage_io()
1450 bio_ctrl->submit_bitmap = (unsigned long)-1; in extent_writepage()
1456 if (ret == 1) in extent_writepage()
1463 if (ret == 1) in extent_writepage()
1484 void wait_on_extent_buffer_writeback(struct extent_buffer *eb) in wait_on_extent_buffer_writeback() argument
1486 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, in wait_on_extent_buffer_writeback()
1497 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb, in lock_extent_buffer_for_io() argument
1500 struct btrfs_fs_info *fs_info = eb->fs_info; in lock_extent_buffer_for_io()
1503 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
1504 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { in lock_extent_buffer_for_io()
1505 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
1508 wait_on_extent_buffer_writeback(eb); in lock_extent_buffer_for_io()
1509 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
1513 * We need to do this to prevent races with anyone who checks if the eb is in lock_extent_buffer_for_io()
1517 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
1518 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in lock_extent_buffer_for_io()
1519 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in lock_extent_buffer_for_io()
1520 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1521 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); in lock_extent_buffer_for_io()
1523 -eb->len, in lock_extent_buffer_for_io()
1527 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1529 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
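lock_extent_buffer_for_io() opens with the canonical wait-for-writeback loop: take the tree lock, and while the eb is still under writeback, drop the lock, sleep on the flag, and retry (lines 1503-1509). Extracted:

        btrfs_tree_lock(eb);
        while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
                btrfs_tree_unlock(eb);
                /* Sleeps until the write completion clears the bit. */
                wait_on_extent_buffer_writeback(eb);
                btrfs_tree_lock(eb);
        }

Only after the loop exits does the DIRTY to WRITEBACK transition happen under refs_lock (lines 1517-1527), so checkers of the dirty bit cannot race a writer.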
1533 static void set_btree_ioerr(struct extent_buffer *eb) in set_btree_ioerr() argument
1535 struct btrfs_fs_info *fs_info = eb->fs_info; in set_btree_ioerr()
1537 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in set_btree_ioerr()
1543 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_btree_ioerr()
1551 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO); in set_btree_ioerr()
1577 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is in set_btree_ioerr()
1578 * not done and would not be reliable - the eb might have been released in set_btree_ioerr()
1591 switch (eb->log_index) { in set_btree_ioerr()
1592 case -1: in set_btree_ioerr()
1598 case 1: in set_btree_ioerr()
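The switch on eb->log_index (lines 1591-1598) routes a write error to a per-tree error flag: -1 for a non-log tree, 0 and 1 for the two log roots, so transaction commit and log sync can each detect it. A sketch of the dispatch; the BTRFS_FS_*_ERR flag names are recalled from the upstream source rather than taken from these matches, so treat them as assumptions:

        switch (eb->log_index) {
        case -1:
                set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);  /* assumed name */
                break;
        case 0:
                set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);   /* assumed name */
                break;
        case 1:
                set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);   /* assumed name */
                break;
        default:
                BUG();  /* unexpected eb->log_index */
        }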
1613 struct extent_buffer *eb; in find_extent_buffer_nolock() local
1616 eb = radix_tree_lookup(&fs_info->buffer_radix, in find_extent_buffer_nolock()
1618 if (eb && atomic_inc_not_zero(&eb->refs)) { in find_extent_buffer_nolock()
1620 return eb; in find_extent_buffer_nolock()
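find_extent_buffer_nolock() is a textbook RCU lookup: resolve the pointer under rcu_read_lock() and take a reference only if the refcount is still non-zero, so an eb racing toward release is never resurrected. Reconstructed from lines 1616-1620, with the RCU bracketing (implied by the pattern) filled in:

static struct extent_buffer *find_extent_buffer_nolock(
                struct btrfs_fs_info *fs_info, u64 start)
{
        struct extent_buffer *eb;

        rcu_read_lock();
        eb = radix_tree_lookup(&fs_info->buffer_radix,
                               start >> fs_info->sectorsize_bits);
        if (eb && atomic_inc_not_zero(&eb->refs)) {
                /* Reference taken before refs could hit zero. */
                rcu_read_unlock();
                return eb;
        }
        rcu_read_unlock();
        return NULL;
}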
1628 struct extent_buffer *eb = bbio->private; in end_bbio_meta_write() local
1629 struct btrfs_fs_info *fs_info = eb->fs_info; in end_bbio_meta_write()
1635 set_btree_ioerr(eb); in end_bbio_meta_write()
1638 u64 start = eb->start + bio_offset; in end_bbio_meta_write()
1646 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in end_bbio_meta_write()
1648 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); in end_bbio_meta_write()
1653 static void prepare_eb_write(struct extent_buffer *eb) in prepare_eb_write() argument
1659 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in prepare_eb_write()
1662 nritems = btrfs_header_nritems(eb); in prepare_eb_write()
1663 if (btrfs_header_level(eb) > 0) { in prepare_eb_write()
1664 end = btrfs_node_key_ptr_offset(eb, nritems); in prepare_eb_write()
1665 memzero_extent_buffer(eb, end, eb->len - end); in prepare_eb_write()
1669 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0 in prepare_eb_write()
1671 start = btrfs_item_nr_offset(eb, nritems); in prepare_eb_write()
1672 end = btrfs_item_nr_offset(eb, 0); in prepare_eb_write()
1674 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info); in prepare_eb_write()
1676 end += btrfs_item_offset(eb, nritems - 1); in prepare_eb_write()
1677 memzero_extent_buffer(eb, start, end - start); in prepare_eb_write()
1681 static noinline_for_stack void write_one_eb(struct extent_buffer *eb, in write_one_eb() argument
1684 struct btrfs_fs_info *fs_info = eb->fs_info; in write_one_eb()
1687 prepare_eb_write(eb); in write_one_eb()
1691 eb->fs_info, end_bbio_meta_write, eb); in write_one_eb()
1692 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in write_one_eb()
1695 bbio->inode = BTRFS_I(eb->fs_info->btree_inode); in write_one_eb()
1696 bbio->file_offset = eb->start; in write_one_eb()
1698 struct folio *folio = eb->folios[0]; in write_one_eb()
1702 btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len); in write_one_eb()
1703 if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, in write_one_eb()
1704 eb->len)) { in write_one_eb()
1708 ret = bio_add_folio(&bbio->bio, folio, eb->len, in write_one_eb()
1709 eb->start - folio_pos(folio)); in write_one_eb()
1711 wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len); in write_one_eb()
1714 int num_folios = num_extent_folios(eb); in write_one_eb()
1717 struct folio *folio = eb->folios[i]; in write_one_eb()
1723 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0); in write_one_eb()
1726 eb->folio_size); in write_one_eb()
1759 struct extent_buffer *eb; in submit_eb_subpage() local
1785 * Here we just want to grab the eb without touching extra in submit_eb_subpage()
1788 eb = find_extent_buffer_nolock(fs_info, start); in submit_eb_subpage()
1793 * The eb has already reached 0 refs, thus find_extent_buffer() in submit_eb_subpage()
1794 * doesn't return it. We don't need to write back such an eb in submit_eb_subpage()
1797 if (!eb) in submit_eb_subpage()
1800 if (lock_extent_buffer_for_io(eb, wbc)) { in submit_eb_subpage()
1801 write_one_eb(eb, wbc); in submit_eb_subpage()
1804 free_extent_buffer(eb); in submit_eb_subpage()
1814 * belongs to this eb, we don't need to submit
1833 struct extent_buffer *eb; in submit_eb_page() local
1848 eb = folio_get_private(folio); in submit_eb_page()
1854 if (WARN_ON(!eb)) { in submit_eb_page()
1859 if (eb == ctx->eb) { in submit_eb_page()
1863 ret = atomic_inc_not_zero(&eb->refs); in submit_eb_page()
1868 ctx->eb = eb; in submit_eb_page()
1870 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx); in submit_eb_page()
1874 free_extent_buffer(eb); in submit_eb_page()
1878 if (!lock_extent_buffer_for_io(eb, wbc)) { in submit_eb_page()
1879 free_extent_buffer(eb); in submit_eb_page()
1884 /* Mark the last eb in the block group. */ in submit_eb_page()
1885 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb); in submit_eb_page()
1886 ctx->zoned_bg->meta_write_pointer += eb->len; in submit_eb_page()
1888 write_one_eb(eb, wbc); in submit_eb_page()
1889 free_extent_buffer(eb); in submit_eb_page()
1890 return 1; in submit_eb_page()
1911 end = -1; in btree_write_cache_pages()
1920 scanned = 1; in btree_write_cache_pages()
1942 done = 1; in btree_write_cache_pages()
1961 scanned = 1; in btree_write_cache_pages()
1989 * extent io tree. Thus we don't want to submit such a wild eb in btree_write_cache_pages()
2053 end = -1; in extent_write_cache_pages()
2063 range_whole = 1; in extent_write_cache_pages()
2064 scanned = 1; in extent_write_cache_pages()
2077 wbc->tagged_writepages = 1; in extent_write_cache_pages()
2133 done = 1; in extent_write_cache_pages()
2153 scanned = 1; in extent_write_cache_pages()
2197 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize)); in extent_write_locked_range()
2200 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end); in extent_write_locked_range()
2201 u32 cur_len = cur_end + 1 - cur; in extent_write_locked_range()
2214 cur = cur_end + 1; in extent_write_locked_range()
2226 bio_ctrl.submit_bitmap = (unsigned long)-1; in extent_write_locked_range()
2229 if (ret == 1) in extent_write_locked_range()
2242 cur = cur_end + 1; in extent_write_locked_range()
2273 u64 prev_em_start = (u64)-1; in btrfs_readahead()
2293 u64 end = start + folio_size(folio) - 1; in extent_invalidate_folio()
2324 u64 end = start + PAGE_SIZE - 1; in try_release_extent_state()
2362 u64 end = start + PAGE_SIZE - 1; in try_release_extent_mapping()
2368 const u64 len = end - start + 1; in try_release_extent_mapping()
2384 extent_map_end(em) - 1, EXTENT_LOCKED)) in try_release_extent_mapping()
2435 static void __free_extent_buffer(struct extent_buffer *eb) in __free_extent_buffer() argument
2437 kmem_cache_free(extent_buffer_cache, eb); in __free_extent_buffer()
2440 static int extent_buffer_under_io(const struct extent_buffer *eb) in extent_buffer_under_io() argument
2442 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || in extent_buffer_under_io()
2443 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in extent_buffer_under_io()
2457 * Even if there are no eb refs here, we may still have in folio_range_has_eb()
2466 static void detach_extent_buffer_folio(const struct extent_buffer *eb, struct folio *folio) in detach_extent_buffer_folio() argument
2468 struct btrfs_fs_info *fs_info = eb->fs_info; in detach_extent_buffer_folio()
2469 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in detach_extent_buffer_folio()
2472 * For a mapped eb, we're going to change the folio private, which should in detach_extent_buffer_folio()
2487 * removed the eb from the radix tree, so we could race in detach_extent_buffer_folio()
2488 * and have this page now attached to the new eb. So in detach_extent_buffer_folio()
2490 * this eb. in detach_extent_buffer_folio()
2492 if (folio_test_private(folio) && folio_get_private(folio) == eb) { in detach_extent_buffer_folio()
2493 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in detach_extent_buffer_folio()
2496 /* We need to make sure we haven't been attached to a new eb. */ in detach_extent_buffer_folio()
2505 * For subpage, we can have dummy eb with folio private attached. In in detach_extent_buffer_folio()
2507 * attached to one dummy eb, no sharing. in detach_extent_buffer_folio()
2527 static void btrfs_release_extent_buffer_pages(const struct extent_buffer *eb) in btrfs_release_extent_buffer_pages() argument
2529 ASSERT(!extent_buffer_under_io(eb)); in btrfs_release_extent_buffer_pages()
2532 struct folio *folio = eb->folios[i]; in btrfs_release_extent_buffer_pages()
2537 detach_extent_buffer_folio(eb, folio); in btrfs_release_extent_buffer_pages()
2547 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) in btrfs_release_extent_buffer() argument
2549 btrfs_release_extent_buffer_pages(eb); in btrfs_release_extent_buffer()
2550 btrfs_leak_debug_del_eb(eb); in btrfs_release_extent_buffer()
2551 __free_extent_buffer(eb); in btrfs_release_extent_buffer()
2558 struct extent_buffer *eb = NULL; in __alloc_extent_buffer() local
2560 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL); in __alloc_extent_buffer()
2561 eb->start = start; in __alloc_extent_buffer()
2562 eb->len = len; in __alloc_extent_buffer()
2563 eb->fs_info = fs_info; in __alloc_extent_buffer()
2564 init_rwsem(&eb->lock); in __alloc_extent_buffer()
2566 btrfs_leak_debug_add_eb(eb); in __alloc_extent_buffer()
2568 spin_lock_init(&eb->refs_lock); in __alloc_extent_buffer()
2569 atomic_set(&eb->refs, 1); in __alloc_extent_buffer()
2573 return eb; in __alloc_extent_buffer()
2618 struct extent_buffer *eb; in __alloc_dummy_extent_buffer() local
2622 eb = __alloc_extent_buffer(fs_info, start, len); in __alloc_dummy_extent_buffer()
2623 if (!eb) in __alloc_dummy_extent_buffer()
2626 ret = alloc_eb_folio_array(eb, false); in __alloc_dummy_extent_buffer()
2630 num_folios = num_extent_folios(eb); in __alloc_dummy_extent_buffer()
2632 ret = attach_extent_buffer_folio(eb, eb->folios[i], NULL); in __alloc_dummy_extent_buffer()
2637 set_extent_buffer_uptodate(eb); in __alloc_dummy_extent_buffer()
2638 btrfs_set_header_nritems(eb, 0); in __alloc_dummy_extent_buffer()
2639 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __alloc_dummy_extent_buffer()
2641 return eb; in __alloc_dummy_extent_buffer()
2644 if (eb->folios[i]) { in __alloc_dummy_extent_buffer()
2645 detach_extent_buffer_folio(eb, eb->folios[i]); in __alloc_dummy_extent_buffer()
2646 folio_put(eb->folios[i]); in __alloc_dummy_extent_buffer()
2649 __free_extent_buffer(eb); in __alloc_dummy_extent_buffer()
2659 static void check_buffer_tree_ref(struct extent_buffer *eb) in check_buffer_tree_ref() argument
2685 refs = atomic_read(&eb->refs); in check_buffer_tree_ref()
2686 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
2689 spin_lock(&eb->refs_lock); in check_buffer_tree_ref()
2690 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
2691 atomic_inc(&eb->refs); in check_buffer_tree_ref()
2692 spin_unlock(&eb->refs_lock); in check_buffer_tree_ref()
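check_buffer_tree_ref() takes the extra "tree reference" at most once: a lockless fast path bails when TREE_REF is already set with refs >= 2, and the slow path sets the bit and bumps the count under refs_lock so only the first setter increments (lines 2685-2692):

        refs = atomic_read(&eb->refs);
        if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
                return;         /* tree ref already accounted for */

        spin_lock(&eb->refs_lock);
        if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
                atomic_inc(&eb->refs);  /* first setter owns the extra ref */
        spin_unlock(&eb->refs_lock);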
2695 static void mark_extent_buffer_accessed(struct extent_buffer *eb) in mark_extent_buffer_accessed() argument
2697 int num_folios = num_extent_folios(eb); in mark_extent_buffer_accessed()
2699 check_buffer_tree_ref(eb); in mark_extent_buffer_accessed()
2702 folio_mark_accessed(eb->folios[i]); in mark_extent_buffer_accessed()
2708 struct extent_buffer *eb; in find_extent_buffer() local
2710 eb = find_extent_buffer_nolock(fs_info, start); in find_extent_buffer()
2711 if (!eb) in find_extent_buffer()
2714 * Lock our eb's refs_lock to avoid races with free_extent_buffer(). in find_extent_buffer()
2715 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and in find_extent_buffer()
2717 * set, eb->refs == 2, that the buffer isn't under IO (dirty and in find_extent_buffer()
2721 * could race and increment the eb's reference count, clear its stale in find_extent_buffer()
2726 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { in find_extent_buffer()
2727 spin_lock(&eb->refs_lock); in find_extent_buffer()
2728 spin_unlock(&eb->refs_lock); in find_extent_buffer()
2730 mark_extent_buffer_accessed(eb); in find_extent_buffer()
2731 return eb; in find_extent_buffer()
2738 struct extent_buffer *eb, *exists = NULL; in alloc_test_extent_buffer() local
2741 eb = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
2742 if (eb) in alloc_test_extent_buffer()
2743 return eb; in alloc_test_extent_buffer()
2744 eb = alloc_dummy_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
2745 if (!eb) in alloc_test_extent_buffer()
2747 eb->fs_info = fs_info; in alloc_test_extent_buffer()
2756 start >> fs_info->sectorsize_bits, eb); in alloc_test_extent_buffer()
2766 check_buffer_tree_ref(eb); in alloc_test_extent_buffer()
2767 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_test_extent_buffer()
2769 return eb; in alloc_test_extent_buffer()
2771 btrfs_release_extent_buffer(eb); in alloc_test_extent_buffer()
2797 * We could have already allocated an eb for this page and attached one in grab_extent_buffer()
2798 * so let's see if we can get a ref on the existing eb, and if we can we in grab_extent_buffer()
2843 * Return 0 if eb->folios[i] is attached to the btree inode successfully.
2847 * than @eb.
2850 static int attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, in attach_eb_folio_to_filemap() argument
2855 struct btrfs_fs_info *fs_info = eb->fs_info; in attach_eb_folio_to_filemap()
2857 const unsigned long index = eb->start >> PAGE_SHIFT; in attach_eb_folio_to_filemap()
2864 ASSERT(eb->folios[i]); in attach_eb_folio_to_filemap()
2867 ret = filemap_add_folio(mapping, eb->folios[i], index + i, in attach_eb_folio_to_filemap()
2880 ASSERT(folio_nr_pages(existing_folio) == 1); in attach_eb_folio_to_filemap()
2882 if (folio_size(existing_folio) != eb->folio_size) { in attach_eb_folio_to_filemap()
2892 __free_page(folio_page(eb->folios[i], 0)); in attach_eb_folio_to_filemap()
2893 eb->folios[i] = existing_folio; in attach_eb_folio_to_filemap()
2905 return 1; in attach_eb_folio_to_filemap()
2908 __free_page(folio_page(eb->folios[i], 0)); in attach_eb_folio_to_filemap()
2909 eb->folios[i] = existing_folio; in attach_eb_folio_to_filemap()
2911 eb->folio_size = folio_size(eb->folios[i]); in attach_eb_folio_to_filemap()
2912 eb->folio_shift = folio_shift(eb->folios[i]); in attach_eb_folio_to_filemap()
2914 ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc); in attach_eb_folio_to_filemap()
2917 * To indicate that we have an extra eb under allocation, so that in attach_eb_folio_to_filemap()
2919 * the eb hasn't been inserted into the radix tree yet. in attach_eb_folio_to_filemap()
2921 * The ref will be decreased when the eb releases the page, in in attach_eb_folio_to_filemap()
2925 btrfs_folio_inc_eb_refs(fs_info, eb->folios[i]); in attach_eb_folio_to_filemap()
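attach_eb_folio_to_filemap() resolves the insert race around filemap_add_folio(): -EEXIST means another allocation already owns that index, so the freshly allocated page is freed and the existing folio adopted (lines 2867, 2892-2893, 2908-2909). A trimmed sketch of that branch; the retry on a truncated winner is an assumption about the elided error path:

        ret = filemap_add_folio(mapping, eb->folios[i], index + i, GFP_NOFS);
        if (ret == -EEXIST) {
                struct folio *existing = filemap_lock_folio(mapping, index + i);

                if (IS_ERR(existing))   /* winner got truncated meanwhile */
                        goto retry;     /* assumed recovery path */

                /* Lost the race: drop our page, adopt the existing folio. */
                __free_page(folio_page(eb->folios[i], 0));
                eb->folios[i] = existing;
        }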
2936 struct extent_buffer *eb; in alloc_extent_buffer() local
2941 int uptodate = 1; in alloc_extent_buffer()
2958 eb = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
2959 if (eb) in alloc_extent_buffer()
2960 return eb; in alloc_extent_buffer()
2962 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_extent_buffer()
2963 if (!eb) in alloc_extent_buffer()
2973 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level); in alloc_extent_buffer()
2992 ret = alloc_eb_folio_array(eb, true); in alloc_extent_buffer()
2998 num_folios = num_extent_folios(eb); in alloc_extent_buffer()
3003 ret = attach_eb_folio_to_filemap(eb, i, prealloc, &existing_eb); in alloc_extent_buffer()
3011 * folios mismatch between the new eb and filemap. in alloc_extent_buffer()
3015 * - the new eb is using a higher-order folio in alloc_extent_buffer()
3018 * This can happen at the previous eb allocation, and we don't in alloc_extent_buffer()
3021 * - the existing eb has already been freed in alloc_extent_buffer()
3035 * Only after attach_eb_folio_to_filemap(), eb->folios[] is in alloc_extent_buffer()
3039 folio = eb->folios[i]; in alloc_extent_buffer()
3040 WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len)); in alloc_extent_buffer()
3043 * Check if the current page is physically contiguous with the previous eb in alloc_extent_buffer()
3048 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0)) in alloc_extent_buffer()
3051 if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len)) in alloc_extent_buffer()
3063 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in alloc_extent_buffer()
3066 eb->addr = folio_address(eb->folios[0]) + offset_in_page(eb->start); in alloc_extent_buffer()
3074 start >> fs_info->sectorsize_bits, eb); in alloc_extent_buffer()
3086 check_buffer_tree_ref(eb); in alloc_extent_buffer()
3087 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_extent_buffer()
3095 unlock_page(folio_page(eb->folios[i], 0)); in alloc_extent_buffer()
3096 return eb; in alloc_extent_buffer()
3099 WARN_ON(!atomic_dec_and_test(&eb->refs)); in alloc_extent_buffer()
3104 * then attaching our eb to that folio. If we fail to insert our folio in alloc_extent_buffer()
3105 * we'll lookup the folio for that index, and grab that EB. We do not in alloc_extent_buffer()
3106 * want that to grab this eb, as we're getting ready to free it. So we in alloc_extent_buffer()
3110 * subpage case detaching does a btrfs_folio_dec_eb_refs() for our eb. in alloc_extent_buffer()
3113 * case. If we left eb->folios[i] populated in the subpage case we'd in alloc_extent_buffer()
3117 ASSERT(eb->folios[i]); in alloc_extent_buffer()
3118 detach_extent_buffer_folio(eb, eb->folios[i]); in alloc_extent_buffer()
3119 unlock_page(folio_page(eb->folios[i], 0)); in alloc_extent_buffer()
3120 folio_put(eb->folios[i]); in alloc_extent_buffer()
3121 eb->folios[i] = NULL; in alloc_extent_buffer()
3127 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in alloc_extent_buffer()
3129 btrfs_release_extent_buffer(eb); in alloc_extent_buffer()
3138 struct extent_buffer *eb = in btrfs_release_extent_buffer_rcu() local
3141 __free_extent_buffer(eb); in btrfs_release_extent_buffer_rcu()
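btrfs_release_extent_buffer_rcu() is the callback handed to call_rcu() on line 3173: container_of() maps the embedded rcu_head back to the eb, and the slab object is only reused after a grace period, which is exactly what keeps the lockless lookup in find_extent_buffer_nolock() safe. Reconstructed around lines 3138-3141:

static void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
{
        struct extent_buffer *eb =
                        container_of(head, struct extent_buffer, rcu_head);

        /* __free_extent_buffer() is just kmem_cache_free(), line 2437. */
        __free_extent_buffer(eb);
}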
3144 static int release_extent_buffer(struct extent_buffer *eb) in release_extent_buffer() argument
3145 __releases(&eb->refs_lock) in release_extent_buffer()
3147 lockdep_assert_held(&eb->refs_lock); in release_extent_buffer()
3149 WARN_ON(atomic_read(&eb->refs) == 0); in release_extent_buffer()
3150 if (atomic_dec_and_test(&eb->refs)) { in release_extent_buffer()
3151 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { in release_extent_buffer()
3152 struct btrfs_fs_info *fs_info = eb->fs_info; in release_extent_buffer()
3154 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3158 eb->start >> fs_info->sectorsize_bits); in release_extent_buffer()
3161 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3164 btrfs_leak_debug_del_eb(eb); in release_extent_buffer()
3166 btrfs_release_extent_buffer_pages(eb); in release_extent_buffer()
3168 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { in release_extent_buffer()
3169 __free_extent_buffer(eb); in release_extent_buffer()
3170 return 1; in release_extent_buffer()
3173 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); in release_extent_buffer()
3174 return 1; in release_extent_buffer()
3176 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3181 void free_extent_buffer(struct extent_buffer *eb) in free_extent_buffer() argument
3184 if (!eb) in free_extent_buffer()
3187 refs = atomic_read(&eb->refs); in free_extent_buffer()
3188 while (1) { in free_extent_buffer()
3189 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) in free_extent_buffer()
3190 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && in free_extent_buffer()
3191 refs == 1)) in free_extent_buffer()
3193 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1)) in free_extent_buffer()
3197 spin_lock(&eb->refs_lock); in free_extent_buffer()
3198 if (atomic_read(&eb->refs) == 2 && in free_extent_buffer()
3199 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && in free_extent_buffer()
3200 !extent_buffer_under_io(eb) && in free_extent_buffer()
3201 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer()
3202 atomic_dec(&eb->refs); in free_extent_buffer()
3208 release_extent_buffer(eb); in free_extent_buffer()
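free_extent_buffer() avoids refs_lock in the common case: a cmpxchg loop keeps decrementing while the count is safely above the release threshold, and only low counts fall through to the locked path (lines 3187-3202). The fast path, condensed:

        refs = atomic_read(&eb->refs);
        while (1) {
                /* Counts this low may need the locked release path. */
                if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) ||
                    (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs == 1))
                        break;
                /* On failure, refs is reloaded with the current value. */
                if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
                        return;
        }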
3211 void free_extent_buffer_stale(struct extent_buffer *eb) in free_extent_buffer_stale() argument
3213 if (!eb) in free_extent_buffer_stale()
3216 spin_lock(&eb->refs_lock); in free_extent_buffer_stale()
3217 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); in free_extent_buffer_stale()
3219 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && in free_extent_buffer_stale()
3220 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer_stale()
3221 atomic_dec(&eb->refs); in free_extent_buffer_stale()
3222 release_extent_buffer(eb); in free_extent_buffer_stale()
3237 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb) in clear_subpage_extent_buffer_dirty() argument
3239 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_subpage_extent_buffer_dirty()
3240 struct folio *folio = eb->folios[0]; in clear_subpage_extent_buffer_dirty()
3245 last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len); in clear_subpage_extent_buffer_dirty()
3249 WARN_ON(atomic_read(&eb->refs) == 0); in clear_subpage_extent_buffer_dirty()
3253 struct extent_buffer *eb) in btrfs_clear_buffer_dirty() argument
3255 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_clear_buffer_dirty()
3258 btrfs_assert_tree_write_locked(eb); in btrfs_clear_buffer_dirty()
3260 if (trans && btrfs_header_generation(eb) != trans->transid) in btrfs_clear_buffer_dirty()
3272 if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in btrfs_clear_buffer_dirty()
3273 set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags); in btrfs_clear_buffer_dirty()
3277 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) in btrfs_clear_buffer_dirty()
3280 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len, in btrfs_clear_buffer_dirty()
3283 if (eb->fs_info->nodesize < PAGE_SIZE) in btrfs_clear_buffer_dirty()
3284 return clear_subpage_extent_buffer_dirty(eb); in btrfs_clear_buffer_dirty()
3286 num_folios = num_extent_folios(eb); in btrfs_clear_buffer_dirty()
3288 struct folio *folio = eb->folios[i]; in btrfs_clear_buffer_dirty()
3296 WARN_ON(atomic_read(&eb->refs) == 0); in btrfs_clear_buffer_dirty()
3299 void set_extent_buffer_dirty(struct extent_buffer *eb) in set_extent_buffer_dirty() argument
3304 check_buffer_tree_ref(eb); in set_extent_buffer_dirty()
3306 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in set_extent_buffer_dirty()
3308 num_folios = num_extent_folios(eb); in set_extent_buffer_dirty()
3309 WARN_ON(atomic_read(&eb->refs) == 0); in set_extent_buffer_dirty()
3310 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); in set_extent_buffer_dirty()
3311 WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)); in set_extent_buffer_dirty()
3314 bool subpage = eb->fs_info->nodesize < PAGE_SIZE; in set_extent_buffer_dirty()
3328 lock_page(folio_page(eb->folios[0], 0)); in set_extent_buffer_dirty()
3330 btrfs_folio_set_dirty(eb->fs_info, eb->folios[i], in set_extent_buffer_dirty()
3331 eb->start, eb->len); in set_extent_buffer_dirty()
3333 unlock_page(folio_page(eb->folios[0], 0)); in set_extent_buffer_dirty()
3334 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes, in set_extent_buffer_dirty()
3335 eb->len, in set_extent_buffer_dirty()
3336 eb->fs_info->dirty_metadata_batch); in set_extent_buffer_dirty()
3340 ASSERT(folio_test_dirty(eb->folios[i])); in set_extent_buffer_dirty()
3344 void clear_extent_buffer_uptodate(struct extent_buffer *eb) in clear_extent_buffer_uptodate() argument
3346 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_extent_buffer_uptodate()
3347 int num_folios = num_extent_folios(eb); in clear_extent_buffer_uptodate()
3349 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in clear_extent_buffer_uptodate()
3351 struct folio *folio = eb->folios[i]; in clear_extent_buffer_uptodate()
3364 eb->start, eb->len); in clear_extent_buffer_uptodate()
3368 void set_extent_buffer_uptodate(struct extent_buffer *eb) in set_extent_buffer_uptodate() argument
3370 struct btrfs_fs_info *fs_info = eb->fs_info; in set_extent_buffer_uptodate()
3371 int num_folios = num_extent_folios(eb); in set_extent_buffer_uptodate()
3373 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_extent_buffer_uptodate()
3375 struct folio *folio = eb->folios[i]; in set_extent_buffer_uptodate()
3385 eb->start, eb->len); in set_extent_buffer_uptodate()
3389 static void clear_extent_buffer_reading(struct extent_buffer *eb) in clear_extent_buffer_reading() argument
3391 clear_bit(EXTENT_BUFFER_READING, &eb->bflags); in clear_extent_buffer_reading()
3393 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING); in clear_extent_buffer_reading()
3398 struct extent_buffer *eb = bbio->private; in end_bbio_meta_read() local
3399 struct btrfs_fs_info *fs_info = eb->fs_info; in end_bbio_meta_read()
3409 WARN_ON(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)); in end_bbio_meta_read()
3411 eb->read_mirror = bbio->mirror_num; in end_bbio_meta_read()
3414 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0) in end_bbio_meta_read()
3418 set_extent_buffer_uptodate(eb); in end_bbio_meta_read()
3420 clear_extent_buffer_uptodate(eb); in end_bbio_meta_read()
3421 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in end_bbio_meta_read()
3426 u64 start = eb->start + bio_offset; in end_bbio_meta_read()
3437 clear_extent_buffer_reading(eb); in end_bbio_meta_read()
3438 free_extent_buffer(eb); in end_bbio_meta_read()
3443 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, in read_extent_buffer_pages() argument
3449 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
3457 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))) in read_extent_buffer_pages()
3461 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags)) in read_extent_buffer_pages()
3467 * started and finished reading the same eb. In this case, UPTODATE in read_extent_buffer_pages()
3470 if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) { in read_extent_buffer_pages()
3471 clear_extent_buffer_reading(eb); in read_extent_buffer_pages()
3475 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in read_extent_buffer_pages()
3476 eb->read_mirror = 0; in read_extent_buffer_pages()
3477 check_buffer_tree_ref(eb); in read_extent_buffer_pages()
3478 atomic_inc(&eb->refs); in read_extent_buffer_pages()
3481 REQ_OP_READ | REQ_META, eb->fs_info, in read_extent_buffer_pages()
3482 end_bbio_meta_read, eb); in read_extent_buffer_pages()
3483 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in read_extent_buffer_pages()
3484 bbio->inode = BTRFS_I(eb->fs_info->btree_inode); in read_extent_buffer_pages()
3485 bbio->file_offset = eb->start; in read_extent_buffer_pages()
3487 if (eb->fs_info->nodesize < PAGE_SIZE) { in read_extent_buffer_pages()
3488 ret = bio_add_folio(&bbio->bio, eb->folios[0], eb->len, in read_extent_buffer_pages()
3489 eb->start - folio_pos(eb->folios[0])); in read_extent_buffer_pages()
3492 int num_folios = num_extent_folios(eb); in read_extent_buffer_pages()
3495 struct folio *folio = eb->folios[i]; in read_extent_buffer_pages()
3497 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0); in read_extent_buffer_pages()
3505 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE); in read_extent_buffer_pages()
3506 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
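read_extent_buffer_pages() gates reads with EXTENT_BUFFER_READING: test_and_set_bit() elects a single submitter (line 3461), every other caller sleeps on the bit (line 3505), and the read completion clears it and wakes waiters via clear_extent_buffer_reading() (lines 3391-3393). The handshake in outline, with the bio submission elided:

        /* Only the caller that flips the bit 0 -> 1 submits the read. */
        if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
                goto done;      /* somebody else is already reading */

        /* ... build and submit the bbio covering eb->folios[] ... */

done:
        if (wait == WAIT_COMPLETE) {
                wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING,
                               TASK_UNINTERRUPTIBLE);
                if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
                        return -EIO;
        }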
3513 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, in report_eb_range() argument
3516 btrfs_warn(eb->fs_info, in report_eb_range()
3517 "access to eb bytenr %llu len %u out of range start %lu len %lu", in report_eb_range()
3518 eb->start, eb->len, start, len); in report_eb_range()
3526 * the eb.
3527 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
3531 static inline int check_eb_range(const struct extent_buffer *eb, in check_eb_range() argument
3536 /* start, start + len should not go beyond eb->len nor overflow */ in check_eb_range()
3537 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) in check_eb_range()
3538 return report_eb_range(eb, start, len); in check_eb_range()
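check_eb_range() folds wraparound and out-of-bounds into one test with check_add_overflow() (line 3537). The same idiom in standalone, compilable form, using the __builtin_add_overflow primitive that backs the kernel macro:

#include <stdbool.h>
#include <stdio.h>

/* True when [start, start + len) is NOT contained in eb_len bytes. */
static bool range_out_of_bounds(unsigned long start, unsigned long len,
                                unsigned long eb_len)
{
        unsigned long offset;

        /* True on wraparound, or when the sum lands past the end. */
        return __builtin_add_overflow(start, len, &offset) || offset > eb_len;
}

int main(void)
{
        printf("%d\n", range_out_of_bounds(4000, 200, 4096)); /* 1: past end  */
        printf("%d\n", range_out_of_bounds(-1UL, 2, 4096));   /* 1: overflow  */
        printf("%d\n", range_out_of_bounds(0, 4096, 4096));   /* 0: exact fit */
        return 0;
}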
3543 void read_extent_buffer(const struct extent_buffer *eb, void *dstv, in read_extent_buffer() argument
3546 const int unit_size = eb->folio_size; in read_extent_buffer()
3550 unsigned long i = get_eb_folio_index(eb, start); in read_extent_buffer()
3552 if (check_eb_range(eb, start, len)) { in read_extent_buffer()
3561 if (eb->addr) { in read_extent_buffer()
3562 memcpy(dstv, eb->addr + start, len); in read_extent_buffer()
3566 offset = get_eb_offset_in_folio(eb, start); in read_extent_buffer()
3572 kaddr = folio_address(eb->folios[i]); in read_extent_buffer()
3582 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, in read_extent_buffer_to_user_nofault() argument
3586 const int unit_size = eb->folio_size; in read_extent_buffer_to_user_nofault()
3590 unsigned long i = get_eb_folio_index(eb, start); in read_extent_buffer_to_user_nofault()
3593 WARN_ON(start > eb->len); in read_extent_buffer_to_user_nofault()
3594 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user_nofault()
3596 if (eb->addr) { in read_extent_buffer_to_user_nofault()
3597 if (copy_to_user_nofault(dstv, eb->addr + start, len)) in read_extent_buffer_to_user_nofault()
3602 offset = get_eb_offset_in_folio(eb, start); in read_extent_buffer_to_user_nofault()
3608 kaddr = folio_address(eb->folios[i]); in read_extent_buffer_to_user_nofault()
3623 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, in memcmp_extent_buffer() argument
3626 const int unit_size = eb->folio_size; in memcmp_extent_buffer()
3631 unsigned long i = get_eb_folio_index(eb, start); in memcmp_extent_buffer()
3634 if (check_eb_range(eb, start, len)) in memcmp_extent_buffer()
3637 if (eb->addr) in memcmp_extent_buffer()
3638 return memcmp(ptrv, eb->addr + start, len); in memcmp_extent_buffer()
3640 offset = get_eb_offset_in_folio(eb, start); in memcmp_extent_buffer()
3644 kaddr = folio_address(eb->folios[i]); in memcmp_extent_buffer()
3661 * For the subpage case, check if the range covered by the eb has EXTENT_UPTODATE set.
3663 static void assert_eb_folio_uptodate(const struct extent_buffer *eb, int i) in assert_eb_folio_uptodate() argument
3665 struct btrfs_fs_info *fs_info = eb->fs_info; in assert_eb_folio_uptodate()
3666 struct folio *folio = eb->folios[i]; in assert_eb_folio_uptodate()
3678 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) in assert_eb_folio_uptodate()
3682 folio = eb->folios[0]; in assert_eb_folio_uptodate()
3685 eb->start, eb->len))) in assert_eb_folio_uptodate()
3686 btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len); in assert_eb_folio_uptodate()
3692 static void __write_extent_buffer(const struct extent_buffer *eb, in __write_extent_buffer() argument
3696 const int unit_size = eb->folio_size; in __write_extent_buffer()
3701 unsigned long i = get_eb_folio_index(eb, start); in __write_extent_buffer()
3703 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __write_extent_buffer()
3705 if (check_eb_range(eb, start, len)) in __write_extent_buffer()
3708 if (eb->addr) { in __write_extent_buffer()
3710 memmove(eb->addr + start, srcv, len); in __write_extent_buffer()
3712 memcpy(eb->addr + start, srcv, len); in __write_extent_buffer()
3716 offset = get_eb_offset_in_folio(eb, start); in __write_extent_buffer()
3720 assert_eb_folio_uptodate(eb, i); in __write_extent_buffer()
3723 kaddr = folio_address(eb->folios[i]); in __write_extent_buffer()
3736 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, in write_extent_buffer() argument
3739 return __write_extent_buffer(eb, srcv, start, len, false); in write_extent_buffer()
3742 static void memset_extent_buffer(const struct extent_buffer *eb, int c, in memset_extent_buffer() argument
3745 const int unit_size = eb->folio_size; in memset_extent_buffer()
3748 if (eb->addr) { in memset_extent_buffer()
3749 memset(eb->addr + start, c, len); in memset_extent_buffer()
3754 unsigned long index = get_eb_folio_index(eb, cur); in memset_extent_buffer()
3755 unsigned int offset = get_eb_offset_in_folio(eb, cur); in memset_extent_buffer()
3758 assert_eb_folio_uptodate(eb, index); in memset_extent_buffer()
3759 memset(folio_address(eb->folios[index]) + offset, c, cur_len); in memset_extent_buffer()
3765 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, in memzero_extent_buffer() argument
3768 if (check_eb_range(eb, start, len)) in memzero_extent_buffer()
3770 return memset_extent_buffer(eb, 0, start, len); in memzero_extent_buffer()
3831 * @eb: the extent buffer
3841 static inline void eb_bitmap_offset(const struct extent_buffer *eb, in eb_bitmap_offset() argument
3854 offset = start + offset_in_eb_folio(eb, eb->start) + byte_offset; in eb_bitmap_offset()
3856 *folio_index = offset >> eb->folio_shift; in eb_bitmap_offset()
3857 *folio_offset = offset_in_eb_folio(eb, offset); in eb_bitmap_offset()
3863 * @eb: the extent buffer
3867 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, in extent_buffer_test_bit() argument
3874 eb_bitmap_offset(eb, start, nr, &i, &offset); in extent_buffer_test_bit()
3875 assert_eb_folio_uptodate(eb, i); in extent_buffer_test_bit()
3876 kaddr = folio_address(eb->folios[i]); in extent_buffer_test_bit()
3877 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1))); in extent_buffer_test_bit()
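The return expression on line 3877 is plain little-endian bit addressing: byte nr / 8, then shift by nr % 8 and mask. The same extraction as a runnable demo:

#include <assert.h>

#define BITS_PER_BYTE 8

static int test_bit_in_bytes(const unsigned char *kaddr, unsigned long nr)
{
        /* Byte holding bit nr, then the bit's position within that byte. */
        return 1U & (kaddr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
}

int main(void)
{
        unsigned char buf[2] = { 0x01, 0x80 };  /* bits 0 and 15 set */

        assert(test_bit_in_bytes(buf, 0) == 1);
        assert(test_bit_in_bytes(buf, 1) == 0);
        assert(test_bit_in_bytes(buf, 15) == 1);
        return 0;
}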
3880 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr) in extent_buffer_get_byte() argument
3882 unsigned long index = get_eb_folio_index(eb, bytenr); in extent_buffer_get_byte()
3884 if (check_eb_range(eb, bytenr, 1)) in extent_buffer_get_byte()
3886 return folio_address(eb->folios[index]) + get_eb_offset_in_folio(eb, bytenr); in extent_buffer_get_byte()
3890 * Set an area of a bitmap to 1.
3892 * @eb: the extent buffer
3897 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, in extent_buffer_bitmap_set() argument
3901 unsigned int last_byte = start + BIT_BYTE(pos + len - 1); in extent_buffer_bitmap_set()
3910 kaddr = extent_buffer_get_byte(eb, first_byte); in extent_buffer_bitmap_set()
3916 ASSERT(first_byte + 1 <= last_byte); in extent_buffer_bitmap_set()
3917 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_set()
3920 kaddr = extent_buffer_get_byte(eb, last_byte); in extent_buffer_bitmap_set()
3928 * @eb: the extent buffer
3933 void extent_buffer_bitmap_clear(const struct extent_buffer *eb, in extent_buffer_bitmap_clear() argument
3938 unsigned int last_byte = start + BIT_BYTE(pos + len - 1); in extent_buffer_bitmap_clear()
3947 kaddr = extent_buffer_get_byte(eb, first_byte); in extent_buffer_bitmap_clear()
3953 ASSERT(first_byte + 1 <= last_byte); in extent_buffer_bitmap_clear()
3954 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_clear()
3957 kaddr = extent_buffer_get_byte(eb, last_byte); in extent_buffer_bitmap_clear()
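extent_buffer_bitmap_set() and _clear() split a bit range into a masked first byte, whole middle bytes handled by memset_extent_buffer() (lines 3917 and 3954), and a masked last byte. A compact userspace rendering of the set case over a plain byte array; the two mask macros mirror the kernel's byte-sized BITMAP_*_BYTE_MASK helpers:

#include <assert.h>
#include <string.h>

#define BITS_PER_BYTE   8
#define BYTE_MASK       ((1 << BITS_PER_BYTE) - 1)
/* Bits at positions >= (start % 8) within one byte. */
#define FIRST_BYTE_MASK(start) \
        ((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
/* Bits below (nbits % 8); the full byte when nbits is a multiple of 8. */
#define LAST_BYTE_MASK(nbits) \
        (BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

/* Set bits [pos, pos + len) using the same three-part split. */
static void bitmap_set_bytes(unsigned char *map, unsigned long pos,
                             unsigned long len)
{
        unsigned long first = pos / BITS_PER_BYTE;
        unsigned long last = (pos + len - 1) / BITS_PER_BYTE;
        int mask = FIRST_BYTE_MASK(pos);

        if (first == last) {            /* range fits in a single byte */
                map[first] |= mask & LAST_BYTE_MASK(pos + len);
                return;
        }
        map[first] |= mask;
        if (last > first + 1)           /* whole bytes in the middle */
                memset(map + first + 1, 0xff, last - first - 1);
        map[last] |= LAST_BYTE_MASK(pos + len);
}

int main(void)
{
        unsigned char map[4] = { 0 };

        bitmap_set_bytes(map, 3, 14);   /* bits 3..16 */
        assert(map[0] == 0xf8);         /* bits 3..7  */
        assert(map[1] == 0xff);         /* bits 8..15 */
        assert(map[2] == 0x01);         /* bit 16     */
        return 0;
}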
4008 unsigned long dst_end = dst_offset + len - 1; in memmove_extent_buffer()
4009 unsigned long src_end = src_offset + len - 1; in memmove_extent_buffer()
4038 cur = min_t(unsigned long, len, src_off_in_folio + 1); in memmove_extent_buffer()
4039 cur = min(cur, dst_off_in_folio + 1); in memmove_extent_buffer()
4042 cur + 1; in memmove_extent_buffer()
4043 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1, in memmove_extent_buffer()
4046 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur, in memmove_extent_buffer()
4087 cur = gang[ret - 1]->start + gang[ret - 1]->len; in get_next_extent_buffer()
4101 struct extent_buffer *eb = NULL; in try_release_subpage_extent_buffer() local
4112 eb = get_next_extent_buffer(fs_info, folio, cur); in try_release_subpage_extent_buffer()
4113 if (!eb) { in try_release_subpage_extent_buffer()
4114 /* No more eb in the page range after or at cur */ in try_release_subpage_extent_buffer()
4118 cur = eb->start + eb->len; in try_release_subpage_extent_buffer()
4121 * The same as try_release_extent_buffer(), to ensure the eb in try_release_subpage_extent_buffer()
4124 spin_lock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4125 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_subpage_extent_buffer()
4126 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4133 * If tree ref isn't set then we know the ref on this eb is a in try_release_subpage_extent_buffer()
4134 * real ref, so just return, this eb will likely be freed soon in try_release_subpage_extent_buffer()
4137 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_subpage_extent_buffer()
4138 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4147 release_extent_buffer(eb); in try_release_subpage_extent_buffer()
4155 ret = 1; in try_release_subpage_extent_buffer()
4165 struct extent_buffer *eb; in try_release_extent_buffer() local
4177 return 1; in try_release_extent_buffer()
4180 eb = folio_get_private(folio); in try_release_extent_buffer()
4181 BUG_ON(!eb); in try_release_extent_buffer()
4185 * the eb doesn't disappear out from under us while we're looking at in try_release_extent_buffer()
4188 spin_lock(&eb->refs_lock); in try_release_extent_buffer()
4189 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_extent_buffer()
4190 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4197 * If tree ref isn't set then we know the ref on this eb is a real ref, in try_release_extent_buffer()
4200 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_extent_buffer()
4201 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4205 return release_extent_buffer(eb); in try_release_extent_buffer()
4213 * @owner_root: objectid of the root that owns this eb
4215 * @level: level for the eb
4218 * normal uptodate check of the eb, without checking the generation. If we have
4229 struct extent_buffer *eb; in btrfs_readahead_tree_block() local
4232 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level); in btrfs_readahead_tree_block()
4233 if (IS_ERR(eb)) in btrfs_readahead_tree_block()
4236 if (btrfs_buffer_uptodate(eb, gen, 1)) { in btrfs_readahead_tree_block()
4237 free_extent_buffer(eb); in btrfs_readahead_tree_block()
4241 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check); in btrfs_readahead_tree_block()
4243 free_extent_buffer_stale(eb); in btrfs_readahead_tree_block()
4245 free_extent_buffer(eb); in btrfs_readahead_tree_block()
4263 btrfs_header_level(node) - 1); in btrfs_readahead_node_child()