/linux-6.12.1/fs/isofs/
D | compress.c
     40  static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,   in zisofs_uncompress_block() argument
     49  int i, block_size = block_end - block_start;   in zisofs_uncompress_block()
     54  int needblocks = (block_size + (block_start & bufmask) + bufmask)   in zisofs_uncompress_block()
     77  blocknum = block_start >> bufshift;   in zisofs_uncompress_block()
    138  (block_start & bufmask);   in zisofs_uncompress_block()
    140  (block_start & bufmask),   in zisofs_uncompress_block()
    143  block_start = 0;   in zisofs_uncompress_block()
    210  loff_t block_start, block_end;   in zisofs_fill_pages() local
    246  block_start = le32_to_cpu(*(__le32 *)   in zisofs_fill_pages()
    262  if (block_start > block_end) {   in zisofs_fill_pages()
    [all …]
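Read together, lines 49 and 54 count how many fixed-size buffers a compressed block touches once the block's starting offset inside its first buffer is added in; the shift by bufshift that presumably completes line 54 is not shown in the excerpt. A standalone sketch of that calculation, assuming bufshift is log2 of the buffer size and bufmask is the matching buffer_size - 1 mask:

#include <stdint.h>
#include <stdio.h>

/* Number of fixed-size buffers needed to cover a byte range that starts
 * at an arbitrary offset.  bufshift is log2 of the buffer size and the
 * mask is (buffer_size - 1); the names mirror the snippet above. */
static unsigned buffers_needed(uint64_t block_start, uint64_t block_end,
                               unsigned bufshift)
{
    uint64_t bufmask = (1ULL << bufshift) - 1;
    uint64_t block_size = block_end - block_start;

    /* Add the leading slack inside the first buffer, then round up. */
    return (unsigned)((block_size + (block_start & bufmask) + bufmask) >> bufshift);
}

int main(void)
{
    /* A 6000-byte block starting 3000 bytes into a 4 KiB buffer touches
     * parts of three buffers. */
    printf("%u\n", buffers_needed(3000, 9000, 12));  /* prints 3 */
    return 0;
}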
|
/linux-6.12.1/kernel/sched/ |
D | stats.c
     50  u64 sleep_start, block_start;   in __update_stats_enqueue_sleeper() local
     53  block_start = schedstat_val(stats->block_start);   in __update_stats_enqueue_sleeper()
     73  if (block_start) {   in __update_stats_enqueue_sleeper()
     74  u64 delta = rq_clock(rq) - block_start;   in __update_stats_enqueue_sleeper()
     82  __schedstat_set(stats->block_start, 0);   in __update_stats_enqueue_sleeper()
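These hits show a stamp-and-measure pattern: block_start holds the clock value captured when the task started blocking, and the enqueue path computes the elapsed delta against the runqueue clock before clearing the stamp so it is not counted twice. A minimal userspace sketch of the same bookkeeping, with CLOCK_MONOTONIC standing in for rq_clock() and a hypothetical task_stats struct in place of the schedstats machinery:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct task_stats {
    uint64_t block_start;   /* 0 means "not currently blocked" */
    uint64_t sum_block;     /* accumulated blocked time, in ns */
};

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

static void on_block(struct task_stats *st)
{
    st->block_start = now_ns();          /* stamp the start of the block */
}

static void on_enqueue(struct task_stats *st)
{
    if (st->block_start) {               /* mirrors "if (block_start)" */
        uint64_t delta = now_ns() - st->block_start;

        st->sum_block += delta;
        st->block_start = 0;             /* mirrors __schedstat_set(..., 0) */
    }
}

int main(void)
{
    struct task_stats st = { 0, 0 };

    on_block(&st);
    on_enqueue(&st);
    printf("blocked for %llu ns\n", (unsigned long long)st.sum_block);
    return 0;
}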
|
/linux-6.12.1/drivers/gpu/drm/ |
D | drm_buddy.c
    183  u64 block_start, block_end;   in __force_merge() local
    188  block_start = drm_buddy_block_offset(block);   in __force_merge()
    189  block_end = block_start + drm_buddy_block_size(mm, block) - 1;   in __force_merge()
    191  if (!contains(start, end, block_start, block_end))   in __force_merge()
    495  u64 block_start;   in __alloc_range_bias() local
    509  block_start = drm_buddy_block_offset(block);   in __alloc_range_bias()
    510  block_end = block_start + drm_buddy_block_size(mm, block) - 1;   in __alloc_range_bias()
    512  if (!overlaps(start, end, block_start, block_end))   in __alloc_range_bias()
    518  if (block_start < start || block_end > end) {   in __alloc_range_bias()
    519  u64 adjusted_start = max(block_start, start);   in __alloc_range_bias()
    [all …]
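Both functions derive an inclusive block range (block_end = block_start + size - 1) and test it against a requested [start, end] window, clamping to the window when the block only partially fits. The contains()/overlaps() helpers below are reconstructed from their names and call sites, so treat them as an assumption rather than the driver's exact definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Inclusive-range predicates, as suggested by the calls above. */
static bool contains(uint64_t start, uint64_t end,
                     uint64_t block_start, uint64_t block_end)
{
    return block_start >= start && block_end <= end;
}

static bool overlaps(uint64_t start, uint64_t end,
                     uint64_t block_start, uint64_t block_end)
{
    return block_start <= end && block_end >= start;
}

int main(void)
{
    uint64_t start = 0x1800, end = 0x2fff;
    uint64_t block_start = 0x1000, size = 0x1000;
    uint64_t block_end = block_start + size - 1;   /* inclusive end */

    printf("contains: %d\n", contains(start, end, block_start, block_end));
    printf("overlaps: %d\n", overlaps(start, end, block_start, block_end));

    /* Partial fit: clamp to the window, as in adjusted_start = max(...). */
    if (block_start < start || block_end > end) {
        uint64_t adjusted_start = block_start > start ? block_start : start;

        printf("adjusted_start: 0x%llx\n", (unsigned long long)adjusted_start);
    }
    return 0;
}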
|
/linux-6.12.1/fs/reiserfs/ |
D | file.c
    173  unsigned block_start, block_end;   in reiserfs_commit_page() local
    195  for (bh = head = page_buffers(page), block_start = 0;   in reiserfs_commit_page()
    196  bh != head || !block_start;   in reiserfs_commit_page()
    197  block_start = block_end, bh = bh->b_this_page) {   in reiserfs_commit_page()
    201  block_end = block_start + blocksize;   in reiserfs_commit_page()
    202  if (block_end <= from || block_start >= to) {   in reiserfs_commit_page()
|
/linux-6.12.1/fs/cramfs/ |
D | inode.c
    827  u32 block_ptr, block_start, block_len;   in cramfs_read_folio() local
    843  block_start = block_ptr << CRAMFS_BLK_DIRECT_PTR_SHIFT;   in cramfs_read_folio()
    852  cramfs_read(sb, block_start, 2);   in cramfs_read_folio()
    853  block_start += 2;   in cramfs_read_folio()
    863  block_start = OFFSET(inode) + maxblock * 4;   in cramfs_read_folio()
    865  block_start = *(u32 *)   in cramfs_read_folio()
    868  if (unlikely(block_start & CRAMFS_BLK_FLAG_DIRECT_PTR)) {   in cramfs_read_folio()
    870  u32 prev_start = block_start;   in cramfs_read_folio()
    871  block_start = prev_start & ~CRAMFS_BLK_FLAGS;   in cramfs_read_folio()
    872  block_start <<= CRAMFS_BLK_DIRECT_PTR_SHIFT;   in cramfs_read_folio()
    [all …]
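These hits decode a packed block pointer: a flag bit marks a direct pointer, the flag bits are masked off, and the remaining field is shifted left to yield a byte offset. The sketch below imitates that flags-plus-shifted-offset encoding with made-up constants; the real CRAMFS_BLK_* masks and shift live in the cramfs headers and are not reproduced here:

#include <stdint.h>
#include <stdio.h>

/* Placeholder encoding, loosely modelled on the snippet: the top bits of a
 * 32-bit block pointer carry flags, the rest is an offset in small units.
 * These constants are assumptions, not the cramfs on-disk values. */
#define BLK_FLAG_DIRECT_PTR   0x40000000u
#define BLK_FLAG_UNCOMPRESSED 0x80000000u
#define BLK_FLAGS             (BLK_FLAG_DIRECT_PTR | BLK_FLAG_UNCOMPRESSED)
#define BLK_DIRECT_PTR_SHIFT  2

static uint32_t decode_block_start(uint32_t block_ptr)
{
    if (block_ptr & BLK_FLAG_DIRECT_PTR) {
        /* Strip the flag bits, then scale the remaining field to bytes. */
        uint32_t block_start = block_ptr & ~BLK_FLAGS;

        return block_start << BLK_DIRECT_PTR_SHIFT;
    }
    /* Non-direct pointers would be resolved through the block table. */
    return block_ptr;
}

int main(void)
{
    uint32_t ptr = BLK_FLAG_DIRECT_PTR | 0x100;   /* offset field = 0x100 */

    printf("byte offset: 0x%x\n", (unsigned)decode_block_start(ptr));  /* 0x400 */
    return 0;
}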
|
/linux-6.12.1/drivers/fpga/ |
D | microchip-spi.c
    110  u32 block_start, component_size;   in mpf_ops_parse_header() local
    143  block_start = get_unaligned_le32(buf + block_start_offset);   in mpf_ops_parse_header()
    147  bitstream_start = block_start;   in mpf_ops_parse_header()
    148  info->header_size = block_start;   in mpf_ops_parse_header()
    149  if (block_start > count)   in mpf_ops_parse_header()
    154  components_size_start = block_start;   in mpf_ops_parse_header()
|
/linux-6.12.1/fs/ocfs2/ |
D | aops.c
    418  unsigned block_start, block_end;   in walk_page_buffers() local
    423  for ( bh = head, block_start = 0;   in walk_page_buffers()
    424  ret == 0 && (bh != head || !block_start);   in walk_page_buffers()
    425  block_start = block_end, bh = next)   in walk_page_buffers()
    428  block_end = block_start + blocksize;   in walk_page_buffers()
    429  if (block_end <= from || block_start >= to) {   in walk_page_buffers()
    568  unsigned int block_start)   in ocfs2_should_read_blk() argument
    570  u64 offset = folio_pos(folio) + block_start;   in ocfs2_should_read_blk()
    595  unsigned int block_end, block_start;   in ocfs2_map_page_blocks() local
    602  for (bh = head, block_start = 0; bh != head || !block_start;   in ocfs2_map_page_blocks()
    [all …]
|
D | file.c
    762  unsigned zero_from, zero_to, block_start, block_end;   in ocfs2_write_zero_page() local
    798  for (block_start = zero_from; block_start < zero_to;   in ocfs2_write_zero_page()
    799  block_start = block_end) {   in ocfs2_write_zero_page()
    800  block_end = block_start + i_blocksize(inode);   in ocfs2_write_zero_page()
    807  ret = __block_write_begin(folio, block_start + 1, 0,   in ocfs2_write_zero_page()
    816  block_commit_write(&folio->page, block_start + 1, block_start + 1);   in ocfs2_write_zero_page()
|
/linux-6.12.1/fs/ |
D | buffer.c
   1982  size_t block_start, block_end;   in folio_zero_new_buffers() local
   1991  block_start = 0;   in folio_zero_new_buffers()
   1993  block_end = block_start + bh->b_size;   in folio_zero_new_buffers()
   1996  if (block_end > from && block_start < to) {   in folio_zero_new_buffers()
   2000  start = max(from, block_start);   in folio_zero_new_buffers()
   2012  block_start = block_end;   in folio_zero_new_buffers()
   2092  size_t block_start, block_end;   in __block_write_begin_int() local
   2106  for (bh = head, block_start = 0; bh != head || !block_start;   in __block_write_begin_int()
   2107  block++, block_start=block_end, bh = bh->b_this_page) {   in __block_write_begin_int()
   2108  block_end = block_start + blocksize;   in __block_write_begin_int()
   [all …]
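folio_zero_new_buffers() and __block_write_begin_int() use the same buffer-head walk that also shows up in the reiserfs, ocfs2, nilfs2 and ext4 entries in this listing: iterate the circular list of buffers attached to a page or folio, track each buffer's byte range as [block_start, block_end), and skip buffers that do not intersect the [from, to) range being written. A self-contained model of that loop, with a plain struct standing in for struct buffer_head:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for a page's buffer ring: the buffers form a circular
 * singly linked list through b_this_page, as buffer_heads do. */
struct buf {
    size_t b_size;
    struct buf *b_this_page;
};

/* Visit only the buffers that intersect the byte range [from, to). */
static void walk_buffers(struct buf *head, size_t from, size_t to)
{
    struct buf *bh;
    size_t block_start, block_end;

    /* "bh != head || !block_start" lets the body run once for the head
     * buffer (block_start == 0) and stops after one full lap. */
    for (bh = head, block_start = 0;
         bh != head || !block_start;
         block_start = block_end, bh = bh->b_this_page) {
        block_end = block_start + bh->b_size;
        if (block_end <= from || block_start >= to)
            continue;       /* buffer entirely outside the write range */
        printf("buffer [%zu, %zu) intersects [%zu, %zu)\n",
               block_start, block_end, from, to);
    }
}

int main(void)
{
    /* Four 1 KiB buffers backing a 4 KiB page. */
    struct buf b[4];

    for (int i = 0; i < 4; i++) {
        b[i].b_size = 1024;
        b[i].b_this_page = &b[(i + 1) % 4];
    }
    walk_buffers(&b[0], 1500, 2600);    /* touches buffers 1 and 2 */
    return 0;
}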
|
/linux-6.12.1/tools/net/ynl/ |
D | ynl-gen-c.py
    197  ri.cw.block_start(line=f"{kw} (type == {self.enum_name})")
   1264  def block_start(self, line=''):   member in CodeWriter
   1353  self.block_start()
   1515  … cw.block_start(line=f'const struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =')
   1523  cw.block_start(line=f'const struct ynl_policy_nest {struct.render_name}_nest =')
   1535  cw.block_start()
   1551  cw.block_start(line=f"static const char * const {map_name}[] =")
   1577  cw.block_start(line=f"static const char * const {map_name}[] =")
   1597  ri.cw.block_start()
   1645  ri.cw.block_start()
   [all …]
|
/linux-6.12.1/fs/nilfs2/ |
D | page.c
    428  unsigned int block_start, block_end;   in nilfs_page_count_clean_buffers() local
    432  for (bh = head = page_buffers(page), block_start = 0;   in nilfs_page_count_clean_buffers()
    433  bh != head || !block_start;   in nilfs_page_count_clean_buffers()
    434  block_start = block_end, bh = bh->b_this_page) {   in nilfs_page_count_clean_buffers()
    435  block_end = block_start + bh->b_size;   in nilfs_page_count_clean_buffers()
    436  if (block_end > from && block_start < to && !buffer_dirty(bh))   in nilfs_page_count_clean_buffers()
|
/linux-6.12.1/lib/zlib_deflate/ |
D | deflate.c
    546  s->block_start = 0L;   in lm_init()
    775  s->block_start -= (long) wsize;   in fill_window()
    839  zlib_tr_flush_block(s, (s->block_start >= 0L ? \
    840  (char *)&s->window[(unsigned)s->block_start] : \
    842  (ulg)((long)s->strstart - s->block_start), \
    844  s->block_start = s->strstart; \
    885  s->block_start >= (long)s->w_size, "slide too late");   in deflate_stored()
    892  Assert(s->block_start >= 0L, "block gone");   in deflate_stored()
    898  max_start = s->block_start + max_block_size;   in deflate_stored()
    908  if (s->strstart - (uInt)s->block_start >= MAX_DIST(s)) {   in deflate_stored()
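In the deflate state, block_start marks where the current uncompressed block begins inside the sliding window. Because it is window-relative, fill_window() subtracts wsize from it when the window slides, which is also why the field is a signed long and may legitimately go negative. A toy illustration of that bookkeeping (not the zlib code) showing that the pending block length, strstart - block_start, is preserved across a slide:

#include <stdio.h>

/* Toy deflate-like state: strstart is the current position in the window
 * and block_start is where the pending stored block began.  Both are
 * window-relative, so sliding the window shifts them together. */
struct toy_state {
    long block_start;        /* signed: may go negative after a slide */
    unsigned long strstart;
    unsigned long w_size;
};

static void slide_window(struct toy_state *s)
{
    /* Mirrors "s->block_start -= (long) wsize" in fill_window(). */
    s->strstart -= s->w_size;
    s->block_start -= (long)s->w_size;
}

static unsigned long pending_block_len(const struct toy_state *s)
{
    /* Mirrors "(ulg)((long)s->strstart - s->block_start)". */
    return (unsigned long)((long)s->strstart - s->block_start);
}

int main(void)
{
    struct toy_state s = { 1000L, 40000UL, 32768UL };

    printf("pending before slide: %lu\n", pending_block_len(&s));
    slide_window(&s);
    printf("block_start after slide: %ld, pending: %lu\n",
           s.block_start, pending_block_len(&s));
    return 0;
}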
|
D | defutil.h
    126  long block_start;   member
|
/linux-6.12.1/fs/ext4/ |
D | move_extent.c
    173  unsigned int blocksize, block_start, block_end;   in mext_page_mkuptodate() local
    192  block_start = block_end;   in mext_page_mkuptodate()
    193  block_end = block_start + blocksize;   in mext_page_mkuptodate()
    194  if (block_end <= from || block_start >= to) {   in mext_page_mkuptodate()
    206  folio_zero_range(folio, block_start, blocksize);   in mext_page_mkuptodate()
|
D | page-io.c
    433  unsigned block_start;   in ext4_bio_write_folio() local
    463  block_start = bh_offset(bh);   in ext4_bio_write_folio()
    464  if (block_start >= len) {   in ext4_bio_write_folio()
|
D | inode.c
    971  unsigned block_start, block_end;   in ext4_walk_page_buffers() local
    976  for (bh = head, block_start = 0;   in ext4_walk_page_buffers()
    977  ret == 0 && (bh != head || !block_start);   in ext4_walk_page_buffers()
    978  block_start = block_end, bh = next) {   in ext4_walk_page_buffers()
    980  block_end = block_start + blocksize;   in ext4_walk_page_buffers()
    981  if (block_end <= from || block_start >= to) {   in ext4_walk_page_buffers()
   1022  unsigned block_start, block_end;   in ext4_block_write_begin() local
   1043  for (bh = head, block_start = 0; bh != head || !block_start;   in ext4_block_write_begin()
   1044  block++, block_start = block_end, bh = bh->b_this_page) {   in ext4_block_write_begin()
   1045  block_end = block_start + blocksize;   in ext4_block_write_begin()
   [all …]
|
/linux-6.12.1/arch/arm/mm/ |
D | mmu.c
   1198  phys_addr_t block_start, block_end, memblock_limit = 0;   in adjust_lowmem_bounds() local
   1216  for_each_mem_range(i, &block_start, &block_end) {   in adjust_lowmem_bounds()
   1217  if (!IS_ALIGNED(block_start, PMD_SIZE)) {   in adjust_lowmem_bounds()
   1220  len = round_up(block_start, PMD_SIZE) - block_start;   in adjust_lowmem_bounds()
   1221  memblock_mark_nomap(block_start, len);   in adjust_lowmem_bounds()
   1226  for_each_mem_range(i, &block_start, &block_end) {   in adjust_lowmem_bounds()
   1227  if (block_start < vmalloc_limit) {   in adjust_lowmem_bounds()
   1253  if (!IS_ALIGNED(block_start, PMD_SIZE))   in adjust_lowmem_bounds()
   1254  memblock_limit = block_start;   in adjust_lowmem_bounds()
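adjust_lowmem_bounds() walks the memblock ranges and, when a range does not start on a PMD boundary, marks the unaligned head as nomap so the section-mapped lowmem stays aligned; the length of that head is simply round_up(block_start, PMD_SIZE) - block_start. A standalone sketch of the arithmetic, assuming a 2 MiB PMD_SIZE (the real value depends on the page-table configuration):

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE (2ULL * 1024 * 1024)   /* assumed 2 MiB section size */

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)
#define ROUND_UP(x, a)    ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
    uint64_t block_start = 0x80100000ULL;  /* 1 MiB into a 2 MiB section */

    if (!IS_ALIGNED(block_start, PMD_SIZE)) {
        /* Length of the unaligned head that would be marked nomap. */
        uint64_t len = ROUND_UP(block_start, PMD_SIZE) - block_start;

        printf("nomap head: start=0x%llx len=0x%llx\n",
               (unsigned long long)block_start, (unsigned long long)len);
    }
    return 0;
}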
|
/linux-6.12.1/drivers/mtd/parsers/ |
D | afs.c
    227  u32 block_start;   in afs_parse_v2_partition() local
    281  block_start = imginfo[20];   in afs_parse_v2_partition()
    287  block_start, block_end);   in afs_parse_v2_partition()
|
/linux-6.12.1/fs/bcachefs/ |
D | fs-io.c
    503  u64 block_start = round_up(offset, block_bytes(c));   in bchfs_fpunch() local
    516  if (block_start < block_end) {   in bchfs_fpunch()
    520  block_start >> 9, block_end >> 9,   in bchfs_fpunch()
    709  u64 block_start = round_down(offset, block_bytes(c));   in bchfs_fallocate() local
    729  block_start = round_up(offset, block_bytes(c));   in bchfs_fallocate()
    733  ret = __bchfs_fallocate(inode, mode, block_start >> 9, block_end >> 9);   in bchfs_fallocate()
|
/linux-6.12.1/fs/iomap/ |
D | buffered-io.c
    672  static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,   in iomap_read_folio_sync() argument
    679  bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);   in iomap_read_folio_sync()
    690  loff_t block_start = round_down(pos, block_size);   in __iomap_write_begin() local
    715  iomap_adjust_read_range(iter->inode, folio, &block_start,   in __iomap_write_begin()
    716  block_end - block_start, &poff, &plen);   in __iomap_write_begin()
    725  if (iomap_block_needs_zeroing(iter, block_start)) {   in __iomap_write_begin()
    735  status = iomap_read_folio_sync(block_start, folio,   in __iomap_write_begin()
    741  } while ((block_start += plen) < block_end);   in __iomap_write_begin()
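__iomap_write_begin() rounds the write position down to a block boundary and then walks the folio in sub-ranges, advancing block_start by the length handled on each pass until it reaches block_end. The loop below models only that round-down-and-iterate skeleton; the adjust/zero/read decisions are elided, and rounding the end of the write up to a block boundary is an assumption based on the visible loop condition:

#include <stdint.h>
#include <stdio.h>

static uint64_t round_down_u64(uint64_t x, uint64_t a) { return x - (x % a); }
static uint64_t round_up_u64(uint64_t x, uint64_t a)
{
    return round_down_u64(x + a - 1, a);
}

int main(void)
{
    uint64_t pos = 5000, len = 7000, block_size = 4096;

    /* Mirrors: block_start = round_down(pos, block_size), with the end of
     * the affected range rounded up to the next block boundary. */
    uint64_t block_start = round_down_u64(pos, block_size);
    uint64_t block_end = round_up_u64(pos + len, block_size);
    uint64_t plen = block_size;   /* pretend each pass handles one block */

    do {
        printf("handle sub-range [%llu, %llu)\n",
               (unsigned long long)block_start,
               (unsigned long long)(block_start + plen));
    } while ((block_start += plen) < block_end);  /* as in the snippet */

    return 0;
}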
|
/linux-6.12.1/fs/jbd2/ |
D | journal.c
   1957  unsigned long long phys_block, block_start, block_stop; /* physical */   in __jbd2_journal_erase() local
   1975  block_start = ~0ULL;   in __jbd2_journal_erase()
   1983  if (block_start == ~0ULL) {   in __jbd2_journal_erase()
   1984  block_start = phys_block;   in __jbd2_journal_erase()
   1985  block_stop = block_start - 1;   in __jbd2_journal_erase()
   2010  byte_start = block_start * journal->j_blocksize;   in __jbd2_journal_erase()
   2012  byte_count = (block_stop - block_start + 1) *   in __jbd2_journal_erase()
   2032  err, block_start, block_stop);   in __jbd2_journal_erase()
   2037  block_start = ~0ULL;   in __jbd2_journal_erase()
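__jbd2_journal_erase() coalesces consecutive physical journal blocks into runs, using block_start == ~0ULL as the "no run open" sentinel, and converts each finished run into a byte range (start times the block size, count times the block size) for the actual erase. A compact sketch of that run-coalescing over an array of block numbers, with the erase itself replaced by a print:

#include <stdint.h>
#include <stdio.h>

/* Coalesce consecutive block numbers into [block_start, block_stop] runs
 * and report each run as a byte range, mimicking the erase loop above. */
static void erase_runs(const uint64_t *phys, int n, uint64_t blocksize)
{
    uint64_t block_start = ~0ULL, block_stop = 0;   /* ~0ULL = no open run */

    for (int i = 0; i < n; i++) {
        uint64_t phys_block = phys[i];

        if (block_start == ~0ULL) {
            block_start = phys_block;
            block_stop = block_start - 1;   /* so the contiguity test passes */
        }
        if (phys_block == block_stop + 1) {
            block_stop = phys_block;        /* still contiguous, extend run */
            continue;
        }

        /* Run broken: emit it as a byte range, then open a new run. */
        printf("erase bytes [%llu, +%llu)\n",
               (unsigned long long)(block_start * blocksize),
               (unsigned long long)((block_stop - block_start + 1) * blocksize));
        block_start = phys_block;
        block_stop = phys_block;
    }

    if (block_start != ~0ULL)               /* flush the final run */
        printf("erase bytes [%llu, +%llu)\n",
               (unsigned long long)(block_start * blocksize),
               (unsigned long long)((block_stop - block_start + 1) * blocksize));
}

int main(void)
{
    const uint64_t phys[] = { 100, 101, 102, 200, 201 };

    erase_runs(phys, 5, 4096);
    return 0;
}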
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlxsw/ |
D | core_acl_flex_keys.c
    554  int block_start, int block_end)   in mlxsw_afk_clear() argument
    558  for (i = block_start; i <= block_end; i++)   in mlxsw_afk_clear()
|
D | core_acl_flex_keys.h
    233  int block_start, int block_end);
|
/linux-6.12.1/fs/btrfs/ |
D | direct-io.c
    224  u64 block_start;   in btrfs_get_blocks_direct_write() local
    249  block_start = extent_map_block_start(em) + (start - em->start);   in btrfs_get_blocks_direct_write()
    253  bg = btrfs_inc_nocow_writers(fs_info, block_start);   in btrfs_get_blocks_direct_write()
|
D | inode.c
   4739  u64 block_start;   in btrfs_truncate_block() local
   4746  block_start = round_down(from, blocksize);   in btrfs_truncate_block()
   4747  block_end = block_start + blocksize - 1;   in btrfs_truncate_block()
   4749  ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,   in btrfs_truncate_block()
   4752  if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {   in btrfs_truncate_block()
   4763  block_start, blocksize);   in btrfs_truncate_block()
   4770  btrfs_delalloc_release_space(inode, data_reserved, block_start,   in btrfs_truncate_block()
   4803  lock_extent(io_tree, block_start, block_end, &cached_state);   in btrfs_truncate_block()
   4805  ordered = btrfs_lookup_ordered_extent(inode, block_start);   in btrfs_truncate_block()
   4807  unlock_extent(io_tree, block_start, block_end, &cached_state);   in btrfs_truncate_block()
   [all …]
|