/linux-6.12.1/drivers/md/bcache/

D | request.c
    114   bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);  in bch_data_invalidate()
    116   while (bio_sectors(bio)) {  in bch_data_invalidate()
    117   unsigned int sectors = min(bio_sectors(bio),  in bch_data_invalidate()
    195   if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)  in CLOSURE_CALLBACK()
    222   if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),  in CLOSURE_CALLBACK()
    412   bio_sectors(bio) & (c->cache->sb.block_size - 1)) {  in check_should_bypass()
    468   bch_rescale_priorities(c, bio_sectors(bio));  in check_should_bypass()
    471   bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));  in check_should_bypass()
    539   unsigned int bio_sectors = bio_sectors(bio);  in cache_lookup_fn() (local)
    550   BUG_ON(bio_sectors <= sectors);  in cache_lookup_fn()
    [all …]
D | writeback.h
    117   bio_sectors(bio)))  in should_writeback()
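The request.c hits at lines 116 and 117 show a common pattern: consuming a bio in bounded chunks by advancing its iterator until `bio_sectors()` reaches zero. Below is a minimal userspace sketch of that loop, assuming mock stand-ins for the kernel's `bio`/`bvec_iter` types; `MAX_CHUNK_SECTORS` and `min_u` are illustrative names, not kernel APIs. The real `bch_data_invalidate()` advances `bi_iter` the same way.

```c
#include <stdio.h>

/* Mock stand-ins for the kernel types; not the real definitions. */
typedef unsigned long long sector_t;

struct bvec_iter {
	sector_t	bi_sector;	/* device address in 512-byte sectors */
	unsigned int	bi_size;	/* residual I/O count in bytes */
};

struct bio {
	struct bvec_iter bi_iter;
};

/* bio_sectors(): remaining size of the bio in 512-byte sectors. */
#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

#define MAX_CHUNK_SECTORS 1024u	/* arbitrary per-iteration cap for the sketch */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* A 3 MiB bio starting at sector 2048. */
	struct bio b = { .bi_iter = { .bi_sector = 2048, .bi_size = 3 << 20 } };

	/* Consume the bio in bounded chunks, as bch_data_invalidate() does. */
	while (bio_sectors(&b)) {
		unsigned int sectors = min_u(bio_sectors(&b), MAX_CHUNK_SECTORS);

		printf("invalidate %u sectors at %llu\n", sectors,
		       b.bi_iter.bi_sector);

		b.bi_iter.bi_sector += sectors;
		b.bi_iter.bi_size   -= sectors << 9;
	}
	return 0;
}
```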
/linux-6.12.1/block/

D | blk-core.c
    505   if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))  in bio_check_ro()
    538   unsigned int nr_sectors = bio_sectors(bio);  in bio_check_eod()
    561   if (bio_sectors(bio)) {  in blk_partition_remap()
    577   int nr_sectors = bio_sectors(bio);  in blk_check_zone_append()
    787   if (!bio_sectors(bio)) {  in submit_bio_noacct()
    890   count_vm_events(PGPGIN, bio_sectors(bio));  in submit_bio()
    892   count_vm_events(PGPGOUT, bio_sectors(bio));  in submit_bio()
    1053  bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time);  in bio_end_io_acct_remapped()
D | blk-merge.c
    150   if (bio_sectors(bio) <= max_discard_sectors)  in bio_split_discard()
    175   if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)  in bio_split_write_zeroes()
    661   if (blk_rq_sectors(req) + bio_sectors(bio) >  in ll_back_merge_fn()
    680   if (blk_rq_sectors(req) + bio_sectors(bio) >  in ll_front_merge_fn()
    696   if (blk_rq_sectors(req) + bio_sectors(next->bio) >  in req_attempt_discard_merge()
    1001  else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)  in blk_try_merge()
    1088  if (blk_rq_sectors(req) + bio_sectors(bio) >  in bio_attempt_discard_merge()
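Two distinct roles for `bio_sectors()` show up in the blk-merge.c hits: the `blk_rq_sectors(req) + bio_sectors(bio) > …` tests cap a prospective merge against the queue's size limit, while line 1001's `blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector` is the front-merge test, since rearranged it says the bio's start plus its length lands exactly on the request's first sector.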
D | blk-zoned.c
    603   wp_offset += bio_sectors(bio);  in disk_zone_wplug_abort_unaligned()
    821   zwplug->wp_offset += bio_sectors(bio);  in blk_zone_write_plug_bio_merged()
    882   zwplug->wp_offset += bio_sectors(bio);  in blk_zone_write_plug_init_request()
    884   req_back_sector += bio_sectors(bio);  in blk_zone_write_plug_init_request()
    933   zwplug->wp_offset += bio_sectors(bio);  in blk_zone_wplug_prepare_bio()
    1050  if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))  in blk_zone_plug_bio()
D | bio-integrity.c
    394   if (!bio_sectors(bio))  in bio_integrity_prep()
    423   len = bio_integrity_bytes(bi, bio_sectors(bio));  in bio_integrity_prep()
    539   bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));  in bio_integrity_trim()
D | bounce.c
    229   if (sectors < bio_sectors(bio_orig)) {  in __blk_queue_bounce()
D | blk-cgroup.h
    371   bio_issue_init(&bio->bi_issue, bio_sectors(bio));  in blkcg_bio_issue_init()
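A recurring idiom in the block/ hits (bio_check_ro() and blk_zone_plug_bio() above) is `op_is_flush(bio->bi_opf) && !bio_sectors(bio)`: a flush that carries no data has a zero-length iterator, so `bio_sectors()` returning 0 is how the block layer recognizes an empty flush and skips checks that only apply to bios with a payload.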
/linux-6.12.1/fs/bcachefs/

D | io_write.c
    441   bio_sectors(&n->bio));  in bch2_submit_wbio_replicas()
    828   BUG_ON(bio_sectors(bio) != op->crc.compressed_size);  in bch2_write_prep_encoded_data()
    905   BUG_ON(!bio_sectors(src));  in bch2_write_extent()
    1014  bio_sectors(src) - (src_len >> 9),  in bch2_write_extent()
    1029  bio_sectors(src) - (src_len >> 9),  in bch2_write_extent()
    1294  bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);  in bch2_nocow_write()
    1312  if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {  in bch2_nocow_write()
    1321  op->pos.offset += bio_sectors(bio);  in bch2_nocow_write()
    1322  op->written += bio_sectors(bio);  in bch2_nocow_write()
    1539  sectors = bio_sectors(bio);  in bch2_write_data_inline()
    [all …]
D | checksum.c
    418   { NULL, bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },  in bch2_rechecksum_bio()
    424   BUG_ON(len_a + len_b > bio_sectors(bio));  in bch2_rechecksum_bio()
    425   BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));  in bch2_rechecksum_bio()
D | fs-io-direct.c
    286   dio->op.pos.offset, bio_sectors(bio),  in bch2_dio_write_check_allocated()
    513   bio_sectors(bio), true);  in bch2_dio_write_loop()
    517   ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),  in bch2_dio_write_loop()
D | io_read.c
    1011  EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);  in __bch2_read_extent()
    1048  this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));  in __bch2_read_extent()
    1049  bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);  in __bch2_read_extent()
    1075  bio_sectors(&rbio->bio));  in __bch2_read_extent()
D | data_update.c
    457   while (bio_sectors(bio)) {  in bch2_update_unwritten_extent()
    458   unsigned sectors = bio_sectors(bio);  in bch2_update_unwritten_extent()
/linux-6.12.1/fs/btrfs/

D | raid56.h
    119   struct sector_ptr *bio_sectors;  (member)
D | raid56.c
    159   kfree(rbio->bio_sectors);  in free_raid_bio_pointers()
    256   if (!rbio->bio_sectors[i].page) {  in cache_rbio_pages()
    270   rbio->bio_sectors[i].page,  in cache_rbio_pages()
    271   rbio->bio_sectors[i].pgoff,  in cache_rbio_pages()
    964   sector = &rbio->bio_sectors[index];  in sector_in_rbio()
    1012  rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),  in alloc_rbio()
    1019  if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||  in alloc_rbio()
    1209  struct sector_ptr *sector = &rbio->bio_sectors[index];  in index_one_bio()
    1532  sector = &rbio->bio_sectors[i];  in get_bio_sector_nr()
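Note that the btrfs/raid56 hits are unrelated to the block-layer macro: `bio_sectors` there is a `struct sector_ptr *` member of the raid bio structure (a per-sector table backed by pages from the bios attached to the rbio). It does not clash with the `bio_sectors(bio)` macro from bio.h because a function-like macro only expands when followed by an argument list, so expressions like `rbio->bio_sectors[i]` are left untouched by the preprocessor.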
/linux-6.12.1/include/trace/events/

D | block.h
    324   __entry->nr_sector = bio_sectors(bio);
    352   __entry->nr_sector = bio_sectors(bio);
    548   __entry->nr_sector = bio_sectors(bio);
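In these tracepoint definitions, `bio_sectors(bio)` is captured as the `nr_sector` field of the block events, which is the per-bio sector count that shows up in ftrace/perf output for block-layer tracing.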
/linux-6.12.1/drivers/md/

D | dm.c
    506   return bio_sectors(bio);  in dm_io_sectors()
    1341  unsigned int bio_sectors = bio_sectors(bio);  in dm_accept_partial_bio() (local)
    1346  BUG_ON(bio_sectors > *tio->len_ptr);  in dm_accept_partial_bio()
    1347  BUG_ON(n_sectors > bio_sectors);  in dm_accept_partial_bio()
    1349  *tio->len_ptr -= bio_sectors - n_sectors;  in dm_accept_partial_bio()
    1358  io->sector_offset = bio_sectors(io->orig_bio);  in dm_accept_partial_bio()
    1476  io->sector_offset = bio_sectors(ci->bio);  in setup_split_accounting()
    1778  ci->sector_count = bio_sectors(bio);  in init_clone_info()
D | dm-log-writes.c
    670   if (!bio_sectors(bio) && !flush_bio)  in log_writes_map()
    704   block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio));  in log_writes_map()
    716   if (flush_bio && !bio_sectors(bio)) {  in log_writes_map()
D | dm-zoned.h
    46    #define dmz_bio_blocks(bio) dmz_sect2blk(bio_sectors(bio))
D | dm-zone.c
    140   return !op_is_flush(bio->bi_opf) && bio_sectors(bio);  in dm_is_zone_write()
D | dm-integrity.c
    1612  if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {  in dec_in_flight()
    1727  alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);  in integrity_recheck()
    1914  if (unlikely(logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {  in dm_integrity_check_limits()
    1916  logical_sector, bio_sectors(bio),  in dm_integrity_check_limits()
    1920  if (unlikely((logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {  in dm_integrity_check_limits()
    1923  logical_sector, bio_sectors(bio));  in dm_integrity_check_limits()
    1966  sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;  in dm_integrity_map()
    1996  unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;  in dm_integrity_map()
    2208  dio->range.n_sectors = bio_sectors(bio);  in dm_integrity_map_continue()
    2430  dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);  in dm_integrity_map_inline()
    [all …]
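The dm-integrity.c check at line 1920 uses a compact idiom: OR-ing the start sector with the length and masking with `sectors_per_block - 1` verifies both values are block-aligned in one test, because a value is a multiple of a power of two exactly when its low bits are zero, and OR accumulates the low bits of both operands. A standalone sketch of the same trick follows; `aligned_io` is a made-up helper and the 8-sector block size is hypothetical.

```c
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

/*
 * Both the start sector and the sector count must be multiples of the
 * block size (a power of two). OR-ing them first lets one mask test
 * cover both, as in dm_integrity_check_limits().
 */
static bool aligned_io(sector_t start, unsigned int nr_sectors,
		       unsigned int sectors_per_block)
{
	return ((start | nr_sectors) & (sectors_per_block - 1)) == 0;
}

int main(void)
{
	printf("%d\n", aligned_io(64, 16, 8));	/* 1: both multiples of 8 */
	printf("%d\n", aligned_io(64, 12, 8));	/* 0: length misaligned   */
	printf("%d\n", aligned_io(60, 16, 8));	/* 0: start misaligned    */
	return 0;
}
```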
D | dm-crypt.c
    1181  if (!bio_sectors(bio) || !io->cc->tuple_size)  in dm_crypt_integrity_io_alloc()
    1188  tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);  in dm_crypt_integrity_io_alloc()
    2150  sector += bio_sectors(clone);  in kcryptd_crypt_write_convert()
    3518  if (bio_sectors(bio))  in crypt_map()
    3528  if (unlikely(bio_sectors(bio) > max_sectors))  in crypt_map()
    3545  unsigned int tag_len = cc->tuple_size * (bio_sectors(bio) >> cc->sector_shift);  in crypt_map()
    3553  if (bio_sectors(bio) > cc->tag_pool_max_sectors)  in crypt_map()
D | raid10.c
    1206  if (max_sectors < bio_sectors(bio)) {  in raid10_read_request()
    1482  if (r10_bio->sectors < bio_sectors(bio)) {  in raid10_write_request()
    1637  if (bio_sectors(bio) < stripe_size*2)  in raid10_handle_discard()
    1655  split_size = bio_sectors(bio) - remainder;  in raid10_handle_discard()
    1835  int sectors = bio_sectors(bio);  in raid10_make_request()
    2397  md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));  in sync_request_write()
    2420  bio_sectors(tbio));  in sync_request_write()
    2554  md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));  in recovery_request_write()
    2560  bio_sectors(wbio2));  in recovery_request_write()
D | dm-ebs-target.c
    50    sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);  in __nr_blocks()
/linux-6.12.1/include/linux/

D | bio.h
    42    #define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)  (macro)
    343   if (sectors >= bio_sectors(bio))  in bio_next_split()
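For reference, the bio.h definition above expands via `bvec_iter_sectors()` to `bi_iter.bi_size >> 9`, i.e. the bio's residual byte count expressed in 512-byte sectors. A standalone illustration of the arithmetic, using mock structs rather than the kernel's real definitions:

```c
#include <assert.h>

/* Mock types; only the field the macro touches is modeled. */
struct bvec_iter {
	unsigned int bi_size;	/* residual I/O byte count */
};

struct bio {
	struct bvec_iter bi_iter;
};

/* As in include/linux/bvec.h: sector count is the byte count >> 9. */
#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)

int main(void)
{
	struct bio b = { .bi_iter = { .bi_size = 4096 } };

	assert(bio_sectors(&b) == 8);	/* one 4 KiB page = eight 512 B sectors */

	b.bi_iter.bi_size = 0;		/* e.g. an empty flush bio */
	assert(bio_sectors(&b) == 0);
	return 0;
}
```

Because the macro reads `bi_iter`, it always reflects the *remaining* I/O, which is why the chunked-consumption loops listed above can use it as their termination condition.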