/linux-6.12.1/drivers/md/
dm-ebs-target.c
    160  if (blocks && __block_mod(bio_end_sector(bio), ec->u_bs))   in __ebs_discard_bio()
    199  block2 = __sector_to_block(ec, bio_end_sector(bio));   in __ebs_process_bios()
    202  if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)   in __ebs_process_bios()
    376  __block_mod(bio_end_sector(bio), ec->u_bs) ||   in ebs_map()
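
The dm-ebs sites above test whether the end of a bio lands on an emulated-block-size boundary: a nonzero __block_mod(bio_end_sector(bio), ec->u_bs) indicates the bio's tail does not end on such a boundary and needs extra handling. A minimal userspace sketch of that kind of end-alignment check; block_mod and the block size below are illustrative stand-ins, not the target's actual helpers:

    #include <assert.h>
    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Illustrative stand-in for dm-ebs's __block_mod(): the offset of a
     * sector within a block of u_bs sectors.  Plain modulo here; a
     * power-of-two block size would allow a mask instead. */
    static sector_t block_mod(sector_t sector, unsigned int u_bs)
    {
        return sector % u_bs;
    }

    int main(void)
    {
        unsigned int u_bs = 8;        /* emulated block size in sectors (assumed) */
        sector_t end = 2048 + 20;     /* bio_end_sector(): first sector past the bio */

        /* A nonzero remainder means the bio ends partway through a block. */
        if (block_mod(end, u_bs))
            printf("tail misaligned by %llu sectors\n", block_mod(end, u_bs));

        assert(block_mod(2048, u_bs) == 0);   /* a block-aligned end */
        return 0;
    }
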
raid0.c
    465  if (bio_end_sector(bio) > zone->zone_end) {   in raid0_handle_discard()
    474  end = bio_end_sector(bio);   in raid0_handle_discard()
raid5.h
    720  if (bio_end_sector(bio) < sector + RAID5_STRIPE_SECTORS(conf))   in r5_next_bio()
dm-stripe.c
    257  stripe_map_range_sector(sc, bio_end_sector(bio),   in stripe_map_range()
raid1.c
    319  (unsigned long long) bio_end_sector(bio) - 1);   in raid_end_bio_io()
    543  (unsigned long long) bio_end_sector(mbio) - 1);   in raid1_end_write_request()
    1430  bio->bi_iter.bi_sector, bio_end_sector(bio))) {   in raid1_write_request()
    1442  bio_end_sector(bio)))   in raid1_write_request()
dm-writecache.c
    1308  bio_end_sector(bio));   in writecache_flush_thread()
    1533  writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));   in writecache_map_discard()
    1805  if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))   in wc_add_block()
raid10.c
    1357  bio_end_sector(bio)))) {   in raid10_write_request()
    1368  bio->bi_iter.bi_sector, bio_end_sector(bio)))   in raid10_write_request()
    1627  bio_end = bio_end_sector(bio);   in raid10_handle_discard()
    1666  bio_end = bio_end_sector(bio);   in raid10_handle_discard()
raid5.c
    3463  if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)   in stripe_bio_overlaps()
    3468  if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))   in stripe_bio_overlaps()
    3539  if (bio_end_sector(bi) >= sector)   in __add_stripe_bio()
    3540  sector = bio_end_sector(bi);   in __add_stripe_bio()
    5735  last_sector = bio_end_sector(bi);   in make_discard_request()
    6119  ctx.last_sector = bio_end_sector(bi);   in raid5_make_request()
    6604  last_sector = bio_end_sector(raid_bio);   in retry_aligned_read()
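
stripe_bio_overlaps() treats each bio as the half-open sector range [bi_iter.bi_sector, bio_end_sector()); the two comparisons at lines 3463 and 3468 are, in effect, the two halves of the usual interval-intersection test against a bio already queued on the stripe. A standalone sketch of that test (names are illustrative, not the raid5 code):

    #include <assert.h>
    #include <stdbool.h>

    typedef unsigned long long sector_t;

    /* Half-open ranges [a_start, a_end) and [b_start, b_end) intersect
     * iff each one starts before the other ends. */
    static bool ranges_overlap(sector_t a_start, sector_t a_end,
                               sector_t b_start, sector_t b_end)
    {
        return a_start < b_end && b_start < a_end;
    }

    int main(void)
    {
        /* Adjacent ranges share no sector, so they do not overlap. */
        assert(!ranges_overlap(8, 16, 16, 24));
        /* These share sectors 12..15, so they do. */
        assert(ranges_overlap(8, 16, 12, 24));
        return 0;
    }
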
raid5-ppl.c
    498  bio->bi_iter.bi_sector = bio_end_sector(prev);   in ppl_submit_iounit()
dm-clone-target.c
    294  end = bio_end_sector(bio) >> clone->region_shift;   in bio_region_range()
dm-verity-target.c
    770  if (bio_end_sector(bio) >>   in verity_map()
dm-cache-target.c
    1012  sector_t se = bio_end_sector(bio);   in calc_discard_block_range()
dm.c
    528  sector = bio_end_sector(bio) - io->sector_offset;   in dm_io_acct()
dm-raid.c
    3350  if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors))   in raid_map()
/linux-6.12.1/fs/xfs/
xfs_bio_io.c
    43  bio->bi_iter.bi_sector = bio_end_sector(prev);   in xfs_rw_bdev()
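
xfs_rw_bdev(), like gfs2_chain_bio() and ppl_submit_iounit() above, starts each chained bio exactly where the previous one ended, so a request larger than a single bio still covers one contiguous sector range. A sketch of that hand-off as plain sector arithmetic (the numbers are made up for illustration):

    #include <assert.h>

    typedef unsigned long long sector_t;

    int main(void)
    {
        sector_t prev_start = 1024;          /* previous bio's bi_iter.bi_sector */
        unsigned int prev_bytes = 1 << 20;   /* previous bio's bi_iter.bi_size   */

        /* bio_end_sector(prev): the first sector the previous bio does
         * not cover; the chained bio picks up from there. */
        sector_t next_start = prev_start + (prev_bytes >> 9);

        assert(next_start == 1024 + 2048);   /* 1 MiB == 2048 sectors */
        return 0;
    }
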
/linux-6.12.1/fs/bcachefs/
fs-io-buffered.c
    108  pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;   in readpage_bio_extend()
    136  BUG_ON(folio_sector(folio) != bio_end_sector(bio));   in readpage_bio_extend()
    595  bio_end_sector(&w->io->op.wbio.bio) != sector))   in __bch2_writepage()
    609  WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >   in __bch2_writepage()
    613  bio_end_sector(&w->io->op.wbio.bio) << 9,   in __bch2_writepage()
/linux-6.12.1/drivers/md/bcache/
request.c
    443  i->last = bio_end_sector(bio);   in check_should_bypass()
    572  bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);   in cache_lookup_fn()
    986  struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);   in cached_dev_write()
    1321  &KEY(d->id, bio_end_sector(bio), 0));   in flash_dev_submit_bio()
/linux-6.12.1/fs/gfs2/
lops.c
    303  nblk = bio_end_sector(bio);   in gfs2_log_get_bio()
    487  new->bi_iter.bi_sector = bio_end_sector(prev);   in gfs2_chain_bio()
    543  if (bio_end_sector(bio) == sector) {   in gfs2_find_jhead()
/linux-6.12.1/include/linux/
bio.h
    43  #define bio_end_sector(bio)  bvec_iter_end_sector((bio)->bi_iter)   (macro)
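
bio_end_sector() expands to bvec_iter_end_sector() on the bio's iterator, which works out to the starting sector plus the remaining size converted from bytes to 512-byte sectors. The result is the first sector *after* the bio, an exclusive bound, which is why several callers above subtract 1 to get the last covered sector. A minimal userspace model of that expansion, using a stand-in struct that carries only the two bvec_iter fields involved:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Stand-in for the two struct bvec_iter fields the macro reads. */
    struct bvec_iter_model {
        sector_t bi_sector;       /* device offset in 512-byte sectors */
        unsigned int bi_size;     /* residual I/O size in bytes */
    };

    /* Mirrors bvec_iter_end_sector(iter): bi_sector + (bi_size >> 9). */
    static sector_t end_sector(const struct bvec_iter_model *iter)
    {
        return iter->bi_sector + (iter->bi_size >> 9);
    }

    int main(void)
    {
        struct bvec_iter_model iter = { .bi_sector = 2048, .bi_size = 8192 };

        /* 8192 bytes == 16 sectors, so the bio covers [2048, 2064). */
        printf("range: [%llu, %llu)\n", iter.bi_sector, end_sector(&iter));
        return 0;
    }
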
blkdev.h
    896  disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);   in bio_straddles_zones()
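
bio_straddles_zones() compares the zone number of the bio's first sector with that of its last sector, bio_end_sector(bio) - 1; the - 1 matters because bio_end_sector() is an exclusive bound. A sketch of that comparison assuming fixed-size zones; zone_no() below is an illustrative stand-in, not the kernel's disk_zone_no():

    #include <assert.h>
    #include <stdbool.h>

    typedef unsigned long long sector_t;

    /* Illustrative stand-in for disk_zone_no(): which fixed-size zone a
     * sector falls in, assuming every zone is zone_sectors long. */
    static sector_t zone_no(sector_t sector, sector_t zone_sectors)
    {
        return sector / zone_sectors;
    }

    static bool straddles_zones(sector_t start, sector_t end_excl,
                                sector_t zone_sectors)
    {
        /* Compare the first sector's zone with the last *covered*
         * sector's zone; end_excl itself lies one past the bio. */
        return zone_no(start, zone_sectors) !=
               zone_no(end_excl - 1, zone_sectors);
    }

    int main(void)
    {
        sector_t zone_sectors = 1 << 19;   /* 256 MiB zones (assumed size) */

        /* Fits entirely inside zone 0. */
        assert(!straddles_zones(0, 2048, zone_sectors));
        /* Crosses from zone 0 into zone 1. */
        assert(straddles_zones(zone_sectors - 8, zone_sectors + 8, zone_sectors));
        return 0;
    }
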
/linux-6.12.1/drivers/block/
pktcdvd.c
    923  pd->iosched.last_write = bio_end_sector(bio);   in pkt_iosched_process_queue()
    2439  bio->bi_iter.bi_sector, bio_end_sector(bio));   in pkt_submit_bio()
    2461  sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);   in pkt_submit_bio()
/linux-6.12.1/fs/iomap/
buffered-io.c
    405  bio_end_sector(ctx->bio) != sector ||   in iomap_readpage_iter()
    1742  bio_end_sector(&wpc->ioend->io_bio))   in iomap_can_add_to_ioend()
/linux-6.12.1/block/
mq-deadline.c
    628  sector_t sector = bio_end_sector(bio);   in dd_request_merge()
blk-iocost.c
    2624  iocg->cursor = bio_end_sector(bio);   in ioc_rqos_throttle()
    2741  sector_t bio_end = bio_end_sector(bio);   in ioc_rqos_merge()
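
ioc_rqos_throttle() stores bio_end_sector() in iocg->cursor, the same kind of bookkeeping pktcdvd keeps in iosched.last_write and bcache in i->last above: remember where the previous I/O ended so a later bio can be judged against that point, e.g. one that starts exactly there is contiguous with the earlier stream. A small sketch of that contiguity check (the function name is illustrative):

    #include <assert.h>
    #include <stdbool.h>

    typedef unsigned long long sector_t;

    /* A bio is contiguous with the previous one when it starts at the
     * sector the previous bio's bio_end_sector() reported: no gap. */
    static bool is_contiguous(sector_t prev_end, sector_t next_start)
    {
        return next_start == prev_end;
    }

    int main(void)
    {
        sector_t cursor = 4096;   /* end of the last submitted bio */

        assert(is_contiguous(cursor, 4096));    /* back-to-back, sequential */
        assert(!is_contiguous(cursor, 8192));   /* a gap: a seek in between */
        return 0;
    }
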
bfq-iosched.c
    1802  end = bio_end_sector(bio) - 1;   in bfq_actuator_index()
    2367  return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));   in bfq_find_rq_fmerge()