/linux-6.12.1/drivers/block/drbd/ |
D | drbd_interval.c |
  16 #define NODE_END(node) ((node)->sector + ((node)->size >> 9))
  28 sector_t this_end = this->sector + (this->size >> 9); in drbd_insert_interval()
  39 if (this->sector < here->sector) in drbd_insert_interval()
  41 else if (this->sector > here->sector) in drbd_insert_interval()
  69 drbd_contains_interval(struct rb_root *root, sector_t sector, in drbd_contains_interval() argument
  78 if (sector < here->sector) in drbd_contains_interval()
  80 else if (sector > here->sector) in drbd_contains_interval()
  118 drbd_find_overlap(struct rb_root *root, sector_t sector, unsigned int size) in drbd_find_overlap() argument
  122 sector_t end = sector + (size >> 9); in drbd_find_overlap()
  131 sector < interval_end(node->rb_left)) { in drbd_find_overlap()
  [all …]
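The constant to notice in these hits is the 9: DRBD keeps interval starts in 512-byte sectors but sizes in bytes, so the exclusive end sector is sector + (size >> 9). A minimal userspace sketch of that arithmetic and the overlap test it feeds (the types here are simplified stand-ins, not the kernel's struct drbd_interval):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Simplified stand-in for struct drbd_interval:
     * start in 512-byte sectors, length in bytes. */
    struct interval {
        sector_t sector;
        unsigned int size;
    };

    /* Exclusive end sector; size is in bytes, so >> 9 converts it to
     * sectors, which is exactly what the NODE_END() macro computes. */
    static sector_t interval_end_of(const struct interval *i)
    {
        return i->sector + (i->size >> 9);
    }

    /* Half-open intervals overlap iff each starts before the other ends. */
    static bool overlaps(const struct interval *a, const struct interval *b)
    {
        return a->sector < interval_end_of(b) &&
               b->sector < interval_end_of(a);
    }

drbd_find_overlap() applies the same half-open test inside an rb-tree augmented with each subtree's maximum end sector, which is what lets the line-131 hit skip an entire left subtree in one comparison.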
|
D | drbd_actlog.c |
  127 sector_t sector, enum req_op op) in _drbd_md_sync_page_io() argument
  144 bio->bi_iter.bi_sector = sector; in _drbd_md_sync_page_io()
  178 sector_t sector, enum req_op op) in drbd_md_sync_page_io() argument
  187 (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ", in drbd_md_sync_page_io()
  190 if (sector < drbd_md_first_sector(bdev) || in drbd_md_sync_page_io()
  191 sector + 7 > drbd_md_last_sector(bdev)) in drbd_md_sync_page_io()
  194 (unsigned long long)sector, in drbd_md_sync_page_io()
  197 err = _drbd_md_sync_page_io(device, bdev, sector, op); in drbd_md_sync_page_io()
  200 (unsigned long long)sector, in drbd_md_sync_page_io()
  245 unsigned first = i->sector >> (AL_EXTENT_SHIFT-9); in drbd_al_begin_io_fastpath()
  [all …]
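The bounds check at the 190-191 hits reads oddly until you recall that DRBD metadata I/O is a single 4096-byte block: eight 512-byte sectors, so the last sector touched is sector + 7. A hedged sketch of the same check, where md_first/md_last are hypothetical stand-ins for drbd_md_first_sector()/drbd_md_last_sector():

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* A 4096-byte metadata block spans sectors [sector, sector + 7],
     * so both ends must fall inside the metadata area. */
    static bool md_io_in_bounds(sector_t sector, sector_t md_first,
                                sector_t md_last)
    {
        return sector >= md_first && sector + 7 <= md_last;
    }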
|
D | drbd_interval.h |
  10 sector_t sector; /* start sector of the interval */ member
  38 #define drbd_for_each_overlap(i, root, sector, size) \ argument
  39 for (i = drbd_find_overlap(root, sector, size); \
  41 i = drbd_next_overlap(i, sector, size))
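drbd_for_each_overlap() is the usual kernel find-first/find-next iterator macro. A self-contained toy version over a linked list instead of the rb-tree, just to show the shape (every name in this sketch is invented for illustration):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Toy interval node standing in for the rb-tree. */
    struct ivl {
        sector_t sector;
        unsigned int size;      /* bytes */
        struct ivl *next;
    };

    static int ivl_overlaps(const struct ivl *i, sector_t s, unsigned int sz)
    {
        return i->sector < s + (sz >> 9) &&
               s < i->sector + (i->size >> 9);
    }

    static struct ivl *next_overlap(struct ivl *i, sector_t s, unsigned int sz)
    {
        while (i && !ivl_overlaps(i, s, sz))
            i = i->next;
        return i;
    }

    /* Same find-first/find-next shape as drbd_for_each_overlap(). */
    #define for_each_overlap(i, head, s, sz)            \
        for ((i) = next_overlap((head), (s), (sz));     \
             (i);                                       \
             (i) = next_overlap((i)->next, (s), (sz)))

    int main(void)
    {
        struct ivl c = { 4096, 512, NULL };
        struct ivl b = { 128, 4096, &c };   /* covers sectors 128..135 */
        struct ivl a = { 0, 512, &b };      /* sector 0 only */
        struct ivl *i;

        for_each_overlap(i, &a, 130, 8192)  /* query sectors 130..145 */
            printf("overlap at %llu\n", (unsigned long long)i->sector);
        return 0;
    }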
|
D | drbd_worker.c |
  127 drbd_set_out_of_sync(peer_device, peer_req->i.sector, peer_req->i.size); in drbd_endio_write_sec_final()
  157 drbd_rs_complete_io(device, i.sector); in drbd_endio_write_sec_final()
  183 (unsigned long long)peer_req->i.sector); in drbd_peer_request_endio()
  356 sector_t sector = peer_req->i.sector; in w_e_send_csum() local
  367 err = drbd_send_drequest_csum(peer_device, sector, size, in w_e_send_csum()
  387 static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, int size) in read_for_csum() argument
  397 peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, in read_for_csum()
  590 sector_t sector; in make_resync_request() local
  656 sector = BM_BIT_TO_SECT(bit); in make_resync_request()
  658 if (drbd_try_rs_begin_io(peer_device, sector)) { in make_resync_request()
  [all …]
|
D | drbd_receiver.c |
  360 drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in drbd_alloc_peer_req() argument
  389 peer_req->i.sector = sector; in drbd_alloc_peer_req()
  1602 if (drbd_issue_discard_or_zero_out(device, peer_req->i.sector, in drbd_issue_peer_discard_or_zero_out()
  1640 sector_t sector = peer_req->i.sector; in drbd_submit_peer_request() local
  1694 bio->bi_iter.bi_sector = sector; in drbd_submit_peer_request()
  1707 sector += len >> 9; in drbd_submit_peer_request()
  1847 read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, in read_in_block() argument
  1897 if (sector + (ds>>9) > capacity) { in read_in_block()
  1901 (unsigned long long)sector, ds); in read_in_block()
  1908 peer_req = drbd_alloc_peer_req(peer_device, id, sector, ds, data_size, GFP_NOIO); in read_in_block()
  [all …]
|
/linux-6.12.1/block/ |
D | blk-lib.c |
  13 static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector) in bio_discard_limit() argument
  19 sector += bdev->bd_start_sect; in bio_discard_limit()
  22 round_up(sector, discard_granularity >> SECTOR_SHIFT); in bio_discard_limit()
  28 if (granularity_aligned_sector != sector) in bio_discard_limit()
  29 return granularity_aligned_sector - sector; in bio_discard_limit()
  39 sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask) in blk_alloc_discard_bio() argument
  41 sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector)); in blk_alloc_discard_bio()
  50 bio->bi_iter.bi_sector = *sector; in blk_alloc_discard_bio()
  52 *sector += bio_sects; in blk_alloc_discard_bio()
  63 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, in __blkdev_issue_discard() argument
  [all …]
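bio_discard_limit() caps the first discard bio at the next granularity boundary so that every later bio starts aligned. A sketch of that arithmetic, assuming a power-of-two discard granularity as the kernel's round_up() does (parameter names are illustrative):

    #include <stdint.h>

    typedef uint64_t sector_t;
    #define SECTOR_SHIFT 9

    /* 'part_start' makes the sector absolute, as the bd_start_sect
     * addition at the line-19 hit does; granularity is in bytes. */
    static sector_t discard_limit(sector_t sector, sector_t part_start,
                                  uint32_t granularity_bytes,
                                  sector_t max_discard_sectors)
    {
        sector_t gran = granularity_bytes >> SECTOR_SHIFT;
        sector_t aligned;

        sector += part_start;
        aligned = (sector + gran - 1) & ~(gran - 1);    /* round_up() */

        /* A misaligned start gets a short first bio that stops at the
         * boundary, so every subsequent bio begins aligned. */
        if (aligned != sector)
            return aligned - sector;
        return max_discard_sectors;
    }

blk_alloc_discard_bio() then takes min(*nr_sects, this limit) per bio, advancing *sector as it goes, which is why the remaining hits pass sector by pointer.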
|
D | blk-ia-ranges.c |
  18 return sprintf(buf, "%llu\n", iar->sector); in blk_ia_range_sector_show()
  178 sector_t sector) in disk_find_ia_range() argument
  185 if (sector >= iar->sector && in disk_find_ia_range()
  186 sector < iar->sector + iar->nr_sectors in disk_find_ia_range()
  198 sector_t sector = 0; in disk_check_ia_ranges() local
  210 tmp = disk_find_ia_range(iars, sector); in disk_check_ia_ranges()
  211 if (!tmp || tmp->sector != sector) { in disk_check_ia_ranges()
  218 swap(iar->sector, tmp->sector); in disk_check_ia_ranges()
  222 sector += iar->nr_sectors; in disk_check_ia_ranges()
  225 if (sector != capacity) { in disk_check_ia_ranges()
  [all …]
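disk_check_ia_ranges() verifies that the reported independent access ranges tile [0, capacity) exactly: at each step some range must start at the running sector, and the walk must end precisely at the capacity. An unoptimized userspace sketch of that invariant (the kernel sorts the ranges in place with swap() rather than rescanning; this assumes every nr_sectors is non-zero):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    struct ia_range {
        sector_t sector;
        sector_t nr_sectors;
    };

    static bool ranges_cover_disk(const struct ia_range *r, size_t n,
                                  sector_t capacity)
    {
        sector_t sector = 0;
        size_t used;

        for (used = 0; used < n; used++) {
            size_t j;

            /* some range must begin exactly where the last one ended */
            for (j = 0; j < n; j++)
                if (r[j].sector == sector)
                    break;
            if (j == n)
                return false;           /* hole or overlap */
            sector += r[j].nr_sectors;
        }
        return sector == capacity;      /* no tail left uncovered */
    }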
|
D | blk-zoned.c |
  137 int blkdev_report_zones(struct block_device *bdev, sector_t sector, in blkdev_report_zones() argument
  146 if (!nr_zones || sector >= capacity) in blkdev_report_zones()
  149 return disk->fops->report_zones(disk, sector, nr_zones, cb, data); in blkdev_report_zones()
  177 sector_t sector, sector_t nr_sectors) in blkdev_zone_mgmt() argument
  181 sector_t end_sector = sector + nr_sectors; in blkdev_zone_mgmt()
  194 if (end_sector <= sector || end_sector > capacity) in blkdev_zone_mgmt()
  199 if (!bdev_is_zone_start(bdev, sector)) in blkdev_zone_mgmt()
  209 if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) in blkdev_zone_mgmt()
  212 while (sector < end_sector) { in blkdev_zone_mgmt()
  214 bio->bi_iter.bi_sector = sector; in blkdev_zone_mgmt()
  [all …]
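blkdev_zone_mgmt() validates the range up front, then walks it one zone at a time, issuing a bio per zone. A sketch of that walk with a hypothetical issue_zone_op() callback standing in for bio allocation and submission (the kernel also special-cases a whole-device reset, as the line-209 hit shows):

    #include <stdint.h>

    typedef uint64_t sector_t;

    static int zone_mgmt(sector_t sector, sector_t nr_sectors,
                         sector_t zone_sectors, sector_t capacity,
                         int (*issue_zone_op)(sector_t))
    {
        sector_t end_sector = sector + nr_sectors;

        if (end_sector <= sector || end_sector > capacity)
            return -1;              /* empty range or past end of device */
        if (sector % zone_sectors)
            return -1;              /* must start on a zone boundary
                                     * (bdev_is_zone_start() in-kernel) */

        while (sector < end_sector) {
            int ret = issue_zone_op(sector);    /* one bio per zone */

            if (ret)
                return ret;
            sector += zone_sectors;
        }
        return 0;
    }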
|
/linux-6.12.1/drivers/block/ |
D | brd.c |
  49 static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) in brd_lookup_page() argument
  51 return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT); in brd_lookup_page()
  57 static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp) in brd_insert_page() argument
  59 pgoff_t idx = sector >> PAGE_SECTORS_SHIFT; in brd_insert_page()
  63 page = brd_lookup_page(brd, sector); in brd_insert_page()
  105 static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n, in copy_to_brd_setup() argument
  108 unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; in copy_to_brd_setup()
  113 ret = brd_insert_page(brd, sector, gfp); in copy_to_brd_setup()
  117 sector += copy >> SECTOR_SHIFT; in copy_to_brd_setup()
  118 ret = brd_insert_page(brd, sector, gfp); in copy_to_brd_setup()
  [all …]
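brd stores the whole ramdisk as an xarray of pages, so every sector splits into a page index (high bits) and a byte offset inside the page (low bits times 512). A worked example assuming 4 KiB pages, i.e. PAGE_SECTORS = 8:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    #define SECTOR_SHIFT       9
    #define PAGE_SHIFT         12                  /* assume 4 KiB pages */
    #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
    #define PAGE_SECTORS       (1u << PAGE_SECTORS_SHIFT)

    int main(void)
    {
        sector_t sector = 1234;

        /* The same split brd_lookup_page()/copy_to_brd_setup() perform:
         * which backing page, and where inside it. */
        uint64_t idx    = sector >> PAGE_SECTORS_SHIFT;
        uint32_t offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;

        /* 1234 = 154 * 8 + 2, so page 154, byte offset 2 * 512 = 1024 */
        printf("page %llu, offset %u\n", (unsigned long long)idx, offset);
        return 0;
    }

The xa_load() at the line-51 hit keys the xarray directly on that page index, which keeps the store sparse: pages exist only where something was written.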
|
/linux-6.12.1/include/trace/events/ |
D | block.h |
  32 __field( sector_t, sector )
  38 __entry->sector = bh->b_blocknr;
  44 (unsigned long long)__entry->sector, __entry->size
  91 __field( sector_t, sector )
  100 __entry->sector = blk_rq_trace_sector(rq);
  111 (unsigned long long)__entry->sector, __entry->nr_sector,
  126 __field( sector_t, sector )
  136 __entry->sector = blk_rq_pos(rq);
  148 (unsigned long long)__entry->sector, __entry->nr_sector,
  198 __field( sector_t, sector )
  [all …]
|
D | bcache.h |
  18 __field(sector_t, sector )
  28 __entry->sector = bio->bi_iter.bi_sector;
  36 __entry->rwbs, (unsigned long long)__entry->sector,
  96 __field(sector_t, sector )
  103 __entry->sector = bio->bi_iter.bi_sector;
  110 (unsigned long long)__entry->sector, __entry->nr_sector)
  129 __field(sector_t, sector )
  138 __entry->sector = bio->bi_iter.bi_sector;
  147 __entry->rwbs, (unsigned long long)__entry->sector,
  159 __field(sector_t, sector )
  [all …]
|
/linux-6.12.1/fs/btrfs/ |
D | raid56.c |
  952 struct sector_ptr *sector; in sector_in_rbio() local
  964 sector = &rbio->bio_sectors[index]; in sector_in_rbio()
  965 if (sector->page || bio_list_only) { in sector_in_rbio()
  967 if (!sector->page) in sector_in_rbio()
  968 sector = NULL; in sector_in_rbio()
  970 return sector; in sector_in_rbio()
  1124 struct sector_ptr *sector, in rbio_add_io_sector() argument
  1145 ASSERT(sector->page); in rbio_add_io_sector()
  1176 ret = bio_add_page(last, sector->page, sectorsize, in rbio_add_io_sector()
  1177 sector->pgoff); in rbio_add_io_sector()
  [all …]
|
/linux-6.12.1/drivers/block/null_blk/ |
D | zoned.c |
  56 sector_t sector = 0; in null_init_zoned_dev() local
  134 zone->start = sector; in null_init_zoned_dev()
  141 sector += dev->zone_size_sects; in null_init_zoned_dev()
  148 zone->start = sector; in null_init_zoned_dev()
  164 sector += dev->zone_size_sects; in null_init_zoned_dev()
  193 int null_report_zones(struct gendisk *disk, sector_t sector, in null_report_zones() argument
  203 first_zone = null_zone_no(dev, sector); in null_report_zones()
  241 sector_t sector, unsigned int len) in null_zone_valid_read_len() argument
  244 struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)]; in null_zone_valid_read_len()
  249 sector + nr_sectors <= zone->wp) in null_zone_valid_read_len()
  [all …]
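null_init_zoned_dev() lays zones out back to back, so zone starts are just a running sector counter, and null_zone_no() reduces to a division (a shift for power-of-two zone sizes). A compact sketch of both, with invented names:

    #include <stdint.h>

    typedef uint64_t sector_t;

    struct zone_info {
        sector_t start;
        sector_t wp;            /* write pointer */
    };

    /* Zones tile the device: each starts where the previous one ended. */
    static void init_zones(struct zone_info *zones, unsigned int nr_zones,
                           sector_t zone_size_sects)
    {
        sector_t sector = 0;
        unsigned int i;

        for (i = 0; i < nr_zones; i++) {
            zones[i].start = sector;
            zones[i].wp = sector;       /* empty zone */
            sector += zone_size_sects;
        }
    }

    /* With power-of-two zone sizes this is a single shift. */
    static unsigned int zone_no(sector_t sector, unsigned int zone_size_shift)
    {
        return sector >> zone_size_shift;
    }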
|
D | null_blk.h |
  129 blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector,
  132 sector_t sector, unsigned int nr_sectors);
  138 int null_report_zones(struct gendisk *disk, sector_t sector,
  141 sector_t sector, sector_t nr_sectors);
  143 sector_t sector, unsigned int len);
  159 enum req_op op, sector_t sector, sector_t nr_sectors) in null_process_zoned_cmd() argument
  164 sector_t sector, in null_zone_valid_read_len() argument
|
D | main.c |
  866 static void null_free_sector(struct nullb *nullb, sector_t sector, in null_free_sector() argument
  875 idx = sector >> PAGE_SECTORS_SHIFT; in null_free_sector()
  876 sector_bit = (sector & SECTOR_MASK); in null_free_sector()
  939 sector_t sector, bool for_write, bool is_cache) in __null_lookup_page() argument
  946 idx = sector >> PAGE_SECTORS_SHIFT; in __null_lookup_page()
  947 sector_bit = (sector & SECTOR_MASK); in __null_lookup_page()
  960 sector_t sector, bool for_write, bool ignore_cache) in null_lookup_page() argument
  965 page = __null_lookup_page(nullb, sector, for_write, true); in null_lookup_page()
  968 return __null_lookup_page(nullb, sector, for_write, false); in null_lookup_page()
  972 sector_t sector, bool ignore_cache) in null_insert_page() argument
  [all …]
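Unlike brd, null_blk also tracks which sectors inside each backing page actually hold data: the low sector bits (sector & SECTOR_MASK) index a per-page validity bitmap, while the high bits pick the page exactly as in brd. A sketch of that bookkeeping, assuming 4 KiB pages so each page has eight sectors and the bitmap fits in one byte (struct and helper names invented):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    #define PAGE_SECTORS_SHIFT 3                    /* 4 KiB pages */
    #define SECTOR_MASK        ((1u << PAGE_SECTORS_SHIFT) - 1)

    struct nullb_page_sketch {
        void *data;
        uint8_t bitmap;                             /* one bit per sector */
    };

    static void mark_sector_valid(struct nullb_page_sketch *p,
                                  sector_t sector)
    {
        p->bitmap |= 1u << (sector & SECTOR_MASK);  /* sector has data */
    }

    static bool sector_valid(const struct nullb_page_sketch *p,
                             sector_t sector)
    {
        return p->bitmap & (1u << (sector & SECTOR_MASK));
    }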
|
/linux-6.12.1/drivers/scsi/ |
D | sr_vendor.c |
  174 unsigned long sector; in sr_cd_check() local
  186 sector = 0; /* the multisession sector offset goes here */ in sr_cd_check()
  212 sector = buffer[11] + (buffer[10] << 8) + in sr_cd_check()
  216 sector = 0; in sr_cd_check()
  243 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; in sr_cd_check()
  271 sector = min * CD_SECS * CD_FRAMES + sec * CD_FRAMES + frame; in sr_cd_check()
  272 if (sector) in sr_cd_check()
  273 sector -= CD_MSF_OFFSET; in sr_cd_check()
  309 sector = buffer[11] + (buffer[10] << 8) + in sr_cd_check()
  318 sector = 0; in sr_cd_check()
  [all …]
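The arithmetic at the 243-273 hits is the classic CD MSF-to-LBA conversion: 75 frames per second, 60 seconds per minute, and a 150-frame (two-second) lead-in, which is what CD_MSF_OFFSET subtracts. As a standalone function:

    /* Constants as defined in linux/cdrom.h. */
    #define CD_SECS       60
    #define CD_FRAMES     75
    #define CD_MSF_OFFSET 150

    static unsigned long msf_to_lba(unsigned int min, unsigned int sec,
                                    unsigned int frame)
    {
        unsigned long sector = min * CD_SECS * CD_FRAMES +
                               sec * CD_FRAMES + frame;

        /* e.g. MSF 00:02:00 is LBA 0: 2 * 75 - 150 == 0; an all-zero
         * MSF stays 0, matching the 'if (sector)' guard above. */
        return sector ? sector - CD_MSF_OFFSET : 0;
    }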
|
/linux-6.12.1/fs/fat/ |
D | cache.c |
  310 int fat_get_mapped_cluster(struct inode *inode, sector_t sector, in fat_get_mapped_cluster() argument
  318 cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits); in fat_get_mapped_cluster()
  319 offset = sector & (sbi->sec_per_clus - 1); in fat_get_mapped_cluster()
  326 if (*mapped_blocks > last_block - sector) in fat_get_mapped_cluster()
  327 *mapped_blocks = last_block - sector; in fat_get_mapped_cluster()
  333 static int is_exceed_eof(struct inode *inode, sector_t sector, in is_exceed_eof() argument
  341 if (sector >= *last_block) { in is_exceed_eof()
  351 if (sector >= *last_block) in is_exceed_eof()
  358 int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys, in fat_bmap() argument
  367 if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) { in fat_bmap()
  [all …]
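fat_get_mapped_cluster() does the familiar high-bits/low-bits split at cluster granularity: cluster_bits - blocksize_bits is log2 of sectors-per-cluster, so the high bits select the cluster and the low bits the block within it. A sketch (for 4 KiB clusters on 512-byte blocks the shift is 3 and sec_per_clus is 8):

    #include <stdint.h>

    typedef uint64_t sector_t;

    /* cluster_bits: log2 of cluster size in bytes;
     * blocksize_bits: log2 of block size in bytes;
     * sec_per_clus must equal 1 << (cluster_bits - blocksize_bits). */
    static void sector_to_cluster(sector_t sector,
                                  unsigned int cluster_bits,
                                  unsigned int blocksize_bits,
                                  unsigned int sec_per_clus,
                                  uint32_t *cluster, uint32_t *offset)
    {
        *cluster = sector >> (cluster_bits - blocksize_bits);
        *offset  = sector & (sec_per_clus - 1);  /* block within cluster */
    }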
|
/linux-6.12.1/samples/bpf/ |
D | tracex3.bpf.c |
  15 sector_t sector; member
  32 .sector = ctx->sector in bpf_prog1()
  63 .sector = ctx->sector in bpf_prog2()
|
/linux-6.12.1/fs/zonefs/ |
D | trace.h |
  30 __field(sector_t, sector)
  38 __entry->sector = z->z_sector;
  43 blk_op_str(__entry->op), __entry->sector,
  54 __field(sector_t, sector)
  62 __entry->sector = zonefs_inode_zone(inode)->z_sector;
  70 __entry->sector, __entry->size, __entry->wpoffset,
|
/linux-6.12.1/drivers/usb/storage/ |
D | jumpshot.c |
  155 u32 sector, in jumpshot_read_data() argument
  171 if (sector > 0x0FFFFFFF) in jumpshot_read_data()
  193 command[2] = sector & 0xFF; in jumpshot_read_data()
  194 command[3] = (sector >> 8) & 0xFF; in jumpshot_read_data()
  195 command[4] = (sector >> 16) & 0xFF; in jumpshot_read_data()
  197 command[5] = 0xE0 | ((sector >> 24) & 0x0F); in jumpshot_read_data()
  217 sector += thistime; in jumpshot_read_data()
  232 u32 sector, in jumpshot_write_data() argument
  248 if (sector > 0x0FFFFFFF) in jumpshot_write_data()
  275 command[2] = sector & 0xFF; in jumpshot_write_data()
  [all …]
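The 193-197 hits are 28-bit ATA-style LBA encoding: three full bytes of the sector number plus its top nibble OR'd into 0xE0 (LBA mode, drive 0), which is also why anything above 0x0FFFFFFF is rejected up front. Isolated into a helper:

    #include <stdint.h>

    /* Pack a 28-bit LBA into command bytes 2..5 as above;
     * sectors beyond 28 bits do not fit. */
    static int pack_lba28(uint8_t command[8], uint32_t sector)
    {
        if (sector > 0x0FFFFFFF)
            return -1;                              /* beyond 28 bits */

        command[2] = sector & 0xFF;                 /* LBA  7:0  */
        command[3] = (sector >> 8) & 0xFF;          /* LBA 15:8  */
        command[4] = (sector >> 16) & 0xFF;         /* LBA 23:16 */
        command[5] = 0xE0 | ((sector >> 24) & 0x0F);/* LBA 27:24 + mode */
        return 0;
    }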
|
/linux-6.12.1/drivers/vdpa/vdpa_sim/ |
D | vdpa_sim_blk.c |
  118 u64 sector; in vdpasim_blk_handle_req() local
  156 sector = vdpasim64_to_cpu(vdpasim, hdr.sector); in vdpasim_blk_handle_req()
  157 offset = sector << SECTOR_SHIFT; in vdpasim_blk_handle_req()
  161 sector != 0) { in vdpasim_blk_handle_req()
  164 type, sector); in vdpasim_blk_handle_req()
  171 if (!vdpasim_blk_check_range(vdpasim, sector, in vdpasim_blk_handle_req()
  194 if (!vdpasim_blk_check_range(vdpasim, sector, in vdpasim_blk_handle_req()
  255 sector = le64_to_cpu(range.sector); in vdpasim_blk_handle_req()
  256 offset = sector << SECTOR_SHIFT; in vdpasim_blk_handle_req()
  277 if (!vdpasim_blk_check_range(vdpasim, sector, num_sectors, in vdpasim_blk_handle_req()
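The simulator decodes the sector from the request header, derives the byte offset with sector << SECTOR_SHIFT, and range-checks before touching the backing store. A sketch of both helpers, written so that start + num cannot overflow (this is the idea behind vdpasim_blk_check_range(), not a copy of it):

    #include <stdbool.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9

    /* Reject requests that start past the device or whose length
     * would wrap or run off the end. */
    static bool check_range(uint64_t start, uint64_t num, uint64_t capacity)
    {
        if (start > capacity)
            return false;
        return num <= capacity - start;     /* no overflow possible */
    }

    /* Byte offset into the backing store. */
    static uint64_t sector_to_offset(uint64_t sector)
    {
        return sector << SECTOR_SHIFT;
    }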
|
/linux-6.12.1/drivers/mtd/ |
D | rfd_ftl.c |
  91 static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
  240 static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf) in rfd_ftl_readsect() argument
  247 if (sector >= part->sector_count) in rfd_ftl_readsect()
  250 addr = part->sector_map[sector]; in rfd_ftl_readsect()
  601 static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr) in do_writesect() argument
  642 part->sector_map[sector] = addr; in do_writesect()
  644 entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector); in do_writesect()
  667 static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf) in rfd_ftl_writesect() argument
  674 pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector); in rfd_ftl_writesect()
  681 if (sector >= part->sector_count) { in rfd_ftl_writesect()
  [all …]
|
/linux-6.12.1/drivers/md/ |
D | dm-log-writes.c |
  97 __le64 sector; member
  126 sector_t sector; member
  214 sector_t sector) in write_metadata() argument
  223 bio->bi_iter.bi_sector = sector; in write_metadata()
  224 bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ? in write_metadata()
  260 sector_t sector) in write_inline_data() argument
  276 bio->bi_iter.bi_sector = sector; in write_inline_data()
  308 sector += bio_pages * PAGE_SECTORS; in write_inline_data()
  319 struct pending_block *block, sector_t sector) in log_one_block() argument
  326 entry.sector = cpu_to_le64(block->sector); in log_one_block()
  [all …]
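The two sector members at the 97 and 126 hits show the split between on-disk and in-memory state: the log entry keeps a fixed little-endian __le64 that log_one_block() fills via cpu_to_le64(), while the pending block keeps a native sector_t. A userspace sketch of the same serialization discipline (layout simplified; the real struct log_write_entry carries more fields):

    #include <stdint.h>

    /* On-disk layout: fixed little-endian, portable across hosts. */
    struct log_entry_disk {
        uint8_t sector[8];          /* little-endian 64-bit sector */
        uint8_t nr_sectors[8];      /* little-endian 64-bit length */
    };

    /* Userspace stand-in for cpu_to_le64() plus the store: emit the
     * least significant byte first, regardless of host byte order. */
    static void put_le64(uint8_t dst[8], uint64_t v)
    {
        int i;

        for (i = 0; i < 8; i++)
            dst[i] = (uint8_t)(v >> (8 * i));
    }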
|
D | raid0.c |
  305 sector_t sector = *sectorp; in find_zone() local
  308 if (sector < z[i].zone_end) { in find_zone()
  310 *sectorp = sector - z[i-1].zone_end; in find_zone()
  321 sector_t sector, sector_t *sector_offset) in map_sector() argument
  332 sect_in_chunk = sector & (chunk_sects - 1); in map_sector()
  333 sector >>= chunksect_bits; in map_sector()
  339 sect_in_chunk = sector_div(sector, chunk_sects); in map_sector()
  350 + sector_div(sector, zone->nb_dev)]; in map_sector()
  553 sector_t sector = bio_sector; in raid0_map_submit_bio() local
  557 zone = find_zone(mddev->private, &sector); in raid0_map_submit_bio()
  [all …]
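map_sector() is RAID0's address split: for power-of-two chunks the low bits give the offset inside the chunk and the high bits the chunk number, which is then dealt round-robin across the zone's disks. A simplified single-zone sketch (the kernel additionally handles multiple zones via find_zone() and non-power-of-two chunks via sector_div()):

    #include <stdint.h>

    typedef uint64_t sector_t;

    /* chunk_sects must equal 1 << chunksect_bits. */
    static void map_stripe(sector_t sector, unsigned int chunk_sects,
                           unsigned int chunksect_bits, unsigned int nb_dev,
                           unsigned int *dev, sector_t *dev_sector)
    {
        sector_t sect_in_chunk = sector & (chunk_sects - 1);
        sector_t chunk = sector >> chunksect_bits;

        *dev = chunk % nb_dev;                  /* round-robin disk */
        *dev_sector = (chunk / nb_dev) * chunk_sects + sect_in_chunk;
    }

For example, with 128-sector chunks on 4 disks, sector 1000 is chunk 7, offset 104, so it lands on disk 3 at device sector 1 * 128 + 104 = 232.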
|
D | raid1.c |
  62 sector_t lo = r1_bio->sector; in check_and_add_serial()
  84 int idx = sector_to_idx(r1_bio->sector); in wait_for_serialization()
  262 sector_t sect = r1_bio->sector; in put_buf()
  283 idx = sector_to_idx(r1_bio->sector); in reschedule_retry()
  312 sector_t sector = r1_bio->sector; in raid_end_bio_io() local
  329 allow_barrier(conf, sector); in raid_end_bio_io()
  340 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
  405 (unsigned long long)r1_bio->sector); in raid1_end_read_request()
  424 mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors, in close_write()
  455 sector_t lo = r1_bio->sector; in raid1_end_write_request()
  [all …]
|