/linux-6.12.1/block/ |
D | badblocks.c |
    537  sector_t sectors = bad->len;  in can_merge_behind() local
    542  ((s + sectors) >= BB_OFFSET(p[behind])) &&  in can_merge_behind()
    557  sector_t sectors = bad->len;  in behind_merge() local
    563  WARN_ON((s + sectors) < BB_OFFSET(p[behind]));  in behind_merge()
    599  sector_t sectors = bad->len;  in front_merge() local
    607  merged = min_t(sector_t, sectors, BB_END(p[prev]) - s);  in front_merge()
    609  merged = min_t(sector_t, sectors, BB_MAX_LEN - BB_LEN(p[prev]));  in front_merge()
    859  static int _badblocks_set(struct badblocks *bb, sector_t s, int sectors,  in _badblocks_set() argument
    875  if (sectors == 0)  in _badblocks_set()
    881  sector_t next = s + sectors;  in _badblocks_set()
    [all …]
|
/linux-6.12.1/fs/bcachefs/ |
D | fs-io.h |
    58   u64 sectors;  member
    67   BUG_ON(res->sectors > inode->ei_quota_reserved);  in __bch2_quota_reservation_put()
    70   -((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);  in __bch2_quota_reservation_put()
    71   inode->ei_quota_reserved -= res->sectors;  in __bch2_quota_reservation_put()
    72   res->sectors = 0;  in __bch2_quota_reservation_put()
    79   if (res->sectors) {  in bch2_quota_reservation_put()
    89   u64 sectors,  in bch2_quota_reservation_add() argument
    98   ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,  in bch2_quota_reservation_add()
    101  inode->ei_quota_reserved += sectors;  in bch2_quota_reservation_add()
    102  res->sectors += sectors;  in bch2_quota_reservation_add()
    [all …]
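
The pattern in fs-io.h is a per-inode quota reservation: bch2_quota_reservation_add() charges sectors to the quota and records them both in res->sectors and in the inode's ei_quota_reserved, and the put path releases whatever the caller still holds. A minimal userspace sketch of that reserve/release accounting, using illustrative struct and function names that are not the bcachefs API:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct quota_res  { uint64_t sectors; };         /* sectors held by one caller */
    struct inode_acct { uint64_t quota_reserved; };  /* total reserved on the inode */

    static void reservation_add(struct inode_acct *in, struct quota_res *res,
                                uint64_t sectors)
    {
        /* charge the quota, then remember the amount per-caller and per-inode */
        in->quota_reserved += sectors;
        res->sectors += sectors;
    }

    static void reservation_put(struct inode_acct *in, struct quota_res *res)
    {
        /* release only what this caller still holds */
        assert(res->sectors <= in->quota_reserved);
        in->quota_reserved -= res->sectors;
        res->sectors = 0;
    }

    int main(void)
    {
        struct inode_acct in = {0};
        struct quota_res res = {0};

        reservation_add(&in, &res, 8);
        printf("reserved: %llu\n", (unsigned long long)in.quota_reserved);  /* 8 */
        reservation_put(&in, &res);
        printf("reserved: %llu\n", (unsigned long long)in.quota_reserved);  /* 0 */
        return 0;
    }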
|
D | buckets.h |
    154  static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)  in ptr_disk_sectors() argument
    156  EBUG_ON(sectors < 0);  in ptr_disk_sectors()
    159  ? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,  in ptr_disk_sectors()
    161  : sectors;  in ptr_disk_sectors()
    344  if (res->sectors) {  in bch2_disk_reservation_put()
    345  this_cpu_sub(*c->online_reserved, res->sectors);  in bch2_disk_reservation_put()
    346  res->sectors = 0;  in bch2_disk_reservation_put()
    359  u64 sectors, enum bch_reservation_flags flags)  in bch2_disk_reservation_add() argument
    366  if (sectors > old)  in bch2_disk_reservation_add()
    367  return __bch2_disk_reservation_add(c, res, sectors, flags);  in bch2_disk_reservation_add()
    [all …]
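
ptr_disk_sectors() scales an extent's logical sectors by the pointer's compression ratio, rounding up: disk sectors = DIV_ROUND_UP(sectors * compressed_size, uncompressed_size), or just sectors when the pointer is uncompressed. A worked example of that arithmetic with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>

    /* same rounding as the kernel's DIV_ROUND_UP_ULL() */
    static uint64_t div_round_up(uint64_t n, uint64_t d) { return (n + d - 1) / d; }

    int main(void)
    {
        /* illustrative numbers: a 128-sector extent whose data compressed
         * from 128 sectors down to 40 sectors on disk */
        uint64_t sectors = 128, compressed = 40, uncompressed = 128;

        uint64_t disk_sectors = div_round_up(sectors * compressed, uncompressed);
        printf("%llu\n", (unsigned long long)disk_sectors);  /* 40 */

        /* touching only 50 logical sectors still charges a proportional,
         * rounded-up share of the compressed size */
        printf("%llu\n",
               (unsigned long long)div_round_up(50 * compressed, uncompressed));  /* 16 */
        return 0;
    }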
|
D | buckets.c |
    93   usage->d[i].sectors,  in bch2_dev_usage_to_text()
    347  m->sectors)) {  in bch2_check_fix_ptrs()
    391  s64 sectors, enum bch_data_type ptr_data_type,  in bch2_bucket_ref_update() argument
    398  bool inserting = sectors > 0;  in bch2_bucket_ref_update()
    401  BUG_ON(!sectors);  in bch2_bucket_ref_update()
    468  if ((u64) *bucket_sectors + sectors > U32_MAX) {  in bch2_bucket_ref_update()
    475  *bucket_sectors, sectors,  in bch2_bucket_ref_update()
    480  sectors = -*bucket_sectors;  in bch2_bucket_ref_update()
    483  *bucket_sectors += sectors;  in bch2_bucket_ref_update()
    496  u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;  in bch2_trans_account_disk_usage_change()
    [all …]
|
D | fs-io-pagecache.c |
    160  unsigned i, sectors = folio_sectors(folio);  in __bch2_folio_set() local
    162  BUG_ON(pg_offset >= sectors);  in __bch2_folio_set()
    163  BUG_ON(pg_offset + pg_len > sectors);  in __bch2_folio_set()
    172  if (i == sectors)  in __bch2_folio_set()
    367  unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;  in bch2_get_folio_disk_reservation() local
    373  for (i = 0; i < sectors; i++)  in bch2_get_folio_disk_reservation()
    387  for (i = 0; i < sectors; i++)  in bch2_get_folio_disk_reservation()
    433  if (unlikely(disk_res.sectors != disk_sectors)) {  in __bch2_folio_reservation_get()
    440  if (disk_sectors > disk_res.sectors) {  in __bch2_folio_reservation_get()
    467  res->disk.sectors += disk_res.sectors;  in __bch2_folio_reservation_get()
    [all …]
|
D | movinggc.c |
    34   size_t sectors;  member
    69   list->sectors += b.sectors;  in move_bucket_in_flight_add()
    100  b->sectors = bch2_bucket_sectors_dirty(*a);  in bch2_bucket_is_movable()
    130  list->sectors -= i->bucket.sectors;  in move_buckets_wait()
    156  size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;  in bch2_copygc_get_buckets() local
    191  sectors += b.sectors;  in bch2_copygc_get_buckets()
    200  buckets_in_flight->nr, buckets_in_flight->sectors,  in bch2_copygc_get_buckets()
    201  saw, in_flight, not_movable, buckets->nr, sectors, nr_to_get, ret);  in bch2_copygc_get_buckets()
|
D | alloc_foreground.h |
    183  struct bkey_i *k, unsigned sectors,  in bch2_alloc_sectors_append_ptrs_inlined() argument
    189  BUG_ON(sectors > wp->sectors_free);  in bch2_alloc_sectors_append_ptrs_inlined()
    190  wp->sectors_free -= sectors;  in bch2_alloc_sectors_append_ptrs_inlined()
    191  wp->sectors_allocated += sectors;  in bch2_alloc_sectors_append_ptrs_inlined()
    203  BUG_ON(sectors > ob->sectors_free);  in bch2_alloc_sectors_append_ptrs_inlined()
    204  ob->sectors_free -= sectors;  in bch2_alloc_sectors_append_ptrs_inlined()
|
D | fs-io.c |
    145  struct quota_res *quota_res, s64 sectors)  in __bch2_i_sectors_acct() argument
    147  bch2_fs_inconsistent_on((s64) inode->v.i_blocks + sectors < 0, c,  in __bch2_i_sectors_acct()
    149  inode->v.i_ino, (u64) inode->v.i_blocks, sectors,  in __bch2_i_sectors_acct()
    151  inode->v.i_blocks += sectors;  in __bch2_i_sectors_acct()
    156  sectors > 0) {  in __bch2_i_sectors_acct()
    157  BUG_ON(sectors > quota_res->sectors);  in __bch2_i_sectors_acct()
    158  BUG_ON(sectors > inode->ei_quota_reserved);  in __bch2_i_sectors_acct()
    160  quota_res->sectors -= sectors;  in __bch2_i_sectors_acct()
    161  inode->ei_quota_reserved -= sectors;  in __bch2_i_sectors_acct()
    163  bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors, KEY_TYPE_QUOTA_WARN);  in __bch2_i_sectors_acct()
    [all …]
|
D | fs-io-buffered.c |
    167  unsigned bytes, sectors, offset_into_extent;  in bchfs_read() local
    189  sectors = k.k->size - offset_into_extent;  in bchfs_read()
    200  sectors = min(sectors, k.k->size - offset_into_extent);  in bchfs_read()
    203  ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,  in bchfs_read()
    209  bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;  in bchfs_read()
    572  unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;  in __bch2_writepage() local
    582  while (offset + sectors < f_sectors &&  in __bch2_writepage()
    583  w->tmp[offset + sectors].state >= SECTOR_dirty) {  in __bch2_writepage()
    584  reserved_sectors += w->tmp[offset + sectors].replicas_reserved;  in __bch2_writepage()
    585  dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;  in __bch2_writepage()
    [all …]
|
D | io_misc.c |
    26   u64 sectors,  in bch2_extent_fallocate() argument
    51   sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);  in bch2_extent_fallocate()
    59   ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);  in bch2_extent_fallocate()
    71   bch2_key_resize(&reservation->k, sectors);  in bch2_extent_fallocate()
    98   sectors = min_t(u64, sectors, wp->sectors_free);  in bch2_extent_fallocate()
    99   sectors_allocated = sectors;  in bch2_extent_fallocate()
    101  bch2_key_resize(&e->k, sectors);  in bch2_extent_fallocate()
    104  bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);  in bch2_extent_fallocate()
|
/linux-6.12.1/Documentation/admin-guide/device-mapper/ |
D | dm-ebs.rst |
    8   size. Its main purpose is to provide emulation of 512 byte sectors on
    18  <dev path> <offset> <emulated sectors> [<underlying sectors>]
    27  has to be a multiple of <emulated sectors>.
    28  <emulated sectors>:
    29  Number of sectors defining the logical block size to be emulated;
    30  1, 2, 4, 8 sectors of 512 bytes supported.
    34  <underlying sectors>:
    35  Number of sectors defining the logical block size of <dev path>.
    36  2^N supported, e.g. 8 = emulate 8 sectors of 512 bytes = 4KiB.
    43  offset 1024 sectors with the underlying device's block size automatically set:
    [all …]
|
D | dm-integrity.rst |
    68   dm-integrity won't read or write these sectors
    77   not used and data sectors and integrity tags are written
    106  The number of interleaved sectors. This value is rounded down to
    115  The number of sectors in one metadata buffer. The value is rounded
    178  512-byte sectors that corresponds to one bitmap bit.
    197  copy sectors from one journal section to another journal section
    201  key and also to disallow the attacker to move sectors from one
    219  256 sectors of metadata per data area. With the default buffer_sectors of
    226  2. provided data sectors - that is the number of sectors that the user
    233  * reserved sectors
    [all …]
|
/linux-6.12.1/drivers/target/ |
D | target_core_sbc.c |
    216  static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)  in sbc_get_size() argument
    218  return cmd->se_dev->dev_attrib.block_size * sectors;  in sbc_get_size()
    279  unsigned int sectors = sbc_get_write_same_sectors(cmd);  in sbc_setup_write_same() local
    288  if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) {  in sbc_setup_write_same()
    290  sectors, cmd->se_dev->dev_attrib.max_write_same_len);  in sbc_setup_write_same()
    296  if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||  in sbc_setup_write_same()
    297  ((cmd->t_task_lba + sectors) > end_lba)) {  in sbc_setup_write_same()
    299  (unsigned long long)end_lba, cmd->t_task_lba, sectors);  in sbc_setup_write_same()
    333  ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true);  in sbc_setup_write_same()
    668  u32 sectors, bool is_write)  in sbc_check_prot() argument
    [all …]
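
sbc_get_size() is simply block_size * sectors, and sbc_setup_write_same() rejects a request whose LBA range either wraps around or runs past the last LBA of the device. A userspace sketch of that range check, with illustrative values:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* re-statement of the two conditions checked in sbc_setup_write_same():
     * the range must not wrap the LBA space and must not extend past end_lba */
    static bool lba_range_ok(uint64_t lba, uint32_t sectors, uint64_t end_lba)
    {
        if (lba + sectors < lba)      /* unsigned wrap-around */
            return false;
        if (lba + sectors > end_lba)  /* runs past the device */
            return false;
        return true;
    }

    int main(void)
    {
        uint64_t end_lba = 1 << 20;  /* hypothetical 512 MiB device, 512-byte blocks */

        printf("%d\n", lba_range_ok(0, 8, end_lba));               /* 1: fits */
        printf("%d\n", lba_range_ok(end_lba - 4, 8, end_lba));     /* 0: past the end */
        printf("%d\n", lba_range_ok(UINT64_MAX - 2, 8, end_lba));  /* 0: wraps */
        return 0;
    }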
|
/linux-6.12.1/drivers/md/ |
D | raid0.c |
    66   sector_t curr_zone_end, sectors;  in create_strip_zones() local
    83   sectors = rdev1->sectors;  in create_strip_zones()
    84   sector_div(sectors, mddev->chunk_sectors);  in create_strip_zones()
    85   rdev1->sectors = sectors * mddev->chunk_sectors;  in create_strip_zones()
    95   (unsigned long long)rdev1->sectors,  in create_strip_zones()
    97   (unsigned long long)rdev2->sectors);  in create_strip_zones()
    103  if (rdev2->sectors == rdev1->sectors) {  in create_strip_zones()
    194  if (!smallest || (rdev1->sectors < smallest->sectors))  in create_strip_zones()
    204  zone->zone_end = smallest->sectors * cnt;  in create_strip_zones()
    217  zone->dev_start = smallest->sectors;  in create_strip_zones()
    [all …]
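
create_strip_zones() trims each member device to a whole number of chunks: divide the device size by chunk_sectors with sector_div(), then multiply back. The effect is just rounding down to a chunk multiple, as in this small example with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* illustrative: a 1000000-sector member with 128-sector (64 KiB) chunks */
        uint64_t dev_sectors = 1000000, chunk_sectors = 128;

        /* sector_div() divides in place in the kernel; the net effect is
         * rounding the device size down to a multiple of the chunk size */
        uint64_t chunks = dev_sectors / chunk_sectors;
        uint64_t usable = chunks * chunk_sectors;

        printf("usable sectors: %llu (dropped %llu)\n",
               (unsigned long long)usable,
               (unsigned long long)(dev_sectors - usable));  /* 999936, dropped 64 */
        return 0;
    }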
|
D | raid1.c |
    63   sector_t hi = lo + r1_bio->sectors;  in check_and_add_serial()
    340  r1_bio->sector + (r1_bio->sectors);  in update_head_pos()
    424  mddev->bitmap_ops->endwrite(mddev, r1_bio->sector, r1_bio->sectors,  in close_write()
    456  sector_t hi = r1_bio->sector + r1_bio->sectors;  in raid1_end_write_request()
    515  if (rdev_has_badblock(rdev, r1_bio->sector, r1_bio->sectors) &&  in raid1_end_write_request()
    563  sector_t sectors)  in align_to_barrier_unit_end() argument
    567  WARN_ON(sectors == 0);  in align_to_barrier_unit_end()
    575  if (len > sectors)  in align_to_barrier_unit_end()
    576  len = sectors;  in align_to_barrier_unit_end()
    596  int len = r1_bio->sectors;  in choose_first_rdev()
    [all …]
|
D | raid10.c |
    345  r10_bio->devs[slot].addr + (r10_bio->sectors);  in update_head_pos()
    432  mddev->bitmap_ops->endwrite(mddev, r10_bio->sector, r10_bio->sectors,  in close_write()
    533  r10_bio->sectors) &&  in raid10_end_write_request()
    730  int sectors = r10_bio->sectors;  in read_balance() local
    750  if (raid1_should_read_first(conf->mddev, this_sector, sectors))  in read_balance()
    765  r10_bio->devs[slot].addr + sectors >  in read_balance()
    772  r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)  in read_balance()
    776  if (is_badblock(rdev, dev_sector, sectors,  in read_balance()
    787  if (!do_balance && sectors > bad_sectors)  in read_balance()
    788  sectors = bad_sectors;  in read_balance()
    [all …]
|
/linux-6.12.1/Documentation/block/ |
D | stat.rst |
    31  read sectors     sectors  number of sectors read
    35  write sectors    sectors  number of sectors written
    42  discard sectors  sectors  number of sectors discarded
    67  read sectors, write sectors, discard_sectors
    70  These values count the number of sectors read from, written to, or
    71  discarded from this block device. The "sectors" in question are the
    72  standard UNIX 512-byte sectors, not any device- or filesystem-specific
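
Because the stat counters are defined as 512-byte units regardless of the device's logical block size, converting them to bytes is a fixed shift by 9. A trivial illustration with a made-up counter value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* illustrative value from the "read sectors" column of /sys/block/<dev>/stat */
        uint64_t read_sectors = 123456;

        /* always 512-byte units, so bytes = sectors << 9 */
        uint64_t read_bytes = read_sectors << 9;
        printf("%llu sectors = %llu bytes\n",
               (unsigned long long)read_sectors, (unsigned long long)read_bytes);
        return 0;
    }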
|
/linux-6.12.1/include/linux/ |
D | blk-integrity.h |
    69   unsigned int sectors)  in bio_integrity_intervals() argument
    71   return sectors >> (bi->interval_exp - 9);  in bio_integrity_intervals()
    75   unsigned int sectors)  in bio_integrity_bytes() argument
    77   return bio_integrity_intervals(bi, sectors) * bi->tuple_size;  in bio_integrity_bytes()
    132  unsigned int sectors)  in bio_integrity_intervals() argument
    138  unsigned int sectors)  in bio_integrity_bytes() argument
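
bio_integrity_intervals() converts a 512-byte sector count into protection intervals (one interval per 2^interval_exp bytes, hence the shift by interval_exp - 9), and bio_integrity_bytes() multiplies by tuple_size to size the integrity payload. A worked example with an assumed 4 KiB interval and 8-byte tuple:

    #include <stdio.h>

    int main(void)
    {
        /* illustrative profile: 4096-byte protection interval (interval_exp = 12),
         * 8 bytes of integrity metadata per interval (tuple_size = 8) */
        unsigned interval_exp = 12, tuple_size = 8;
        unsigned sectors = 64;  /* a 32 KiB bio */

        /* one interval per 2^interval_exp bytes; sectors are 512-byte units,
         * hence the shift by (interval_exp - 9) */
        unsigned intervals = sectors >> (interval_exp - 9);
        unsigned bytes = intervals * tuple_size;

        printf("%u intervals, %u bytes of integrity metadata\n",
               intervals, bytes);  /* 8 intervals, 64 bytes */
        return 0;
    }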
|
D | badblocks.h |
    51  int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
    53  int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
    55  int badblocks_clear(struct badblocks *bb, sector_t s, int sectors);
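
These helpers keep a table of bad ranges addressed by start sector and length: set marks [s, s + sectors) bad, clear drops that span, and check reports whether a queried span overlaps a recorded range. The trailing parameters are truncated in this listing, so the sketch below only models the range-overlap semantics in userspace; it is not the kernel API:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* one recorded bad range: [start, start + len) in 512-byte sectors */
    struct bad_range { uint64_t start; uint32_t len; };

    /* does the query span [s, s + sectors) overlap the recorded range? */
    static bool range_overlaps(const struct bad_range *b, uint64_t s, uint32_t sectors)
    {
        return s < b->start + b->len && b->start < s + sectors;
    }

    int main(void)
    {
        struct bad_range b = { .start = 1000, .len = 8 };  /* illustrative entry */

        printf("%d\n", range_overlaps(&b, 990, 8));    /* 0: ends at 998, before the range */
        printf("%d\n", range_overlaps(&b, 1004, 16));  /* 1: overlaps the tail of the range */
        return 0;
    }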
|
/linux-6.12.1/drivers/scsi/ |
D | scsicam.c |
    179  unsigned long heads, sectors, cylinders, temp;  in setsize() local
    182  sectors = 62L;  /* Maximize sectors per track */  in setsize()
    184  temp = cylinders * sectors;  /* Compute divisor for heads */  in setsize()
    189  sectors = capacity / temp;  /* Compute value for sectors per  in setsize()
    192  sectors++;  /* Else, increment number of sectors */  in setsize()
    193  temp = heads * sectors;  /* Compute divisor for cylinders */  in setsize()
    201  *secs = (unsigned int) sectors;  in setsize()
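
setsize() is the old BIOS-geometry heuristic: start from a fixed sectors-per-track and cylinder count, derive heads from the capacity, then recompute sectors per track and cylinders so that heads * sectors * cylinders roughly covers the capacity. The sketch below restates only the shape of that arithmetic; the real function clamps and special-cases values that this simplified version does not:

    #include <stdio.h>

    /* simplified take on the heads/sectors/cylinders heuristic: fix starting
     * values, then derive the others so the product roughly covers capacity.
     * Not the exact setsize() algorithm, just the shape of the arithmetic. */
    static void guess_geometry(unsigned long capacity,  /* total 512-byte sectors */
                               unsigned int *cyls, unsigned int *hds, unsigned int *secs)
    {
        unsigned long heads, sectors, cylinders, temp;

        cylinders = 1024L;
        sectors = 62L;               /* maximize sectors per track */

        temp = cylinders * sectors;  /* divisor for heads */
        heads = capacity / temp;
        if (capacity % temp)
            heads++;

        temp = cylinders * heads;    /* divisor for sectors per track */
        sectors = capacity / temp;
        if (capacity % temp)
            sectors++;

        temp = heads * sectors;      /* divisor for cylinders */
        cylinders = capacity / temp;

        *cyls = (unsigned int)cylinders;
        *hds  = (unsigned int)heads;
        *secs = (unsigned int)sectors;
    }

    int main(void)
    {
        unsigned int c, h, s;

        guess_geometry(4194304UL, &c, &h, &s);  /* a 2 GiB disk in 512-byte sectors */
        printf("C/H/S = %u/%u/%u\n", c, h, s);
        return 0;
    }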
|
/linux-6.12.1/drivers/mtd/ |
D | ssfdc.c |
    22   unsigned char sectors;  member
    317  ssfdc->sectors = 32;  in ssfdcr_add_mtd()
    318  get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors);  in ssfdcr_add_mtd()
    320  ((long)ssfdc->sectors * (long)ssfdc->heads));  in ssfdcr_add_mtd()
    323  ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,  in ssfdcr_add_mtd()
    325  (long)ssfdc->sectors);  in ssfdcr_add_mtd()
    328  (long)ssfdc->sectors;  in ssfdcr_add_mtd()
    411  ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);  in ssfdcr_getgeo()
    414  geo->sectors = ssfdc->sectors;  in ssfdcr_getgeo()
|
/linux-6.12.1/drivers/usb/storage/ |
D | datafab.c |
    60   unsigned long sectors;  /* total sector count */  member
    138  u32 sectors)  in datafab_read_data() argument
    153  if (sectors > 0x0FFFFFFF)  in datafab_read_data()
    162  totallen = sectors * info->ssize;  in datafab_read_data()
    221  u32 sectors)  in datafab_write_data() argument
    237  if (sectors > 0x0FFFFFFF)  in datafab_write_data()
    246  totallen = sectors * info->ssize;  in datafab_write_data()
    420  info->sectors = ((u32)(reply[117]) << 24) |  in datafab_id_device()
    582  info->sectors, info->ssize);  in datafab_transport()
    586  ((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1);  in datafab_transport()
|
/linux-6.12.1/Documentation/ABI/testing/ |
D | procfs-diskstats |
    15  6   sectors read
    19  10  sectors written
    32  17  sectors discarded
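
Fields 6, 10 and 17 of each /proc/diskstats line are the read, write and discard sector counts, all in 512-byte units. A small reader for the first ten fields of each line (the discard counter, field 17, sits further along the line than this example parses):

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/diskstats", "r");
        char line[512];

        if (!f) {
            perror("/proc/diskstats");
            return 1;
        }
        while (fgets(line, sizeof(line), f)) {
            unsigned int major, minor;
            char name[64];
            unsigned long long rd_ios, rd_merges, rd_sectors, rd_ticks;
            unsigned long long wr_ios, wr_merges, wr_sectors;

            /* fields 1..10 of the documented layout; sectors read is field 6,
             * sectors written is field 10 (both in 512-byte units) */
            if (sscanf(line, "%u %u %63s %llu %llu %llu %llu %llu %llu %llu",
                       &major, &minor, name, &rd_ios, &rd_merges, &rd_sectors,
                       &rd_ticks, &wr_ios, &wr_merges, &wr_sectors) == 10)
                printf("%-10s read %llu sectors, wrote %llu sectors\n",
                       name, rd_sectors, wr_sectors);
        }
        fclose(f);
        return 0;
    }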
|
/linux-6.12.1/drivers/md/bcache/ |
D | alloc.c |
    86   void bch_rescale_priorities(struct cache_set *c, int sectors)  in bch_rescale_priorities() argument
    93   atomic_sub(sectors, &c->rescale);  in bch_rescale_priorities()
    631  unsigned int sectors,  in bch_alloc_sectors() argument
    679  sectors = min(sectors, b->sectors_free);  in bch_alloc_sectors()
    681  SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);  in bch_alloc_sectors()
    682  SET_KEY_SIZE(k, sectors);  in bch_alloc_sectors()
    693  b->sectors_free -= sectors;  in bch_alloc_sectors()
    696  SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);  in bch_alloc_sectors()
    698  atomic_long_add(sectors,  in bch_alloc_sectors()
|
/linux-6.12.1/fs/hfsplus/ |
D | btree.c |
    76   u64 sectors, int file_id)  in hfsplus_calc_btree_clump_size() argument
    100  if (sectors < 0x200000) {  in hfsplus_calc_btree_clump_size()
    101  clump_size = sectors << 2;  /* 0.8 % */  in hfsplus_calc_btree_clump_size()
    106  for (i = 0, sectors = sectors >> 22;  in hfsplus_calc_btree_clump_size()
    107  sectors && (i < CLUMP_ENTRIES - 1);  in hfsplus_calc_btree_clump_size()
    108  ++i, sectors = sectors >> 1) {  in hfsplus_calc_btree_clump_size()
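
hfsplus_calc_btree_clump_size() sizes the B-tree clump from the volume size: below 0x200000 sectors (1 GiB of 512-byte sectors) it uses sectors << 2, which, read as a byte count, is roughly 0.8 % of the volume, matching the comment; larger volumes instead walk a lookup table indexed by how far the size exceeds 2^22 sectors. A worked example of the small-volume branch, assuming the shifted value is a byte count:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* illustrative: a 512 MiB volume is 1048576 sectors of 512 bytes,
         * below the 0x200000-sector (1 GiB) threshold */
        uint64_t sectors = 1048576;

        /* sectors << 2, taken as bytes, is 4/512 = ~0.8 % of the volume */
        uint64_t clump_bytes = sectors << 2;
        printf("clump size: %llu bytes (~0.8%% of %llu bytes)\n",
               (unsigned long long)clump_bytes,
               (unsigned long long)(sectors * 512));  /* 4 MiB of 512 MiB */
        return 0;
    }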
|