
Searched refs:nr_zones (Results 1 – 25 of 51) sorted by relevance


/linux-6.12.1/drivers/scsi/
sd_zbc.c
169 unsigned int nr_zones, size_t *buflen) in sd_zbc_alloc_report_buffer() argument
184 nr_zones = min(nr_zones, sdkp->zone_info.nr_zones); in sd_zbc_alloc_report_buffer()
185 bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE); in sd_zbc_alloc_report_buffer()
223 unsigned int nr_zones, report_zones_cb cb, void *data) in sd_zbc_report_zones() argument
242 buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen); in sd_zbc_report_zones()
246 while (zone_idx < nr_zones && lba < sdkp->capacity) { in sd_zbc_report_zones()
252 nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64); in sd_zbc_report_zones()
256 for (i = 0; i < nr && zone_idx < nr_zones; i++) { in sd_zbc_report_zones()
529 sdkp->zone_info.nr_zones - 1, in sd_zbc_print_zones()
534 sdkp->zone_info.nr_zones, in sd_zbc_print_zones()
[all …]
sd.h
80 u32 nr_zones; member
243 unsigned int nr_zones, report_zones_cb cb, void *data);
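
The sd_zbc.c hits above trace the usual shape of a gendisk ->report_zones() implementation: clamp the caller's nr_zones against the device's own zone count, walk forward from the start sector, and hand one struct blk_zone at a time to the report_zones_cb. The sketch below only illustrates that pattern and is not the sd_zbc code; struct my_zoned_dev, my_zoned_fill_zone() and my_zoned_report_zones() are hypothetical names, and every zone is synthesized as empty for brevity.

#include <linux/blkdev.h>
#include <linux/blkzoned.h>
#include <linux/string.h>

/*
 * Hypothetical driver state: zone geometry kept in the driver's private
 * data, much as sd_zbc keeps it in sdkp->zone_info.
 */
struct my_zoned_dev {
        unsigned int nr_zones;
        sector_t zone_sectors;          /* power-of-two zone size, in sectors */
        sector_t capacity;
};

/*
 * Hypothetical helper: describe the zone containing @sector. A real driver
 * would parse a descriptor returned by the device; here every zone is
 * reported as an empty sequential-write-required zone.
 */
static void my_zoned_fill_zone(struct my_zoned_dev *zd, sector_t sector,
                               struct blk_zone *zone)
{
        memset(zone, 0, sizeof(*zone));
        zone->start = sector & ~(zd->zone_sectors - 1);
        zone->len = zd->zone_sectors;
        zone->capacity = zd->zone_sectors;
        zone->wp = zone->start;
        zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
        zone->cond = BLK_ZONE_COND_EMPTY;
}

/*
 * Sketch of a ->report_zones() method in the shape of sd_zbc_report_zones():
 * clamp nr_zones, iterate from @sector, and stop when either nr_zones or the
 * capacity is exhausted. The return value is the number of zones reported.
 */
static int my_zoned_report_zones(struct gendisk *disk, sector_t sector,
                                 unsigned int nr_zones, report_zones_cb cb,
                                 void *data)
{
        struct my_zoned_dev *zd = disk->private_data;
        unsigned int zone_idx = 0;
        struct blk_zone zone;
        int ret;

        nr_zones = min(nr_zones, zd->nr_zones);

        while (zone_idx < nr_zones && sector < zd->capacity) {
                my_zoned_fill_zone(zd, sector, &zone);

                /* A non-zero return from the callback stops the iteration. */
                ret = cb(&zone, zone_idx, data);
                if (ret)
                        return ret;

                sector = zone.start + zone.len;
                zone_idx++;
        }

        return zone_idx;
}

A real driver fetches descriptors from the hardware in batches, which is why sd_zbc_alloc_report_buffer() above sizes a buffer from nr_zones before the loop starts.
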
/linux-6.12.1/drivers/md/
dm-zone.c
20 sector_t sector, unsigned int nr_zones, in dm_blk_do_report_zones() argument
40 nr_zones - args.zone_idx); in dm_blk_do_report_zones()
43 } while (args.zone_idx < nr_zones && in dm_blk_do_report_zones()
55 unsigned int nr_zones, report_zones_cb cb, void *data) in dm_blk_report_zones() argument
74 ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb, data); in dm_blk_report_zones()
117 struct dm_report_zones_args *args, unsigned int nr_zones) in dm_report_zones() argument
125 return blkdev_report_zones(bdev, sector, nr_zones, in dm_report_zones()
162 if (!disk->nr_zones || disk->nr_zones != md->nr_zones) { in dm_revalidate_zones()
166 md->nr_zones = 0; in dm_revalidate_zones()
169 if (md->nr_zones) in dm_revalidate_zones()
[all …]
dm-zoned-reclaim.c
450 unsigned int nr_unmap, nr_zones; in dmz_reclaim_percentage() local
453 nr_zones = nr_cache; in dmz_reclaim_percentage()
456 nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx); in dmz_reclaim_percentage()
461 return nr_unmap * 100 / nr_zones; in dmz_reclaim_percentage()
dm.h
110 unsigned int nr_zones, report_zones_cb cb, void *data);
113 sector_t sector, unsigned int nr_zones,
dm-target.c
220 struct dm_report_zones_args *args, unsigned int nr_zones) in io_err_report_zones() argument
233 args, nr_zones); in io_err_report_zones()
dm-linear.c
139 struct dm_report_zones_args *args, unsigned int nr_zones) in linear_report_zones() argument
145 args, nr_zones); in linear_report_zones()
dm-zoned-metadata.c
164 unsigned int nr_zones; member
265 return zmd->nr_zones; in dmz_nr_zones()
1429 for (idx = 0; idx < dev->nr_zones; idx++) { in dmz_emulate_zones()
1456 for (idx = 0; idx < zmd->nr_zones; idx++) { in dmz_drop_zones()
1486 zmd->nr_zones = 0; in dmz_init_zones()
1491 zmd->nr_zones += dev->nr_zones; in dmz_init_zones()
1502 if (!zmd->nr_zones) { in dmz_init_zones()
1509 zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones); in dmz_init_zones()
1717 if (dzone_id >= zmd->nr_zones) { in dmz_load_mapping()
1745 if (bzone_id >= zmd->nr_zones) { in dmz_load_mapping()
[all …]
dm-zoned-target.c
795 zoned_dev->nr_zones = bdev_nr_zones(bdev); in dmz_fixup_devices()
806 zoned_dev->nr_zones = bdev_nr_zones(bdev); in dmz_fixup_devices()
813 reg_dev->nr_zones = in dmz_fixup_devices()
817 zone_offset = reg_dev->nr_zones; in dmz_fixup_devices()
820 zone_offset += dmz->dev[i].nr_zones; in dmz_fixup_devices()
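
The dm-zone.c and dm-linear.c hits show that a device-mapper target does not walk zones itself: its ->report_zones method remaps the requested sector and forwards to dm_report_zones(), which in turn calls blkdev_report_zones() on the underlying device. Below is a hedged sketch of that forwarding in the style of linear_report_zones(); struct passthrough_ctx and the function name are invented for the example.

#include <linux/device-mapper.h>

/* Hypothetical pass-through target: one zoned device mapped at an offset. */
struct passthrough_ctx {
        struct dm_dev *dev;
        sector_t start;         /* start of the data area on ->dev */
};

static int passthrough_report_zones(struct dm_target *ti,
                                    struct dm_report_zones_args *args,
                                    unsigned int nr_zones)
{
        struct passthrough_ctx *pc = ti->private;
        sector_t sector = pc->start + dm_target_offset(ti, args->next_sector);

        /*
         * dm_report_zones() reports from the underlying device and translates
         * the zone positions back into the DM device's sector space.
         */
        return dm_report_zones(pc->dev->bdev, pc->start, sector,
                               args, nr_zones);
}
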
/linux-6.12.1/drivers/block/null_blk/
trace.h
63 TP_PROTO(struct nullb *nullb, unsigned int nr_zones),
64 TP_ARGS(nullb, nr_zones),
67 __field(unsigned int, nr_zones)
70 __entry->nr_zones = nr_zones;
74 __print_disk_name(__entry->disk), __entry->nr_zones)
zoned.c
91 dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects) in null_init_zoned_dev()
94 dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone), in null_init_zoned_dev()
101 if (dev->zone_nr_conv >= dev->nr_zones) { in null_init_zoned_dev()
102 dev->zone_nr_conv = dev->nr_zones - 1; in null_init_zoned_dev()
113 if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) { in null_init_zoned_dev()
123 } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) { in null_init_zoned_dev()
144 for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) { in null_init_zoned_dev()
194 unsigned int nr_zones, report_zones_cb cb, void *data) in null_report_zones() argument
204 if (first_zone >= dev->nr_zones) in null_report_zones()
207 nr_zones = min(nr_zones, dev->nr_zones - first_zone); in null_report_zones()
[all …]
null_blk.h
67 unsigned int nr_zones; member
139 unsigned int nr_zones, report_zones_cb cb, void *data);
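
The zoned.c hit at line 91 shows null_blk deriving its zone count by rounding the device capacity up to a whole number of zones, so a trailing partial zone still counts as a zone. A stand-alone illustration of that arithmetic follows; the helper name is made up, and a power-of-two zone size is assumed, as the block layer requires.

#include <linux/log2.h>
#include <linux/math.h>
#include <linux/types.h>

/*
 * Zones needed to cover @capacity_sects with zones of @zone_size_sects
 * sectors each: round the capacity up before dividing so that a partial
 * last zone is counted (illustrative, not null_blk's own code).
 */
static inline unsigned int zones_for_capacity(sector_t capacity_sects,
                                              sector_t zone_size_sects)
{
        return round_up(capacity_sects, zone_size_sects) >>
                ilog2(zone_size_sects);
}

Having nr_zones fixed this way is what lets null_init_zoned_dev() clamp zone_nr_conv to nr_zones - 1, the behaviour the null_blk.rst hit further down documents.
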
/linux-6.12.1/drivers/nvme/host/
zns.c
120 unsigned int nr_zones, size_t *buflen) in nvme_zns_alloc_report_buffer() argument
129 nr_zones = min_t(unsigned int, nr_zones, in nvme_zns_alloc_report_buffer()
133 nr_zones * sizeof(struct nvme_zone_descriptor); in nvme_zns_alloc_report_buffer()
177 unsigned int nr_zones, report_zones_cb cb, void *data) in nvme_ns_report_zones() argument
188 report = nvme_zns_alloc_report_buffer(ns, nr_zones, &buflen); in nvme_ns_report_zones()
200 while (zone_idx < nr_zones && sector < get_capacity(ns->disk)) { in nvme_ns_report_zones()
211 nz = min((unsigned int)le64_to_cpu(report->nr_zones), nr_zones); in nvme_ns_report_zones()
215 for (i = 0; i < nz && zone_idx < nr_zones; i++) { in nvme_ns_report_zones()
multipath.c
510 unsigned int nr_zones, report_zones_cb cb, void *data) in nvme_ns_head_report_zones() argument
519 ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); in nvme_ns_head_report_zones()
992 ns->head->disk->nr_zones = ns->disk->nr_zones; in nvme_mpath_add_disk()
/linux-6.12.1/fs/btrfs/
zoned.c
195 struct blk_zone *zones, unsigned int nr_zones) in emulate_report_zones() argument
202 for (i = 0; i < nr_zones; i++) { in emulate_report_zones()
220 struct blk_zone *zones, unsigned int *nr_zones) in btrfs_get_dev_zones() argument
225 if (!*nr_zones) in btrfs_get_dev_zones()
229 ret = emulate_report_zones(device, pos, zones, *nr_zones); in btrfs_get_dev_zones()
230 *nr_zones = ret; in btrfs_get_dev_zones()
245 *nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno); in btrfs_get_dev_zones()
247 for (i = 0; i < *nr_zones; i++) { in btrfs_get_dev_zones()
255 if (i == *nr_zones) { in btrfs_get_dev_zones()
258 sizeof(*zinfo->zone_cache) * *nr_zones); in btrfs_get_dev_zones()
[all …]
/linux-6.12.1/drivers/nvme/target/
zns.c
206 u64 nr_zones; member
227 if (rz->nr_zones < rz->out_nr_zones) { in nvmet_bdev_report_zone_cb()
246 rz->nr_zones++; in nvmet_bdev_report_zone_cb()
273 __le64 nr_zones; in nvmet_bdev_zone_zmgmt_recv_work() local
281 .nr_zones = 0, in nvmet_bdev_zone_zmgmt_recv_work()
306 rz_data.nr_zones = min(rz_data.nr_zones, rz_data.out_nr_zones); in nvmet_bdev_zone_zmgmt_recv_work()
308 nr_zones = cpu_to_le64(rz_data.nr_zones); in nvmet_bdev_zone_zmgmt_recv_work()
309 status = nvmet_copy_to_sgl(req, 0, &nr_zones, sizeof(nr_zones)); in nvmet_bdev_zone_zmgmt_recv_work()
399 unsigned int nr_zones = bdev_nr_zones(bdev); in nvmet_bdev_zone_mgmt_emulate_all() local
407 d.zbitmap = kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(*(d.zbitmap)), in nvmet_bdev_zone_mgmt_emulate_all()
[all …]
/linux-6.12.1/block/
blk-zoned.c
138 unsigned int nr_zones, report_zones_cb cb, void *data) in blkdev_report_zones() argument
146 if (!nr_zones || sector >= capacity) in blkdev_report_zones()
149 return disk->fops->report_zones(disk, sector, nr_zones, cb, data); in blkdev_report_zones()
263 if (!rep.nr_zones) in blkdev_report_zones_ioctl()
267 ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones, in blkdev_report_zones_ioctl()
272 rep.nr_zones = ret; in blkdev_report_zones_ioctl()
366 if (zno < disk->nr_zones - 1) in disk_zone_is_full()
1485 disk->nr_zones = 0; in disk_free_zone_resources()
1502 unsigned int nr_zones) in disk_revalidate_zone_resources() argument
1516 pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_zones); in disk_revalidate_zone_resources()
[all …]
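
blk-zoned.c is the consumer-facing half of the interface: blkdev_report_zones() validates the range and dispatches to disk->fops->report_zones(), which calls the supplied report_zones_cb once per zone. A minimal kernel-side sketch of using that pair follows; count_busy_zone_cb() and count_busy_zones() are invented names for the example.

#include <linux/blkdev.h>
#include <linux/blkzoned.h>

/*
 * report_zones_cb invoked once per reported zone; this one simply counts
 * the zones that are not empty. @data points at the caller's counter.
 */
static int count_busy_zone_cb(struct blk_zone *zone, unsigned int idx,
                              void *data)
{
        unsigned int *busy = data;

        if (zone->cond != BLK_ZONE_COND_EMPTY)
                (*busy)++;

        return 0;       /* keep iterating */
}

/* Walk every zone of @bdev through blkdev_report_zones(). */
static int count_busy_zones(struct block_device *bdev, unsigned int *busy)
{
        int ret;

        *busy = 0;
        ret = blkdev_report_zones(bdev, 0, bdev_nr_zones(bdev),
                                  count_busy_zone_cb, busy);
        return ret < 0 ? ret : 0;
}

The ioctl path above follows the same contract: a positive return value is the number of zones reported, which blkdev_report_zones_ioctl() copies back to user space as rep.nr_zones.
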
/linux-6.12.1/drivers/block/
ublk_drv.c
217 __u32 nr_zones; member
257 int nr_zones; in ublk_dev_param_zoned_validate() local
265 nr_zones = ublk_get_nr_zones(ub); in ublk_dev_param_zoned_validate()
267 if (p->max_active_zones > nr_zones) in ublk_dev_param_zoned_validate()
270 if (p->max_open_zones > nr_zones) in ublk_dev_param_zoned_validate()
278 ub->ub_disk->nr_zones = ublk_get_nr_zones(ub); in ublk_dev_param_zoned_apply()
283 unsigned int nr_zones, size_t *buflen) in ublk_alloc_report_buffer() argument
289 nr_zones = min_t(unsigned int, nr_zones, in ublk_alloc_report_buffer()
290 ublk->ub_disk->nr_zones); in ublk_alloc_report_buffer()
292 bufsize = nr_zones * sizeof(struct blk_zone); in ublk_alloc_report_buffer()
[all …]
virtio_blk.c
534 unsigned int nr_zones, in virtblk_alloc_report_buffer() argument
541 nr_zones = min_t(unsigned int, nr_zones, in virtblk_alloc_report_buffer()
545 nr_zones * sizeof(struct virtio_blk_zone_descriptor); in virtblk_alloc_report_buffer()
663 unsigned int nr_zones, report_zones_cb cb, in virtblk_report_zones() argument
676 report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen); in virtblk_report_zones()
687 while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) { in virtblk_report_zones()
695 nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones), in virtblk_report_zones()
696 nr_zones); in virtblk_report_zones()
700 for (i = 0; i < nz && zone_idx < nr_zones; i++) { in virtblk_report_zones()
/linux-6.12.1/include/uapi/linux/
blkzoned.h
131 __u32 nr_zones; member
virtio_blk.h
257 __virtio64 nr_zones; member
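
These two uapi members are what user space and virtio guests see. In the block uapi, struct blk_zone_report.nr_zones works in both directions: the caller sets it to the capacity of the trailing zones[] array and the BLKREPORTZONE ioctl rewrites it with the number of zones actually reported, matching the rep.nr_zones = ret assignment in blk-zoned.c above. A small user-space example, offered as a sketch:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkzoned.h>

/* Report the first 16 zones of a zoned block device. */
int main(int argc, char **argv)
{
        unsigned int want = 16, i;
        struct blk_zone_report *rep;
        int fd, ret;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <zoned block device>\n", argv[0]);
                return 1;
        }

        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        rep = calloc(1, sizeof(*rep) + want * sizeof(struct blk_zone));
        if (!rep) {
                close(fd);
                return 1;
        }

        rep->sector = 0;        /* start reporting at the first zone */
        rep->nr_zones = want;   /* capacity of the zones[] array */

        ret = ioctl(fd, BLKREPORTZONE, rep);
        if (ret)
                perror("BLKREPORTZONE");

        /* On success the kernel has updated rep->nr_zones. */
        for (i = 0; !ret && i < rep->nr_zones; i++)
                printf("zone %u: start %llu len %llu wp %llu cond %u\n", i,
                       (unsigned long long)rep->zones[i].start,
                       (unsigned long long)rep->zones[i].len,
                       (unsigned long long)rep->zones[i].wp,
                       (unsigned int)rep->zones[i].cond);

        free(rep);
        close(fd);
        return ret ? 1 : 0;
}

blkzone(8) from util-linux does essentially this, with a larger buffer and a loop over the whole device.
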
/linux-6.12.1/Documentation/block/
null_blk.rst
151 zone_nr_conv >= nr_zones, it will be reduced to nr_zones - 1.
/linux-6.12.1/include/linux/
powercap.h
67 int nr_zones; member
device-mapper.h
101 unsigned int nr_zones);
541 struct dm_report_zones_args *args, unsigned int nr_zones);
/linux-6.12.1/drivers/powercap/
powercap_sys.c
566 control_type->nr_zones++; in powercap_register_zone()
595 control_type->nr_zones--; in powercap_unregister_zone()
648 if (control_type->nr_zones) { in powercap_unregister_control_type()
