/linux-6.12.1/drivers/md/dm-vdo/
D | logical-zone.c |
    55   struct logical_zone *zone = &zones->zones[zone_number];   in initialize_zone() local
    58   result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->lbn_operations);   in initialize_zone()
    63   zone->next = &zones->zones[zone_number + 1];   in initialize_zone()
    65   vdo_initialize_completion(&zone->completion, vdo,   in initialize_zone()
    67   zone->zones = zones;   in initialize_zone()
    68   zone->zone_number = zone_number;   in initialize_zone()
    69   zone->thread_id = vdo->thread_config.logical_threads[zone_number];   in initialize_zone()
    70   zone->block_map_zone = &vdo->block_map->zones[zone_number];   in initialize_zone()
    71   INIT_LIST_HEAD(&zone->write_vios);   in initialize_zone()
    72   vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);   in initialize_zone()
    [all …]

D | block-map.c |
    60   struct block_map_zone *zone;   member
    98   struct block_map_zone *zone;   member
    199  info->vio->completion.callback_thread_id = cache->zone->thread_id;   in initialize_info()
    249  VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),   in assert_on_cache_thread()
    251  function_name, cache->zone->thread_id, thread_id);   in assert_on_cache_thread()
    257  VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),   in assert_io_allowed()
    624  static void check_for_drain_complete(struct block_map_zone *zone)   in check_for_drain_complete() argument
    626  if (vdo_is_state_draining(&zone->state) &&   in check_for_drain_complete()
    627  (zone->active_lookups == 0) &&   in check_for_drain_complete()
    628  !vdo_waitq_has_waiters(&zone->flush_waiters) &&   in check_for_drain_complete()
    [all …]

D | physical-zone.c |
    329  struct physical_zone *zone = &zones->zones[zone_number];   in initialize_zone() local
    331  result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->pbn_operations);   in initialize_zone()
    335  result = make_pbn_lock_pool(LOCK_POOL_CAPACITY, &zone->lock_pool);   in initialize_zone()
    337  vdo_int_map_free(zone->pbn_operations);   in initialize_zone()
    341  zone->zone_number = zone_number;   in initialize_zone()
    342  zone->thread_id = vdo->thread_config.physical_threads[zone_number];   in initialize_zone()
    343  zone->allocator = &vdo->depot->allocators[zone_number];   in initialize_zone()
    344  zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count];   in initialize_zone()
    345  result = vdo_make_default_thread(vdo, zone->thread_id);   in initialize_zone()
    347  free_pbn_lock_pool(vdo_forget(zone->lock_pool));   in initialize_zone()
    [all …]

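The physical-zone.c excerpt (source line 344) links each zone to its successor modulo the zone count, forming a ring that a caller can walk via zone->next. Below is a minimal userspace sketch of that linking rule only; the struct, the walk loop, and all names are illustrative assumptions rather than the driver's code.

/* Sketch: ring-linking of zones, modeled after the modulo assignment above. */
#include <stdio.h>

#define ZONE_COUNT 4

struct model_zone {
	unsigned int zone_number;
	struct model_zone *next;
};

int main(void)
{
	struct model_zone zones[ZONE_COUNT];
	struct model_zone *zone;
	unsigned int i;

	for (i = 0; i < ZONE_COUNT; i++) {
		zones[i].zone_number = i;
		/* Same linking rule as the excerpt: wrap around at the end. */
		zones[i].next = &zones[(i + 1) % ZONE_COUNT];
	}

	/* Starting anywhere, following ->next visits every zone once per lap. */
	zone = &zones[2];
	for (i = 0; i < ZONE_COUNT; i++, zone = zone->next)
		printf("visiting zone %u\n", zone->zone_number);

	return 0;
}
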
/linux-6.12.1/drivers/block/null_blk/
D | zoned.c |
    25   struct nullb_zone *zone)   in null_init_zone_lock() argument
    28   spin_lock_init(&zone->spinlock);   in null_init_zone_lock()
    30   mutex_init(&zone->mutex);   in null_init_zone_lock()
    34   struct nullb_zone *zone)   in null_lock_zone() argument
    37   spin_lock_irq(&zone->spinlock);   in null_lock_zone()
    39   mutex_lock(&zone->mutex);   in null_lock_zone()
    43   struct nullb_zone *zone)   in null_unlock_zone() argument
    46   spin_unlock_irq(&zone->spinlock);   in null_unlock_zone()
    48   mutex_unlock(&zone->mutex);   in null_unlock_zone()
    55   struct nullb_zone *zone;   in null_init_zoned_dev() local
    [all …]

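The zoned.c excerpt shows null_blk initializing, taking, and releasing a per-zone lock through one of two primitives, with the branch condition cut off by the excerpt. Here is a userspace model of that select-one-of-two-lock-types shape, assuming a plain boolean flag stands in for whatever the driver actually tests; names are invented.

/* Sketch: per-zone lock that is either a spinlock or a mutex, chosen at init. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct model_zone {
	pthread_spinlock_t spinlock;
	pthread_mutex_t mutex;
};

static bool need_spinlock;	/* assumption: stands in for the driver's condition */

static void model_init_zone_lock(struct model_zone *zone)
{
	if (need_spinlock)
		pthread_spin_init(&zone->spinlock, PTHREAD_PROCESS_PRIVATE);
	else
		pthread_mutex_init(&zone->mutex, NULL);
}

static void model_lock_zone(struct model_zone *zone)
{
	if (need_spinlock)
		pthread_spin_lock(&zone->spinlock);
	else
		pthread_mutex_lock(&zone->mutex);
}

static void model_unlock_zone(struct model_zone *zone)
{
	if (need_spinlock)
		pthread_spin_unlock(&zone->spinlock);
	else
		pthread_mutex_unlock(&zone->mutex);
}

int main(void)
{
	struct model_zone z;

	model_init_zone_lock(&z);
	model_lock_zone(&z);
	printf("zone locked and unlocked with %s\n",
	       need_spinlock ? "spinlock" : "mutex");
	model_unlock_zone(&z);
	return 0;
}
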
/linux-6.12.1/fs/pstore/
D | zone.c |
    160  static inline int buffer_datalen(struct pstore_zone *zone)   in buffer_datalen() argument
    162  return atomic_read(&zone->buffer->datalen);   in buffer_datalen()
    165  static inline int buffer_start(struct pstore_zone *zone)   in buffer_start() argument
    167  return atomic_read(&zone->buffer->start);   in buffer_start()
    175  static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf,   in psz_zone_read_buffer() argument
    178  if (!buf || !zone || !zone->buffer)   in psz_zone_read_buffer()
    180  if (off > zone->buffer_size)   in psz_zone_read_buffer()
    182  len = min_t(size_t, len, zone->buffer_size - off);   in psz_zone_read_buffer()
    183  memcpy(buf, zone->buffer->data + off, len);   in psz_zone_read_buffer()
    187  static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf,   in psz_zone_read_oldbuf() argument
    [all …]

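The psz_zone_read_buffer() lines above follow a common clamped-read pattern: reject missing pointers or an offset past the end, clamp the requested length to what remains in the zone buffer, then copy. A standalone userspace sketch of that pattern, with simplified error values and an invented struct layout:

/* Sketch: bounds-checked read from a fixed-size zone buffer. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct model_zone {
	char *data;
	size_t buffer_size;
};

static long zone_read(struct model_zone *zone, char *buf, size_t len, size_t off)
{
	if (!buf || !zone || !zone->data)
		return -1;
	if (off > zone->buffer_size)
		return -1;

	/* Never copy past the end of the zone buffer. */
	if (len > zone->buffer_size - off)
		len = zone->buffer_size - off;
	memcpy(buf, zone->data + off, len);
	return (long)len;
}

int main(void)
{
	char backing[8] = "pstore!";
	struct model_zone z = { .data = backing, .buffer_size = sizeof(backing) };
	char out[16] = { 0 };

	printf("read %ld bytes: %s\n", zone_read(&z, out, sizeof(out), 3), out);
	return 0;
}
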
/linux-6.12.1/mm/
D | page_alloc.c |
    289  static bool cond_accept_memory(struct zone *zone, unsigned int order);
    314  _deferred_grow_zone(struct zone *zone, unsigned int order)   in _deferred_grow_zone() argument
    316  return deferred_grow_zone(zone, order);   in _deferred_grow_zone()
    324  static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)   in _deferred_grow_zone() argument
    429  static int page_outside_zone_boundaries(struct zone *zone, struct page *page)   in page_outside_zone_boundaries() argument
    437  seq = zone_span_seqbegin(zone);   in page_outside_zone_boundaries()
    438  start_pfn = zone->zone_start_pfn;   in page_outside_zone_boundaries()
    439  sp = zone->spanned_pages;   in page_outside_zone_boundaries()
    440  ret = !zone_spans_pfn(zone, pfn);   in page_outside_zone_boundaries()
    441  } while (zone_span_seqretry(zone, seq));   in page_outside_zone_boundaries()
    [all …]

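page_outside_zone_boundaries() in the excerpt samples zone_start_pfn and spanned_pages inside a zone_span_seqbegin()/zone_span_seqretry() loop, so a concurrent change to the zone span (the write side appears later under include/linux/memory_hotplug.h) forces a re-read instead of a torn result. The following is a minimal single-writer seqcount model in userspace C11 that shows the retry shape; the seqcount itself and all names are assumptions, not the kernel's span_seqlock.

/* Sketch: read a two-field span consistently by retrying on sequence change. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct model_zone {
	atomic_uint seq;		/* even = stable, odd = write in progress */
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
};

static unsigned int span_seqbegin(struct model_zone *zone)
{
	unsigned int s;

	/* Wait until no write is in flight, then remember the sequence. */
	while ((s = atomic_load_explicit(&zone->seq, memory_order_acquire)) & 1)
		;
	return s;
}

static bool span_seqretry(struct model_zone *zone, unsigned int s)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&zone->seq, memory_order_relaxed) != s;
}

static bool pfn_outside_zone(struct model_zone *zone, unsigned long pfn)
{
	unsigned long start, size;
	unsigned int seq;
	bool outside;

	do {
		seq = span_seqbegin(zone);
		start = zone->zone_start_pfn;
		size = zone->spanned_pages;
		outside = pfn < start || pfn >= start + size;
	} while (span_seqretry(zone, seq));

	return outside;
}

int main(void)
{
	struct model_zone z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x2000 };

	printf("pfn 0x1800 outside: %d\n", pfn_outside_zone(&z, 0x1800));
	printf("pfn 0x4000 outside: %d\n", pfn_outside_zone(&z, 0x4000));
	return 0;
}
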
D | show_mem.c |
    26   static inline void show_node(struct zone *zone)   in show_node() argument
    29   printk("Node %d ", zone_to_nid(zone));   in show_node()
    38   struct zone *zone;   in si_mem_available() local
    40   for_each_zone(zone)   in si_mem_available()
    41   wmark_low += low_wmark_pages(zone);   in si_mem_available()
    104  struct zone *zone = &pgdat->node_zones[zone_type];   in si_meminfo_node() local
    106  if (is_highmem(zone)) {   in si_meminfo_node()
    107  managed_highpages += zone_managed_pages(zone);   in si_meminfo_node()
    108  free_highpages += zone_page_state(zone, NR_FREE_PAGES);   in si_meminfo_node()
    190  struct zone *zone;   in show_free_areas() local
    [all …]

D | vmstat.c |
    38   static void zero_zone_numa_counters(struct zone *zone)   in zero_zone_numa_counters() argument
    43   atomic_long_set(&zone->vm_numa_event[item], 0);   in zero_zone_numa_counters()
    45   per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]   in zero_zone_numa_counters()
    54   struct zone *zone;   in zero_zones_numa_counters() local
    56   for_each_populated_zone(zone)   in zero_zones_numa_counters()
    57   zero_zone_numa_counters(zone);   in zero_zones_numa_counters()
    169  static void fold_vm_zone_numa_events(struct zone *zone)   in fold_vm_zone_numa_events() argument
    178  pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);   in fold_vm_zone_numa_events()
    184  zone_numa_event_add(zone_numa_events[item], zone, item);   in fold_vm_zone_numa_events()
    189  struct zone *zone;   in fold_vm_numa_events() local
    [all …]

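The fold_vm_zone_numa_events() lines show the usual per-CPU counter scheme: each CPU accumulates events privately, and folding walks every CPU's counters and adds the totals into the zone-wide atomics via zone_numa_event_add(). A userspace model of that fold, with an invented CPU count and a single event item, and an atomic exchange standing in for whatever grab-and-zero step the kernel actually uses:

/* Sketch: fold per-CPU deltas into one zone-wide atomic counter. */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

struct model_zone {
	atomic_long vm_numa_event;		/* folded, zone-wide total */
	atomic_long percpu_event[NR_CPUS];	/* per-CPU deltas */
};

static void fold_zone_numa_events(struct model_zone *zone)
{
	long folded = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		/* Grab and zero in one step so concurrent increments are not lost. */
		folded += atomic_exchange(&zone->percpu_event[cpu], 0);

	atomic_fetch_add(&zone->vm_numa_event, folded);
}

int main(void)
{
	struct model_zone z = { 0 };
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		atomic_fetch_add(&z.percpu_event[cpu], cpu + 1);

	fold_zone_numa_events(&z);
	printf("zone total after fold: %ld\n", atomic_load(&z.vm_numa_event));
	return 0;
}
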
D | compaction.c |
    158  static void defer_compaction(struct zone *zone, int order)   in defer_compaction() argument
    160  zone->compact_considered = 0;   in defer_compaction()
    161  zone->compact_defer_shift++;   in defer_compaction()
    163  if (order < zone->compact_order_failed)   in defer_compaction()
    164  zone->compact_order_failed = order;   in defer_compaction()
    166  if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)   in defer_compaction()
    167  zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;   in defer_compaction()
    169  trace_mm_compaction_defer_compaction(zone, order);   in defer_compaction()
    173  static bool compaction_deferred(struct zone *zone, int order)   in compaction_deferred() argument
    175  unsigned long defer_limit = 1UL << zone->compact_defer_shift;   in compaction_deferred()
    [all …]

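defer_compaction() in the excerpt resets the considered counter and bumps compact_defer_shift (capped at COMPACT_MAX_DEFER_SHIFT), so each failure roughly doubles how many later requests compaction_deferred() turns away before compaction is retried at that order. The sketch below reproduces that backoff as standalone C; the tracepoint is omitted and the body of compaction_deferred() past its defer_limit line is reconstructed as an assumption, not copied from the kernel.

/* Sketch: exponential deferral of a retried operation, per zone and order. */
#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6

struct model_zone {
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
};

static void defer_compaction(struct model_zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

static bool compaction_deferred(struct model_zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Skip until enough requests have been considered since the failure. */
	if (++zone->compact_considered >= defer_limit)
		return false;

	return true;
}

int main(void)
{
	struct model_zone z = { .compact_order_failed = 10 };
	int i;

	defer_compaction(&z, 3);	/* one failure at order 3 */
	for (i = 0; i < 4; i++)
		printf("attempt %d deferred: %d\n", i, compaction_deferred(&z, 3));
	return 0;
}
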
D | memory_hotplug.c |
    414  static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,   in find_smallest_section_pfn() argument
    425  if (zone != page_zone(pfn_to_page(start_pfn)))   in find_smallest_section_pfn()
    435  static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,   in find_biggest_section_pfn() argument
    450  if (zone != page_zone(pfn_to_page(pfn)))   in find_biggest_section_pfn()
    459  static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,   in shrink_zone_span() argument
    463  int nid = zone_to_nid(zone);   in shrink_zone_span()
    465  if (zone->zone_start_pfn == start_pfn) {   in shrink_zone_span()
    472  pfn = find_smallest_section_pfn(nid, zone, end_pfn,   in shrink_zone_span()
    473  zone_end_pfn(zone));   in shrink_zone_span()
    475  zone->spanned_pages = zone_end_pfn(zone) - pfn;   in shrink_zone_span()
    [all …]

D | mm_init.c |
    52   struct zone *zone;   in mminit_verify_zonelist() local
    63   zone = &pgdat->node_zones[zoneid];   in mminit_verify_zonelist()
    64   if (!populated_zone(zone))   in mminit_verify_zonelist()
    70   zone->name);   in mminit_verify_zonelist()
    73   for_each_zone_zonelist(zone, z, zonelist, zoneid)   in mminit_verify_zonelist()
    74   pr_cont("%d:%s ", zone_to_nid(zone), zone->name);   in mminit_verify_zonelist()
    566  unsigned long zone, int nid)   in __init_single_page() argument
    569  set_page_links(page, zone, nid, pfn);   in __init_single_page()
    578  if (!is_highmem_idx(zone))   in __init_single_page()
    720  struct zone *zone = &pgdat->node_zones[zid];   in init_reserved_page() local
    [all …]

D | page_isolation.c |
    37   struct zone *zone = page_zone(page);   in has_unmovable_pages() local
    72   if (zone_idx(zone) == ZONE_MOVABLE)   in has_unmovable_pages()
    150  struct zone *zone = page_zone(page);   in set_migratetype_isolate() local
    158  spin_lock_irqsave(&zone->lock, flags);   in set_migratetype_isolate()
    166  spin_unlock_irqrestore(&zone->lock, flags);   in set_migratetype_isolate()
    184  if (!move_freepages_block_isolate(zone, page, MIGRATE_ISOLATE)) {   in set_migratetype_isolate()
    185  spin_unlock_irqrestore(&zone->lock, flags);   in set_migratetype_isolate()
    188  zone->nr_isolate_pageblock++;   in set_migratetype_isolate()
    189  spin_unlock_irqrestore(&zone->lock, flags);   in set_migratetype_isolate()
    193  spin_unlock_irqrestore(&zone->lock, flags);   in set_migratetype_isolate()
    [all …]

/linux-6.12.1/include/linux/
D | memory_hotplug.h |
    11   struct zone;
    101  static inline unsigned zone_span_seqbegin(struct zone *zone)   in zone_span_seqbegin() argument
    103  return read_seqbegin(&zone->span_seqlock);   in zone_span_seqbegin()
    105  static inline int zone_span_seqretry(struct zone *zone, unsigned iv)   in zone_span_seqretry() argument
    107  return read_seqretry(&zone->span_seqlock, iv);   in zone_span_seqretry()
    109  static inline void zone_span_writelock(struct zone *zone)   in zone_span_writelock() argument
    111  write_seqlock(&zone->span_seqlock);   in zone_span_writelock()
    113  static inline void zone_span_writeunlock(struct zone *zone)   in zone_span_writeunlock() argument
    115  write_sequnlock(&zone->span_seqlock);   in zone_span_writeunlock()
    117  static inline void zone_seqlock_init(struct zone *zone)   in zone_seqlock_init() argument
    [all …]

D | mmzone.h |
    818   struct zone {   struct
    1014  static inline unsigned long wmark_pages(const struct zone *z,   in wmark_pages()
    1020  static inline unsigned long min_wmark_pages(const struct zone *z)   in min_wmark_pages()
    1025  static inline unsigned long low_wmark_pages(const struct zone *z)   in low_wmark_pages()
    1030  static inline unsigned long high_wmark_pages(const struct zone *z)   in high_wmark_pages()
    1035  static inline unsigned long promo_wmark_pages(const struct zone *z)   in promo_wmark_pages()
    1040  static inline unsigned long zone_managed_pages(struct zone *zone)   in zone_managed_pages() argument
    1042  return (unsigned long)atomic_long_read(&zone->managed_pages);   in zone_managed_pages()
    1045  static inline unsigned long zone_cma_pages(struct zone *zone)   in zone_cma_pages() argument
    1048  return zone->cma_pages;   in zone_cma_pages()
    [all …]

D | vmstat.h |
    149  static inline void zone_numa_event_add(long x, struct zone *zone,   in zone_numa_event_add() argument
    152  atomic_long_add(x, &zone->vm_numa_event[item]);   in zone_numa_event_add()
    156  static inline unsigned long zone_numa_event_state(struct zone *zone,   in zone_numa_event_state() argument
    159  return atomic_long_read(&zone->vm_numa_event[item]);   in zone_numa_event_state()
    169  static inline void zone_page_state_add(long x, struct zone *zone,   in zone_page_state_add() argument
    172  atomic_long_add(x, &zone->vm_stat[item]);   in zone_page_state_add()
    211  static inline unsigned long zone_page_state(struct zone *zone,   in zone_page_state() argument
    214  long x = atomic_long_read(&zone->vm_stat[item]);   in zone_page_state()
    228  static inline unsigned long zone_page_state_snapshot(struct zone *zone,   in zone_page_state_snapshot() argument
    231  long x = atomic_long_read(&zone->vm_stat[item]);   in zone_page_state_snapshot()
    [all …]

/linux-6.12.1/tools/power/cpupower/lib/
D | powercap.c |
    124  static int sysfs_powercap_get64_val(struct powercap_zone *zone,   in sysfs_powercap_get64_val() argument
    132  strcat(file, zone->sys_name);   in sysfs_powercap_get64_val()
    146  int powercap_get_max_energy_range_uj(struct powercap_zone *zone, uint64_t *val)   in powercap_get_max_energy_range_uj() argument
    148  return sysfs_powercap_get64_val(zone, GET_MAX_ENERGY_RANGE_UJ, val);   in powercap_get_max_energy_range_uj()
    151  int powercap_get_energy_uj(struct powercap_zone *zone, uint64_t *val)   in powercap_get_energy_uj() argument
    153  return sysfs_powercap_get64_val(zone, GET_ENERGY_UJ, val);   in powercap_get_energy_uj()
    156  int powercap_get_max_power_range_uw(struct powercap_zone *zone, uint64_t *val)   in powercap_get_max_power_range_uw() argument
    158  return sysfs_powercap_get64_val(zone, GET_MAX_POWER_RANGE_UW, val);   in powercap_get_max_power_range_uw()
    161  int powercap_get_power_uw(struct powercap_zone *zone, uint64_t *val)   in powercap_get_power_uw() argument
    163  return sysfs_powercap_get64_val(zone, GET_POWER_UW, val);   in powercap_get_power_uw()
    [all …]

/linux-6.12.1/drivers/md/dm-vdo/indexer/
D | index.c |
    78   static bool is_zone_chapter_sparse(const struct index_zone *zone, u64 virtual_chapter)   in is_zone_chapter_sparse() argument
    80   return uds_is_chapter_sparse(zone->index->volume->geometry,   in is_zone_chapter_sparse()
    81   zone->oldest_virtual_chapter,   in is_zone_chapter_sparse()
    82   zone->newest_virtual_chapter, virtual_chapter);   in is_zone_chapter_sparse()
    85   static int launch_zone_message(struct uds_zone_message message, unsigned int zone,   in launch_zone_message() argument
    97   request->zone_number = zone;   in launch_zone_message()
    110  unsigned int zone;   in enqueue_barrier_messages() local
    112  for (zone = 0; zone < index->zone_count; zone++) {   in enqueue_barrier_messages()
    113  int result = launch_zone_message(message, zone, index);   in enqueue_barrier_messages()
    127  struct index_zone *zone;   in triage_index_request() local
    [all …]

/linux-6.12.1/drivers/md/
D | dm-zoned-metadata.c |
    137  struct dm_zone *zone;   member
    221  static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)   in dmz_dev_zone_id() argument
    223  if (WARN_ON(!zone))   in dmz_dev_zone_id()
    226  return zone->id - zone->dev->zone_offset;   in dmz_dev_zone_id()
    229  sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)   in dmz_start_sect() argument
    231  unsigned int zone_id = dmz_dev_zone_id(zmd, zone);   in dmz_start_sect()
    236  sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)   in dmz_start_block() argument
    238  unsigned int zone_id = dmz_dev_zone_id(zmd, zone);   in dmz_start_block()
    311  struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);   in dmz_insert() local
    313  if (!zone)   in dmz_insert()
    [all …]

/linux-6.12.1/include/net/netfilter/
D | nf_conntrack_zones.h |
    12   return &ct->zone;   in nf_ct_zone()
    19   nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)   in nf_ct_zone_init() argument
    21   zone->id = id;   in nf_ct_zone_init()
    22   zone->flags = flags;   in nf_ct_zone_init()
    23   zone->dir = dir;   in nf_ct_zone_init()
    25   return zone;   in nf_ct_zone_init()
    36   if (tmpl->zone.flags & NF_CT_FLAG_MARK)   in nf_ct_zone_tmpl()
    37   return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0);   in nf_ct_zone_tmpl()
    43   const struct nf_conntrack_zone *zone)   in nf_ct_zone_add() argument
    46   ct->zone = *zone;   in nf_ct_zone_add()
    [all …]

/linux-6.12.1/kernel/power/
D | snapshot.c |
    406  struct mem_zone_bm_rtree *zone;   member
    468  static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,   in add_rtree_block() argument
    475  block_nr = zone->blocks;   in add_rtree_block()
    485  for (i = zone->levels; i < levels_needed; i++) {   in add_rtree_block()
    487  &zone->nodes);   in add_rtree_block()
    491  node->data[0] = (unsigned long)zone->rtree;   in add_rtree_block()
    492  zone->rtree = node;   in add_rtree_block()
    493  zone->levels += 1;   in add_rtree_block()
    497  block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);   in add_rtree_block()
    502  node = zone->rtree;   in add_rtree_block()
    [all …]

/linux-6.12.1/fs/adfs/
D | map.c |
    159  static int scan_map(struct adfs_sb_info *asb, unsigned int zone,   in scan_map() argument
    166  dm = asb->s_map + zone;   in scan_map()
    167  zone = asb->s_map_size;   in scan_map()
    168  dm_end = asb->s_map + zone;   in scan_map()
    179  } while (--zone > 0);   in scan_map()
    202  unsigned int zone;   in adfs_map_statfs() local
    205  zone = asb->s_map_size;   in adfs_map_statfs()
    209  } while (--zone > 0);   in adfs_map_statfs()
    220  unsigned int zone, mapoff;   in adfs_map_lookup() local
    228  zone = asb->s_map_size >> 1;   in adfs_map_lookup()
    [all …]

/linux-6.12.1/virt/kvm/
D | coalesced_mmio.c |
    36   if (addr < dev->zone.addr)   in coalesced_mmio_in_range()
    38   if (addr + len > dev->zone.addr + dev->zone.size)   in coalesced_mmio_in_range()
    74   ring->coalesced_mmio[insert].pio = dev->zone.pio;   in coalesced_mmio_write()
    123  struct kvm_coalesced_mmio_zone *zone)   in kvm_vm_ioctl_register_coalesced_mmio() argument
    128  if (zone->pio != 1 && zone->pio != 0)   in kvm_vm_ioctl_register_coalesced_mmio()
    138  dev->zone = *zone;   in kvm_vm_ioctl_register_coalesced_mmio()
    142  zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,   in kvm_vm_ioctl_register_coalesced_mmio()
    143  zone->addr, zone->size, &dev->dev);   in kvm_vm_ioctl_register_coalesced_mmio()
    159  struct kvm_coalesced_mmio_zone *zone)   in kvm_vm_ioctl_unregister_coalesced_mmio() argument
    164  if (zone->pio != 1 && zone->pio != 0)   in kvm_vm_ioctl_unregister_coalesced_mmio()
    [all …]

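The coalesced_mmio_in_range() lines express a simple containment test: an access of len bytes at addr is only handled by the coalesced ring when it lies entirely inside the registered zone [zone.addr, zone.addr + zone.size). A small standalone sketch of that check, with an invented struct and example values:

/* Sketch: is the [addr, addr+len) access fully inside the registered zone? */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_zone {
	uint64_t addr;
	uint64_t size;
};

static bool mmio_in_range(const struct model_zone *zone, uint64_t addr, uint64_t len)
{
	if (addr < zone->addr)
		return false;
	if (addr + len > zone->addr + zone->size)
		return false;
	return true;
}

int main(void)
{
	struct model_zone zone = { .addr = 0x1000, .size = 0x100 };

	printf("0x1080+4 in range: %d\n", mmio_in_range(&zone, 0x1080, 4));
	printf("0x10fe+4 in range: %d\n", mmio_in_range(&zone, 0x10fe, 4));
	return 0;
}
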
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx4/
D | alloc.c |
    250  struct mlx4_zone_entry *zone = kmalloc(sizeof(*zone), GFP_KERNEL);   in mlx4_zone_add_one() local
    252  if (NULL == zone)   in mlx4_zone_add_one()
    255  zone->flags = flags;   in mlx4_zone_add_one()
    256  zone->bitmap = bitmap;   in mlx4_zone_add_one()
    257  zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;   in mlx4_zone_add_one()
    258  zone->priority = priority;   in mlx4_zone_add_one()
    259  zone->offset = offset;   in mlx4_zone_add_one()
    263  zone->uid = zone_alloc->last_uid++;   in mlx4_zone_add_one()
    264  zone->allocator = zone_alloc;   in mlx4_zone_add_one()
    274  list_add_tail(&zone->prio_list, &it->prio_list);   in mlx4_zone_add_one()
    [all …]

/linux-6.12.1/include/trace/events/
D | compaction.h |
    194  TP_PROTO(struct zone *zone,
    198  TP_ARGS(zone, order, ret),
    208  __entry->nid = zone_to_nid(zone);
    209  __entry->idx = zone_idx(zone);
    223  TP_PROTO(struct zone *zone,
    227  TP_ARGS(zone, order, ret)
    232  TP_PROTO(struct zone *zone,
    236  TP_ARGS(zone, order, ret)
    241  TP_PROTO(struct zone *zone, int order),
    243  TP_ARGS(zone, order),
    [all …]

/linux-6.12.1/drivers/thermal/tegra/
D | tegra-bpmp-thermal.c |
    33   static int __tegra_bpmp_thermal_get_temp(struct tegra_bpmp_thermal_zone *zone,   in __tegra_bpmp_thermal_get_temp() argument
    43   req.get_temp.zone = zone->idx;   in __tegra_bpmp_thermal_get_temp()
    52   err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);   in __tegra_bpmp_thermal_get_temp()
    67   struct tegra_bpmp_thermal_zone *zone = thermal_zone_device_priv(tz);   in tegra_bpmp_thermal_get_temp() local
    69   return __tegra_bpmp_thermal_get_temp(zone, out_temp);   in tegra_bpmp_thermal_get_temp()
    74   struct tegra_bpmp_thermal_zone *zone = thermal_zone_device_priv(tz);   in tegra_bpmp_thermal_set_trips() local
    81   req.set_trip.zone = zone->idx;   in tegra_bpmp_thermal_set_trips()
    91   err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);   in tegra_bpmp_thermal_set_trips()
    102  struct tegra_bpmp_thermal_zone *zone;   in tz_device_update_work_fn() local
    104  zone = container_of(work, struct tegra_bpmp_thermal_zone,   in tz_device_update_work_fn()
    [all …]