/linux-6.12.1/drivers/md/dm-vdo/ |
D | slab-depot.c |
      186  read_only = vdo_is_read_only(slab->allocator->depot->vdo);  in check_if_slab_drained()
      214  static u8 __must_check compute_fullness_hint(struct slab_depot *depot,  in compute_fullness_hint() argument
      224  hint = free_blocks >> depot->hint_shift;  in compute_fullness_hint()
      238  (vdo_is_read_only(allocator->depot->vdo) ?  in check_summary_drain_complete()
      250  int result = (vdo_is_read_only(allocator->depot->vdo) ?  in notify_summary_waiters()
      283  atomic64_inc(&block->allocator->depot->summary_statistics.blocks_written);  in finish_update()
      317  struct slab_depot *depot = allocator->depot;  in launch_write() local
      328  if (vdo_is_read_only(depot->vdo)) {  in launch_write()
      343  pbn = (depot->summary_origin +  in launch_write()
      388  .fullness_hint = compute_fullness_hint(allocator->depot, free_blocks),  in update_slab_summary_entry()
      [all …]
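The compute_fullness_hint() matches above show the core of the hint calculation: the free-block count is scaled down by a per-depot shift so it fits in a small summary field. The following stand-alone C sketch reconstructs only that visible shape; the struct layout, the hint_shift value, and the special case for an almost-full slab are assumptions for illustration, not the kernel's actual definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the depot fields used by the hint computation. */
struct toy_depot {
	unsigned int hint_shift;
};

static uint8_t compute_fullness_hint(const struct toy_depot *depot,
				     uint64_t free_blocks)
{
	/* Mirrors the snippet: hint = free_blocks >> depot->hint_shift; */
	uint64_t hint = free_blocks >> depot->hint_shift;

	/* Assumption: keep a non-zero hint while any block is still free. */
	if (free_blocks > 0 && hint == 0)
		hint = 1;
	return (uint8_t)hint;
}

int main(void)
{
	struct toy_depot depot = { .hint_shift = 10 };

	printf("%u\n", compute_fullness_hint(&depot, 4096)); /* prints 4 */
	return 0;
}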
|
D | slab-depot.h |
      374  struct slab_depot *depot;  member
      513  int __must_check vdo_adjust_reference_count_for_rebuild(struct slab_depot *depot,
      548  void vdo_free_slab_depot(struct slab_depot *depot);
      550  struct slab_depot_state_2_0 __must_check vdo_record_slab_depot(const struct slab_depot *depot);
      552  int __must_check vdo_allocate_reference_counters(struct slab_depot *depot);
      554  struct vdo_slab * __must_check vdo_get_slab(const struct slab_depot *depot,
      557  u8 __must_check vdo_get_increment_limit(struct slab_depot *depot,
      560  bool __must_check vdo_is_physical_data_block(const struct slab_depot *depot,
      563  block_count_t __must_check vdo_get_slab_depot_allocated_blocks(const struct slab_depot *depot);
      565  block_count_t __must_check vdo_get_slab_depot_data_blocks(const struct slab_depot *depot);
      [all …]
|
D | repair.c |
      281  vdo_continue_completion(parent, vdo_allocate_reference_counters(vdo->depot));  in finish_repair()
      342  vdo_drain_slab_depot(vdo->depot, operation, completion);  in drain_slab_depot()
      423  struct slab_depot *depot = completion->vdo->depot;  in process_slot() local
      440  if (!vdo_is_physical_data_block(depot, mapping.pbn)) {  in process_slot()
      449  result = vdo_adjust_reference_count_for_rebuild(depot, mapping.pbn,  in process_slot()
      527  if (vdo_is_physical_data_block(repair->completion.vdo->depot, pbn))  in get_pbn_to_fetch()
      616  struct slab_depot *depot = completion->vdo->depot;  in process_entry() local
      619  if ((pbn == VDO_ZERO_BLOCK) || !vdo_is_physical_data_block(depot, pbn)) {  in process_entry()
      625  result = vdo_adjust_reference_count_for_rebuild(depot, pbn,  in process_entry()
      644  if (abort_on_error(vdo_allocate_reference_counters(vdo->depot), repair))  in rebuild_reference_counts()
      [all …]
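The process_slot() and process_entry() matches above all follow one validate-then-adjust pattern: a block number is only counted toward the rebuilt reference counts if it is the non-zero block and lies inside the depot's data region. This toy C sketch mirrors only that control flow; the types, range check, and reference counter are simplified stand-ins, not the dm-vdo definitions.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long pbn_t;
#define TOY_ZERO_BLOCK 0ULL

/* Invented, minimal depot: a contiguous data range plus one counter. */
struct toy_depot {
	pbn_t first_data_block;
	pbn_t last_data_block;
	unsigned long long references;
};

static bool toy_is_physical_data_block(const struct toy_depot *depot, pbn_t pbn)
{
	return pbn >= depot->first_data_block && pbn <= depot->last_data_block;
}

/* Mirrors the shape of process_entry(): skip the zero block and anything
 * outside the depot, otherwise bump the (toy) reference count. */
static void toy_process_entry(struct toy_depot *depot, pbn_t pbn)
{
	if (pbn == TOY_ZERO_BLOCK || !toy_is_physical_data_block(depot, pbn))
		return;
	depot->references++;
}

int main(void)
{
	struct toy_depot depot = { .first_data_block = 100, .last_data_block = 900 };

	toy_process_entry(&depot, 0);   /* zero block: ignored */
	toy_process_entry(&depot, 50);  /* outside the depot: ignored */
	toy_process_entry(&depot, 500); /* counted */
	printf("references = %llu\n", depot.references); /* prints 1 */
	return 0;
}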
|
D | recovery-journal.h |
      149  struct slab_depot *depot;  member
      275  struct slab_depot *depot, struct block_map *block_map);
|
D | dm-vdo-target.c |
      1360  &vdo->depot);  in decode_vdo()
      1743  result = vdo_prepare_to_grow_slab_depot(vdo->depot,  in prepare_to_grow_physical()
      2065  vdo_drain_slab_depot(vdo->depot, vdo_get_admin_state_code(state),  in suspend_callback()
      2195  vdo_open_recovery_journal(vdo->recovery_journal, vdo->depot,  in load_callback()
      2216  vdo_load_slab_depot(vdo->depot,  in load_callback()
      2230  vdo_prepare_slab_depot_to_allocate(vdo->depot, get_load_type(vdo),  in load_callback()
      2238  vdo_scrub_all_unrecovered_slabs(vdo->depot, completion);  in load_callback()
      2377  vdo_resume_slab_depot(vdo->depot, completion);  in resume_callback()
      2615  vdo_update_slab_depot_size(vdo->depot);  in grow_physical_callback()
      2620  vdo_use_new_slabs(vdo->depot, completion);  in grow_physical_callback()
      [all …]
|
D | vdo.c |
      700  vdo_free_slab_depot(vdo_forget(vdo->depot));  in vdo_destroy()
      904  vdo->states.slab_depot = vdo_record_slab_depot(vdo->depot);  in record_vdo()
      1468  return (vdo_get_slab_depot_allocated_blocks(vdo->depot) -  in vdo_get_physical_blocks_allocated()
      1486  vdo_get_slab_depot_data_blocks(vdo->depot) +  in vdo_get_physical_blocks_overhead()
      1541  vdo_get_slab_depot_statistics(vdo->depot, stats);  in get_vdo_statistics()
      1637  vdo_dump_slab_depot(vdo->depot);  in vdo_dump_status()
      1719  if (!vdo_is_physical_data_block(vdo->depot, pbn))  in vdo_get_physical_zone()
      1723  slab = vdo_get_slab(vdo->depot, pbn);  in vdo_get_physical_zone()
|
D | Makefile | 34 slab-depot.o \
|
D | recovery-journal.c |
      448  vdo_commit_oldest_slab_journal_tail_blocks(journal->depot,  in check_slab_journal_commit_threshold()
      871  struct slab_depot *depot, struct block_map *block_map)  in vdo_open_recovery_journal() argument
      873  journal->depot = depot;  in vdo_open_recovery_journal()
|
D | dedupe.c |
      1277  struct slab_depot *depot)  in acquire_provisional_reference() argument
      1280  struct vdo_slab *slab = vdo_get_slab(depot, agent->duplicate.pbn);  in acquire_provisional_reference()
      1312  struct slab_depot *depot = vdo_from_data_vio(agent)->depot;  in lock_duplicate_pbn() local
      1323  increment_limit = vdo_get_increment_limit(depot, agent->duplicate.pbn);  in lock_duplicate_pbn()
      1383  if (!acquire_provisional_reference(agent, lock, depot))  in lock_duplicate_pbn()
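The lock_duplicate_pbn() matches above show the depot being consulted before a candidate duplicate block is shared: the caller asks for the block's increment limit and only then tries to take a provisional reference. The toy C sketch below captures that gating idea only; the maximum reference count, struct fields, and the "limit of zero means do not deduplicate" interpretation are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

#define TOY_MAX_REFS 254u /* invented cap, not the dm-vdo value */

struct toy_slab {
	unsigned int reference_count; /* current references on the block */
};

static unsigned int toy_get_increment_limit(const struct toy_slab *slab)
{
	return slab->reference_count >= TOY_MAX_REFS ?
	       0 : TOY_MAX_REFS - slab->reference_count;
}

static bool toy_may_deduplicate_against(const struct toy_slab *slab)
{
	/* Assumption: a zero increment limit means the block cannot take
	 * another reference, so deduplication against it is skipped. */
	return toy_get_increment_limit(slab) > 0;
}

int main(void)
{
	struct toy_slab saturated = { .reference_count = TOY_MAX_REFS };
	struct toy_slab cold = { .reference_count = 3 };

	printf("saturated: %d, cold: %d\n",
	       toy_may_deduplicate_against(&saturated), /* 0 */
	       toy_may_deduplicate_against(&cold));      /* 1 */
	return 0;
}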
|
D | vdo.h | 204 struct slab_depot *depot; member
|
D | physical-zone.c | 343 zone->allocator = &vdo->depot->allocators[zone_number]; in initialize_zone()
|
D | block-map.c | 1772 return !vdo_is_physical_data_block(vdo->depot, mapping->pbn); in is_invalid_tree_entry()
|
/linux-6.12.1/drivers/iommu/ |
D | iova.c |
      587  struct iova_magazine *depot;  member
      674  struct iova_magazine *mag = rcache->depot;  in iova_depot_pop()
      676  rcache->depot = mag->next;  in iova_depot_pop()
      684  mag->next = rcache->depot;  in iova_depot_push()
      685  rcache->depot = mag;  in iova_depot_push()
      835  if (rcache->depot) {  in __iova_rcache_get()
      888  while (rcache->depot)  in free_iova_rcaches()
      927  while (rcache->depot) {  in free_global_cached_iovas()
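The iova_depot_pop() and iova_depot_push() matches above show that this depot is simply a LIFO stack of magazines threaded through a next pointer. The stand-alone C model below mirrors exactly that linkage and the "pop until empty" teardown seen in free_iova_rcaches(); the magazine payload and all names are simplified stand-ins, not the iommu definitions.

#include <stdio.h>

struct toy_magazine {
	struct toy_magazine *next;
	/* the real iova_magazine also carries an array of cached IOVA ranges */
};

struct toy_rcache {
	struct toy_magazine *depot;
};

static void toy_depot_push(struct toy_rcache *rcache, struct toy_magazine *mag)
{
	/* Mirrors iova_depot_push(): link in at the head. */
	mag->next = rcache->depot;
	rcache->depot = mag;
}

static struct toy_magazine *toy_depot_pop(struct toy_rcache *rcache)
{
	/* Mirrors iova_depot_pop(): unlink the head, if any. */
	struct toy_magazine *mag = rcache->depot;

	if (mag)
		rcache->depot = mag->next;
	return mag;
}

int main(void)
{
	struct toy_rcache rcache = { .depot = NULL };
	struct toy_magazine a = { 0 }, b = { 0 };

	toy_depot_push(&rcache, &a);
	toy_depot_push(&rcache, &b);
	printf("%s\n", toy_depot_pop(&rcache) == &b ? "LIFO" : "?");

	/* Teardown mirrors free_iova_rcaches(): pop until the depot is empty. */
	while (toy_depot_pop(&rcache) != NULL)
		;
	return 0;
}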
|
/linux-6.12.1/drivers/scsi/aic7xxx/ |
D | aic7xxx_reg_print.c_shipped |
      5  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
      6  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
|
D | Kconfig.aic79xx | 4 # $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic79xx#4 $
|
D | Kconfig.aic7xxx | 4 # $Id: //depot/linux-aic79xx-2.5.0/drivers/scsi/aic7xxx/Kconfig.aic7xxx#7 $
|
D | aic79xx_reg_print.c_shipped |
      5  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
      6  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
|
D | aic7xxx_reg.h_shipped |
      5  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
      6  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
|
D | aic79xx_seq.h_shipped |
      5  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
      6  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
|
D | aic7xxx_seq.h_shipped |
      5  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
      6  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
|
D | aic79xx_reg.h_shipped |
      5  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
      6  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
|
D | aic7xxx.reg | 42 VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $"
|
D | aic7xxx.seq | 43 VERSION = "$Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $"
|
/linux-6.12.1/lib/ |
D | Kconfig |
      709  Stack depot: stack trace storage that avoids duplication
      715  Always initialize stack depot during early boot
      718  int "Maximum number of frames in trace saved in stack depot"
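These options cover the stackdepot library, which stores each distinct stack trace once and hands callers a small handle; identical traces resolve to the same handle, which is what "avoids duplication" refers to. Below is a minimal kernel-style sketch of how such code typically uses the public stackdepot API (stack_depot_save()/stack_depot_fetch()), assuming a kernel built with CONFIG_STACKDEPOT; the 16-frame cap and function names here are arbitrary choices for illustration, not code from lib/.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

/* Capture the current stack once and keep only the compact handle. */
static depot_stack_handle_t record_current_stack(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, gfp);
}

/* Later, the handle is enough to retrieve and print the stored trace. */
static void print_recorded_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr_entries, 0);
}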
|
/linux-6.12.1/Documentation/admin-guide/device-mapper/ |
D | vdo-design.rst |
      229  Most of the vdo volume belongs to the slab depot. The depot contains a
      252  The slab depot maintains an additional small data structure, the "slab
      302  slab depot. Each write request causes an entry to be made in the journal.
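The design document excerpts above describe the depot as the region of the volume that holds the slabs, with vdo_get_slab() (see the slab-depot.h matches) resolving a physical block number to its owning slab. The toy C sketch below illustrates one plausible way such a lookup works under the assumption of equal-sized slabs; the field names, sizes, and arithmetic are invented for illustration, as the real depot derives them from its on-disk state.

#include <stdio.h>

typedef unsigned long long pbn_t;

/* Invented, minimal depot layout: a contiguous run of equal-sized slabs. */
struct toy_depot {
	pbn_t first_block;            /* first data block managed by the depot */
	unsigned long long slab_size; /* blocks per slab (assumed uniform) */
	unsigned int slab_count;
};

/* Returns the index of the slab owning pbn, or -1 if pbn is outside the depot. */
static int toy_slab_for_pbn(const struct toy_depot *depot, pbn_t pbn)
{
	unsigned long long index;

	if (pbn < depot->first_block)
		return -1;
	index = (pbn - depot->first_block) / depot->slab_size;
	return index < depot->slab_count ? (int)index : -1;
}

int main(void)
{
	struct toy_depot depot = {
		.first_block = 1024, .slab_size = 8192, .slab_count = 4,
	};

	printf("%d %d\n",
	       toy_slab_for_pbn(&depot, 1024 + 3 * 8192), /* 3 */
	       toy_slab_for_pbn(&depot, 123));             /* -1 */
	return 0;
}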
|