// SPDX-License-Identifier: GPL-2.0-only

#include "physical-zone.h"

#include "memory-alloc.h"

#include "block-map.h"
#include "data-vio.h"
#include "int-map.h"
#include "slab-depot.h"
#include "status-codes.h"
/* Each user data_vio needs a PBN read lock and write lock. */
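#define LOCK_POOL_CAPACITY (2 * MAXIMUM_VDO_USER_VIOS)

/*
 * Editorial reconstruction: the lock-implementation table below is referenced by
 * has_lock_type() and set_pbn_lock_type() but fell outside the matched lines; it is
 * restored here from the upstream source and may differ in detail. The array must
 * have an entry for every pbn_lock_type value.
 */
struct pbn_lock_implementation {
	enum pbn_lock_type type;
	const char *name;
	const char *release_reason;
};

static const struct pbn_lock_implementation LOCK_IMPLEMENTATIONS[] = {
	[VIO_READ_LOCK] = {
		.type = VIO_READ_LOCK,
		.name = "read",
		.release_reason = "candidate duplicate",
	},
	[VIO_WRITE_LOCK] = {
		.type = VIO_WRITE_LOCK,
		.name = "write",
		.release_reason = "newly allocated",
	},
	[VIO_BLOCK_MAP_WRITE_LOCK] = {
		.type = VIO_BLOCK_MAP_WRITE_LOCK,
		.name = "block map write",
		.release_reason = "block map write",
	},
};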
static inline bool has_lock_type(const struct pbn_lock *lock, enum pbn_lock_type type)
{
	return (lock->implementation == &LOCK_IMPLEMENTATIONS[type]);
}
/**
 * vdo_is_pbn_read_lock() - Check whether a pbn_lock is a read lock.
 * @lock: The lock to check.
 *
 * Return: true if the lock is a read lock.
 */
bool vdo_is_pbn_read_lock(const struct pbn_lock *lock)
{
	return has_lock_type(lock, VIO_READ_LOCK);
}
static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type type)
{
	lock->implementation = &LOCK_IMPLEMENTATIONS[type];
}
/**
 * vdo_downgrade_pbn_write_lock() - Downgrade a PBN write lock to a PBN read lock.
 * @lock: The PBN write lock to downgrade.
 * @compressed_write: Whether the holder has written compressed data to the block.
 *
 * The lock holder count is cleared and the caller is responsible for setting the new count.
 */
void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write)
{
	VDO_ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock),
			    "PBN lock must not already have been downgraded");
	VDO_ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK),
			    "must not downgrade block map write locks");
	VDO_ASSERT_LOG_ONLY(lock->holder_count == 1,
			    "PBN write lock should have one holder but has %u",
			    lock->holder_count);
	/*
	 * data_vio write locks are downgraded in place--the writer retains the hold on the lock.
	 * A compressed write keeps the full increment limit; any other writer has already used
	 * one of the block's references itself, so one fewer increment remains.
	 */
	lock->increment_limit =
		(compressed_write ? MAXIMUM_REFERENCE_COUNT : MAXIMUM_REFERENCE_COUNT - 1);
	set_pbn_lock_type(lock, VIO_READ_LOCK);
}
/**
 * vdo_claim_pbn_lock_increment() - Try to claim one of the available reference count increments on
 *                                  a read lock.
 * @lock: The PBN read lock from which to claim an increment.
 *
 * Claims may be attempted from any thread. A claim is only valid until the PBN lock is released.
 *
 * Return: true if the claim succeeded, guaranteeing one increment can be made without overflowing
 *         the PBN's reference count.
 */
bool vdo_claim_pbn_lock_increment(struct pbn_lock *lock)
{
	/*
	 * Claim the next free reference atomically since hash locks from multiple hash zone
	 * threads might be concurrently deduplicating against a single PBN lock on a compressed
	 * block. As long as hitting the increment limit will lead to the PBN lock being released
	 * in a sane time-frame, we won't overflow a 32-bit claim counter, allowing a simple add
	 * instead of a compare-and-swap.
	 */
	u32 claim_number = (u32) atomic_add_return(1, &lock->increments_claimed);

	return (claim_number <= lock->increment_limit);
}
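/*
 * Editorial sketch, not part of the upstream file: demonstrates the claim arithmetic
 * above. Claim numbers are 1-based (atomic_add_return() returns the post-increment
 * value), so with increment_limit == 3 exactly three claims succeed and every later
 * claim fails until the lock is released and reinitialized.
 */
static void __maybe_unused example_claim_increments(struct pbn_lock *lock)
{
	atomic_set(&lock->increments_claimed, 0);
	lock->increment_limit = 3;

	WARN_ON(!vdo_claim_pbn_lock_increment(lock)); /* claim 1: succeeds */
	WARN_ON(!vdo_claim_pbn_lock_increment(lock)); /* claim 2: succeeds */
	WARN_ON(!vdo_claim_pbn_lock_increment(lock)); /* claim 3: succeeds */
	WARN_ON(vdo_claim_pbn_lock_increment(lock));  /* claim 4: fails */
}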
/**
 * vdo_assign_pbn_lock_provisional_reference() - Inform a PBN lock that it is responsible for a
 *                                               provisional reference.
 * @lock: The PBN lock.
 */
void vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock)
{
	VDO_ASSERT_LOG_ONLY(!lock->has_provisional_reference,
			    "lock does not have a provisional reference");
	lock->has_provisional_reference = true;
}
/**
 * vdo_unassign_pbn_lock_provisional_reference() - Inform a PBN lock that it is no longer
 *                                                 responsible for a provisional reference.
 * @lock: The PBN lock.
 */
void vdo_unassign_pbn_lock_provisional_reference(struct pbn_lock *lock)
{
	lock->has_provisional_reference = false;
}
/**
 * release_pbn_lock_provisional_reference() - If the lock is responsible for a provisional
 *                                            reference, release that reference.
 * @lock: The lock.
 * @locked_pbn: The PBN covered by the lock.
 * @allocator: The block allocator from which to release the reference.
 *
 * This method is called when the lock is released.
 */
static void release_pbn_lock_provisional_reference(struct pbn_lock *lock,
						   physical_block_number_t locked_pbn,
						   struct block_allocator *allocator)
{
	int result;

	if (!vdo_pbn_lock_has_provisional_reference(lock))
		return;

	result = vdo_release_block_reference(allocator, locked_pbn);
	if (result != VDO_SUCCESS) {
		vdo_log_error_strerror(result,
				       "Failed to release reference to %s physical block %llu",
				       lock->implementation->release_reason,
				       (unsigned long long) locked_pbn);
	}

	vdo_unassign_pbn_lock_provisional_reference(lock);
}
/**
 * union idle_pbn_lock - PBN lock list entries.
 *
 * Unused (idle) PBN locks are kept in a list. Just like in a malloc implementation, the lock
 * structure is unused memory, so we can save a bit of space (and not pollute the lock structure
 * proper) by using a union to overlay the lock structure with the free list.
 */
typedef union {
	/** @entry: Only used while locks are in the pool. */
	struct list_head entry;
	/** @lock: Only used while locks are not in the pool. */
	struct pbn_lock lock;
} idle_pbn_lock;
/**
 * struct pbn_lock_pool - List of PBN locks.
 *
 * The lock pool is little more than the memory allocated for the locks.
 */
struct pbn_lock_pool {
	/** @capacity: The number of locks allocated for the pool. */
	size_t capacity;
	/** @borrowed: The number of locks currently borrowed from the pool. */
	size_t borrowed;
	/** @idle_list: A list containing all idle PBN lock instances. */
	struct list_head idle_list;
	/** @locks: The memory for all the locks allocated by this pool. */
	idle_pbn_lock locks[];
};
/**
 * return_pbn_lock_to_pool() - Return a PBN lock to its pool.
 * @pool: The pool from which the lock was borrowed.
 * @lock: The last reference to the lock being returned.
 *
 * It must be the last live reference, as if the memory were being freed (the lock memory will be
 * re-initialized or zeroed).
 */
static void return_pbn_lock_to_pool(struct pbn_lock_pool *pool, struct pbn_lock *lock)
{
	idle_pbn_lock *idle;

	/* A bit expensive, but will promptly catch some use-after-free errors. */
	memset(lock, 0, sizeof(*lock));

	idle = container_of(lock, idle_pbn_lock, lock);
	INIT_LIST_HEAD(&idle->entry);
	list_add_tail(&idle->entry, &pool->idle_list);

	VDO_ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed");
	pool->borrowed -= 1;
}
/**
 * make_pbn_lock_pool() - Create a new PBN lock pool and all the lock instances it can loan out.
 * @capacity: The number of PBN locks to allocate for the pool.
 * @pool_ptr: A pointer to receive the new pool.
 *
 * Return: VDO_SUCCESS or an error code.
 */
static int make_pbn_lock_pool(size_t capacity, struct pbn_lock_pool **pool_ptr)
{
	size_t i;
	struct pbn_lock_pool *pool;
	int result;

	result = vdo_allocate_extended(struct pbn_lock_pool, capacity, idle_pbn_lock,
				       __func__, &pool);
	if (result != VDO_SUCCESS)
		return result;

	pool->capacity = capacity;
	pool->borrowed = capacity;
	INIT_LIST_HEAD(&pool->idle_list);

	/* Returning each lock to the pool decrements borrowed back to zero. */
	for (i = 0; i < capacity; i++)
		return_pbn_lock_to_pool(pool, &pool->locks[i].lock);

	*pool_ptr = pool;
	return VDO_SUCCESS;
}
/**
 * free_pbn_lock_pool() - Free a PBN lock pool.
 * @pool: The lock pool to free.
 *
 * This also frees all the PBN locks it allocated, so the caller must ensure that all locks have
 * been returned to the pool.
 */
static void free_pbn_lock_pool(struct pbn_lock_pool *pool)
{
	if (pool == NULL)
		return;

	VDO_ASSERT_LOG_ONLY(pool->borrowed == 0,
			    "all PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan",
			    pool->borrowed);
	vdo_free(pool);
}
/**
 * borrow_pbn_lock_from_pool() - Borrow a PBN lock from the pool and initialize it with the
 *                               provided type.
 * @pool: The pool from which to borrow.
 * @type: The type with which to initialize the lock.
 * @lock_ptr: A pointer to receive the borrowed lock.
 *
 * Pools do not grow on demand, so this will fail if the pool is empty. Borrowed locks are still
 * associated with this pool and must not be returned to another pool.
 *
 * Return: VDO_SUCCESS, or VDO_LOCK_ERROR if the pool is empty.
 */
static int __must_check borrow_pbn_lock_from_pool(struct pbn_lock_pool *pool,
						  enum pbn_lock_type type,
						  struct pbn_lock **lock_ptr)
{
	int result;
	struct list_head *idle_entry;
	idle_pbn_lock *idle;

	if (pool->borrowed >= pool->capacity)
		return vdo_log_error_strerror(VDO_LOCK_ERROR,
					      "no free PBN locks left to borrow");
	pool->borrowed += 1;

	result = VDO_ASSERT(!list_empty(&pool->idle_list),
			    "idle list should not be empty if pool is not at capacity");
	if (result != VDO_SUCCESS)
		return result;

	idle_entry = pool->idle_list.prev;
	list_del(idle_entry);

	idle = list_entry(idle_entry, idle_pbn_lock, entry);
	idle->lock.holder_count = 0;
	set_pbn_lock_type(&idle->lock, type);

	*lock_ptr = &idle->lock;
	return VDO_SUCCESS;
}
/**
 * initialize_zone() - Initialize a physical zone.
 * @vdo: The vdo to which the zone will belong.
 * @zones: The physical_zones to which the zone being initialized belongs.
 *
 * Return: VDO_SUCCESS or an error code.
 */
static int initialize_zone(struct vdo *vdo, struct physical_zones *zones)
{
	int result;
	zone_count_t zone_number = zones->zone_count;
	struct physical_zone *zone = &zones->zones[zone_number];

	result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->pbn_operations);
	if (result != VDO_SUCCESS)
		return result;

	result = make_pbn_lock_pool(LOCK_POOL_CAPACITY, &zone->lock_pool);
	if (result != VDO_SUCCESS) {
		vdo_int_map_free(zone->pbn_operations);
		return result;
	}

	zone->zone_number = zone_number;
	zone->thread_id = vdo->thread_config.physical_threads[zone_number];
	zone->allocator = &vdo->depot->allocators[zone_number];
	zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count];
	result = vdo_make_default_thread(vdo, zone->thread_id);
	if (result != VDO_SUCCESS) {
		free_pbn_lock_pool(vdo_forget(zone->lock_pool));
		vdo_int_map_free(zone->pbn_operations);
		return result;
	}

	return VDO_SUCCESS;
}
/**
 * vdo_make_physical_zones() - Make the physical zones for a vdo.
 * @vdo: The vdo being constructed.
 * @zones_ptr: A pointer to receive the zones.
 *
 * Return: VDO_SUCCESS or an error code.
 */
int vdo_make_physical_zones(struct vdo *vdo, struct physical_zones **zones_ptr)
{
	struct physical_zones *zones;
	int result;
	zone_count_t zone_count = vdo->thread_config.physical_zone_count;

	if (zone_count == 0)
		return VDO_SUCCESS;

	result = vdo_allocate_extended(struct physical_zones, zone_count,
				       struct physical_zone, __func__, &zones);
	if (result != VDO_SUCCESS)
		return result;

	for (zones->zone_count = 0; zones->zone_count < zone_count; zones->zone_count++) {
		result = initialize_zone(vdo, zones);
		if (result != VDO_SUCCESS) {
			vdo_free_physical_zones(zones);
			return result;
		}
	}

	*zones_ptr = zones;
	return VDO_SUCCESS;
}
/**
 * vdo_free_physical_zones() - Destroy the physical zones.
 * @zones: The zones to free.
 */
void vdo_free_physical_zones(struct physical_zones *zones)
{
	zone_count_t index;

	if (zones == NULL)
		return;

	for (index = 0; index < zones->zone_count; index++) {
		struct physical_zone *zone = &zones->zones[index];

		free_pbn_lock_pool(vdo_forget(zone->lock_pool));
		vdo_int_map_free(vdo_forget(zone->pbn_operations));
	}

	vdo_free(zones);
}
/**
 * vdo_get_physical_zone_pbn_lock() - Get the lock on a PBN if one exists.
 * @zone: The physical zone responsible for the PBN.
 * @pbn: The physical block number whose lock is desired.
 *
 * Return: The lock or NULL if the PBN is not locked.
 */
struct pbn_lock *vdo_get_physical_zone_pbn_lock(struct physical_zone *zone,
						physical_block_number_t pbn)
{
	return ((zone == NULL) ? NULL : vdo_int_map_get(zone->pbn_operations, pbn));
}
/**
 * vdo_attempt_physical_zone_pbn_lock() - Attempt to lock a physical block in the zone responsible
 *                                        for it.
 * @zone: The physical zone responsible for the PBN.
 * @pbn: The physical block number to lock.
 * @type: The type with which to initialize a new lock.
 * @lock_ptr: A pointer to receive the lock, existing or new.
 *
 * If the PBN is already locked, the existing lock will be returned. Otherwise, a new lock instance
 * will be borrowed from the pool, initialized, and returned. The lock owner will be NULL for a new
 * lock acquired by the caller, who is responsible for setting that field promptly. The lock owner
 * will be non-NULL when there is already an existing lock on the PBN.
 *
 * Return: VDO_SUCCESS or an error.
 */
int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
				       physical_block_number_t pbn,
				       enum pbn_lock_type type,
				       struct pbn_lock **lock_ptr)
{
	/*
	 * Borrow and prepare a lock from the pool so we don't have to do two int_map accesses in
	 * the common case of no lock contention.
	 */
	struct pbn_lock *lock, *new_lock = NULL;
	int result;

	result = borrow_pbn_lock_from_pool(zone->lock_pool, type, &new_lock);
	if (result != VDO_SUCCESS) {
		VDO_ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock");
		return result;
	}

	result = vdo_int_map_put(zone->pbn_operations, pbn, new_lock, false,
				 (void **) &lock);
	if (result != VDO_SUCCESS) {
		return_pbn_lock_to_pool(zone->lock_pool, new_lock);
		return result;
	}

	if (lock != NULL) {
		/* The lock is already held, so we don't need the borrowed one. */
		return_pbn_lock_to_pool(zone->lock_pool, vdo_forget(new_lock));
		result = VDO_ASSERT(lock->holder_count > 0, "physical block %llu lock held",
				    (unsigned long long) pbn);
		if (result != VDO_SUCCESS)
			return result;
		*lock_ptr = lock;
	} else {
		*lock_ptr = new_lock;
	}

	return VDO_SUCCESS;
}
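/*
 * Editorial sketch, not part of the upstream file: a typical caller pattern for
 * vdo_attempt_physical_zone_pbn_lock(). A holder_count of zero signals a freshly
 * borrowed lock that the caller now owns; allocate_and_lock_block() below follows
 * this same pattern. The function name is hypothetical.
 */
static int __maybe_unused example_take_pbn_lock(struct physical_zone *zone,
						physical_block_number_t pbn)
{
	struct pbn_lock *lock;
	int result = vdo_attempt_physical_zone_pbn_lock(zone, pbn, VIO_WRITE_LOCK, &lock);

	if (result != VDO_SUCCESS)
		return result;

	if (lock->holder_count > 0) {
		/* Someone else already holds a lock on this PBN. */
		return VDO_LOCK_ERROR;
	}

	/* A new lock: claim it by taking the first hold. The caller must later
	 * release it via vdo_release_physical_zone_pbn_lock().
	 */
	lock->holder_count += 1;
	return VDO_SUCCESS;
}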
/**
 * allocate_and_lock_block() - Attempt to allocate a block from this zone.
 * @allocation: The struct allocation of the data_vio attempting to allocate.
 *
 * If a block is allocated, the recipient will also hold a lock on it.
 *
 * Return: VDO_SUCCESS if a block was allocated, or an error code.
 */
static int allocate_and_lock_block(struct allocation *allocation)
{
	int result;
	struct pbn_lock *lock;

	VDO_ASSERT_LOG_ONLY(allocation->lock == NULL,
			    "must not allocate a block while already holding a lock on one");

	result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn);
	if (result != VDO_SUCCESS)
		return result;

	result = vdo_attempt_physical_zone_pbn_lock(allocation->zone, allocation->pbn,
						    allocation->write_lock_type, &lock);
	if (result != VDO_SUCCESS)
		return result;

	if (lock->holder_count > 0) {
		/* This block is already locked, which should be impossible. */
		return vdo_log_error_strerror(VDO_LOCK_ERROR,
					      "newly allocated block %llu was spuriously locked (holder_count=%u)",
					      (unsigned long long) allocation->pbn,
					      lock->holder_count);
	}

	/* We've successfully acquired a new lock, so mark it as ours. */
	lock->holder_count += 1;
	allocation->lock = lock;
	vdo_assign_pbn_lock_provisional_reference(lock);
	return VDO_SUCCESS;
}
/**
 * retry_allocation() - Retry allocating a block now that we're done waiting for scrubbing.
 * @waiter: The data_vio that was waiting to allocate.
 * @context: The context (unused).
 */
static void retry_allocation(struct vdo_waiter *waiter, void *context __always_unused)
{
	struct data_vio *data_vio = vdo_waiter_as_data_vio(waiter);

	/* Now that some slab has scrubbed, restart the allocation process. */
	data_vio->allocation.wait_for_clean_slab = false;
	data_vio->allocation.first_allocation_zone = data_vio->allocation.zone->zone_number;
	continue_data_vio(data_vio);
}
/**
 * continue_allocating() - Continue searching for an allocation by enqueuing to wait for scrubbing
 *                         or switching to the next allocation zone.
 * @data_vio: The data_vio attempting to get an allocation.
 *
 * Return: true if the allocation process has continued in another zone or is waiting for
 *         scrubbing.
 */
static bool continue_allocating(struct data_vio *data_vio)
{
	struct allocation *allocation = &data_vio->allocation;
	struct physical_zone *zone = allocation->zone;
	struct vdo_completion *completion = &data_vio->vio.completion;
	int result = VDO_SUCCESS;
	bool was_waiting = allocation->wait_for_clean_slab;
	bool tried_all = (allocation->first_allocation_zone == zone->next->zone_number);

	if (tried_all && !was_waiting) {
		/*
		 * We've already looked in every zone and found nothing, so go through the zones
		 * again, this time waiting for each to scrub before trying to allocate.
		 */
		allocation->wait_for_clean_slab = true;
		allocation->first_allocation_zone = zone->zone_number;
	}

	if (allocation->wait_for_clean_slab) {
		data_vio->waiter.callback = retry_allocation;
		result = vdo_enqueue_clean_slab_waiter(zone->allocator,
						       &data_vio->waiter);
		if (result == VDO_SUCCESS) {
			/* We've enqueued to wait for a slab to be scrubbed. */
			return true;
		}

		if ((result != VDO_NO_SPACE) || (was_waiting && tried_all)) {
			vdo_set_completion_result(completion, result);
			return false;
		}
	}

	allocation->zone = zone->next;
	completion->callback_thread_id = allocation->zone->thread_id;
	vdo_launch_completion(completion);
	return true;
}
/**
 * vdo_allocate_block_in_zone() - Attempt to allocate a block in the current physical zone, and if
 *                                that fails try the next zone if possible.
 * @data_vio: The data_vio needing an allocation.
 *
 * Return: true if a block was allocated; if not, the data_vio will have been dispatched so the
 *         caller must not touch it.
 */
bool vdo_allocate_block_in_zone(struct data_vio *data_vio)
{
	int result = allocate_and_lock_block(&data_vio->allocation);

	if (result == VDO_SUCCESS)
		return true;

	if ((result != VDO_NO_SPACE) || !continue_allocating(data_vio))
		continue_data_vio_with_error(data_vio, result);

	return false;
}
/**
 * vdo_release_physical_zone_pbn_lock() - Release a physical block lock if it is held and return it
 *                                        to the lock pool.
 * @zone: The physical zone in which the lock was obtained.
 * @locked_pbn: The physical block number to unlock.
 * @lock: The lock being released.
 *
 * It must be the last live reference, as if the memory were being freed (the lock memory will be
 * re-initialized or zeroed).
 */
void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
					physical_block_number_t locked_pbn,
					struct pbn_lock *lock)
{
	struct pbn_lock *holder;

	if (lock == NULL)
		return;

	VDO_ASSERT_LOG_ONLY(lock->holder_count > 0,
			    "should not be releasing a lock that is not held");

	lock->holder_count -= 1;
	if (lock->holder_count > 0) {
		/* The lock was shared and is still referenced, so don't release it yet. */
		return;
	}

	holder = vdo_int_map_remove(zone->pbn_operations, locked_pbn);
	VDO_ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu",
			    (unsigned long long) locked_pbn);

	release_pbn_lock_provisional_reference(lock, locked_pbn, zone->allocator);
	return_pbn_lock_to_pool(zone->lock_pool, lock);
}
/**
 * vdo_dump_physical_zone() - Dump information about a physical zone to the log for debugging.
 * @zone: The zone to dump.
 */
void vdo_dump_physical_zone(const struct physical_zone *zone)
{
	vdo_dump_block_allocator(zone->allocator);
}