/* Excerpts from mm/swap_slots.c: the per-cpu swap slots cache */
// SPDX-License-Identifier: GPL-2.0
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 * ...
 * We allocate the swap slots from the global pool and put
 * them into local per-cpu caches, so most allocations avoid
 * the global pool locks.
 * ...
 * The swap entry allocated is marked with SWAP_HAS_CACHE
 * so it is not handed out again from the global pool.
 * ...
 * The swap slots cache is protected by a mutex instead of
 * a spin lock, because the allocation path may sleep.
 */
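/*
 * For context, a sketch of the per-cpu cache these excerpts operate on.
 * Field names follow the upstream struct swap_slots_cache; treat this as
 * an illustrative reconstruction rather than a verbatim copy:
 */
struct swap_slots_cache {
	bool		lock_initialized;
	struct mutex	alloc_lock;	/* protects slots, nr, cur */
	swp_entry_t	*slots;		/* slots ready to be handed out */
	int		nr;		/* unused slots remaining */
	int		cur;		/* index of next slot to hand out */
	spinlock_t	free_lock;	/* protects slots_ret, n_ret */
	swp_entry_t	*slots_ret;	/* returned slots, awaiting batch free */
	int		n_ret;		/* returned slots queued so far */
};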
/* Serialize swap slots cache enable/disable operations */
...

/* Must not be called with cpu hotplug lock held */
...
static int alloc_swap_slot_cache(unsigned int cpu)
{
	...
	if (!slots)
		return -ENOMEM;
	...
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}
	...
	if (cache->slots || cache->slots_ret) {
		/* cache already allocated */
		...
	}
	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	/*
	 * Publish the arrays last: readers use
	 * !cache->slots or !cache->slots_ret to know if it is safe to acquire
	 * the corresponding lock and use the cache.
	 */
	cache->slots = slots;
	cache->slots_ret = slots_ret;
	return 0;
}
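/*
 * The two -ENOMEM paths above come from allocating both arrays before
 * taking any cache lock, so that reclaim triggered by the allocation
 * cannot deadlock against the cache. A minimal sketch of that pattern;
 * alloc_slot_arrays() is a hypothetical helper name, and
 * SWAP_SLOTS_CACHE_SIZE entries per array is assumed:
 */
static int alloc_slot_arrays(swp_entry_t **slotsp, swp_entry_t **slots_retp)
{
	swp_entry_t *slots, *slots_ret;

	/* allocate up front, with no cache lock held */
	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);		/* undo the first allocation */
		return -ENOMEM;
	}

	*slotsp = slots;
	*slots_retp = slots_ret;
	return 0;
}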
static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache = &per_cpu(swp_slots, cpu);
	swp_entry_t *slots = NULL;

	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		kvfree(slots);	/* free outside the irq-disabled section */
	}
}
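/*
 * SLOTS_CACHE and SLOTS_CACHE_RET select which half of the per-cpu cache
 * to drain. A sketch of plausible definitions; upstream uses a bitmask
 * like this, but treat the exact values as an assumption:
 */
#define SLOTS_CACHE	0x1	/* drain the allocation-side slots[] */
#define SLOTS_CACHE_RET	0x2	/* drain the return-side slots_ret[] */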
static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *	2) disabling of swap slot cache, when we run low
	 *	   on swap slots when allocating memory and need
	 *	   to return swap slots to global pool.
	 *
	 * We cannot acquire cpu hotplug lock here as
	 * this function can be invoked in the cpu
	 * hotplug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *	  -> memory allocation -> direct reclaim -> folio_alloc_swap
	 *	  -> drain_swap_slots_cache
	 *
	 * The loop below can therefore miss a cpu that is still coming
	 * online; that is harmless, as such a cpu has not yet been able to
	 * fill any swap slots in its slots cache.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}
247 "without swap slots cache.\n", __func__)) in enable_swap_slots_cache()
/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
					   cache->slots, 0);

	return cache->nr;
}
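/*
 * The contract assumed above for get_swap_pages(): take up to n_goal
 * slots of the given order from the global pool into swp_entries[],
 * returning how many were actually obtained. The exact upstream
 * prototype varies across kernel versions; this is a sketch:
 */
int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order);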
void free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	/* Large folio swap slots are not covered by this cache. */
	cache = raw_cpu_ptr(&swp_slots);
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to global pool.  The current
			 * swap_map value is SWAP_HAS_CACHE; clearing it
			 * makes the entries available for allocation again.
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}
}
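/*
 * A standalone model of the return-side batching above: frees go into a
 * fixed buffer, and the (expensive) global free runs once per full
 * batch. Plain C just to illustrate the amortization; not kernel code,
 * and the names below are invented for the model:
 */
#include <stddef.h>
#include <stdio.h>

#define CACHE_SIZE 64

struct ret_cache {
	unsigned long slots[CACHE_SIZE];
	size_t n;
};

/* stand-in for swapcache_free_entries(): one batched global operation */
static void global_free(const unsigned long *slots, size_t n)
{
	printf("flushing %zu entries to the global pool\n", n);
	(void)slots;
}

static void cached_free(struct ret_cache *c, unsigned long entry)
{
	if (c->n >= CACHE_SIZE) {	/* buffer full: flush the whole batch */
		global_free(c->slots, c->n);
		c->n = 0;
	}
	c->slots[c->n++] = entry;	/* queue this free, no global work done */
}

int main(void)
{
	struct ret_cache c = { .n = 0 };

	for (unsigned long e = 1; e <= 200; e++)
		cached_free(&c, e);	/* 200 frees -> three flushes of 64 */
	return 0;
}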
swp_entry_t folio_alloc_swap(struct folio *folio)
{
	swp_entry_t entry;
	struct swap_slots_cache *cache;

	entry.val = 0;
	...	/* large folios take a separate path */

	/*
	 * Preemption is allowed here, because we may sleep in
	 * refill_swap_slots_cache().  That is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret,
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			} else if (refill_swap_slots_cache(cache)) {
				goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			goto out;
	}

	get_swap_pages(1, &entry, 0);	/* fall back to the global pool */
out:
	return entry;
}
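/*
 * A standalone model of the alloc-side loop above: hand out a slot from
 * the per-cpu buffer if one is left, otherwise refill the whole buffer
 * from the global pool and retry. Plain C illustration with invented
 * names; not kernel code:
 */
#include <stddef.h>
#include <stdio.h>

#define CACHE_SIZE 64

struct alloc_cache {
	unsigned long slots[CACHE_SIZE];
	size_t cur;	/* index of next slot to hand out */
	size_t nr;	/* slots remaining in the buffer */
};

static unsigned long next_entry = 1;

/* stand-in for get_swap_pages(): one batched refill from the global pool */
static size_t global_alloc(unsigned long *slots, size_t n)
{
	for (size_t i = 0; i < n; i++)
		slots[i] = next_entry++;
	return n;
}

static unsigned long cached_alloc(struct alloc_cache *c)
{
	if (!c->nr) {			/* buffer empty: refill in one batch */
		c->cur = 0;
		c->nr = global_alloc(c->slots, CACHE_SIZE);
		if (!c->nr)
			return 0;	/* global pool exhausted */
	}
	c->nr--;
	return c->slots[c->cur++];	/* hand out the next cached slot */
}

int main(void)
{
	struct alloc_cache c = { .cur = 0, .nr = 0 };

	for (int i = 0; i < 100; i++)
		printf("%lu\n", cached_alloc(&c));	/* two refills of 64 */
	return 0;
}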