Lines matching refs: cpu_fbatches (mm/swap.c)

50 struct cpu_fbatches {  struct
68 static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = { variable
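
Lines 50 and 68 are the definition site: a per-CPU bundle of folio batches guarded by two local locks, one preemption-only and one IRQ-safe. A condensed sketch of the layout, based on recent mm/swap.c (field list abridged; exact members vary by kernel version):

#include <linux/local_lock.h>
#include <linux/pagevec.h>      /* struct folio_batch */
#include <linux/percpu.h>

struct cpu_fbatches {
        /* Batches up to lock_irq only need preemption disabled. */
        local_lock_t lock;
        struct folio_batch lru_add;
        struct folio_batch lru_deactivate;
        /* ...further preemption-protected batches elided... */

        /* Batches from lock_irq on are also filled from interrupt context. */
        local_lock_t lock_irq;
        struct folio_batch lru_move_tail;
};

static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
        .lock = INIT_LOCAL_LOCK(lock),
        .lock_irq = INIT_LOCAL_LOCK(lock_irq),
};

The field ordering is not cosmetic: the add-and-move macro below keys off whether a batch sits before or after lock_irq.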
221 local_lock_irqsave(&cpu_fbatches.lock_irq, flags); in __folio_batch_add_and_move()
223 local_lock(&cpu_fbatches.lock); in __folio_batch_add_and_move()
230 local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags); in __folio_batch_add_and_move()
232 local_unlock(&cpu_fbatches.lock); in __folio_batch_add_and_move()
237 &cpu_fbatches.op, \
241 offsetof(struct cpu_fbatches, op) >= offsetof(struct cpu_fbatches, lock_irq) \
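
Lines 221-241 capture the central locking trick. The helper __folio_batch_add_and_move() takes a disable_irq flag and picks the IRQ-safe lock accordingly; the wrapper macro computes that flag at compile time from the batch's position relative to lock_irq. A minimal sketch of the pattern, with the real helper's argument list and LRU bookkeeping abridged:

static void __folio_batch_add_and_move(struct folio_batch __percpu *fbatch,
                                       struct folio *folio, bool disable_irq)
{
        unsigned long flags;

        folio_get(folio);

        if (disable_irq)
                local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
        else
                local_lock(&cpu_fbatches.lock);

        if (!folio_batch_add(this_cpu_ptr(fbatch), folio)) {
                /* Batch is full: move its folios to the LRU lists (elided). */
        }

        if (disable_irq)
                local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
        else
                local_unlock(&cpu_fbatches.lock);
}

/*
 * Any batch laid out at or after lock_irq needs the IRQ-safe lock;
 * offsetof() makes the comparison a compile-time constant.
 */
#define folio_batch_add_and_move(folio, op)                             \
        __folio_batch_add_and_move(&cpu_fbatches.op, folio,             \
                offsetof(struct cpu_fbatches, op) >=                    \
                offsetof(struct cpu_fbatches, lock_irq))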
349 struct folio_batch *fbatch = &per_cpu(cpu_fbatches.lru_activate, cpu); in folio_activate_drain()
387 local_lock(&cpu_fbatches.lock); in __lru_cache_activate_folio()
388 fbatch = this_cpu_ptr(&cpu_fbatches.lru_add); in __lru_cache_activate_folio()
409 local_unlock(&cpu_fbatches.lock); in __lru_cache_activate_folio()
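
Lines 349-409 contrast the two per-CPU access idioms: drain paths address another CPU's batch with per_cpu(), while hot paths take the local lock and use this_cpu_ptr(). A simplified sketch of the rescan that __lru_cache_activate_folio() performs when a folio may still be parked in this CPU's lru_add batch (SMP details and early exits omitted):

static void __lru_cache_activate_folio(struct folio *folio)
{
        struct folio_batch *fbatch;
        int i;

        local_lock(&cpu_fbatches.lock);
        fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);

        /*
         * The folio may not have reached an LRU list yet; search the
         * pending lru_add batch newest-first and mark it active in place.
         */
        for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
                if (fbatch->folios[i] == folio) {
                        folio_set_active(folio);
                        break;
                }
        }

        local_unlock(&cpu_fbatches.lock);
}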
643 struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu); in lru_add_drain_cpu()
655 local_lock_irqsave(&cpu_fbatches.lock_irq, flags); in lru_add_drain_cpu()
657 local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags); in lru_add_drain_cpu()
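
Lines 643-657 are the per-CPU drain. lru_add_drain_cpu() walks every batch of the given CPU and flushes the non-empty ones; only lru_move_tail, which is also filled from interrupt context by folio rotation, must be flushed under lock_irq with interrupts off. A sketch of the shape, assuming the move-function names of recent trees (older kernels use an _fn suffix) and mm/swap.c's internal folio_batch_move_lru() helper, with most batches elided:

void lru_add_drain_cpu(int cpu)
{
        struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
        struct folio_batch *fbatch = &fbatches->lru_add;

        if (folio_batch_count(fbatch))
                folio_batch_move_lru(fbatch, lru_add);

        /* lru_move_tail is also filled from IRQ context: flush it IRQ-safe. */
        fbatch = &fbatches->lru_move_tail;
        if (folio_batch_count(fbatch)) {
                unsigned long flags;

                local_lock_irqsave(&cpu_fbatches.lock_irq, flags);
                folio_batch_move_lru(fbatch, lru_move_tail);
                local_unlock_irqrestore(&cpu_fbatches.lock_irq, flags);
        }

        /* ...lru_deactivate, lru_lazyfree, etc. drained the same way... */
}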
728 local_lock(&cpu_fbatches.lock); in lru_add_drain()
730 local_unlock(&cpu_fbatches.lock); in lru_add_drain()
742 local_lock(&cpu_fbatches.lock); in lru_add_and_bh_lrus_drain()
744 local_unlock(&cpu_fbatches.lock); in lru_add_and_bh_lrus_drain()
751 local_lock(&cpu_fbatches.lock); in lru_add_drain_cpu_zone()
754 local_unlock(&cpu_fbatches.lock); in lru_add_drain_cpu_zone()
769 struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu); in cpu_needs_drain()
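
Lines 728-769 are the drain entry points and the work-avoidance check. lru_add_drain() and its siblings simply take the local lock around a drain of the current CPU, while cpu_needs_drain() peeks at a remote CPU's batch counts so lru_add_drain_all() can skip CPUs with nothing queued. A hedged sketch; the real predicate checks every batch plus the mlock and buffer-head LRU caches:

void lru_add_drain(void)
{
        local_lock(&cpu_fbatches.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&cpu_fbatches.lock);
        mlock_drain_local();
}

static bool cpu_needs_drain(unsigned int cpu)
{
        struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);

        /* Only schedule drain work on CPUs that have something queued. */
        return folio_batch_count(&fbatches->lru_add) ||
               folio_batch_count(&fbatches->lru_move_tail) ||
               /* ...counts of the remaining batches elided... */
               need_mlock_drain(cpu);
}

The unlocked folio_batch_count() reads here are intentionally racy: missing a just-added folio is harmless, since the owning CPU will flush its own batches soon anyway.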