Lines Matching full:slow
92 stats->alloc_stats.slow += pool->alloc_stats.slow; in page_pool_get_stats()
137 *data++ = pool_stats->alloc_stats.slow; in page_pool_ethtool_stats_get()
201 memcpy(&pool->slow, &params->slow, sizeof(pool->slow)); in page_pool_init()
206 if (pool->slow.flags & ~PP_FLAG_ALL) in page_pool_init()
220 if (pool->slow.flags & PP_FLAG_DMA_MAP) { in page_pool_init()
228 if (pool->slow.flags & PP_FLAG_DMA_SYNC_DEV) { in page_pool_init()
232 if (!(pool->slow.flags & PP_FLAG_DMA_MAP)) in page_pool_init()
245 pool->has_init_callback = !!pool->slow.init_callback; in page_pool_init()
248 if (!(pool->slow.flags & PP_FLAG_SYSTEM_POOL)) { in page_pool_init()
278 if (pool->slow.flags & PP_FLAG_ALLOW_UNREADABLE_NETMEM) { in page_pool_init()
284 rxq = __netif_get_rx_queue(pool->slow.netdev, in page_pool_init()
285 pool->slow.queue_idx); in page_pool_init()
517 /* slow path */
565 alloc_stat_inc(pool, slow); in __page_pool_alloc_pages_slow()
586 /* Slow-path: cache empty, do real allocation */ in page_pool_alloc_netmem()
639 pool->slow.init_callback(netmem, pool->slow.init_arg); in page_pool_set_pp_info()
1073 netdev = READ_ONCE(pool->slow.netdev); in page_pool_release_retry()