Lines matching "+full:0 +full:xa" in net/core/xdp.c
25 #define REG_STATE_NEW 0x0
26 #define REG_STATE_REGISTERED 0x1
27 #define REG_STATE_UNREGISTERED 0x2
28 #define REG_STATE_UNUSED 0x3
32 #define MEM_ID_MAX 0xFFFE
54 const struct xdp_mem_allocator *xa = ptr; in xdp_mem_id_cmp() local
57 return xa->mem.id != mem_id; in xdp_mem_id_cmp()
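The two matches above come from the rhashtable compare callback that keys allocators by mem.id. A hedged reconstruction of the full function (the argument unpacking is inferred from the matched lines and the usual obj_cmpfn shape; per memcmp convention, 0 means "match"):

    static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
                              const void *ptr)
    {
            const struct xdp_mem_allocator *xa = ptr;
            u32 mem_id = *(u32 *)arg->key;

            /* obj_cmpfn convention: zero on match, nonzero otherwise */
            return xa->mem.id != mem_id;
    }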
74 struct xdp_mem_allocator *xa; in __xdp_mem_allocator_rcu_free() local
76 xa = container_of(rcu, struct xdp_mem_allocator, rcu); in __xdp_mem_allocator_rcu_free()
79 ida_free(&mem_id_pool, xa->mem.id); in __xdp_mem_allocator_rcu_free()
81 kfree(xa); in __xdp_mem_allocator_rcu_free()
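The free path above is the classic deferred-free idiom: call_rcu() is handed the embedded rcu_head, and the callback uses container_of() to get back to the enclosing allocator before releasing its id and memory. A standalone userspace sketch of just the container_of() step (the struct names here are illustrative stand-ins, not kernel types):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head_stub { void *next; };   /* stand-in for struct rcu_head */

    struct mem_allocator_stub {             /* stand-in for xdp_mem_allocator */
            int mem_id;
            struct rcu_head_stub rcu;       /* embedded callback handle */
    };

    /* The RCU callback only receives &obj->rcu; container_of() recovers
     * the enclosing object so it can be freed after the grace period. */
    static void rcu_free_cb(struct rcu_head_stub *rcu)
    {
            struct mem_allocator_stub *xa =
                    container_of(rcu, struct mem_allocator_stub, rcu);
            printf("freeing allocator id %d\n", xa->mem_id);
            free(xa);
    }

    int main(void)
    {
            struct mem_allocator_stub *xa = calloc(1, sizeof(*xa));
            xa->mem_id = 42;
            rcu_free_cb(&xa->rcu);          /* what call_rcu() would invoke */
            return 0;
    }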
84 static void mem_xa_remove(struct xdp_mem_allocator *xa) in mem_xa_remove() argument
86 trace_mem_disconnect(xa); in mem_xa_remove()
88 if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params)) in mem_xa_remove()
89 call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); in mem_xa_remove()
94 struct xdp_mem_allocator *xa; in mem_allocator_disconnect() local
103 while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) { in mem_allocator_disconnect()
104 if (xa->allocator == allocator) in mem_allocator_disconnect()
105 mem_xa_remove(xa); in mem_allocator_disconnect()
110 } while (xa == ERR_PTR(-EAGAIN)); in mem_allocator_disconnect()
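The do/while above restarts the whole table walk whenever the iterator reports -EAGAIN, which rhashtable_walk_next() returns if a concurrent resize invalidated the iteration; entries may then be visited twice, which is why the removal must be idempotent. A toy userspace model of that restart-on-EAGAIN shape (walk_next() here is a made-up stand-in, not a kernel function):

    #include <errno.h>
    #include <stdio.h>

    static int pos, resized_once;

    /* Made-up iterator: yields 1..4, but fails once mid-walk with -EAGAIN
     * to mimic an rhashtable resize invalidating the walker. */
    static int walk_next(int *out)
    {
            if (!resized_once && pos == 2) {
                    resized_once = 1;
                    return -EAGAIN;
            }
            if (pos >= 4)
                    return 0;               /* end of table */
            *out = ++pos;
            return 1;                       /* produced an element */
    }

    int main(void)
    {
            int rc, elem;

            do {                    /* same shape as mem_allocator_disconnect() */
                    pos = 0;        /* restart the walk from scratch */
                    while ((rc = walk_next(&elem)) == 1)
                            printf("visit %d\n", elem);
            } while (rc == -EAGAIN);
            return 0;
    }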
118 struct xdp_mem_allocator *xa; in xdp_unreg_mem_model() local
123 mem->id = 0; in xdp_unreg_mem_model()
124 mem->type = 0; in xdp_unreg_mem_model()
126 if (id == 0) in xdp_unreg_mem_model()
130 xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params); in xdp_unreg_mem_model()
131 page_pool_destroy(xa->page_pool); in xdp_unreg_mem_model()
162 memset(xdp_rxq, 0, sizeof(*xdp_rxq)); in xdp_rxq_info_init()
165 /* Returns 0 on success, negative on failure */
193 return 0; in __xdp_rxq_info_reg()
215 return 0; in __mem_id_init_hash_table()
222 if (ret < 0) { in __mem_id_init_hash_table()
230 return 0; in __mem_id_init_hash_table()
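For context, the table initialized here keys struct xdp_mem_allocator by its 32-bit mem.id, with xdp_mem_id_cmp() (above) as the compare callback. A hedged reconstruction of the parameter block; the exact field values in the real file may differ:

    static const struct rhashtable_params mem_id_rht_params = {
            .nelem_hint  = 64,              /* sizing hint; a guess */
            .head_offset = offsetof(struct xdp_mem_allocator, node),
            .key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
            .key_len     = sizeof(u32),     /* mem.id is a u32 */
            .max_size    = MEM_ID_MAX,
            .obj_cmpfn   = xdp_mem_id_cmp,
    };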
245 if (id < 0) { in __mem_id_cyclic_get()
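The id < 0 branch above belongs to the cyclic id allocator: ids come from an IDA bounded by MEM_ID_MAX (0xFFFE, so id 0 can keep meaning "unregistered"), and on exhaustion the cursor wraps back to the minimum id for one retry. A hedged reconstruction (the cursor and minimum-id names are inferred, not confirmed by the matches):

    static int __mem_id_cyclic_get(gfp_t gfp)
    {
            int retries = 1;
            int id;

    again:
            id = ida_alloc_range(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
            if (id < 0) {
                    /* Pool exhausted at the top: wrap the cursor once and
                     * retry, which is what makes the allocation cyclic. */
                    if (id == -ENOSPC && retries--) {
                            mem_id_next = MEM_ID_MIN;
                            goto again;
                    }
                    return id;
            }
            mem_id_next = id + 1;
            return id;
    }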
296 if (ret < 0) in __xdp_reg_mem_model()
306 if (id < 0) { in __xdp_reg_mem_model()
318 mem->id = 0; in __xdp_reg_mem_model()
343 return 0; in xdp_reg_mem_model()
363 return 0; in xdp_rxq_info_reg_mem_model()
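Taken together, the two registration entry points above are what a driver calls when bringing up an RX queue. A hedged driver-side sketch (recent-kernel signatures; rq, netdev, napi_id and the page_pool parameters are hypothetical, and error unwinding is elided):

    struct page_pool_params pp_params = {
            .order     = 0,
            .pool_size = 256,               /* hypothetical ring size */
            .nid       = NUMA_NO_NODE,
            .dev       = &pdev->dev,        /* hypothetical DMA device */
            .dma_dir   = DMA_FROM_DEVICE,
    };
    struct page_pool *pool = page_pool_create(&pp_params);
    int err;

    err = xdp_rxq_info_reg(&rq->xdp_rxq, netdev, rq->index, napi_id);
    if (!err)
            err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
                                             MEM_TYPE_PAGE_POOL, pool);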
384 /* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE) in __xdp_return()
416 for (i = 0; i < sinfo->nr_frags; i++) { in xdp_return_frame()
435 for (i = 0; i < sinfo->nr_frags; i++) { in xdp_return_frame_rx_napi()
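The two identical-looking loops above are the multi-buffer (frags) handling in the two frame-return paths: before the head buffer is released, every fragment page goes back through the same __xdp_return() machinery. A hedged reconstruction of what that loop plausibly looks like in xdp_return_frame():

    struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
    int i;

    for (i = 0; i < sinfo->nr_frags; i++) {
            struct page *page = skb_frag_page(&sinfo->frags[i]);

            /* false = not NAPI-direct; the _rx_napi variant passes true */
            __xdp_return(page_address(page), &xdpf->mem, false, NULL);
    }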
457 struct xdp_mem_allocator *xa = bq->xa; in xdp_flush_frame_bulk() local
459 if (unlikely(!xa || !bq->count)) in xdp_flush_frame_bulk()
462 page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count); in xdp_flush_frame_bulk()
463 /* bq->xa is not cleared to save lookup, if mem.id same in next bulk */ in xdp_flush_frame_bulk()
464 bq->count = 0; in xdp_flush_frame_bulk()
473 struct xdp_mem_allocator *xa; in xdp_return_frame_bulk() local
480 xa = bq->xa; in xdp_return_frame_bulk()
481 if (unlikely(!xa)) { in xdp_return_frame_bulk()
482 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); in xdp_return_frame_bulk()
483 bq->count = 0; in xdp_return_frame_bulk()
484 bq->xa = xa; in xdp_return_frame_bulk()
490 if (unlikely(mem->id != xa->mem.id)) { in xdp_return_frame_bulk()
492 bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); in xdp_return_frame_bulk()
500 for (i = 0; i < sinfo->nr_frags; i++) { in xdp_return_frame_bulk()
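The bulk path above caches the resolved allocator in bq->xa and flushes queued pages to the page_pool in one call; xdp_return_frame_bulk() re-resolves the allocator only when mem.id changes between frames, and the nr_frags loop handles fragmented frames. The intended caller pattern, sketched under the assumption of a safe (e.g. RCU/NAPI) context; frames[] and n are hypothetical:

    struct xdp_frame_bulk bq;
    int i;

    xdp_frame_bulk_init(&bq);           /* starts with bq.xa == NULL */

    rcu_read_lock();                    /* keep mem models alive */
    for (i = 0; i < n; i++)
            xdp_return_frame_bulk(frames[i], &bq);
    xdp_flush_frame_bulk(&bq);          /* release anything still queued */
    rcu_read_unlock();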
521 for (i = 0; i < sinfo->nr_frags; i++) { in xdp_return_buff()
549 metasize = xdp_data_meta_unsupported(xdp) ? 0 : in xdp_convert_zc_to_xdp_frame()
562 memset(xdpf, 0, sizeof(*xdpf)); in xdp_convert_zc_to_xdp_frame()
570 xdpf->headroom = 0; in xdp_convert_zc_to_xdp_frame()
593 return 0; in xdp_alloc_skb_bulk()
662 memset(skb, 0, offsetof(struct sk_buff, tail)); in xdp_build_skb_from_frame()
691 nxdpf->mem.id = 0; in xdpf_clone()
704 * * Returns 0 on success or ``-errno`` on error.
726 * * Returns 0 on success or ``-errno`` on error.
753 * *VLAN identifier (VID)* that can be read with ``vlan_tci & 0xfff``,
759 * * Returns 0 on success or ``-errno`` on error.
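The "vlan_tci & 0xfff" masking referenced above follows the standard 802.1Q TCI packing. Tiny illustrative helpers (the names are mine, not kernel API) making each field's mask explicit:

    #include <stdint.h>

    static inline uint16_t tci_vid(uint16_t tci) { return tci & 0x0fff; }    /* VLAN ID, bits 0-11 */
    static inline uint8_t  tci_dei(uint16_t tci) { return (tci >> 12) & 1; } /* drop eligible, bit 12 */
    static inline uint8_t  tci_pcp(uint16_t tci) { return tci >> 13; }       /* priority, bits 13-15 */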