Lines Matching refs:dma_map
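
(The entries below are identifier references to dma_map from the kernel's AF_XDP buffer-pool code; these xp_*() helpers live in net/xdp/xsk_buff_pool.c. Each entry shows the source line number, the matching statement, and the enclosing function; source lines that do not reference the dma_map identifier are elided, which is why the function bodies appear truncated. The notes and userspace C sketches interspersed below are illustrative models of what the listing shows, not kernel code; every model name is invented for illustration.)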
319 struct xsk_dma_map *dma_map; in xp_find_dma_map() local
321 list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) { in xp_find_dma_map()
322 if (dma_map->netdev == pool->netdev) in xp_find_dma_map()
323 return dma_map; in xp_find_dma_map()
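
xp_find_dma_map() walks the umem's xsk_dma_list for an existing mapping whose netdev matches the pool's, so several pools sharing one umem on the same device can reuse a single set of DMA mappings. A minimal userspace model of that lookup (the kernel iterates a list_head with list_for_each_entry(); the struct here is a hypothetical stand-in for struct xsk_dma_map):

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t dma_addr_t;            /* userspace stand-in for the kernel type */
    struct net_device;                      /* opaque: the lookup only compares identity */

    struct dma_map_model {                  /* illustrative stand-in for struct xsk_dma_map */
        const struct net_device *netdev;    /* device these pages are mapped for */
        dma_addr_t *dma_pages;              /* one bus address per umem page */
        uint32_t dma_pages_cnt;
        int users;                          /* models refcount_t users */
        struct dma_map_model *next;         /* models the list_head on umem->xsk_dma_list */
    };

    /* Model of xp_find_dma_map(): return the mapping for netdev, or NULL. */
    static struct dma_map_model *
    find_dma_map(struct dma_map_model *head, const struct net_device *netdev)
    {
        for (struct dma_map_model *m = head; m; m = m->next)
            if (m->netdev == netdev)
                return m;
        return NULL;
    }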
332 struct xsk_dma_map *dma_map; in xp_create_dma_map() local
334 dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL); in xp_create_dma_map()
335 if (!dma_map) in xp_create_dma_map()
338 dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL); in xp_create_dma_map()
339 if (!dma_map->dma_pages) { in xp_create_dma_map()
340 kfree(dma_map); in xp_create_dma_map()
344 dma_map->netdev = netdev; in xp_create_dma_map()
345 dma_map->dev = dev; in xp_create_dma_map()
346 dma_map->dma_pages_cnt = nr_pages; in xp_create_dma_map()
347 refcount_set(&dma_map->users, 1); in xp_create_dma_map()
348 list_add(&dma_map->list, &umem->xsk_dma_list); in xp_create_dma_map()
349 return dma_map; in xp_create_dma_map()
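
xp_create_dma_map() allocates the tracking struct with kzalloc(), the per-page address array with kvcalloc() (which may fall back to vmalloc for large page counts), starts the user refcount at 1, and links the mapping onto the umem's list. Continuing the model above, with calloc() standing in for both kernel allocators:

    #include <stdlib.h>

    /* Model of xp_create_dma_map(): allocate, set refcount to 1, link into the list. */
    static struct dma_map_model *
    create_dma_map(struct dma_map_model **head, const struct net_device *netdev,
                   uint32_t nr_pages)
    {
        struct dma_map_model *m = calloc(1, sizeof(*m));        /* models kzalloc() */
        if (!m)
            return NULL;

        m->dma_pages = calloc(nr_pages, sizeof(*m->dma_pages)); /* models kvcalloc() */
        if (!m->dma_pages) {
            free(m);                    /* mirrors kfree(dma_map) on the failure path */
            return NULL;
        }

        m->netdev = netdev;
        m->dma_pages_cnt = nr_pages;
        m->users = 1;                   /* mirrors refcount_set(&dma_map->users, 1) */
        m->next = *head;                /* mirrors list_add(&dma_map->list, ...) */
        *head = m;
        return m;
    }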
352 static void xp_destroy_dma_map(struct xsk_dma_map *dma_map) in xp_destroy_dma_map() argument
354 list_del(&dma_map->list); in xp_destroy_dma_map()
355 kvfree(dma_map->dma_pages); in xp_destroy_dma_map()
356 kfree(dma_map); in xp_destroy_dma_map()
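
xp_destroy_dma_map() is the inverse: unlink from the list, then free in reverse allocation order, kvfree() pairing with kvcalloc() and kfree() with kzalloc(). In the model:

    /* Model of xp_destroy_dma_map(): unlink, then free in reverse allocation order. */
    static void destroy_dma_map(struct dma_map_model **head, struct dma_map_model *m)
    {
        for (struct dma_map_model **p = head; *p; p = &(*p)->next) {
            if (*p == m) {              /* mirrors list_del(&dma_map->list) */
                *p = m->next;
                break;
            }
        }
        free(m->dma_pages);             /* models kvfree(dma_map->dma_pages) */
        free(m);                        /* models kfree(dma_map) */
    }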
359 static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs) in __xp_dma_unmap() argument
364 for (i = 0; i < dma_map->dma_pages_cnt; i++) { in __xp_dma_unmap()
365 dma = &dma_map->dma_pages[i]; in __xp_dma_unmap()
368 dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE, in __xp_dma_unmap()
374 xp_destroy_dma_map(dma_map); in __xp_dma_unmap()
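
__xp_dma_unmap() unmaps every tracked page with dma_unmap_page_attrs() and then destroys the tracking struct. The source lines elided between 365 and 368 operate on the local dma pointer rather than dma_map (stripping the contiguity bit out of each stored address before unmapping), which is why they do not match refs:dma_map. Modeled:

    #define NEXT_CONTIG 1ULL            /* models XSK_NEXT_PG_CONTIG_MASK (a low flag bit) */

    /* Model of __xp_dma_unmap(): per-page unmap, then destroy the tracking struct. */
    static void unmap_dma_map(struct dma_map_model **head, struct dma_map_model *m)
    {
        for (uint32_t i = 0; i < m->dma_pages_cnt; i++)
            m->dma_pages[i] &= ~NEXT_CONTIG;    /* dma_unmap_page_attrs() would go here */
        destroy_dma_map(head, m);
    }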
379 struct xsk_dma_map *dma_map; in xp_dma_unmap() local
384 dma_map = xp_find_dma_map(pool); in xp_dma_unmap()
385 if (!dma_map) { in xp_dma_unmap()
390 if (!refcount_dec_and_test(&dma_map->users)) in xp_dma_unmap()
393 __xp_dma_unmap(dma_map, attrs); in xp_dma_unmap()
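
xp_dma_unmap() re-resolves the mapping via xp_find_dma_map() and tears it down only when refcount_dec_and_test() reports that the last user is gone; earlier callers merely decrement. A single-threaded model of that gate (the kernel's refcount_t is atomic):

    /* Model of xp_dma_unmap(): only the last user triggers the real teardown. */
    static void put_dma_map(struct dma_map_model **head, struct dma_map_model *m)
    {
        if (--m->users > 0)             /* models !refcount_dec_and_test(&dma_map->users) */
            return;
        unmap_dma_map(head, m);         /* last reference gone: unmap and free */
    }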
401 static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map) in xp_check_dma_contiguity() argument
405 for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) { in xp_check_dma_contiguity()
406 if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1]) in xp_check_dma_contiguity()
407 dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK; in xp_check_dma_contiguity()
409 dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK; in xp_check_dma_contiguity()
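
xp_check_dma_contiguity() relies on DMA addresses being page aligned, so the low bits of every entry are free for metadata: when page i+1 starts exactly PAGE_SIZE after page i, it sets XSK_NEXT_PG_CONTIG_MASK in entry i, letting fast paths test a single bit to see whether a frame may safely cross that page boundary. A model, assuming 4 KiB pages and bit 0 as the mask:

    #define PG_SIZE 4096ULL             /* assumes 4 KiB pages */

    /* Model of xp_check_dma_contiguity(): tag entry i when page i+1 directly follows it. */
    static void mark_contiguity(dma_addr_t *pages, uint32_t cnt)
    {
        for (uint32_t i = 0; i + 1 < cnt; i++) {
            if (pages[i] + PG_SIZE == pages[i + 1])
                pages[i] |= NEXT_CONTIG;    /* page-aligned addresses leave bit 0 free */
            else
                pages[i] &= ~NEXT_CONTIG;
        }
    }

With addresses {0x10000, 0x11000, 0x20000}, only entry 0 ends up tagged: 0x10000 + 0x1000 == 0x11000, while 0x11000 + 0x1000 != 0x20000.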
413 static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map) in xp_init_dma_info() argument
421 xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr); in xp_init_dma_info()
425 pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL); in xp_init_dma_info()
429 pool->dev = dma_map->dev; in xp_init_dma_info()
430 pool->dma_pages_cnt = dma_map->dma_pages_cnt; in xp_init_dma_info()
431 memcpy(pool->dma_pages, dma_map->dma_pages, in xp_init_dma_info()
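
xp_init_dma_info() publishes the shared mapping into the pool: for aligned pools it first pre-computes each buffer's DMA address via xp_init_xskb_dma() (line 421), then kvcalloc()s a private pool->dma_pages array and memcpy()s the shared entries across, contiguity bits included, so the hot path never has to dereference the shared struct xsk_dma_map. The copy step in the model:

    #include <string.h>

    struct pool_model {                 /* just the DMA-related view of xsk_buff_pool */
        dma_addr_t *dma_pages;
        uint32_t dma_pages_cnt;
    };

    /* Model of xp_init_dma_info()'s copy step: snapshot the shared array into the pool. */
    static int init_dma_info(struct pool_model *pool, const struct dma_map_model *m)
    {
        pool->dma_pages = calloc(m->dma_pages_cnt, sizeof(*pool->dma_pages));
        if (!pool->dma_pages)
            return -1;                  /* the kernel returns -ENOMEM here */
        pool->dma_pages_cnt = m->dma_pages_cnt;
        memcpy(pool->dma_pages, m->dma_pages,
               m->dma_pages_cnt * sizeof(*pool->dma_pages));
        return 0;
    }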
440 struct xsk_dma_map *dma_map; in xp_dma_map() local
445 dma_map = xp_find_dma_map(pool); in xp_dma_map()
446 if (dma_map) { in xp_dma_map()
447 err = xp_init_dma_info(pool, dma_map); in xp_dma_map()
451 refcount_inc(&dma_map->users); in xp_dma_map()
455 dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem); in xp_dma_map()
456 if (!dma_map) in xp_dma_map()
459 for (i = 0; i < dma_map->dma_pages_cnt; i++) { in xp_dma_map()
463 __xp_dma_unmap(dma_map, attrs); in xp_dma_map()
466 dma_map->dma_pages[i] = dma; in xp_dma_map()
470 xp_check_dma_contiguity(dma_map); in xp_dma_map()
472 err = xp_init_dma_info(pool, dma_map); in xp_dma_map()
474 __xp_dma_unmap(dma_map, attrs); in xp_dma_map()
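
xp_dma_map() ties the pieces together: on a lookup hit it copies the DMA info into the pool and takes a reference; on a miss it creates a mapping, maps each page (the mapping call on the lines elided between 459 and 466 stores its result in a local before line 466 records it into dma_map->dma_pages[i]), rolls everything back through __xp_dma_unmap() if any page fails to map or the final xp_init_dma_info() fails, and marks contiguity before publishing. The whole get-or-create flow in the model, with a hypothetical fake_map_page() standing in for dma_map_page_attrs():

    #include <errno.h>

    /* Hypothetical stand-in for dma_map_page_attrs(); returning 0 would mean failure. */
    static dma_addr_t fake_map_page(uint32_t i) { return (dma_addr_t)(i + 1) * PG_SIZE; }

    /* Model of xp_dma_map(): reuse a shared mapping when one exists, else build one. */
    static int dma_map_pool(struct dma_map_model **head, struct pool_model *pool,
                            const struct net_device *netdev, uint32_t nr_pages)
    {
        struct dma_map_model *m = find_dma_map(*head, netdev);

        if (m) {                        /* hit: copy the info, then take a reference */
            int err = init_dma_info(pool, m);
            if (err)
                return err;
            m->users++;                 /* mirrors refcount_inc(&dma_map->users) */
            return 0;
        }

        m = create_dma_map(head, netdev, nr_pages);
        if (!m)
            return -ENOMEM;

        for (uint32_t i = 0; i < m->dma_pages_cnt; i++) {
            dma_addr_t dma = fake_map_page(i);
            if (!dma) {                 /* models the dma_mapping_error() check */
                unmap_dma_map(head, m); /* roll back everything mapped so far */
                return -ENOMEM;
            }
            m->dma_pages[i] = dma;
        }

        mark_contiguity(m->dma_pages, m->dma_pages_cnt);

        int err = init_dma_info(pool, m);
        if (err)
            unmap_dma_map(head, m);     /* mirrors the failure path at line 474 */
        return err;
    }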