Searched refs:sgt_append (Results 1 – 14 of 14) sorted by relevance
/linux-6.12.1/drivers/infiniband/core/umem.c
     55  ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,  in __ib_umem_release()
     58  for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)  in __ib_umem_release()
     62  sg_free_append_table(&umem->sgt_append);  in __ib_umem_release()
    109  for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {  in ib_umem_find_best_pgsz()
    119  if (i != (umem->sgt_append.sgt.nents - 1))  in ib_umem_find_best_pgsz()
    226  &umem->sgt_append, page_list, pinned, 0,  in ib_umem_get()
    238  ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,  in ib_umem_get()
    301  ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,  in ib_umem_copy_from()
    302  umem->sgt_append.sgt.orig_nents, dst, length,  in ib_umem_copy_from()
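These matches cover the whole ib_umem lifecycle: ib_umem_get() pins the user pages and DMA-maps them into sgt_append.sgt, ib_umem_find_best_pgsz() derives a usable hardware page size from that table, and __ib_umem_release() unmaps, unpins, and frees it. A minimal consumer-side sketch, assuming a driver context; my_pin_for_mr() and the 4K/2M/1G capability bitmap are illustrative, not kernel API:

#include <linux/sizes.h>
#include <rdma/ib_umem.h>

/* Hypothetical helper: pin + map a user range, then pick the device
 * page size to program. Returns the umem or an ERR_PTR. */
static struct ib_umem *my_pin_for_mr(struct ib_device *ibdev,
				     unsigned long start, size_t length,
				     unsigned long virt, unsigned long *pgsz)
{
	struct ib_umem *umem;

	umem = ib_umem_get(ibdev, start, length, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return umem;

	/* Largest page size from the assumed 4K/2M/1G HW support that
	 * the scatterlist layout permits; 0 means nothing fits. */
	*pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G, virt);
	if (!*pgsz) {
		ib_umem_release(umem);	/* undoes the map and the pin */
		return ERR_PTR(-EINVAL);
	}
	return umem;
}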
/linux-6.12.1/drivers/infiniband/core/umem_dmabuf.c
     65  umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg;  in ib_umem_dmabuf_map_pages()
     66  umem_dmabuf->umem.sgt_append.sgt.nents = nmap;  in ib_umem_dmabuf_map_pages()
/linux-6.12.1/include/rdma/ib_umem.h
     28  struct sg_append_table sgt_append;  (member declaration)
     58  return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &  in ib_umem_dma_offset()
     79  __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,  in __rdma_umem_block_iter_start()
     80  umem->sgt_append.sgt.nents, pgsz);  in __rdma_umem_block_iter_start()
    138  struct scatterlist *sg = umem->sgt_append.sgt.sgl;  in ib_umem_find_best_pgoff()
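The __rdma_umem_block_iter_start() lines above are the engine behind the rdma_umem_for_each_dma_block() iterator, which is how most drivers consume sgt_append.sgt. A sketch under assumed names (my_fill_ptes, ptes, nptes are made up for illustration):

#include <rdma/ib_umem.h>

/* Walk the DMA-mapped table in pgsz-aligned blocks and collect one
 * address per block, as a device page-table writer would. */
static int my_fill_ptes(struct ib_umem *umem, unsigned long pgsz,
			u64 *ptes, size_t nptes)
{
	struct ib_block_iter biter;
	size_t i = 0;

	rdma_umem_for_each_dma_block(umem, &biter, pgsz) {
		if (i == nptes)
			return -ENOSPC;
		ptes[i++] = rdma_block_iter_dma_address(&biter);
	}
	return 0;
}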
/linux-6.12.1/lib/scatterlist.c
    455  int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,  in sg_alloc_append_table_from_pages() (function argument)
    462  struct scatterlist *s = sgt_append->prv;  in sg_alloc_append_table_from_pages()
    473  if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv)  in sg_alloc_append_table_from_pages()
    476  if (sgt_append->prv) {  in sg_alloc_append_table_from_pages()
    477  unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) +  in sg_alloc_append_table_from_pages()
    478  sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE;  in sg_alloc_append_table_from_pages()
    484  prv_len = sgt_append->prv->length;  in sg_alloc_append_table_from_pages()
    488  if (sgt_append->prv->length + PAGE_SIZE > max_segment)  in sg_alloc_append_table_from_pages()
    490  sgt_append->prv->length += PAGE_SIZE;  in sg_alloc_append_table_from_pages()
    527  s = get_next_sg(sgt_append, s, chunks - i + left_pages,  in sg_alloc_append_table_from_pages()
    [all …]
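This is the allocator behind every sgt_append above; ib_umem_get() calls it once per batch of pinned pages, passing the number of pages still to come as left_pages. A two-batch sketch with illustrative names (my_build_table, batch1/batch2, n1/n2); the page pinning and unpinning are elided:

#include <linux/scatterlist.h>

static int my_build_table(struct sg_append_table *at,
			  struct page **batch1, unsigned int n1,
			  struct page **batch2, unsigned int n2)
{
	int ret;

	/* First batch: left_pages = n2 announces that more pages follow,
	 * so the allocator can leave room to chain further SGEs. */
	ret = sg_alloc_append_table_from_pages(at, batch1, n1, 0,
					       (unsigned long)n1 * PAGE_SIZE,
					       UINT_MAX, n2, GFP_KERNEL);
	if (ret)
		return ret;

	/* Final batch: left_pages = 0 completes the table. Pages that are
	 * physically contiguous with the current tail get merged into its
	 * SGE (the prv handling at lines 476-490 above). */
	ret = sg_alloc_append_table_from_pages(at, batch2, n2, 0,
					       (unsigned long)n2 * PAGE_SIZE,
					       UINT_MAX, 0, GFP_KERNEL);
	if (ret)
		sg_free_append_table(at);
	return ret;
}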
/linux-6.12.1/drivers/infiniband/hw/hns/hns_roce_db.c
     44  db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset;  in hns_roce_db_map_user()
     45  db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset;  in hns_roce_db_map_user()
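hns here, and mlx4, mlx5, and erdma below, all resolve a user doorbell record the same way: the record sits in a single-page umem, so the first (and only) SGE of sgt_append.sgt plus the in-page offset yields both its DMA and kernel-virtual address. The shared pattern, with illustrative names (db_umem, offset):

	/* db_umem: one-page umem pinned around the doorbell record;
	 * offset: the record's position within that page. */
	dma_addr_t dma = sg_dma_address(db_umem->sgt_append.sgt.sgl) + offset;
	void *virt = sg_virt(db_umem->sgt_append.sgt.sgl) + offset;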
/linux-6.12.1/drivers/infiniband/hw/mlx4/doorbell.c
     78  db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +  in mlx4_ib_db_map_user()
/linux-6.12.1/drivers/infiniband/hw/mlx4/mr.c
    203  for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {  in mlx4_ib_umem_write_mtt()
    276  for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {  in mlx4_ib_umem_calc_optimal_mtt_size()
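Both mlx4 sites iterate the mapped table one DMA SGE at a time and slice each SGE into device-sized pages. A sketch of that walk; my_write_mtt, mtt, and shift are illustrative stand-ins for the driver's MTT machinery:

#include <rdma/ib_umem.h>

static int my_write_mtt(struct ib_umem *umem, int shift, u64 *mtt, int max)
{
	struct scatterlist *sg;
	u64 addr, end;
	int i, n = 0;

	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
		end = sg_dma_address(sg) + sg_dma_len(sg);
		/* One entry per (1 << shift)-byte device page in the SGE. */
		for (addr = sg_dma_address(sg); addr < end;
		     addr += 1ULL << shift) {
			if (n == max)
				return -ENOSPC;
			mtt[n++] = addr;
		}
	}
	return n;
}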
/linux-6.12.1/drivers/infiniband/hw/mlx5/doorbell.c
     82  db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +  in mlx5_ib_db_map_user()
/linux-6.12.1/drivers/infiniband/sw/siw/siw_mem.c
    393  sgt = &base_mem->sgt_append.sgt;  in siw_umem_get()
/linux-6.12.1/drivers/infiniband/sw/rxe/rxe_mr.c
    146  err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt);  in rxe_mr_init_user()
/linux-6.12.1/drivers/infiniband/sw/rdmavt/mr.c
    371  for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {  in rvt_reg_user_mr()
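Unlike the DMA walks above, rdmavt copies with the CPU, so it iterates struct page entries rather than bus addresses. A sketch of that CPU-side walk (my_walk_pages is an illustrative name; the iterator's third argument is the starting page offset into the table):

#include <rdma/ib_umem.h>

static void my_walk_pages(struct ib_umem *umem)
{
	struct sg_page_iter sg_iter;

	for_each_sgtable_page(&umem->sgt_append.sgt, &sg_iter, 0) {
		struct page *pg = sg_page_iter_page(&sg_iter);

		/* e.g. kmap_local_page(pg), copy, kunmap_local(...) */
		(void)pg;
	}
}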
/linux-6.12.1/drivers/infiniband/hw/erdma/erdma_verbs.c
    850  *dma_addr = sg_dma_address(page->umem->sgt_append.sgt.sgl) +  in erdma_map_user_dbrecords()
/linux-6.12.1/drivers/infiniband/hw/qedr/verbs.c
   1491  sg = srq->prod_umem->sgt_append.sgt.sgl;  in qedr_init_srq_user_params()
/linux-6.12.1/drivers/infiniband/hw/irdma/verbs.c
   2347  iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);  in irdma_copy_user_pgaddrs()