Lines Matching full:pbl
1285 static int pbl_chunk_list_create(struct efa_dev *dev, struct pbl_context *pbl) in pbl_chunk_list_create() argument
1287 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list; in pbl_chunk_list_create()
1288 int page_cnt = pbl->phys.indirect.pbl_buf_size_in_pages; in pbl_chunk_list_create()
1289 struct scatterlist *pages_sgl = pbl->phys.indirect.sgl; in pbl_chunk_list_create()
1291 int sg_dma_cnt = pbl->phys.indirect.sg_dma_cnt; in pbl_chunk_list_create()
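The chunk list packs the DMA addresses of the pbl buffer's pages into fixed-size chunks, each of which reserves its tail for a link to the next chunk. A minimal sketch of the sizing arithmetic follows; the driver's real geometry comes from its EFA_CHUNK_* macros, so the constants below are illustrative assumptions, not the driver's values:

    /* Sketch: number of chunks needed to hold page_cnt payload pointers,
     * assuming each chunk loses CHUNK_PTR_SIZE bytes to the next-chunk link.
     */
    #define CHUNK_SIZE       4096u  /* assumed chunk size */
    #define PAYLOAD_PTR_SIZE 8u     /* one u64 DMA address per slot */
    #define CHUNK_PTR_SIZE   8u     /* assumed size of the next-chunk link */
    #define PTRS_PER_CHUNK   ((CHUNK_SIZE - CHUNK_PTR_SIZE) / PAYLOAD_PTR_SIZE)

    static unsigned int chunk_list_size(unsigned int page_cnt)
    {
            return DIV_ROUND_UP(page_cnt, PTRS_PER_CHUNK);
    }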
1385 static void pbl_chunk_list_destroy(struct efa_dev *dev, struct pbl_context *pbl) in pbl_chunk_list_destroy() argument
1387 struct pbl_chunk_list *chunk_list = &pbl->phys.indirect.chunk_list; in pbl_chunk_list_destroy()
1399 /* initialize pbl continuous mode: map pbl buffer to a dma address. */
1401 struct pbl_context *pbl) in pbl_continuous_initialize() argument
1405 dma_addr = dma_map_single(&dev->pdev->dev, pbl->pbl_buf, in pbl_continuous_initialize()
1406 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE); in pbl_continuous_initialize()
1408 ibdev_err(&dev->ibdev, "Unable to map pbl to DMA address\n"); in pbl_continuous_initialize()
1412 pbl->phys.continuous.dma_addr = dma_addr; in pbl_continuous_initialize()
1414 "pbl continuous - dma_addr = %pad, size[%u]\n", in pbl_continuous_initialize()
1415 &dma_addr, pbl->pbl_buf_size_in_bytes); in pbl_continuous_initialize()
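Continuous mode works because the buffer came from kmalloc and is physically contiguous, so a single dma_map_single() call covers it. The dma_mapping_error() check that follows the call (elided from the matches above, as it does not contain "pbl") is mandatory before the handle may be used. A minimal sketch of the idiom, with the function name being illustrative:

    #include <linux/dma-mapping.h>

    /* Sketch: map a physically contiguous buffer for device reads and
     * validate the handle before publishing it.
     */
    static int map_contiguous_buf(struct device *dmadev, void *buf,
                                  size_t size, dma_addr_t *out)
    {
            dma_addr_t dma_addr;

            dma_addr = dma_map_single(dmadev, buf, size, DMA_TO_DEVICE);
            if (dma_mapping_error(dmadev, dma_addr))
                    return -ENOMEM;

            *out = dma_addr;
            return 0;
    }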
1421 * initialize pbl indirect mode:
1422 * create a chunk list out of the dma addresses of the pages in the
1423 * pbl buffer.
1425 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl) in pbl_indirect_initialize() argument
1427 u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE); in pbl_indirect_initialize()
1432 sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, size_in_pages); in pbl_indirect_initialize()
1442 pbl->phys.indirect.pbl_buf_size_in_pages = size_in_pages; in pbl_indirect_initialize()
1443 pbl->phys.indirect.sgl = sgl; in pbl_indirect_initialize()
1444 pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt; in pbl_indirect_initialize()
1445 err = pbl_chunk_list_create(dev, pbl); in pbl_indirect_initialize()
1453 "pbl indirect - size[%u], chunks[%u]\n", in pbl_indirect_initialize()
1454 pbl->pbl_buf_size_in_bytes, in pbl_indirect_initialize()
1455 pbl->phys.indirect.chunk_list.size); in pbl_indirect_initialize()
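Indirect mode exists because a vmalloc'd pbl buffer is only virtually contiguous: its pages are gathered into a scatterlist, DMA-mapped, and then folded into the chunk list. Read together, the matches above amount to the sequence sketched below; efa_vmalloc_buf_to_sg and pbl_chunk_list_create come from the matches themselves, while the error labels are reconstructions of the elided unwind path:

    /* Sketch: build the indirect mapping for a vmalloc'd pbl buffer.
     * Each failure unwinds exactly the steps that succeeded, in reverse.
     */
    static int pbl_indirect_init_sketch(struct efa_dev *dev, struct pbl_context *pbl)
    {
            u32 npages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
            struct scatterlist *sgl;
            int sg_dma_cnt, err;

            sgl = efa_vmalloc_buf_to_sg(pbl->pbl_buf, npages); /* sg over vmalloc pages */
            if (!sgl)
                    return -ENOMEM;

            sg_dma_cnt = dma_map_sg(&dev->pdev->dev, sgl, npages, DMA_TO_DEVICE);
            if (!sg_dma_cnt) {
                    err = -EINVAL;
                    goto err_map;
            }

            pbl->phys.indirect.pbl_buf_size_in_pages = npages;
            pbl->phys.indirect.sgl = sgl;
            pbl->phys.indirect.sg_dma_cnt = sg_dma_cnt;

            err = pbl_chunk_list_create(dev, pbl); /* pack mapped addresses into chunks */
            if (err)
                    goto err_chunk;

            return 0;

    err_chunk:
            dma_unmap_sg(&dev->pdev->dev, sgl, npages, DMA_TO_DEVICE);
    err_map:
            kfree(sgl);
            return err;
    }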
1466 static void pbl_indirect_terminate(struct efa_dev *dev, struct pbl_context *pbl) in pbl_indirect_terminate() argument
1468 pbl_chunk_list_destroy(dev, pbl); in pbl_indirect_terminate()
1469 dma_unmap_sg(&dev->pdev->dev, pbl->phys.indirect.sgl, in pbl_indirect_terminate()
1470 pbl->phys.indirect.pbl_buf_size_in_pages, DMA_TO_DEVICE); in pbl_indirect_terminate()
1471 kfree(pbl->phys.indirect.sgl); in pbl_indirect_terminate()
1476 struct pbl_context *pbl, in pbl_create() argument
1483 pbl->pbl_buf_size_in_bytes = hp_cnt * EFA_CHUNK_PAYLOAD_PTR_SIZE; in pbl_create()
1484 pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL); in pbl_create()
1485 if (!pbl->pbl_buf) in pbl_create()
1488 if (is_vmalloc_addr(pbl->pbl_buf)) { in pbl_create()
1489 pbl->physically_continuous = 0; in pbl_create()
1490 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt, in pbl_create()
1495 err = pbl_indirect_initialize(dev, pbl); in pbl_create()
1499 pbl->physically_continuous = 1; in pbl_create()
1500 err = umem_to_page_list(dev, umem, pbl->pbl_buf, hp_cnt, in pbl_create()
1505 err = pbl_continuous_initialize(dev, pbl); in pbl_create()
1512 hp_cnt, pbl->physically_continuous); in pbl_create()
1517 kvfree(pbl->pbl_buf); in pbl_create()
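The mode decision falls out of kvzalloc() itself: it returns kmalloc memory when it can and falls back to vmalloc otherwise, and is_vmalloc_addr() distinguishes the two. A condensed sketch of the branch; the umem_to_page_list() fill that sits between the flag assignment and each initialize call is omitted here for brevity:

    /* Sketch: pick continuous vs. indirect mode from the allocation type. */
    pbl->pbl_buf = kvzalloc(pbl->pbl_buf_size_in_bytes, GFP_KERNEL);
    if (!pbl->pbl_buf)
            return -ENOMEM;

    if (is_vmalloc_addr(pbl->pbl_buf)) {
            pbl->physically_continuous = 0;          /* vmalloc: pages scattered */
            err = pbl_indirect_initialize(dev, pbl); /* sg map + chunk list */
    } else {
            pbl->physically_continuous = 1;          /* kmalloc: single mapping */
            err = pbl_continuous_initialize(dev, pbl);
    }
    if (err)
            kvfree(pbl->pbl_buf);                    /* kvfree handles both kinds */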
1521 static void pbl_destroy(struct efa_dev *dev, struct pbl_context *pbl) in pbl_destroy() argument
1523 if (pbl->physically_continuous) in pbl_destroy()
1524 dma_unmap_single(&dev->pdev->dev, pbl->phys.continuous.dma_addr, in pbl_destroy()
1525 pbl->pbl_buf_size_in_bytes, DMA_TO_DEVICE); in pbl_destroy()
1527 pbl_indirect_terminate(dev, pbl); in pbl_destroy()
1529 kvfree(pbl->pbl_buf); in pbl_destroy()
1538 err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array, in efa_create_inline_pbl()
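When the MR's page count fits in the admin command's inline array, the page addresses are written straight into the command and no separate pbl buffer, DMA mapping, or chunk list is needed. A minimal sketch of that path; the inline_pbl flag and the page_shift parameter are assumptions about the elided lines, so treat the exact signature as illustrative:

    /* Sketch: inline path - the page DMA addresses live inside the
     * admin command itself, so no pbl context is created at all.
     */
    static int efa_create_inline_pbl_sketch(struct efa_dev *dev, struct efa_mr *mr,
                                            struct efa_com_reg_mr_params *params)
    {
            int err;

            err = umem_to_page_list(dev, mr->umem, params->pbl.inline_pbl_array,
                                    params->page_num, params->page_shift);
            if (err)
                    return err;

            params->inline_pbl = 1; /* addresses carried in the command */
            return 0;
    }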
1550 struct pbl_context *pbl, in efa_create_pbl() argument
1556 err = pbl_create(dev, pbl, mr->umem, params->page_num, in efa_create_pbl()
1559 ibdev_dbg(&dev->ibdev, "Failed to create pbl[%d]\n", err); in efa_create_pbl()
1564 params->indirect = !pbl->physically_continuous; in efa_create_pbl()
1565 if (pbl->physically_continuous) { in efa_create_pbl()
1566 params->pbl.pbl.length = pbl->pbl_buf_size_in_bytes; in efa_create_pbl()
1568 efa_com_set_dma_addr(pbl->phys.continuous.dma_addr, in efa_create_pbl()
1569 &params->pbl.pbl.address.mem_addr_high, in efa_create_pbl()
1570 &params->pbl.pbl.address.mem_addr_low); in efa_create_pbl()
1572 params->pbl.pbl.length = in efa_create_pbl()
1573 pbl->phys.indirect.chunk_list.chunks[0].length; in efa_create_pbl()
1575 efa_com_set_dma_addr(pbl->phys.indirect.chunk_list.chunks[0].dma_addr, in efa_create_pbl()
1576 &params->pbl.pbl.address.mem_addr_high, in efa_create_pbl()
1577 &params->pbl.pbl.address.mem_addr_low); in efa_create_pbl()
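Either way, the device is handed one base address and one length: the whole buffer's mapping in continuous mode, or the first chunk's in indirect mode (the device then walks the per-chunk links itself). The ABI splits each 64-bit DMA address into two 32-bit words, which is presumably all efa_com_set_dma_addr does; a sketch of that split, not the driver's verbatim code:

    #include <linux/kernel.h>

    /* Sketch: split a 64-bit DMA address into the ABI's high/low words. */
    static void set_dma_addr_sketch(u64 addr, u32 *addr_high, u32 *addr_low)
    {
            *addr_low  = lower_32_bits(addr);
            *addr_high = upper_32_bits(addr);
    }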
1623 struct pbl_context pbl; in efa_register_mr() local
1649 inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array); in efa_register_mr()
1659 err = efa_create_pbl(dev, &pbl, mr, ¶ms); in efa_register_mr()
1664 pbl_destroy(dev, &pbl); in efa_register_mr()
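Registration itself picks the inline path when the page count fits in the command's inline array, otherwise builds a full pbl and destroys it as soon as the registration command has been issued, since the device has consumed the list by then. A condensed sketch of that decision; names not present in the matches above (efa_com_register_mr, result, err_unmap) are reconstructions of the elided lines:

    /* Sketch: inline vs. pbl decision during MR registration. */
    inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
    if (params.page_num <= inline_size) {
            err = efa_create_inline_pbl(dev, mr, &params);
            if (err)
                    goto err_unmap;

            err = efa_com_register_mr(&dev->edev, &params, &result);
            if (err)
                    goto err_unmap;
    } else {
            err = efa_create_pbl(dev, &pbl, mr, &params);
            if (err)
                    goto err_unmap;

            err = efa_com_register_mr(&dev->edev, &params, &result);
            pbl_destroy(dev, &pbl); /* not needed once the command completed */
            if (err)
                    goto err_unmap;
    }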