Lines Matching refs:pdsc_dev

40 struct device *pdsc_dev = &pci_physfn(pdev)->dev; in pds_vfio_print_guest_region_info() local
53 dma_map_single(pdsc_dev, region_info, len, DMA_FROM_DEVICE); in pds_vfio_print_guest_region_info()
54 if (dma_mapping_error(pdsc_dev, regions_dma)) in pds_vfio_print_guest_region_info()
59 dma_unmap_single(pdsc_dev, regions_dma, len, DMA_FROM_DEVICE); in pds_vfio_print_guest_region_info()
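
All of these hits share one idea: pdsc_dev is the struct device of the parent PCI physical function, obtained via pci_physfn(pdev)->dev, and it is the device every DMA mapping is made against, because the PF (not the VF) services the commands that perform the transfers. A minimal sketch of the first pattern above (map a buffer the device writes into, check for mapping failure, unmap after the command) follows; everything other than the DMA API calls and pci_physfn() is an illustrative placeholder, not the driver's real code.

/*
 * Sketch of the pds_vfio_print_guest_region_info() pattern, assuming a
 * VF driver whose commands are serviced by the parent PF.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>

static void example_read_region_info(struct pci_dev *pdev, size_t len)
{
	/* Map against the PF: the PF's device performs the DMA. */
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	dma_addr_t regions_dma;
	void *region_info;

	region_info = kzalloc(len, GFP_KERNEL);
	if (!region_info)
		return;

	/* The device fills the buffer, so map it DMA_FROM_DEVICE. */
	regions_dma = dma_map_single(pdsc_dev, region_info, len,
				     DMA_FROM_DEVICE);
	if (dma_mapping_error(pdsc_dev, regions_dma))
		goto out_free;

	/* ... issue the status/query command with regions_dma ... */

	dma_unmap_single(pdsc_dev, regions_dma, len, DMA_FROM_DEVICE);
out_free:
	kfree(region_info);
}
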
116 struct device *pdsc_dev = &pci_physfn(pdev)->dev; in __pds_vfio_dirty_free_sgl() local
118 dma_unmap_single(pdsc_dev, region->sgl_addr, in __pds_vfio_dirty_free_sgl()
148 struct device *pdsc_dev = &pci_physfn(pdev)->dev; in pds_vfio_dirty_alloc_sgl() local
161 sgl_addr = dma_map_single(pdsc_dev, sgl, sgl_size, DMA_BIDIRECTIONAL); in pds_vfio_dirty_alloc_sgl()
162 if (dma_mapping_error(pdsc_dev, sgl_addr)) { in pds_vfio_dirty_alloc_sgl()
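
__pds_vfio_dirty_free_sgl() and pds_vfio_dirty_alloc_sgl() form an alloc/free pair: the scatter-gather list is mapped DMA_BIDIRECTIONAL (the host writes entries the device reads, and status can flow back the same way), and the free path must unmap with the same size and direction. A hedged sketch, with example_sg_elem standing in for the driver's real hardware SG element layout:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* Placeholder SG element; the real driver defines its own HW layout. */
struct example_sg_elem {
	__le64 addr;
	__le32 len;
	__le16 page_offset;
	__le16 rsvd;
};

static int example_alloc_sgl(struct pci_dev *pdev, u32 max_sge,
			     struct example_sg_elem **sgl_out,
			     dma_addr_t *sgl_addr_out)
{
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	size_t sgl_size = max_sge * sizeof(struct example_sg_elem);
	struct example_sg_elem *sgl;
	dma_addr_t sgl_addr;

	sgl = kzalloc(sgl_size, GFP_KERNEL);
	if (!sgl)
		return -ENOMEM;

	/* Host writes SG entries, device reads them (and may write
	 * status back): map bidirectionally. */
	sgl_addr = dma_map_single(pdsc_dev, sgl, sgl_size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pdsc_dev, sgl_addr)) {
		kfree(sgl);
		return -EIO;
	}

	*sgl_out = sgl;
	*sgl_addr_out = sgl_addr;
	return 0;
}

static void example_free_sgl(struct pci_dev *pdev,
			     struct example_sg_elem *sgl,
			     dma_addr_t sgl_addr, u32 max_sge)
{
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;

	/* Mirror of the alloc path: same size, same direction. */
	dma_unmap_single(pdsc_dev, sgl_addr,
			 max_sge * sizeof(struct example_sg_elem),
			 DMA_BIDIRECTIONAL);
	kfree(sgl);
}
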
249 struct device *pdsc_dev = &pci_physfn(pdev)->dev; in pds_vfio_dirty_enable() local
317 regions_dma = dma_map_single(pdsc_dev, (void *)region_info, len, in pds_vfio_dirty_enable()
319 if (dma_mapping_error(pdsc_dev, regions_dma)) { in pds_vfio_dirty_enable()
325 dma_unmap_single(pdsc_dev, regions_dma, len, DMA_BIDIRECTIONAL); in pds_vfio_dirty_enable()
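
pds_vfio_dirty_enable() uses the same PF-device mapping, but transiently: the region parameters are mapped bidirectionally just long enough to issue the enable command, then unmapped. A short sketch under those assumptions; example_dirty_enable_cmd() is a hypothetical stand-in for the driver's admin-queue call, not a real API.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Hypothetical stand-in for the driver's admin-queue command. */
static int example_dirty_enable_cmd(struct pci_dev *pdev,
				    dma_addr_t regions_dma)
{
	return 0;
}

static int example_dirty_enable(struct pci_dev *pdev, void *region_info,
				size_t len)
{
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	dma_addr_t regions_dma;
	int err;

	/* Host fills the region parameters and the device may write
	 * results back, so the mapping is bidirectional; it only needs
	 * to live for the duration of the command. */
	regions_dma = dma_map_single(pdsc_dev, region_info, len,
				     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pdsc_dev, regions_dma))
		return -EIO;

	err = example_dirty_enable_cmd(pdev, regions_dma);
	dma_unmap_single(pdsc_dev, regions_dma, len, DMA_BIDIRECTIONAL);

	return err;
}
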
376 struct device *pdsc_dev = &pci_physfn(pdev)->dev; in pds_vfio_dirty_seq_ack() local
419 err = dma_map_sgtable(pdsc_dev, &sg_table, dma_dir, 0); in pds_vfio_dirty_seq_ack()
433 dma_sync_single_for_device(pdsc_dev, region->sgl_addr, size, dma_dir); in pds_vfio_dirty_seq_ack()
441 dma_sync_single_for_cpu(pdsc_dev, region->sgl_addr, size, dma_dir); in pds_vfio_dirty_seq_ack()
443 dma_unmap_sgtable(pdsc_dev, &sg_table, dma_dir, 0); in pds_vfio_dirty_seq_ack()
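
pds_vfio_dirty_seq_ack() combines both mapping styles: the bitmap pages are gathered into an sg_table and mapped as a whole with dma_map_sgtable(), while the long-lived SGL at region->sgl_addr is only synced for device/CPU ownership handoff around the command, never remapped. A hedged sketch, assuming the direction depends on whether the host is reading the sequence bitmap or writing the ack bitmap; all example_* names and the page-gathering details are placeholders.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>

static int example_seq_ack(struct pci_dev *pdev, struct page **pages,
			   unsigned int npages, dma_addr_t sgl_addr,
			   size_t sgl_size, bool read_seq)
{
	struct device *pdsc_dev = &pci_physfn(pdev)->dev;
	enum dma_data_direction dma_dir =
		read_seq ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	struct sg_table sg_table;
	int err;

	/* Gather the bitmap pages into an sg_table and map the whole
	 * table against the PF in one call. */
	err = sg_alloc_table_from_pages(&sg_table, pages, npages, 0,
					(unsigned long)npages << PAGE_SHIFT,
					GFP_KERNEL);
	if (err)
		return err;

	err = dma_map_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
	if (err)
		goto out_free_table;

	/* The SGL stays mapped for the life of the region; syncs hand
	 * ownership to the device before the command and back after. */
	/* ... fill the SGL entries from sg_table here ... */
	dma_sync_single_for_device(pdsc_dev, sgl_addr, sgl_size, dma_dir);

	/* ... issue the seq/ack command here ... */

	dma_sync_single_for_cpu(pdsc_dev, sgl_addr, sgl_size, dma_dir);
	dma_unmap_sgtable(pdsc_dev, &sg_table, dma_dir, 0);
out_free_table:
	sg_free_table(&sg_table);
	return err;
}
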