Lines Matching +full:sg +full:- +full:micro

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2023 Advanced Micro Devices, Inc. */
21 return pds_vfio->dirty.is_enabled; in pds_vfio_dirty_is_enabled()
26 pds_vfio->dirty.is_enabled = true; in pds_vfio_dirty_set_enabled()
31 pds_vfio->dirty.is_enabled = false; in pds_vfio_dirty_set_disabled()
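
These fragments appear to come from the dirty-page tracking code of the pds-vfio-pci driver. Lines 21-31 suggest the tracking state is a single boolean kept on the per-VF dirty struct; a minimal sketch of the three accessors, with the containing struct name (pds_vfio_pci_device) assumed rather than taken from the listing:

bool pds_vfio_dirty_is_enabled(struct pds_vfio_pci_device *pds_vfio)
{
        return pds_vfio->dirty.is_enabled;
}

void pds_vfio_dirty_set_enabled(struct pds_vfio_pci_device *pds_vfio)
{
        pds_vfio->dirty.is_enabled = true;
}

void pds_vfio_dirty_set_disabled(struct pds_vfio_pci_device *pds_vfio)
{
        pds_vfio->dirty.is_enabled = false;
}
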
39 struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev; in pds_vfio_print_guest_region_info()
40 struct device *pdsc_dev = &pci_physfn(pdev)->dev; in pds_vfio_print_guest_region_info()
64 dev_dbg(&pdev->dev, in pds_vfio_print_guest_region_info()
81 return -ENOMEM; in pds_vfio_dirty_alloc_bitmaps()
86 return -ENOMEM; in pds_vfio_dirty_alloc_bitmaps()
89 region->host_seq = host_seq_bmp; in pds_vfio_dirty_alloc_bitmaps()
90 region->host_ack = host_ack_bmp; in pds_vfio_dirty_alloc_bitmaps()
91 region->bmp_bytes = bytes; in pds_vfio_dirty_alloc_bitmaps()
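
Lines 81-91 show only the error returns and the final assignments of the bitmap allocator. Stitched together, with the signature assumed and vzalloc() assumed because the free path further down uses vfree(), it likely reads:

static int pds_vfio_dirty_alloc_bitmaps(struct pds_vfio_region *region,
                                         unsigned long bytes)
{
        unsigned long *host_seq_bmp, *host_ack_bmp;

        host_seq_bmp = vzalloc(bytes);
        if (!host_seq_bmp)
                return -ENOMEM;

        host_ack_bmp = vzalloc(bytes);
        if (!host_ack_bmp) {
                vfree(host_seq_bmp);
                return -ENOMEM;
        }

        region->host_seq = host_seq_bmp;
        region->host_ack = host_ack_bmp;
        region->bmp_bytes = bytes;

        return 0;
}
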
98 if (!dirty->regions) in pds_vfio_dirty_free_bitmaps()
101 for (int i = 0; i < dirty->num_regions; i++) { in pds_vfio_dirty_free_bitmaps()
102 struct pds_vfio_region *region = &dirty->regions[i]; in pds_vfio_dirty_free_bitmaps()
104 vfree(region->host_seq); in pds_vfio_dirty_free_bitmaps()
105 vfree(region->host_ack); in pds_vfio_dirty_free_bitmaps()
106 region->host_seq = NULL; in pds_vfio_dirty_free_bitmaps()
107 region->host_ack = NULL; in pds_vfio_dirty_free_bitmaps()
108 region->bmp_bytes = 0; in pds_vfio_dirty_free_bitmaps()
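
The free path at lines 98-108 is almost fully visible in the listing; read as one unit (signature implied by the call at line 360), it amounts to:

static void pds_vfio_dirty_free_bitmaps(struct pds_vfio_dirty *dirty)
{
        if (!dirty->regions)
                return;

        for (int i = 0; i < dirty->num_regions; i++) {
                struct pds_vfio_region *region = &dirty->regions[i];

                /* host_seq/host_ack were vmalloc'd, so release with vfree() */
                vfree(region->host_seq);
                vfree(region->host_ack);
                region->host_seq = NULL;
                region->host_ack = NULL;
                region->bmp_bytes = 0;
        }
}
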
115 struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev; in __pds_vfio_dirty_free_sgl()
116 struct device *pdsc_dev = &pci_physfn(pdev)->dev; in __pds_vfio_dirty_free_sgl()
118 dma_unmap_single(pdsc_dev, region->sgl_addr, in __pds_vfio_dirty_free_sgl()
119 region->num_sge * sizeof(struct pds_lm_sg_elem), in __pds_vfio_dirty_free_sgl()
121 kfree(region->sgl); in __pds_vfio_dirty_free_sgl()
123 region->num_sge = 0; in __pds_vfio_dirty_free_sgl()
124 region->sgl = NULL; in __pds_vfio_dirty_free_sgl()
125 region->sgl_addr = 0; in __pds_vfio_dirty_free_sgl()
130 struct pds_vfio_dirty *dirty = &pds_vfio->dirty; in pds_vfio_dirty_free_sgl()
132 if (!dirty->regions) in pds_vfio_dirty_free_sgl()
135 for (int i = 0; i < dirty->num_regions; i++) { in pds_vfio_dirty_free_sgl()
136 struct pds_vfio_region *region = &dirty->regions[i]; in pds_vfio_dirty_free_sgl()
138 if (region->sgl) in pds_vfio_dirty_free_sgl()
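
Lines 115-138 split the scatter-gather list teardown into a per-region helper and a wrapper that walks every region. A sketch of the pair; the DMA direction is not visible in the listing, so DMA_BIDIRECTIONAL is assumed:

static void __pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio,
                                      struct pds_vfio_region *region)
{
        struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
        struct device *pdsc_dev = &pci_physfn(pdev)->dev;

        /* unmap from the PF (pdsc) device the mapping was made against */
        dma_unmap_single(pdsc_dev, region->sgl_addr,
                         region->num_sge * sizeof(struct pds_lm_sg_elem),
                         DMA_BIDIRECTIONAL);
        kfree(region->sgl);

        region->num_sge = 0;
        region->sgl = NULL;
        region->sgl_addr = 0;
}

static void pds_vfio_dirty_free_sgl(struct pds_vfio_pci_device *pds_vfio)
{
        struct pds_vfio_dirty *dirty = &pds_vfio->dirty;

        if (!dirty->regions)
                return;

        for (int i = 0; i < dirty->num_regions; i++) {
                struct pds_vfio_region *region = &dirty->regions[i];

                if (region->sgl)
                        __pds_vfio_dirty_free_sgl(pds_vfio, region);
        }
}
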
147 struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev; in pds_vfio_dirty_alloc_sgl()
148 struct device *pdsc_dev = &pci_physfn(pdev)->dev; in pds_vfio_dirty_alloc_sgl()
159 return -ENOMEM; in pds_vfio_dirty_alloc_sgl()
164 return -EIO; in pds_vfio_dirty_alloc_sgl()
167 region->sgl = sgl; in pds_vfio_dirty_alloc_sgl()
168 region->num_sge = max_sge; in pds_vfio_dirty_alloc_sgl()
169 region->sgl_addr = sgl_addr; in pds_vfio_dirty_alloc_sgl()
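
The allocation side at lines 147-169 shows only the error returns and the final assignments. A plausible reconstruction: size an array of struct pds_lm_sg_elem for the worst case, DMA-map it toward the PF device, and record it on the region. The max_sge sizing and the kzalloc()/dma_map_single() calls are assumptions, not in the listing:

static int pds_vfio_dirty_alloc_sgl(struct pds_vfio_pci_device *pds_vfio,
                                    struct pds_vfio_region *region,
                                    u32 page_count)
{
        struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev;
        struct device *pdsc_dev = &pci_physfn(pdev)->dev;
        struct pds_lm_sg_elem *sgl;
        dma_addr_t sgl_addr;
        size_t sgl_size;
        u32 max_sge;

        /* assumption: one SG element per page of bitmap (1 bit per tracked page) */
        max_sge = DIV_ROUND_UP(page_count, PAGE_SIZE * 8);
        sgl_size = max_sge * sizeof(struct pds_lm_sg_elem);

        sgl = kzalloc(sgl_size, GFP_KERNEL);
        if (!sgl)
                return -ENOMEM;                 /* matches the return at line 159 */

        sgl_addr = dma_map_single(pdsc_dev, sgl, sgl_size, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(pdsc_dev, sgl_addr)) {
                kfree(sgl);
                return -EIO;                    /* matches the return at line 164 */
        }

        region->sgl = sgl;
        region->num_sge = max_sge;
        region->sgl_addr = sgl_addr;

        return 0;
}
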
176 vfree(dirty->regions); in pds_vfio_dirty_free_regions()
177 dirty->regions = NULL; in pds_vfio_dirty_free_regions()
178 dirty->num_regions = 0; in pds_vfio_dirty_free_regions()
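
Lines 176-178 give the whole of the region-array teardown; with an assumed signature it is simply:

static void pds_vfio_dirty_free_regions(struct pds_vfio_dirty *dirty)
{
        vfree(dirty->regions);
        dirty->regions = NULL;
        dirty->num_regions = 0;
}
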
185 struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev; in pds_vfio_dirty_alloc_regions()
186 struct pds_vfio_dirty *dirty = &pds_vfio->dirty; in pds_vfio_dirty_alloc_regions()
190 dirty->regions = vcalloc(num_regions, sizeof(struct pds_vfio_region)); in pds_vfio_dirty_alloc_regions()
191 if (!dirty->regions) in pds_vfio_dirty_alloc_regions()
192 return -ENOMEM; in pds_vfio_dirty_alloc_regions()
193 dirty->num_regions = num_regions; in pds_vfio_dirty_alloc_regions()
197 struct pds_vfio_region *region = &dirty->regions[i]; in pds_vfio_dirty_alloc_regions()
202 page_count = le32_to_cpu(ri->page_count); in pds_vfio_dirty_alloc_regions()
203 region_start = le64_to_cpu(ri->dma_base); in pds_vfio_dirty_alloc_regions()
209 dev_err(&pdev->dev, "Failed to alloc dirty bitmaps: %pe\n", in pds_vfio_dirty_alloc_regions()
216 dev_err(&pdev->dev, "Failed to alloc dirty sg lists: %pe\n", in pds_vfio_dirty_alloc_regions()
221 region->size = region_size; in pds_vfio_dirty_alloc_regions()
222 region->start = region_start; in pds_vfio_dirty_alloc_regions()
223 region->page_size = region_page_size; in pds_vfio_dirty_alloc_regions()
224 region->dev_bmp_offset_start_byte = dev_bmp_offset_byte; in pds_vfio_dirty_alloc_regions()
228 dev_err(&pdev->dev, "Device bitmap offset is mis-aligned\n"); in pds_vfio_dirty_alloc_regions()
229 err = -EINVAL; in pds_vfio_dirty_alloc_regions()
248 struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev; in pds_vfio_dirty_enable()
249 struct device *pdsc_dev = &pci_physfn(pdev)->dev; in pds_vfio_dirty_enable()
259 dev_dbg(&pdev->dev, "vf%u: Start dirty page tracking\n", in pds_vfio_dirty_enable()
260 pds_vfio->vf_id); in pds_vfio_dirty_enable()
263 return -EINVAL; in pds_vfio_dirty_enable()
269 dev_err(&pdev->dev, "Failed to get dirty status, err %pe\n", in pds_vfio_dirty_enable()
273 dev_err(&pdev->dev, in pds_vfio_dirty_enable()
276 return -EEXIST; in pds_vfio_dirty_enable()
278 dev_err(&pdev->dev, in pds_vfio_dirty_enable()
281 return -EOPNOTSUPP; in pds_vfio_dirty_enable()
291 return -ENOMEM; in pds_vfio_dirty_enable()
296 return -EINVAL; in pds_vfio_dirty_enable()
299 u64 region_size = node->last - node->start + 1; in pds_vfio_dirty_enable()
300 u64 region_start = node->start; in pds_vfio_dirty_enable()
305 ri->dma_base = cpu_to_le64(region_start); in pds_vfio_dirty_enable()
306 ri->page_count = cpu_to_le32(page_count); in pds_vfio_dirty_enable()
307 ri->page_size_log2 = ilog2(region_page_size); in pds_vfio_dirty_enable()
309 dev_dbg(&pdev->dev, in pds_vfio_dirty_enable()
311 i, region_start, node->last, region_size, page_count, in pds_vfio_dirty_enable()
320 err = -ENOMEM; in pds_vfio_dirty_enable()
332 dev_err(&pdev->dev, in pds_vfio_dirty_enable()
360 pds_vfio_dirty_free_bitmaps(&pds_vfio->dirty); in pds_vfio_dirty_disable()
361 pds_vfio_dirty_free_regions(&pds_vfio->dirty); in pds_vfio_dirty_disable()
375 struct pci_dev *pdev = pds_vfio->vfio_coredev.pdev; in pds_vfio_dirty_seq_ack()
376 struct device *pdsc_dev = &pci_physfn(pdev)->dev; in pds_vfio_dirty_seq_ack()
379 struct scatterlist *sg; in pds_vfio_dirty_seq_ack() local
390 bmp -= page_offset; in pds_vfio_dirty_seq_ack()
400 return -ENOMEM; in pds_vfio_dirty_seq_ack()
406 err = -EFAULT; in pds_vfio_dirty_seq_ack()
423 for_each_sgtable_dma_sg(&sg_table, sg, i) { in pds_vfio_dirty_seq_ack()
424 struct pds_lm_sg_elem *sg_elem = &region->sgl[i]; in pds_vfio_dirty_seq_ack()
426 sg_elem->addr = cpu_to_le64(sg_dma_address(sg)); in pds_vfio_dirty_seq_ack()
427 sg_elem->len = cpu_to_le32(sg_dma_len(sg)); in pds_vfio_dirty_seq_ack()
432 offset += region->dev_bmp_offset_start_byte; in pds_vfio_dirty_seq_ack()
433 dma_sync_single_for_device(pdsc_dev, region->sgl_addr, size, dma_dir); in pds_vfio_dirty_seq_ack()
434 err = pds_vfio_dirty_seq_ack_cmd(pds_vfio, region->sgl_addr, num_sge, in pds_vfio_dirty_seq_ack()
437 dev_err(&pdev->dev, in pds_vfio_dirty_seq_ack()
440 num_sge, region->sgl_addr, ERR_PTR(err)); in pds_vfio_dirty_seq_ack()
441 dma_sync_single_for_cpu(pdsc_dev, region->sgl_addr, size, dma_dir); in pds_vfio_dirty_seq_ack()
457 return pds_vfio_dirty_seq_ack(pds_vfio, region, region->host_ack, in pds_vfio_dirty_write_ack()
465 return pds_vfio_dirty_seq_ack(pds_vfio, region, region->host_seq, in pds_vfio_dirty_read_seq()
474 u64 page_size = region->page_size; in pds_vfio_dirty_process_bitmaps()
475 u64 region_start = region->start; in pds_vfio_dirty_process_bitmaps()
481 seq = (__le64 *)((u64)region->host_seq + bmp_offset); in pds_vfio_dirty_process_bitmaps()
482 ack = (__le64 *)((u64)region->host_ack + bmp_offset); in pds_vfio_dirty_process_bitmaps()
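
Lines 474-482 show only the setup of the bitmap processing: pointers into the host sequence and acknowledge bitmaps at bmp_offset. The extraction loop itself is not in the listing. A plausible sketch, assuming a page is reported dirty when its bit differs between the two bitmaps (seq XOR ack) and that hits are forwarded through the VFIO iova_bitmap helper:

static void pds_vfio_dirty_process_bitmaps(struct pds_vfio_pci_device *pds_vfio,
                                           struct pds_vfio_region *region,
                                           struct iova_bitmap *dirty_bitmap,
                                           u32 bmp_offset, u32 len_bytes)
{
        u64 page_size = region->page_size;
        u64 region_start = region->start;
        u32 bmp_offset_bit = bmp_offset * 8;
        int dword_count = len_bytes / sizeof(u64);
        __le64 *seq, *ack;

        seq = (__le64 *)((u64)region->host_seq + bmp_offset);
        ack = (__le64 *)((u64)region->host_ack + bmp_offset);

        for (int i = 0; i < dword_count; i++) {
                /* bits that changed since the last acknowledge are dirty */
                u64 changed = le64_to_cpu(seq[i]) ^ le64_to_cpu(ack[i]);

                /* remember what we have seen for the next write_ack pass */
                ack[i] = seq[i];

                for (u8 bit_i = 0; bit_i < BITS_PER_TYPE(u64); bit_i++) {
                        if (changed & BIT_ULL(bit_i)) {
                                u64 abs_bit = bmp_offset_bit +
                                              i * BITS_PER_TYPE(u64) + bit_i;

                                iova_bitmap_set(dirty_bitmap,
                                                region_start + abs_bit * page_size,
                                                page_size);
                        }
                }
        }
}
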
508 struct pds_vfio_dirty *dirty = &pds_vfio->dirty; in pds_vfio_get_region()
510 for (int i = 0; i < dirty->num_regions; i++) { in pds_vfio_get_region()
511 struct pds_vfio_region *region = &dirty->regions[i]; in pds_vfio_get_region()
513 if (iova >= region->start && in pds_vfio_get_region()
514 iova < (region->start + region->size)) in pds_vfio_get_region()
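
The range check at lines 508-514 implies a simple linear lookup from a guest IOVA to the region that tracks it, roughly:

static struct pds_vfio_region *
pds_vfio_get_region(struct pds_vfio_pci_device *pds_vfio, unsigned long iova)
{
        struct pds_vfio_dirty *dirty = &pds_vfio->dirty;

        for (int i = 0; i < dirty->num_regions; i++) {
                struct pds_vfio_region *region = &dirty->regions[i];

                if (iova >= region->start &&
                    iova < (region->start + region->size))
                        return region;
        }

        /* no tracked region covers this IOVA */
        return NULL;
}
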
525 struct device *dev = &pds_vfio->vfio_coredev.pdev->dev; in pds_vfio_dirty_sync()
531 dev_dbg(dev, "vf%u: Get dirty page bitmap\n", pds_vfio->vf_id); in pds_vfio_dirty_sync()
535 pds_vfio->vf_id); in pds_vfio_dirty_sync()
536 return -EINVAL; in pds_vfio_dirty_sync()
542 pds_vfio->vf_id, iova, length); in pds_vfio_dirty_sync()
543 return -EINVAL; in pds_vfio_dirty_sync()
546 pages = DIV_ROUND_UP(length, region->page_size); in pds_vfio_dirty_sync()
552 pds_vfio->vf_id, iova, length, region->page_size, in pds_vfio_dirty_sync()
555 if (!length || ((iova - region->start + length) > region->size)) { in pds_vfio_dirty_sync()
558 return -EINVAL; in pds_vfio_dirty_sync()
562 bmp_bytes = ALIGN(DIV_ROUND_UP(length / region->page_size, in pds_vfio_dirty_sync()
568 return -EINVAL; in pds_vfio_dirty_sync()
571 if (bmp_bytes > region->bmp_bytes) { in pds_vfio_dirty_sync()
574 bmp_bytes, region->bmp_bytes); in pds_vfio_dirty_sync()
575 return -EINVAL; in pds_vfio_dirty_sync()
578 bmp_offset = DIV_ROUND_UP((iova - region->start) / in pds_vfio_dirty_sync()
579 region->page_size, sizeof(u64)); in pds_vfio_dirty_sync()
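
Lines 562-579 size and position the request within the host bitmaps: one bit per region page, eight pages per byte, rounded to u64 granularity (the truncated second argument of the ALIGN/DIV_ROUND_UP pair at line 562 is assumed to be sizeof(u64), matching line 579). As an illustrative example with made-up numbers: a 4 KiB region page size, an iova 16 MiB past region->start, and an 8 MiB length cover 2048 pages, so bmp_bytes = ALIGN(DIV_ROUND_UP(2048, 8), 8) = 256 bytes, and the range starts 4096 pages in, so bmp_offset = DIV_ROUND_UP(4096, 8) = 512 bytes into host_seq/host_ack.
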
609 mutex_lock(&pds_vfio->state_mutex); in pds_vfio_dma_logging_report()
611 mutex_unlock(&pds_vfio->state_mutex); in pds_vfio_dma_logging_report()
625 mutex_lock(&pds_vfio->state_mutex); in pds_vfio_dma_logging_start()
628 mutex_unlock(&pds_vfio->state_mutex); in pds_vfio_dma_logging_start()
639 mutex_lock(&pds_vfio->state_mutex); in pds_vfio_dma_logging_stop()
641 mutex_unlock(&pds_vfio->state_mutex); in pds_vfio_dma_logging_stop()
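
Lines 609-641 show that each VFIO dma-logging callback is a thin wrapper that takes state_mutex around the corresponding dirty-tracking helper. A sketch of the report callback; the call between the lock and unlock (line 610) is not in the listing, so the use of pds_vfio_dirty_sync() and the container_of() conversion are assumptions:

int pds_vfio_dma_logging_report(struct vfio_device *vdev, unsigned long iova,
                                unsigned long length, struct iova_bitmap *dirty)
{
        struct pds_vfio_pci_device *pds_vfio =
                container_of(vdev, struct pds_vfio_pci_device,
                             vfio_coredev.vdev);
        int err;

        mutex_lock(&pds_vfio->state_mutex);
        /* assumed: hand the request off to the dirty sync path shown above */
        err = pds_vfio_dirty_sync(pds_vfio, dirty, iova, length);
        mutex_unlock(&pds_vfio->state_mutex);

        return err;
}
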