Lines matching full:pd in drivers/infiniband/hw/usnic/usnic_uiom.c

in usnic_uiom_unmap_sorted_intervals():
  191  struct usnic_uiom_pd *pd)    (argument)
  202  iommu_unmap(pd->domain, va, PAGE_SIZE);
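
Both matches sit in the interval-teardown helper: pd is passed in purely so the helper can reach the IOMMU domain it unmaps from, one page at a time. A plausible sketch of the surrounding loop, reconstructed from the matches above; the interval-node fields (start, last, link) and the PAGE_SHIFT arithmetic are assumptions not shown in the listing:

	static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						      struct usnic_uiom_pd *pd)
	{
		struct usnic_uiom_interval_node *interval, *tmp;
		unsigned long va, size;

		list_for_each_entry_safe(interval, tmp, intervals, link) {
			/* assumed: intervals are stored in page-frame units */
			va = interval->start << PAGE_SHIFT;
			size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
			while (size > 0) {
				/* page-at-a-time unmap, matching line 202 */
				iommu_unmap(pd->domain, va, PAGE_SIZE);
				va += PAGE_SIZE;
				size -= PAGE_SIZE;
			}
		}
	}
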
in __usnic_uiom_reg_release():
  209  static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,    (argument)
  223  spin_lock(&pd->lock);
  224  usnic_uiom_remove_interval(&pd->root, vpn_start,
  226  usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);
  236  spin_unlock(&pd->lock);
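
Lines 223-236 show the locking discipline around release: the interval tree rooted at pd->root is only modified under pd->lock, and the removed intervals are unmapped before the lock is dropped. A condensed sketch, assuming the vpn_start/vpn_last page-frame bounds are derived from the registration's va, offset, and length (none of which appear in the matches):

	static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					     struct usnic_uiom_reg *uiomr,
					     int dirty)
	{
		unsigned long vpn_start, vpn_last;
		LIST_HEAD(rm_intervals);

		/* assumed: page-frame range covered by this registration */
		vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
		vpn_last = vpn_start +
			(PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT) - 1;

		spin_lock(&pd->lock);
		usnic_uiom_remove_interval(&pd->root, vpn_start,
						vpn_last, &rm_intervals);
		usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);
		/* ... free the removed interval nodes, release the pinned pages ... */
		spin_unlock(&pd->lock);
	}
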
in usnic_uiom_map_sorted_intervals():
  250  struct usnic_uiom_pd *pd = uiomr->pd;    (local)
  279  err = iommu_map(pd->domain, va_start, pa_start,
  296  err = iommu_map(pd->domain, va_start, pa_start,
  325  usnic_uiom_unmap_sorted_intervals(intervals, pd);
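
The two iommu_map() call sites at 279 and 296 are the same pattern at two points: a physically contiguous run is flushed to the IOMMU either when contiguity breaks mid-chunk or when a chunk ends. Line 325 is the unwind: if any map fails, everything mapped so far is torn down with the unmap helper. A fragment sketching that shape (the run-tracking variables and the iommu_map() tail arguments are assumptions; recent kernels take a size, prot flags, and a GFP argument):

	struct usnic_uiom_pd *pd = uiomr->pd;	/* line 250 */
	...
	/* flush one contiguous run (the pattern at lines 279 and 296) */
	err = iommu_map(pd->domain, va_start, pa_start,
			size, flags, GFP_ATOMIC);
	if (err)
		goto err_out;
	...
	err_out:
		/* line 325: undo every mapping made so far */
		usnic_uiom_unmap_sorted_intervals(intervals, pd);
		return err;
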
in usnic_uiom_reg_get():
  329  struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,    (argument)
  362  uiomr->pd = pd;
  372  spin_lock(&pd->lock);
  376  &pd->root,
  392  err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
  401  spin_unlock(&pd->lock);
  406  usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
  411  spin_unlock(&pd->lock);
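
reg_get is the registration path, and the matches trace its whole locking story: take pd->lock (372), diff the requested page range against the intervals already in pd->root (376) so only not-yet-mapped sub-ranges get mapped, insert the new interval on success (392), and unmap the diff set again if anything fails (406). The two spin_unlock matches (401, 411) are the success and failure exits. A condensed sketch of the locked region; the diff helper's name and the IOMMU_READ/IOMMU_WRITE flags are assumptions not visible in these matches:

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						IOMMU_WRITE | IOMMU_READ,
						iommu_flags, &pd->root,
						&sorted_diff_intervals);
	if (err)
		goto out_unlock;

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err)
		goto out_unmap;

	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
						IOMMU_WRITE | IOMMU_READ);
	if (err)
		goto out_unmap;

	spin_unlock(&pd->lock);	/* line 401: success exit */
	return uiomr;

	out_unmap:
		/* line 406: tear down whatever the diff pass mapped */
		usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
	out_unlock:
		spin_unlock(&pd->lock);	/* line 411: failure exit */
		return ERR_PTR(err);
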
in usnic_uiom_reg_release():
  431  __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
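
The public release entry point is a thin wrapper: line 431 forwards to the internal helper with dirty=1, so writable pages are marked dirty on release. A sketch; the trailing kfree() of the registration is an assumption:

	void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
	{
		__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
		kfree(uiomr);	/* assumed: registration freed here */
	}
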
in usnic_uiom_alloc_pd():
  439  struct usnic_uiom_pd *pd;    (local)
  442  pd = kzalloc(sizeof(*pd), GFP_KERNEL);
  443  if (!pd)
  446  pd->domain = domain = iommu_paging_domain_alloc(dev);
  449  kfree(pd);
  453  iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);
  455  spin_lock_init(&pd->lock);
  456  INIT_LIST_HEAD(&pd->devs);
  458  return pd;
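
Allocation is a straight constructor: zero-allocate the pd, ask the IOMMU core for a paging domain for this device, install the fault handler, and initialize the lock and device list. iommu_paging_domain_alloc() reports failure via ERR_PTR, which is what the kfree at 449 cleans up after. A sketch assembled from the matches; the exact error-path returns are assumptions:

	struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device *dev)
	{
		struct usnic_uiom_pd *pd;
		struct iommu_domain *domain;

		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return ERR_PTR(-ENOMEM);

		pd->domain = domain = iommu_paging_domain_alloc(dev);
		if (IS_ERR(domain)) {
			kfree(pd);	/* line 449 */
			return ERR_CAST(domain);
		}

		iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

		spin_lock_init(&pd->lock);
		INIT_LIST_HEAD(&pd->devs);

		return pd;
	}
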
in usnic_uiom_dealloc_pd():
  461  void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)    (argument)
  463  iommu_domain_free(pd->domain);
  464  kfree(pd);
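
Teardown mirrors the constructor exactly, and the two matched statements are in all likelihood the entire body:

	void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
	{
		iommu_domain_free(pd->domain);
		kfree(pd);
	}
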
in usnic_uiom_attach_dev_to_pd():
  467  int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)    (argument)
  477  err = iommu_attach_device(pd->domain, dev);
  488  spin_lock(&pd->lock);
  489  list_add_tail(&uiom_dev->link, &pd->devs);
  490  pd->dev_cnt++;
  491  spin_unlock(&pd->lock);
  496  iommu_detach_device(pd->domain, dev);
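
Attach does the IOMMU work first and only then publishes the device: iommu_attach_device() at 477 runs before the lock is taken, while the list insert and counter bump at 488-491 happen under pd->lock. The detach at 496 is error-unwind, undoing the attach when a later step fails. A sketch; the uiom_dev tracking struct's allocation and the post-attach capability check are assumptions:

	int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
	{
		struct usnic_uiom_dev *uiom_dev;
		int err;

		uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_KERNEL);
		if (!uiom_dev)
			return -ENOMEM;
		uiom_dev->dev = dev;

		err = iommu_attach_device(pd->domain, dev);	/* line 477 */
		if (err)
			goto out_free_dev;

		/* assumed: something between attach and publish can still fail */
		if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
			err = -EFAULT;
			goto out_detach_device;
		}

		spin_lock(&pd->lock);
		list_add_tail(&uiom_dev->link, &pd->devs);
		pd->dev_cnt++;
		spin_unlock(&pd->lock);

		return 0;

	out_detach_device:
		iommu_detach_device(pd->domain, dev);	/* line 496 */
	out_free_dev:
		kfree(uiom_dev);
		return err;
	}
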
in usnic_uiom_detach_dev_from_pd():
  502  void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)    (argument)
  507  spin_lock(&pd->lock);
  508  list_for_each_entry(uiom_dev, &pd->devs, link) {
  518  spin_unlock(&pd->lock);
  523  pd->dev_cnt--;
  524  spin_unlock(&pd->lock);
  526  return iommu_detach_device(pd->domain, dev);
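
Detach is the reverse, and the two spin_unlock matches betray the two exits: 518 is the early bail-out when the device is not found on pd->devs, 524 the normal path after unlinking it and dropping dev_cnt. iommu_detach_device() deliberately runs only after the spinlock is released. A sketch; the found flag and the kfree of the tracking struct are assumptions:

	void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
	{
		struct usnic_uiom_dev *uiom_dev;
		int found = 0;

		spin_lock(&pd->lock);
		list_for_each_entry(uiom_dev, &pd->devs, link) {
			if (uiom_dev->dev == dev) {
				found = 1;
				break;
			}
		}

		if (!found) {
			/* line 518: unlock and bail, device was never attached */
			spin_unlock(&pd->lock);
			return;
		}

		list_del(&uiom_dev->link);
		kfree(uiom_dev);
		pd->dev_cnt--;
		spin_unlock(&pd->lock);

		/* line 526: void return of a void call */
		return iommu_detach_device(pd->domain, dev);
	}
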
in usnic_uiom_get_dev_list():
  529  struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)    (argument)
  535  spin_lock(&pd->lock);
  536  devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
  542  list_for_each_entry(uiom_dev, &pd->devs, link) {
  546  spin_unlock(&pd->lock);
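
get_dev_list snapshots the attached devices. The kcalloc() at 536 is sized pd->dev_cnt + 1 so the returned array is NULL-terminated, and it uses GFP_ATOMIC because the allocation happens while pd->lock, a spinlock, is held. A sketch with the error handling assumed:

	struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
	{
		struct usnic_uiom_dev *uiom_dev;
		struct device **devs;
		int i = 0;

		spin_lock(&pd->lock);
		/* +1 leaves a NULL terminator for the caller to stop on */
		devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
		if (!devs) {
			devs = ERR_PTR(-ENOMEM);
			goto out;
		}

		list_for_each_entry(uiom_dev, &pd->devs, link)
			devs[i++] = uiom_dev->dev;
	out:
		spin_unlock(&pd->lock);
		return devs;
	}
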