Lines matching refs:dev_data (AMD IOMMU driver, drivers/iommu/amd/iommu.c). Each hit shows the source line number, the matched line, and the enclosing function; "local" means dev_data is a local variable there, "argument" a function parameter.

78 			  struct iommu_dev_data *dev_data);
198 struct iommu_dev_data *dev_data; in alloc_dev_data() local
201 dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL); in alloc_dev_data()
202 if (!dev_data) in alloc_dev_data()
205 spin_lock_init(&dev_data->lock); in alloc_dev_data()
206 dev_data->devid = devid; in alloc_dev_data()
207 ratelimit_default_init(&dev_data->rs); in alloc_dev_data()
209 llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list); in alloc_dev_data()
210 return dev_data; in alloc_dev_data()
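
The hits above cover the whole allocation path. A hedged reconstruction from the matched lines; the pci_seg indirection through iommu->pci_seg is inferred from the llist_add() target, and the struct layout beyond the matched fields is assumed:

static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return NULL;

	spin_lock_init(&dev_data->lock);
	dev_data->devid = devid;
	ratelimit_default_init(&dev_data->rs);

	/* Lock-free push onto the per-PCI-segment llist */
	llist_add(&dev_data->dev_data_list, &pci_seg->dev_data_list);
	return dev_data;
}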
215 struct iommu_dev_data *dev_data; in search_dev_data() local
223 llist_for_each_entry(dev_data, node, dev_data_list) { in search_dev_data()
224 if (dev_data->devid == devid) in search_dev_data()
225 return dev_data; in search_dev_data()
295 struct iommu_dev_data *dev_data; in find_dev_data() local
297 dev_data = search_dev_data(iommu, devid); in find_dev_data()
299 if (dev_data == NULL) { in find_dev_data()
300 dev_data = alloc_dev_data(iommu, devid); in find_dev_data()
301 if (!dev_data) in find_dev_data()
305 dev_data->defer_attach = true; in find_dev_data()
308 return dev_data; in find_dev_data()
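
search_dev_data() and find_dev_data() form a lookup-or-allocate pair over that llist. A sketch assembled from the matched lines; the llist traversal setup and the condition guarding defer_attach (translation left enabled by firmware) are assumptions from context:

static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;
	struct llist_node *node;
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;

	if (llist_empty(&pci_seg->dev_data_list))
		return NULL;

	node = pci_seg->dev_data_list.first;
	llist_for_each_entry(dev_data, node, dev_data_list) {
		if (dev_data->devid == devid)
			return dev_data;
	}

	return NULL;
}

static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_dev_data *dev_data;

	dev_data = search_dev_data(iommu, devid);
	if (dev_data == NULL) {
		dev_data = alloc_dev_data(iommu, devid);
		if (!dev_data)
			return NULL;

		/* Assumed guard: defer attach while firmware translation is live */
		if (translation_pre_enabled(iommu))
			dev_data->defer_attach = true;
	}

	return dev_data;
}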
336 static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data) in pdev_pasid_supported() argument
338 return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP); in pdev_pasid_supported()
368 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_ats() local
371 if (dev_data->ats_enabled) in pdev_enable_cap_ats()
375 (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) { in pdev_enable_cap_ats()
378 dev_data->ats_enabled = 1; in pdev_enable_cap_ats()
379 dev_data->ats_qdep = pci_ats_queue_depth(pdev); in pdev_enable_cap_ats()
388 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_ats() local
390 if (dev_data->ats_enabled) { in pdev_disable_cap_ats()
392 dev_data->ats_enabled = 0; in pdev_disable_cap_ats()
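
The ATS pair shows the capability pattern shared by all three features: an idempotence check on the cached flag, a capability test, then mirroring the hardware state into dev_data. A hedged sketch; the pci_enable_ats() page-shift argument and the amd_iommu_iotlb_sup gate are assumptions:

static void pdev_enable_cap_ats(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data->ats_enabled)
		return;

	if (amd_iommu_iotlb_sup &&
	    (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
		if (!pci_enable_ats(pdev, PAGE_SHIFT)) {
			dev_data->ats_enabled = 1;
			dev_data->ats_qdep    = pci_ats_queue_depth(pdev);
		}
	}
}

static void pdev_disable_cap_ats(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data->ats_enabled) {
		pci_disable_ats(pdev);
		dev_data->ats_enabled = 0;
	}
}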
398 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_pri() local
401 if (dev_data->pri_enabled) in pdev_enable_cap_pri()
404 if (!dev_data->ats_enabled) in pdev_enable_cap_pri()
407 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) { in pdev_enable_cap_pri()
413 dev_data->pri_enabled = 1; in pdev_enable_cap_pri()
414 dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev); in pdev_enable_cap_pri()
425 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_pri() local
427 if (dev_data->pri_enabled) { in pdev_disable_cap_pri()
429 dev_data->pri_enabled = 0; in pdev_disable_cap_pri()
435 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_enable_cap_pasid() local
438 if (dev_data->pasid_enabled) in pdev_enable_cap_pasid()
441 if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) { in pdev_enable_cap_pasid()
445 dev_data->pasid_enabled = 1; in pdev_enable_cap_pasid()
453 struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev); in pdev_disable_cap_pasid() local
455 if (dev_data->pasid_enabled) { in pdev_disable_cap_pasid()
457 dev_data->pasid_enabled = 0; in pdev_disable_cap_pasid()
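
PRI follows the same shape but with an ordering constraint visible at line 404: ATS must already be on, since page requests ride on ATS. A hedged sketch; the early-return values, the PRI reset call, and the queue-depth constant are assumptions:

static int pdev_enable_cap_pri(struct pci_dev *pdev)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
	int ret = -EINVAL;

	if (dev_data->pri_enabled)
		return 0;

	/* PRI is transported over ATS, so ATS must be enabled first */
	if (!dev_data->ats_enabled)
		return 0;

	if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
		pci_reset_pri(pdev);

		ret = pci_enable_pri(pdev, PPR_LOG_ENTRIES);
		if (!ret) {
			dev_data->pri_enabled = 1;
			/* Does the device need a PASID in PRG responses? */
			dev_data->pri_tlp = pci_prg_resp_pasid_required(pdev);
		}
	}

	return ret;
}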
507 struct iommu_dev_data *dev_data; in iommu_init_device() local
518 dev_data = find_dev_data(iommu, devid); in iommu_init_device()
519 if (!dev_data) in iommu_init_device()
522 dev_data->dev = dev; in iommu_init_device()
533 dev_data->flags = pdev_get_caps(to_pci_dev(dev)); in iommu_init_device()
536 dev_iommu_priv_set(dev, dev_data); in iommu_init_device()
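
iommu_init_device() is where a struct device gets tied to its dev_data; the fragment below reconstructs just the matched steps (the devid derivation and the surrounding error handling are elided, and the dev_is_pci() guard is an assumption):

	dev_data = find_dev_data(iommu, devid);
	if (!dev_data)
		return -ENOMEM;

	dev_data->dev = dev;

	/* Cache the ATS/PRI/PASID capability bits once, at init time */
	if (dev_is_pci(dev))
		dev_data->flags = pdev_get_caps(to_pci_dev(dev));

	/* Everything later retrieves dev_data via dev_iommu_priv_get(dev) */
	dev_iommu_priv_set(dev, dev_data);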
560 struct iommu_dev_data *dev_data; in amd_iommu_uninit_device() local
562 dev_data = dev_iommu_priv_get(dev); in amd_iommu_uninit_device()
563 if (!dev_data) in amd_iommu_uninit_device()
566 if (dev_data->domain) in amd_iommu_uninit_device()
601 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_hw_error() local
614 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_hw_error()
616 if (dev_data) { in amd_iommu_report_rmp_hw_error()
617 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_hw_error()
633 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_rmp_fault() local
647 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_rmp_fault()
649 if (dev_data) { in amd_iommu_report_rmp_fault()
650 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_rmp_fault()
674 struct iommu_dev_data *dev_data = NULL; in amd_iommu_report_page_fault() local
680 dev_data = dev_iommu_priv_get(&pdev->dev); in amd_iommu_report_page_fault()
682 if (dev_data) { in amd_iommu_report_page_fault()
690 if (dev_data->domain == NULL) { in amd_iommu_report_page_fault()
698 if (!report_iommu_fault(&dev_data->domain->domain, in amd_iommu_report_page_fault()
706 if (__ratelimit(&dev_data->rs)) { in amd_iommu_report_page_fault()
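
All three report helpers above share one pattern: resolve dev_data from the device, then gate the print on the per-device ratelimit state initialized in alloc_dev_data(). A generic sketch; report_ratelimited() and its message text are illustrative names, not the driver's:

static void report_ratelimited(struct pci_dev *pdev, u64 address, int flags)
{
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);

	if (dev_data) {
		/* dev_data->rs throttles per device, not globally */
		if (__ratelimit(&dev_data->rs))
			pci_err(pdev, "Event logged [addr=0x%llx flags=0x%04x]\n",
				address, flags);
	}
}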
1363 static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address, in device_flush_iotlb() argument
1366 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_iotlb()
1368 int qdep = dev_data->ats_qdep; in device_flush_iotlb()
1370 build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, in device_flush_iotlb()
1386 static int device_flush_dte(struct iommu_dev_data *dev_data) in device_flush_dte() argument
1388 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in device_flush_dte()
1394 if (dev_is_pci(dev_data->dev)) in device_flush_dte()
1395 pdev = to_pci_dev(dev_data->dev); in device_flush_dte()
1401 ret = iommu_flush_dte(iommu, dev_data->devid); in device_flush_dte()
1406 alias = pci_seg->alias_table[dev_data->devid]; in device_flush_dte()
1407 if (alias != dev_data->devid) { in device_flush_dte()
1413 if (dev_data->ats_enabled) { in device_flush_dte()
1415 ret = device_flush_iotlb(dev_data, 0, ~0UL, in device_flush_dte()
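
device_flush_dte() has to invalidate up to three things: the device's own DTE, the alias DTE (for devices that present a different requester ID), and the remote IOTLB when ATS is on. A reconstruction from the matched lines; the pci_for_each_dma_alias() branch and its callback name are assumptions:

static int device_flush_dte(struct iommu_dev_data *dev_data)
{
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
	struct pci_dev *pdev = NULL;
	u16 alias;
	int ret;

	if (dev_is_pci(dev_data->dev))
		pdev = to_pci_dev(dev_data->dev);

	if (pdev)
		ret = pci_for_each_dma_alias(pdev, device_flush_dte_alias, iommu);
	else
		ret = iommu_flush_dte(iommu, dev_data->devid);
	if (ret)
		return ret;

	alias = pci_seg->alias_table[dev_data->devid];
	if (alias != dev_data->devid) {
		ret = iommu_flush_dte(iommu, alias);
		if (ret)
			return ret;
	}

	/* With ATS enabled the device caches translations; wipe them all */
	if (dev_data->ats_enabled)
		ret = device_flush_iotlb(dev_data, 0, ~0UL,
					 IOMMU_NO_PASID, false);

	return ret;
}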
1425 struct iommu_dev_data *dev_data; in domain_flush_pages_v2() local
1429 list_for_each_entry(dev_data, &pdom->dev_list, list) { in domain_flush_pages_v2()
1430 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in domain_flush_pages_v2()
1431 u16 domid = dev_data->gcr3_info.domid; in domain_flush_pages_v2()
1472 struct iommu_dev_data *dev_data; in __domain_flush_pages() local
1484 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1486 if (!dev_data->ats_enabled) in __domain_flush_pages()
1489 ret |= device_flush_iotlb(dev_data, address, size, pasid, gn); in __domain_flush_pages()
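
__domain_flush_pages() then only needs a per-device step for ATS-enabled devices, since devices without ATS hold no translations of their own. The loop, with the guest/nested flag gn assumed from the v2 path:

	list_for_each_entry(dev_data, &domain->dev_list, list) {
		/* No ATS means no device-side IOTLB to invalidate */
		if (!dev_data->ats_enabled)
			continue;

		ret |= device_flush_iotlb(dev_data, address, size, pasid, gn);
	}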
1552 void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data, in amd_iommu_dev_flush_pasid_pages() argument
1556 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in amd_iommu_dev_flush_pasid_pages()
1559 dev_data->gcr3_info.domid, pasid, true); in amd_iommu_dev_flush_pasid_pages()
1562 if (dev_data->ats_enabled) in amd_iommu_dev_flush_pasid_pages()
1563 device_flush_iotlb(dev_data, address, size, pasid, true); in amd_iommu_dev_flush_pasid_pages()
1568 static void dev_flush_pasid_all(struct iommu_dev_data *dev_data, in dev_flush_pasid_all() argument
1571 amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0, in dev_flush_pasid_all()
1594 struct iommu_dev_data *dev_data; in amd_iommu_update_and_flush_device_table() local
1596 list_for_each_entry(dev_data, &domain->dev_list, list) { in amd_iommu_update_and_flush_device_table()
1597 struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev); in amd_iommu_update_and_flush_device_table()
1599 set_dte_entry(iommu, dev_data); in amd_iommu_update_and_flush_device_table()
1600 clone_aliases(iommu, dev_data->dev); in amd_iommu_update_and_flush_device_table()
1603 list_for_each_entry(dev_data, &domain->dev_list, list) in amd_iommu_update_and_flush_device_table()
1604 device_flush_dte(dev_data); in amd_iommu_update_and_flush_device_table()
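
Note the two-pass structure: every DTE (plus aliases) is rewritten before any flush is issued, so hardware never observes a mix of old and new entries mid-update. A hedged sketch, with the trailing completion wait omitted:

void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;

	/* Pass 1: rewrite the DTE of every device in the domain */
	list_for_each_entry(dev_data, &domain->dev_list, list) {
		struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);

		set_dte_entry(iommu, dev_data);
		clone_aliases(iommu, dev_data->dev);
	}

	/* Pass 2: only now invalidate the cached copies */
	list_for_each_entry(dev_data, &domain->dev_list, list)
		device_flush_dte(dev_data);
}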
1620 struct iommu_dev_data *dev_data; in amd_iommu_complete_ppr() local
1624 dev_data = dev_iommu_priv_get(dev); in amd_iommu_complete_ppr()
1627 build_complete_ppr(&cmd, dev_data->devid, pasid, status, in amd_iommu_complete_ppr()
1628 tag, dev_data->pri_tlp); in amd_iommu_complete_ppr()
1795 static int update_gcr3(struct iommu_dev_data *dev_data, in update_gcr3() argument
1798 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in update_gcr3()
1810 dev_flush_pasid_all(dev_data, pasid); in update_gcr3()
1814 int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid, in amd_iommu_set_gcr3() argument
1817 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in amd_iommu_set_gcr3()
1820 iommu_group_mutex_assert(dev_data->dev); in amd_iommu_set_gcr3()
1822 ret = update_gcr3(dev_data, pasid, gcr3, true); in amd_iommu_set_gcr3()
1830 int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid) in amd_iommu_clear_gcr3() argument
1832 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in amd_iommu_clear_gcr3()
1835 iommu_group_mutex_assert(dev_data->dev); in amd_iommu_clear_gcr3()
1837 ret = update_gcr3(dev_data, pasid, 0, false); in amd_iommu_clear_gcr3()
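
amd_iommu_set_gcr3()/amd_iommu_clear_gcr3() are thin wrappers over update_gcr3() and the group-mutex assert documents the locking contract. A sketch with the pasid_cnt bookkeeping assumed from context:

int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
		       unsigned long gcr3)
{
	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
	int ret;

	/* Caller must hold the device's iommu group mutex */
	iommu_group_mutex_assert(dev_data->dev);

	ret = update_gcr3(dev_data, pasid, gcr3, true);
	if (ret)
		return ret;

	gcr3_info->pasid_cnt++;
	return ret;
}

int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
{
	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
	int ret;

	iommu_group_mutex_assert(dev_data->dev);

	ret = update_gcr3(dev_data, pasid, 0, false);
	if (ret)
		return ret;

	gcr3_info->pasid_cnt--;
	return ret;
}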
1846 struct iommu_dev_data *dev_data) in set_dte_entry() argument
1851 u16 devid = dev_data->devid; in set_dte_entry()
1853 struct protection_domain *domain = dev_data->domain; in set_dte_entry()
1855 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in set_dte_entry()
1858 domid = dev_data->gcr3_info.domid; in set_dte_entry()
1879 if (dev_data->ats_enabled) in set_dte_entry()
1882 if (dev_data->ppr) in set_dte_entry()
1956 static void dev_update_dte(struct iommu_dev_data *dev_data, bool set) in dev_update_dte() argument
1958 struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev); in dev_update_dte()
1961 set_dte_entry(iommu, dev_data); in dev_update_dte()
1963 clear_dte_entry(iommu, dev_data->devid); in dev_update_dte()
1965 clone_aliases(iommu, dev_data->dev); in dev_update_dte()
1966 device_flush_dte(dev_data); in dev_update_dte()
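
dev_update_dte() is the single choke point for making a DTE change visible: write or clear the entry, propagate it to aliases, flush, then wait. Reconstructed from the matched lines; the trailing completion wait is an assumption:

static void dev_update_dte(struct iommu_dev_data *dev_data, bool set)
{
	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);

	if (set)
		set_dte_entry(iommu, dev_data);
	else
		clear_dte_entry(iommu, dev_data->devid);

	clone_aliases(iommu, dev_data->dev);
	device_flush_dte(dev_data);
	iommu_completion_wait(iommu);
}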
1974 static int init_gcr3_table(struct iommu_dev_data *dev_data, in init_gcr3_table() argument
1977 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in init_gcr3_table()
1978 int max_pasids = dev_data->max_pasids; in init_gcr3_table()
1985 if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data)) in init_gcr3_table()
1992 ret = setup_gcr3_table(&dev_data->gcr3_info, iommu, in init_gcr3_table()
2001 ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true); in init_gcr3_table()
2003 free_gcr3_table(&dev_data->gcr3_info); in init_gcr3_table()
2008 static void destroy_gcr3_table(struct iommu_dev_data *dev_data, in destroy_gcr3_table() argument
2011 struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info; in destroy_gcr3_table()
2014 update_gcr3(dev_data, 0, 0, false); in destroy_gcr3_table()
2022 static int do_attach(struct iommu_dev_data *dev_data, in do_attach() argument
2025 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in do_attach()
2030 dev_data->domain = domain; in do_attach()
2031 list_add(&dev_data->list, &domain->dev_list); in do_attach()
2035 cfg->amd.nid = dev_to_node(dev_data->dev); in do_attach()
2043 ret = init_gcr3_table(dev_data, domain); in do_attach()
2051 static void do_detach(struct iommu_dev_data *dev_data) in do_detach() argument
2053 struct protection_domain *domain = dev_data->domain; in do_detach()
2054 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in do_detach()
2057 dev_update_dte(dev_data, false); in do_detach()
2064 destroy_gcr3_table(dev_data, domain); in do_detach()
2067 dev_data->domain = NULL; in do_detach()
2068 list_del(&dev_data->list); in do_detach()
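
do_detach() mirrors do_attach() in reverse, and the order matters: the DTE is cleared first so the device stops translating, the IOTLBs are flushed next, and only then are the GCR3 table and the list linkage torn down. A hedged reconstruction; the SVA-capability check, the flush helper, and the omitted reference-count bookkeeping are assumptions:

static void do_detach(struct iommu_dev_data *dev_data)
{
	struct protection_domain *domain = dev_data->domain;
	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);

	/* Clear the DTE first so the device stops using the domain */
	dev_update_dte(dev_data, false);

	/* Flush IOTLBs while the old translations are still tracked */
	amd_iommu_domain_flush_all(domain);

	/* Tear down the per-device GCR3 table, if one was set up */
	if (pdom_is_sva_capable(domain))
		destroy_gcr3_table(dev_data, domain);

	/* Finally drop the linkage */
	dev_data->domain = NULL;
	list_del(&dev_data->list);
}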
2082 struct iommu_dev_data *dev_data; in attach_device() local
2088 dev_data = dev_iommu_priv_get(dev); in attach_device()
2090 spin_lock(&dev_data->lock); in attach_device()
2092 if (dev_data->domain != NULL) { in attach_device()
2097 ret = do_attach(dev_data, domain); in attach_device()
2100 spin_unlock(&dev_data->lock); in attach_device()
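
attach_device() serializes on the per-device spinlock initialized in alloc_dev_data() and treats a device that already has a domain as busy. A sketch; the -EBUSY value and the label layout are assumptions:

static int attach_device(struct device *dev, struct protection_domain *domain)
{
	struct iommu_dev_data *dev_data;
	int ret = 0;

	dev_data = dev_iommu_priv_get(dev);

	spin_lock(&dev_data->lock);

	/* A device can only be attached to one domain at a time */
	if (dev_data->domain != NULL) {
		ret = -EBUSY;
		goto out;
	}

	ret = do_attach(dev_data, domain);

out:
	spin_unlock(&dev_data->lock);
	return ret;
}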
2112 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in detach_device() local
2113 struct protection_domain *domain = dev_data->domain; in detach_device()
2114 struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data); in detach_device()
2116 bool ppr = dev_data->ppr; in detach_device()
2120 spin_lock(&dev_data->lock); in detach_device()
2128 if (WARN_ON(!dev_data->domain)) in detach_device()
2135 dev_data->ppr = false; in detach_device()
2138 do_detach(dev_data); in detach_device()
2141 spin_unlock(&dev_data->lock); in detach_device()
2147 amd_iommu_iopf_remove_device(iommu, dev_data); in detach_device()
2158 struct iommu_dev_data *dev_data; in amd_iommu_probe_device() local
2190 dev_data = dev_iommu_priv_get(dev); in amd_iommu_probe_device()
2192 pdev_pasid_supported(dev_data)) { in amd_iommu_probe_device()
2193 dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids, in amd_iommu_probe_device()
2427 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in blocked_domain_attach_device() local
2429 if (dev_data->domain) in blocked_domain_attach_device()
2433 spin_lock(&dev_data->lock); in blocked_domain_attach_device()
2434 dev_update_dte(dev_data, false); in blocked_domain_attach_device()
2435 spin_unlock(&dev_data->lock); in blocked_domain_attach_device()
2450 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_attach_device() local
2460 if (dev_data->domain == domain) in amd_iommu_attach_device()
2463 dev_data->defer_attach = false; in amd_iommu_attach_device()
2472 if (dev_data->domain) in amd_iommu_attach_device()
2480 dev_data->use_vapic = 1; in amd_iommu_attach_device()
2482 dev_data->use_vapic = 0; in amd_iommu_attach_device()
2486 pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL; in amd_iommu_attach_device()
2495 if (amd_iommu_iopf_add_device(iommu, dev_data)) in amd_iommu_attach_device()
2502 dev_update_dte(dev_data, true); in amd_iommu_attach_device()
2624 struct iommu_dev_data *dev_data; in amd_iommu_set_dirty_tracking() local
2636 list_for_each_entry(dev_data, &pdomain->dev_list, list) { in amd_iommu_set_dirty_tracking()
2637 iommu = get_amd_iommu_from_dev_data(dev_data); in amd_iommu_set_dirty_tracking()
2640 pte_root = dev_table[dev_data->devid].data[0]; in amd_iommu_set_dirty_tracking()
2646 dev_table[dev_data->devid].data[0] = pte_root; in amd_iommu_set_dirty_tracking()
2647 device_flush_dte(dev_data); in amd_iommu_set_dirty_tracking()
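
Dirty tracking is toggled by rewriting the hardware-access/dirty bit in word data[0] of each device's DTE and flushing it. Only the loop body is reconstructed here; DTE_FLAG_HAD, get_dev_table(), and the surrounding domain lock are assumptions from context:

	list_for_each_entry(dev_data, &pdomain->dev_list, list) {
		iommu = get_amd_iommu_from_dev_data(dev_data);

		dev_table = get_dev_table(iommu);
		pte_root = dev_table[dev_data->devid].data[0];

		pte_root = (enable ? pte_root | DTE_FLAG_HAD :
				     pte_root & ~DTE_FLAG_HAD);

		/* Write back the updated root word and flush the DTE */
		dev_table[dev_data->devid].data[0] = pte_root;
		device_flush_dte(dev_data);
	}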
2744 struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); in amd_iommu_is_attach_deferred() local
2746 return dev_data->defer_attach; in amd_iommu_is_attach_deferred()
2773 struct iommu_dev_data *dev_data; in amd_iommu_def_domain_type() local
2775 dev_data = dev_iommu_priv_get(dev); in amd_iommu_def_domain_type()
2776 if (!dev_data) in amd_iommu_def_domain_type()
2790 if (pdev_pasid_supported(dev_data) && in amd_iommu_def_domain_type()
3660 struct iommu_dev_data *dev_data; in amd_ir_set_vcpu_affinity() local
3665 dev_data = search_dev_data(ir_data->iommu, irte_info->devid); in amd_ir_set_vcpu_affinity()
3671 if (!dev_data || !dev_data->use_vapic) in amd_ir_set_vcpu_affinity()