Lines matching refs: dmar_domain

287 static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)  in domain_pfn_supported()
355 static void domain_update_iommu_coherency(struct dmar_domain *domain) in domain_update_iommu_coherency()
385 static int domain_update_iommu_superpage(struct dmar_domain *domain, in domain_update_iommu_superpage()
415 static int domain_update_device_node(struct dmar_domain *domain) in domain_update_device_node()
439 static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain) in domain_super_pgsize_bitmap()
456 void domain_update_iommu_cap(struct dmar_domain *domain) in domain_update_iommu_cap()
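
The domain_update_* helpers above recompute a domain's effective capabilities as the intersection of what every attached IOMMU supports: coherency is ANDed across units, and the superpage mask keeps only the large-page levels that all units implement. A minimal userspace model of that intersection pattern (types and field names here are simplified stand-ins, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures. */
    struct iommu_caps {
        bool coherent;          /* models a per-unit coherency capability */
        unsigned int superpage; /* bitmask of supported large-page levels */
    };

    struct domain_caps {
        bool iommu_coherency;
        unsigned int iommu_superpage;
    };

    /* Effective domain capability = intersection over all attached IOMMUs. */
    static void update_domain_caps(struct domain_caps *dom,
                                   const struct iommu_caps *units, int n)
    {
        dom->iommu_coherency = true;
        dom->iommu_superpage = n ? ~0u : 0;
        for (int i = 0; i < n; i++) {
            dom->iommu_coherency &= units[i].coherent;
            dom->iommu_superpage &= units[i].superpage;
        }
    }

    int main(void)
    {
        struct iommu_caps units[] = {
            { .coherent = true,  .superpage = 0x3 }, /* 2M + 1G */
            { .coherent = false, .superpage = 0x1 }, /* 2M only */
        };
        struct domain_caps dom;

        update_domain_caps(&dom, units, 2);
        printf("coherent=%d superpage=0x%x\n",
               dom.iommu_coherency, dom.iommu_superpage);
        return 0;
    }
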
668 static void domain_flush_cache(struct dmar_domain *domain, in domain_flush_cache()
807 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, in pfn_to_dma_pte()
865 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, in dma_pfn_level_pte()
897 static void dma_pte_clear_range(struct dmar_domain *domain, in dma_pte_clear_range()
928 static void dma_pte_free_level(struct dmar_domain *domain, int level, in dma_pte_free_level()
971 static void dma_pte_free_pagetable(struct dmar_domain *domain, in dma_pte_free_pagetable()
995 static void dma_pte_list_pagetables(struct dmar_domain *domain, in dma_pte_list_pagetables()
1015 static void dma_pte_clear_level(struct dmar_domain *domain, int level, in dma_pte_clear_level()
1062 static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn, in domain_unmap()
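
pfn_to_dma_pte and the dma_pte_* helpers above walk, clear and free a multi-level page table indexed by IOVA pfn, allocating missing intermediate tables on the way down. A compilable toy of the descend-and-allocate walk; the 9-bits-per-level split matches VT-d's 512-entry tables, but the struct pte here uses a plain pointer where the hardware table would hold a physical address:

    #include <stdlib.h>

    #define LEVEL_BITS 9
    #define LEVEL_SIZE (1UL << LEVEL_BITS)
    #define LEVEL_MASK (LEVEL_SIZE - 1)

    struct pte { struct pte *next; }; /* toy: pointer, not a phys addr */

    /* Index of @pfn within the table at @level (level 1 is the leaf). */
    static unsigned long level_index(unsigned long pfn, int level)
    {
        return (pfn >> ((level - 1) * LEVEL_BITS)) & LEVEL_MASK;
    }

    /* Walk from the root, allocating missing tables, down to target_level. */
    static struct pte *pfn_to_pte(struct pte *root, int root_level,
                                  unsigned long pfn, int target_level)
    {
        struct pte *table = root;
        int level = root_level;

        while (level > target_level) {
            struct pte *pte = &table[level_index(pfn, level)];

            if (!pte->next) {
                pte->next = calloc(LEVEL_SIZE, sizeof(struct pte));
                if (!pte->next)
                    return NULL;
            }
            table = pte->next;
            level--;
        }
        return &table[level_index(pfn, level)];
    }

    int main(void)
    {
        struct pte *root = calloc(LEVEL_SIZE, sizeof(struct pte));
        struct pte *pte = root ? pfn_to_pte(root, 3, 0x12345, 1) : NULL;

        return pte ? 0 : 1;
    }
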
1238 domain_lookup_dev_info(struct dmar_domain *domain, in domain_lookup_dev_info()
1457 static struct dmar_domain *alloc_domain(unsigned int type) in alloc_domain()
1459 struct dmar_domain *domain; in alloc_domain()
1478 int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) in domain_attach_iommu()
1530 void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) in domain_detach_iommu()
1563 static void domain_exit(struct dmar_domain *domain) in domain_exit()
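
domain_attach_iommu / domain_detach_iommu refcount the domain's presence on each IOMMU unit: the first attach allocates a per-IOMMU domain ID, later attaches only bump the count, and the last detach releases the ID again. A sketch of that refcounting, with a stubbed-out ID allocator standing in for the kernel's per-IOMMU ID bitmap:

    #include <errno.h>

    #define MAX_IOMMUS 4

    struct iommu_ref {
        int refcnt;
        int did;    /* domain ID on this IOMMU, valid while refcnt > 0 */
    };

    static int alloc_domain_id(int iommu) { return 100 + iommu; } /* stub */

    static int domain_attach(struct iommu_ref *info, int iommu)
    {
        if (iommu < 0 || iommu >= MAX_IOMMUS)
            return -EINVAL;
        if (info[iommu].refcnt++ == 0)
            info[iommu].did = alloc_domain_id(iommu); /* first attach */
        return 0;
    }

    static void domain_detach(struct iommu_ref *info, int iommu)
    {
        if (--info[iommu].refcnt == 0)
            info[iommu].did = -1; /* last detach: release the ID */
    }

    int main(void)
    {
        struct iommu_ref info[MAX_IOMMUS] = {{0}};

        domain_attach(info, 0); /* first attach: allocates a DID */
        domain_attach(info, 0); /* second attach: refcount only */
        domain_detach(info, 0);
        domain_detach(info, 0); /* last detach: DID released */
        return info[0].refcnt == 0 ? 0 : 1;
    }
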
1634 static int domain_context_mapping_one(struct dmar_domain *domain, in domain_context_mapping_one()
1701 struct dmar_domain *domain = opaque; in domain_context_mapping_cb()
1708 domain_context_mapping(struct dmar_domain *domain, struct device *dev) in domain_context_mapping()
1722 static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn, in hardware_largepage_caps()
1752 static void switch_to_super_page(struct dmar_domain *domain, in switch_to_super_page()
1781 __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, in __domain_mapping()
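
hardware_largepage_caps, switch_to_super_page and __domain_mapping together decide how large a PTE each chunk of a mapping can use: both pfns must be aligned to the candidate page size, the remaining length must cover it, and the result is capped by the hardware superpage mask. A compilable sketch of that level-selection logic (9 bits per level, as above; the constants are illustrative):

    #include <stdio.h>

    #define LEVEL_BITS 9

    /*
     * Highest page-table level usable for this mapping: IOVA pfn and
     * physical pfn must both be aligned to the large-page size, and the
     * remaining length must cover it. @max_level models the hardware
     * superpage support (cf. domain_update_iommu_superpage above).
     */
    static int largepage_level(unsigned long iov_pfn, unsigned long phys_pfn,
                               unsigned long nr_pages, int max_level)
    {
        int level = 1;
        unsigned long mask = (1UL << LEVEL_BITS) - 1;

        while (level < max_level &&
               !((iov_pfn | phys_pfn) & mask) &&
               nr_pages >= mask + 1) {
            level++;
            mask = (mask << LEVEL_BITS) | mask;
        }
        return level;
    }

    int main(void)
    {
        /* 2M-aligned pfns and >= 512 pages -> level 2 (one 2M mapping). */
        printf("%d\n", largepage_level(0x200, 0x400, 512, 2));
        return 0;
    }
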
1909 struct dmar_domain *domain, in domain_setup_first_level()
1948 static int dmar_domain_attach_device(struct dmar_domain *domain, in dmar_domain_attach_device()
3384 static int md_domain_init(struct dmar_domain *domain, int guest_width) in md_domain_init()
3430 static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage) in paging_domain_alloc()
3434 struct dmar_domain *domain; in paging_domain_alloc()
3493 struct dmar_domain *dmar_domain; in intel_iommu_domain_alloc() local
3499 dmar_domain = alloc_domain(type); in intel_iommu_domain_alloc()
3500 if (!dmar_domain) { in intel_iommu_domain_alloc()
3504 if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { in intel_iommu_domain_alloc()
3506 domain_exit(dmar_domain); in intel_iommu_domain_alloc()
3510 domain = &dmar_domain->domain; in intel_iommu_domain_alloc()
3513 __DOMAIN_MAX_ADDR(dmar_domain->gaw); in intel_iommu_domain_alloc()
3533 struct dmar_domain *dmar_domain; in intel_iommu_domain_alloc_user() local
3552 dmar_domain = paging_domain_alloc(dev, false); in intel_iommu_domain_alloc_user()
3553 if (IS_ERR(dmar_domain)) in intel_iommu_domain_alloc_user()
3554 return ERR_CAST(dmar_domain); in intel_iommu_domain_alloc_user()
3555 domain = &dmar_domain->domain; in intel_iommu_domain_alloc_user()
3561 dmar_domain->nested_parent = true; in intel_iommu_domain_alloc_user()
3562 INIT_LIST_HEAD(&dmar_domain->s1_domains); in intel_iommu_domain_alloc_user()
3563 spin_lock_init(&dmar_domain->s1_lock); in intel_iommu_domain_alloc_user()
3567 if (dmar_domain->use_first_level) { in intel_iommu_domain_alloc_user()
3579 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_domain_free() local
3581 WARN_ON(dmar_domain->nested_parent && in intel_iommu_domain_free()
3582 !list_empty(&dmar_domain->s1_domains)); in intel_iommu_domain_free()
3583 domain_exit(dmar_domain); in intel_iommu_domain_free()
3590 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in prepare_domain_attach_device() local
3594 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) in prepare_domain_attach_device()
3605 if (dmar_domain->max_addr > (1LL << addr_width)) in prepare_domain_attach_device()
3607 dmar_domain->gaw = addr_width; in prepare_domain_attach_device()
3612 while (iommu->agaw < dmar_domain->agaw) { in prepare_domain_attach_device()
3615 pte = dmar_domain->pgd; in prepare_domain_attach_device()
3617 dmar_domain->pgd = phys_to_virt(dma_pte_addr(pte)); in prepare_domain_attach_device()
3620 dmar_domain->agaw--; in prepare_domain_attach_device()
3648 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_map() local
3656 if (dmar_domain->set_pte_snp) in intel_iommu_map()
3660 if (dmar_domain->max_addr < max_addr) { in intel_iommu_map()
3664 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1; in intel_iommu_map()
3668 __func__, dmar_domain->gaw, max_addr); in intel_iommu_map()
3671 dmar_domain->max_addr = max_addr; in intel_iommu_map()
3676 return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT, in intel_iommu_map()
3706 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_unmap() local
3712 if (unlikely(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, in intel_iommu_unmap()
3722 domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist); in intel_iommu_unmap()
3724 if (dmar_domain->max_addr == iova + size) in intel_iommu_unmap()
3725 dmar_domain->max_addr = iova; in intel_iommu_unmap()
3759 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_iova_to_phys() local
3764 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level, in intel_iommu_iova_to_phys()
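
intel_iommu_map, intel_iommu_unmap and intel_iommu_iova_to_phys implement the generic iommu_ops callbacks; consumers reach them through the core IOMMU API rather than calling them directly. A kernel-context sketch of that path (not runnable outside a kernel tree; the gfp_t argument to iommu_map is the recent-kernel form of the call):

    #include <linux/iommu.h>

    /* Sketch: map one page into a device's IOMMU domain and back out. */
    static int map_one_page(struct device *dev, unsigned long iova,
                            phys_addr_t paddr)
    {
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        int ret;

        if (!domain)
            return -ENODEV;

        /* Ends up in intel_iommu_map() via __domain_mapping(). */
        ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
                        IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (ret)
            return ret;

        /* Read the translation back: intel_iommu_iova_to_phys(). */
        WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

        /* Tear it down again: intel_iommu_unmap(). */
        iommu_unmap(domain, iova, PAGE_SIZE);
        return 0;
    }

The unmap side defers the IOTLB flush: intel_iommu_unmap only collects freed tables into the gather/freelist seen at 3722, and the actual flush happens in the core's iotlb_sync step.
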
3774 static bool domain_support_force_snooping(struct dmar_domain *domain) in domain_support_force_snooping()
3790 static void domain_set_force_snooping(struct dmar_domain *domain) in domain_set_force_snooping()
3811 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_enforce_cache_coherency() local
3814 if (dmar_domain->force_snooping) in intel_iommu_enforce_cache_coherency()
3817 spin_lock_irqsave(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
3818 if (!domain_support_force_snooping(dmar_domain) || in intel_iommu_enforce_cache_coherency()
3819 (!dmar_domain->use_first_level && dmar_domain->has_mappings)) { in intel_iommu_enforce_cache_coherency()
3820 spin_unlock_irqrestore(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
3824 domain_set_force_snooping(dmar_domain); in intel_iommu_enforce_cache_coherency()
3825 dmar_domain->force_snooping = true; in intel_iommu_enforce_cache_coherency()
3826 spin_unlock_irqrestore(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
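
intel_iommu_enforce_cache_coherency (3811-3826) is a check-then-commit transition under the domain lock: an already-enforced domain is a no-op, enforcement is refused if an attached IOMMU lacks snoop control or if a second-stage domain already has mappings, and otherwise force_snooping flips exactly once. A small pthreads model of the same idempotent, lock-protected flip (the use_first_level distinction is collapsed into a single has_mappings flag here):

    #include <pthread.h>
    #include <stdbool.h>

    struct toy_domain {
        pthread_mutex_t lock;
        bool force_snooping;
        bool has_mappings;
        bool hw_supports_snoop; /* models domain_support_force_snooping() */
    };

    static bool enforce_coherency(struct toy_domain *d)
    {
        bool ok = true;

        pthread_mutex_lock(&d->lock);
        if (d->force_snooping)      /* already enforced: idempotent */
            goto unlock;
        if (!d->hw_supports_snoop || d->has_mappings) {
            ok = false;             /* unsupported, or too late */
            goto unlock;
        }
        d->force_snooping = true;   /* commit while still holding the lock */
    unlock:
        pthread_mutex_unlock(&d->lock);
        return ok;
    }

    int main(void)
    {
        struct toy_domain d = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .hw_supports_snoop = true,
        };
        return enforce_coherency(&d) ? 0 : 1;
    }
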
4261 struct dmar_domain *dmar_domain; in intel_iommu_remove_dev_pasid() local
4269 dmar_domain = to_dmar_domain(domain); in intel_iommu_remove_dev_pasid()
4270 spin_lock_irqsave(&dmar_domain->lock, flags); in intel_iommu_remove_dev_pasid()
4271 list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) { in intel_iommu_remove_dev_pasid()
4279 spin_unlock_irqrestore(&dmar_domain->lock, flags); in intel_iommu_remove_dev_pasid()
4281 cache_tag_unassign_domain(dmar_domain, dev, pasid); in intel_iommu_remove_dev_pasid()
4282 domain_detach_iommu(dmar_domain, iommu); in intel_iommu_remove_dev_pasid()
4293 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_set_dev_pasid() local
4316 ret = domain_attach_iommu(dmar_domain, iommu); in intel_iommu_set_dev_pasid()
4320 ret = cache_tag_assign_domain(dmar_domain, dev, pasid); in intel_iommu_set_dev_pasid()
4324 if (dmar_domain->use_first_level) in intel_iommu_set_dev_pasid()
4325 ret = domain_setup_first_level(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4328 ret = intel_pasid_setup_second_level(iommu, dmar_domain, in intel_iommu_set_dev_pasid()
4335 spin_lock_irqsave(&dmar_domain->lock, flags); in intel_iommu_set_dev_pasid()
4336 list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids); in intel_iommu_set_dev_pasid()
4337 spin_unlock_irqrestore(&dmar_domain->lock, flags); in intel_iommu_set_dev_pasid()
4344 cache_tag_unassign_domain(dmar_domain, dev, pasid); in intel_iommu_set_dev_pasid()
4346 domain_detach_iommu(dmar_domain, iommu); in intel_iommu_set_dev_pasid()
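
intel_iommu_set_dev_pasid builds state in stages (attach the domain to the IOMMU, assign a cache tag, program the PASID table entry, then publish the dev_pasid on the domain list), and the error path at 4344-4346 unwinds the completed stages in reverse. A compact, compilable model of that goto unwind ladder (all helpers are hypothetical stand-ins):

    #include <errno.h>

    static int attach_iommu(void)  { return 0; }
    static void detach_iommu(void) { }
    static int assign_tag(void)    { return 0; }
    static void unassign_tag(void) { }
    static int setup_pasid(void)   { return -ENODEV; } /* pretend failure */

    static int set_dev_pasid(void)
    {
        int ret;

        ret = attach_iommu();
        if (ret)
            return ret;

        ret = assign_tag();
        if (ret)
            goto out_detach;

        ret = setup_pasid();
        if (ret)
            goto out_unassign;

        return 0; /* success: only now publish on the dev_pasids list */

    out_unassign:
        unassign_tag();  /* undo in reverse order of setup */
    out_detach:
        detach_iommu();
        return ret;
    }

    int main(void)
    {
        /* setup_pasid() fails above, so we expect the unwound error. */
        return set_dev_pasid() == -ENODEV ? 0 : 1;
    }
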
4389 static int parent_domain_set_dirty_tracking(struct dmar_domain *domain, in parent_domain_set_dirty_tracking()
4392 struct dmar_domain *s1_domain; in parent_domain_set_dirty_tracking()
4421 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_set_dirty_tracking() local
4424 spin_lock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4425 if (dmar_domain->dirty_tracking == enable) in intel_iommu_set_dirty_tracking()
4428 ret = device_set_dirty_tracking(&dmar_domain->devices, enable); in intel_iommu_set_dirty_tracking()
4432 if (dmar_domain->nested_parent) { in intel_iommu_set_dirty_tracking()
4433 ret = parent_domain_set_dirty_tracking(dmar_domain, enable); in intel_iommu_set_dirty_tracking()
4438 dmar_domain->dirty_tracking = enable; in intel_iommu_set_dirty_tracking()
4440 spin_unlock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4445 device_set_dirty_tracking(&dmar_domain->devices, in intel_iommu_set_dirty_tracking()
4446 dmar_domain->dirty_tracking); in intel_iommu_set_dirty_tracking()
4447 spin_unlock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
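
intel_iommu_set_dirty_tracking is all-or-nothing: under the domain lock it toggles tracking on every attached device and, for a nesting parent, on every first-stage child domain; the flag at 4438 is committed only if everything succeeded, and the error path at 4445-4446 restores the devices to the previous setting. A sketch of that toggle-with-rollback:

    #include <stdbool.h>
    #include <errno.h>

    #define NDEV 3

    struct toy_domain {
        bool dirty_tracking;
        bool dev_enabled[NDEV];
    };

    /* Pretend device 2 rejects the change, to exercise the rollback. */
    static int device_set(struct toy_domain *d, int i, bool enable)
    {
        if (i == 2 && enable)
            return -EOPNOTSUPP;
        d->dev_enabled[i] = enable;
        return 0;
    }

    static int set_dirty_tracking(struct toy_domain *d, bool enable)
    {
        int i, ret = 0;

        if (d->dirty_tracking == enable)
            return 0;                    /* nothing to do */

        for (i = 0; i < NDEV; i++) {
            ret = device_set(d, i, enable);
            if (ret)
                goto rollback;
        }
        d->dirty_tracking = enable;      /* commit only after all succeed */
        return 0;

    rollback:
        while (i--)                      /* restore the previous state */
            device_set(d, i, d->dirty_tracking);
        return ret;
    }

    int main(void)
    {
        struct toy_domain d = {0};
        return set_dirty_tracking(&d, true) == -EOPNOTSUPP ? 0 : 1;
    }
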
4456 struct dmar_domain *dmar_domain = to_dmar_domain(domain); in intel_iommu_read_and_clear_dirty() local
4466 if (!dmar_domain->dirty_tracking && dirty->bitmap) in intel_iommu_read_and_clear_dirty()
4473 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &lvl, in intel_iommu_read_and_clear_dirty()
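
intel_iommu_read_and_clear_dirty walks the IOVA range with pfn_to_dma_pte, test-and-clears the dirty bit in each leaf PTE, and records writers in the caller's bitmap; it bails out at 4466 if a bitmap is supplied while tracking is disabled. A minimal model of the test-and-clear scan over a flat array of leaf entries (the dirty-bit position is illustrative, not necessarily VT-d's layout):

    #include <stdatomic.h>
    #include <stdio.h>

    #define PTE_DIRTY (1u << 9) /* illustrative bit position */
    #define NPTES     8

    /*
     * Scan [start, end) leaf entries; atomically clear the dirty bit and
     * record which pages were written since the last scan.
     */
    static unsigned int read_and_clear_dirty(_Atomic unsigned int *ptes,
                                             int start, int end)
    {
        unsigned int dirty_mask = 0;

        for (int i = start; i < end; i++) {
            unsigned int old = atomic_fetch_and(&ptes[i], ~PTE_DIRTY);

            if (old & PTE_DIRTY)
                dirty_mask |= 1u << i; /* would set the caller's bitmap */
        }
        return dirty_mask;
    }

    int main(void)
    {
        _Atomic unsigned int ptes[NPTES] = {0};

        ptes[1] = PTE_DIRTY;
        ptes[5] = PTE_DIRTY;
        printf("dirty=0x%x\n", read_and_clear_dirty(ptes, 0, NPTES));
        printf("dirty=0x%x\n", read_and_clear_dirty(ptes, 0, NPTES));
        return 0;
    }

The second scan prints 0: the clear is part of the read, so each write is reported exactly once per tracking round.
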