Lines Matching refs:smmu_domain

86 static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
1353 struct arm_smmu_domain *smmu_domain) in arm_smmu_make_s1_cd() argument
1355 struct arm_smmu_ctx_desc *cd = &smmu_domain->cd; in arm_smmu_make_s1_cd()
1357 &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg; in arm_smmu_make_s1_cd()
1638 struct arm_smmu_domain *smmu_domain, in arm_smmu_make_s2_domain_ste() argument
1641 struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg; in arm_smmu_make_s2_domain_ste()
1643 &io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops)->cfg; in arm_smmu_make_s2_domain_ste()
2067 int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, in arm_smmu_atc_inv_domain() argument
2078 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) in arm_smmu_atc_inv_domain()
2095 if (!atomic_read(&smmu_domain->nr_ats_masters)) in arm_smmu_atc_inv_domain()
2098 arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds, &cmd); in arm_smmu_atc_inv_domain()
2100 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_atc_inv_domain()
2101 list_for_each_entry(master_domain, &smmu_domain->devices, in arm_smmu_atc_inv_domain()
2112 arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd); in arm_smmu_atc_inv_domain()
2115 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_atc_inv_domain()
2117 return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds); in arm_smmu_atc_inv_domain()
2123 struct arm_smmu_domain *smmu_domain = cookie; in arm_smmu_tlb_inv_context() local
2124 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context()
2134 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_tlb_inv_context()
2135 arm_smmu_tlb_inv_asid(smmu, smmu_domain->cd.asid); in arm_smmu_tlb_inv_context()
2138 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; in arm_smmu_tlb_inv_context()
2141 arm_smmu_atc_inv_domain(smmu_domain, 0, 0); in arm_smmu_tlb_inv_context()
2147 struct arm_smmu_domain *smmu_domain) in __arm_smmu_tlb_inv_range() argument
2149 struct arm_smmu_device *smmu = smmu_domain->smmu; in __arm_smmu_tlb_inv_range()
2159 tg = __ffs(smmu_domain->domain.pgsize_bitmap); in __arm_smmu_tlb_inv_range()
2217 struct arm_smmu_domain *smmu_domain) in arm_smmu_tlb_inv_range_domain() argument
2225 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_tlb_inv_range_domain()
2226 cmd.opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_range_domain()
2228 cmd.tlbi.asid = smmu_domain->cd.asid; in arm_smmu_tlb_inv_range_domain()
2231 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; in arm_smmu_tlb_inv_range_domain()
2233 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); in arm_smmu_tlb_inv_range_domain()
2239 arm_smmu_atc_inv_domain(smmu_domain, iova, size); in arm_smmu_tlb_inv_range_domain()
2244 struct arm_smmu_domain *smmu_domain) in arm_smmu_tlb_inv_range_asid() argument
2247 .opcode = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ? in arm_smmu_tlb_inv_range_asid()
2255 __arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain); in arm_smmu_tlb_inv_range_asid()
2262 struct arm_smmu_domain *smmu_domain = cookie; in arm_smmu_tlb_inv_page_nosync() local
2263 struct iommu_domain *domain = &smmu_domain->domain; in arm_smmu_tlb_inv_page_nosync()
2308 struct arm_smmu_domain *smmu_domain; in arm_smmu_domain_alloc() local
2310 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); in arm_smmu_domain_alloc()
2311 if (!smmu_domain) in arm_smmu_domain_alloc()
2314 mutex_init(&smmu_domain->init_mutex); in arm_smmu_domain_alloc()
2315 INIT_LIST_HEAD(&smmu_domain->devices); in arm_smmu_domain_alloc()
2316 spin_lock_init(&smmu_domain->devices_lock); in arm_smmu_domain_alloc()
2318 return smmu_domain; in arm_smmu_domain_alloc()
2323 struct arm_smmu_domain *smmu_domain; in arm_smmu_domain_alloc_paging() local
2330 smmu_domain = arm_smmu_domain_alloc(); in arm_smmu_domain_alloc_paging()
2331 if (IS_ERR(smmu_domain)) in arm_smmu_domain_alloc_paging()
2332 return ERR_CAST(smmu_domain); in arm_smmu_domain_alloc_paging()
2338 ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, 0); in arm_smmu_domain_alloc_paging()
2340 kfree(smmu_domain); in arm_smmu_domain_alloc_paging()
2344 return &smmu_domain->domain; in arm_smmu_domain_alloc_paging()
2349 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_domain_free_paging() local
2350 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_free_paging()
2352 free_io_pgtable_ops(smmu_domain->pgtbl_ops); in arm_smmu_domain_free_paging()
2355 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_domain_free_paging()
2358 xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid); in arm_smmu_domain_free_paging()
2361 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; in arm_smmu_domain_free_paging()
2366 kfree(smmu_domain); in arm_smmu_domain_free_paging()
2370 struct arm_smmu_domain *smmu_domain) in arm_smmu_domain_finalise_s1() argument
2374 struct arm_smmu_ctx_desc *cd = &smmu_domain->cd; in arm_smmu_domain_finalise_s1()
2378 ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain, in arm_smmu_domain_finalise_s1()
2386 struct arm_smmu_domain *smmu_domain) in arm_smmu_domain_finalise_s2() argument
2389 struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg; in arm_smmu_domain_finalise_s2()
2401 static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain, in arm_smmu_domain_finalise() argument
2409 struct arm_smmu_domain *smmu_domain); in arm_smmu_domain_finalise()
2414 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_domain_finalise()
2416 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_domain_finalise()
2425 switch (smmu_domain->stage) { in arm_smmu_domain_finalise()
2450 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); in arm_smmu_domain_finalise()
2454 smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; in arm_smmu_domain_finalise()
2455 smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1; in arm_smmu_domain_finalise()
2456 smmu_domain->domain.geometry.force_aperture = true; in arm_smmu_domain_finalise()
2457 if (enable_dirty && smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_domain_finalise()
2458 smmu_domain->domain.dirty_ops = &arm_smmu_dirty_ops; in arm_smmu_domain_finalise()
2460 ret = finalise_stage_fn(smmu, smmu_domain); in arm_smmu_domain_finalise()
2466 smmu_domain->pgtbl_ops = pgtbl_ops; in arm_smmu_domain_finalise()
2467 smmu_domain->smmu = smmu; in arm_smmu_domain_finalise()
2596 arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain, in arm_smmu_find_master_domain() argument
2602 lockdep_assert_held(&smmu_domain->devices_lock); in arm_smmu_find_master_domain()
2604 list_for_each_entry(master_domain, &smmu_domain->devices, in arm_smmu_find_master_domain()
2634 struct arm_smmu_domain *smmu_domain = to_smmu_domain_devices(domain); in arm_smmu_remove_master_domain() local
2638 if (!smmu_domain) in arm_smmu_remove_master_domain()
2641 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_remove_master_domain()
2642 master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid); in arm_smmu_remove_master_domain()
2647 atomic_dec(&smmu_domain->nr_ats_masters); in arm_smmu_remove_master_domain()
2649 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_remove_master_domain()
2687 struct arm_smmu_domain *smmu_domain = in arm_smmu_attach_prepare() local
2698 if (smmu_domain || state->cd_needs_ats) { in arm_smmu_attach_prepare()
2712 if (smmu_domain) { in arm_smmu_attach_prepare()
2733 spin_lock_irqsave(&smmu_domain->devices_lock, flags); in arm_smmu_attach_prepare()
2735 atomic_inc(&smmu_domain->nr_ats_masters); in arm_smmu_attach_prepare()
2736 list_add(&master_domain->devices_elm, &smmu_domain->devices); in arm_smmu_attach_prepare()
2737 spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); in arm_smmu_attach_prepare()
2787 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_attach_dev() local
2801 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_attach_dev()
2803 if (!smmu_domain->smmu) { in arm_smmu_attach_dev()
2804 ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0); in arm_smmu_attach_dev()
2805 } else if (smmu_domain->smmu != smmu) in arm_smmu_attach_dev()
2808 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_attach_dev()
2812 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { in arm_smmu_attach_dev()
2833 switch (smmu_domain->stage) { in arm_smmu_attach_dev()
2837 arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); in arm_smmu_attach_dev()
2846 arm_smmu_make_s2_domain_ste(&target, master, smmu_domain, in arm_smmu_attach_dev()
2861 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_s1_set_dev_pasid() local
2867 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_s1_set_dev_pasid()
2868 if (!smmu_domain->smmu) in arm_smmu_s1_set_dev_pasid()
2869 ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0); in arm_smmu_s1_set_dev_pasid()
2870 else if (smmu_domain->smmu != smmu) in arm_smmu_s1_set_dev_pasid()
2872 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_s1_set_dev_pasid()
2876 if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) in arm_smmu_s1_set_dev_pasid()
2883 arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); in arm_smmu_s1_set_dev_pasid()
2914 struct arm_smmu_domain *smmu_domain, ioasid_t pasid, in arm_smmu_set_pasid() argument
2931 if (smmu_domain->smmu != master->smmu) in arm_smmu_set_pasid()
2944 ret = arm_smmu_attach_prepare(&state, &smmu_domain->domain); in arm_smmu_set_pasid()
2954 FIELD_PREP(CTXDESC_CD_0_ASID, smmu_domain->cd.asid)); in arm_smmu_set_pasid()
2970 struct arm_smmu_domain *smmu_domain; in arm_smmu_remove_dev_pasid() local
2972 smmu_domain = to_smmu_domain(domain); in arm_smmu_remove_dev_pasid()
2978 arm_smmu_remove_master_domain(master, &smmu_domain->domain, pasid); in arm_smmu_remove_dev_pasid()
3088 struct arm_smmu_domain *smmu_domain; in arm_smmu_domain_alloc_user() local
3096 smmu_domain = arm_smmu_domain_alloc(); in arm_smmu_domain_alloc_user()
3097 if (IS_ERR(smmu_domain)) in arm_smmu_domain_alloc_user()
3098 return ERR_CAST(smmu_domain); in arm_smmu_domain_alloc_user()
3100 smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED; in arm_smmu_domain_alloc_user()
3101 smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops; in arm_smmu_domain_alloc_user()
3102 ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, flags); in arm_smmu_domain_alloc_user()
3105 return &smmu_domain->domain; in arm_smmu_domain_alloc_user()
3108 kfree(smmu_domain); in arm_smmu_domain_alloc_user()
3128 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_unmap_pages() local
3129 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_unmap_pages()
3139 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_flush_iotlb_all() local
3141 if (smmu_domain->smmu) in arm_smmu_flush_iotlb_all()
3142 arm_smmu_tlb_inv_context(smmu_domain); in arm_smmu_flush_iotlb_all()
3148 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_iotlb_sync() local
3155 gather->pgsize, true, smmu_domain); in arm_smmu_iotlb_sync()
3348 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_read_and_clear_dirty() local
3349 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_read_and_clear_dirty()
3383 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_enable_nesting() local
3386 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
3387 if (smmu_domain->smmu) in arm_smmu_enable_nesting()
3390 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_enable_nesting()
3391 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
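
Taken together, the references above trace the layout of struct arm_smmu_domain as the arm-smmu-v3 driver uses it: a back-pointer to the owning arm_smmu_device, per-stage translation state (cd for stage 1, s2_cfg for stage 2), the io-pgtable handle, the embedded core iommu_domain, and the list of attached masters guarded by devices_lock. The sketch below is reconstructed only from the fields referenced in this listing; the exact types, field ordering, and anything not referenced here are assumptions, not the authoritative definition in the driver's header.

/*
 * Sketch of struct arm_smmu_domain, reconstructed from the references
 * above. Not the authoritative header definition: ordering and any
 * unreferenced fields are guesses, and cd/s2_cfg may share storage
 * (a union) in the real layout since only one is used per stage.
 */
struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;		/* owning SMMU, set at finalise time (line 2467) */
	struct mutex			init_mutex;	/* serialises finalise against attach (lines 2314, 2801) */

	struct io_pgtable_ops		*pgtbl_ops;	/* io-pgtable handle (lines 2466, 3129) */
	atomic_t			nr_ats_masters;	/* count of ATS-enabled masters (lines 2095, 2735) */

	enum arm_smmu_domain_stage	stage;		/* ARM_SMMU_DOMAIN_S1 or _S2 (lines 2414, 2416) */
	struct arm_smmu_ctx_desc	cd;		/* stage-1 state: ASID etc. (lines 1355, 2228) */
	struct arm_smmu_s2_cfg		s2_cfg;		/* stage-2 state: VMID etc. (lines 1641, 2231) */

	struct iommu_domain		domain;		/* embedded core-IOMMU domain (lines 2263, 2344) */

	struct list_head		devices;	/* attached arm_smmu_master_domain entries (lines 2315, 2736) */
	spinlock_t			devices_lock;	/* protects the devices list (lines 2316, 2100) */
};

The same hits also show the locking pattern around that last pair of fields: arm_smmu_atc_inv_domain() (lines 2095 to 2115) checks nr_ats_masters so it can return early when no attached master has ATS enabled, then walks the devices list under devices_lock to build the ATC invalidation batch, while arm_smmu_attach_prepare() (lines 2733 to 2737) adds to the list and bumps the counter under the same lock.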