Lines matching the full identifier "as" in the Tegra SMMU driver (drivers/iommu/tegra-smmu.c). Each hit shows the source line number, the matching code, the enclosing function, and whether the match is a local variable or a function argument.

279 struct tegra_smmu_as *as; in tegra_smmu_domain_alloc_paging() local
281 as = kzalloc(sizeof(*as), GFP_KERNEL); in tegra_smmu_domain_alloc_paging()
282 if (!as) in tegra_smmu_domain_alloc_paging()
285 as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; in tegra_smmu_domain_alloc_paging()
287 as->pd = __iommu_alloc_pages(GFP_KERNEL | __GFP_DMA, 0); in tegra_smmu_domain_alloc_paging()
288 if (!as->pd) { in tegra_smmu_domain_alloc_paging()
289 kfree(as); in tegra_smmu_domain_alloc_paging()
293 as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL); in tegra_smmu_domain_alloc_paging()
294 if (!as->count) { in tegra_smmu_domain_alloc_paging()
295 __iommu_free_pages(as->pd, 0); in tegra_smmu_domain_alloc_paging()
296 kfree(as); in tegra_smmu_domain_alloc_paging()
300 as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL); in tegra_smmu_domain_alloc_paging()
301 if (!as->pts) { in tegra_smmu_domain_alloc_paging()
302 kfree(as->count); in tegra_smmu_domain_alloc_paging()
303 __iommu_free_pages(as->pd, 0); in tegra_smmu_domain_alloc_paging()
304 kfree(as); in tegra_smmu_domain_alloc_paging()
308 spin_lock_init(&as->lock); in tegra_smmu_domain_alloc_paging()
311 as->domain.geometry.aperture_start = 0; in tegra_smmu_domain_alloc_paging()
312 as->domain.geometry.aperture_end = 0xffffffff; in tegra_smmu_domain_alloc_paging()
313 as->domain.geometry.force_aperture = true; in tegra_smmu_domain_alloc_paging()
315 return &as->domain; in tegra_smmu_domain_alloc_paging()
320 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_domain_free() local
324 WARN_ON_ONCE(as->use_count); in tegra_smmu_domain_free()
325 kfree(as->count); in tegra_smmu_domain_free()
326 kfree(as->pts); in tegra_smmu_domain_free()
327 kfree(as); in tegra_smmu_domain_free()
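The tegra_smmu_domain_alloc_paging() hits above show a staged setup: allocate the address-space descriptor, the page directory, the per-PDE use-count array, and the PTE-page pointer array, undoing everything already allocated when a later step fails; the tegra_smmu_domain_free() hits mirror that teardown. The driver open-codes the frees at each failure site; below is a minimal user-space sketch of the same unwind using the more common goto-label idiom (the struct fields and NUM_PDE are illustrative stand-ins, not the driver's definitions):

#include <stdlib.h>

#define NUM_PDE 1024 /* stand-in for SMMU_NUM_PDE */

struct as_sketch {
	void *pd;        /* page directory */
	unsigned *count; /* per-PDE use counts */
	void **pts;      /* per-PDE PTE-page pointers */
};

/* Allocate every resource up front; on failure, free in reverse order. */
static struct as_sketch *as_alloc(void)
{
	struct as_sketch *as = calloc(1, sizeof(*as));

	if (!as)
		return NULL;

	as->pd = calloc(1, 4096);
	if (!as->pd)
		goto free_as;

	as->count = calloc(NUM_PDE, sizeof(*as->count));
	if (!as->count)
		goto free_pd;

	as->pts = calloc(NUM_PDE, sizeof(*as->pts));
	if (!as->pts)
		goto free_count;

	return as;

free_count:
	free(as->count);
free_pd:
	free(as->pd);
free_as:
	free(as);
	return NULL;
}

/* Teardown mirrors the allocation order. */
static void as_free(struct as_sketch *as)
{
	free(as->pts);
	free(as->count);
	free(as->pd);
	free(as);
}

Freeing in strict reverse order of allocation keeps every failure path correct without extra conditionals.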
408 struct tegra_smmu_as *as) in tegra_smmu_as_prepare() argument
415 if (as->use_count > 0) { in tegra_smmu_as_prepare()
416 as->use_count++; in tegra_smmu_as_prepare()
420 as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD, in tegra_smmu_as_prepare()
422 if (dma_mapping_error(smmu->dev, as->pd_dma)) { in tegra_smmu_as_prepare()
428 if (!smmu_dma_addr_valid(smmu, as->pd_dma)) { in tegra_smmu_as_prepare()
433 err = tegra_smmu_alloc_asid(smmu, &as->id); in tegra_smmu_as_prepare()
437 smmu_flush_ptc(smmu, as->pd_dma, 0); in tegra_smmu_as_prepare()
438 smmu_flush_tlb_asid(smmu, as->id); in tegra_smmu_as_prepare()
440 smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID); in tegra_smmu_as_prepare()
441 value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr); in tegra_smmu_as_prepare()
445 as->smmu = smmu; in tegra_smmu_as_prepare()
446 as->use_count++; in tegra_smmu_as_prepare()
453 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); in tegra_smmu_as_prepare()
461 struct tegra_smmu_as *as) in tegra_smmu_as_unprepare() argument
465 if (--as->use_count > 0) { in tegra_smmu_as_unprepare()
470 tegra_smmu_free_asid(smmu, as->id); in tegra_smmu_as_unprepare()
472 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); in tegra_smmu_as_unprepare()
474 as->smmu = NULL; in tegra_smmu_as_unprepare()
484 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_attach_dev() local
492 err = tegra_smmu_as_prepare(smmu, as); in tegra_smmu_attach_dev()
496 tegra_smmu_enable(smmu, fwspec->ids[index], as->id); in tegra_smmu_attach_dev()
506 tegra_smmu_disable(smmu, fwspec->ids[index], as->id); in tegra_smmu_attach_dev()
507 tegra_smmu_as_unprepare(smmu, as); in tegra_smmu_attach_dev()
518 struct tegra_smmu_as *as; in tegra_smmu_identity_attach() local
528 as = to_smmu_as(domain); in tegra_smmu_identity_attach()
529 smmu = as->smmu; in tegra_smmu_identity_attach()
531 tegra_smmu_disable(smmu, fwspec->ids[index], as->id); in tegra_smmu_identity_attach()
532 tegra_smmu_as_unprepare(smmu, as); in tegra_smmu_identity_attach()
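tegra_smmu_attach_dev() walks the device's stream IDs, preparing the address space and enabling each ID in turn; on failure it disables only the IDs that were already enabled, in reverse order (the rollback implied by lines 506-507). A self-contained sketch of that partial-rollback loop (enable_id(), disable_id(), and the failing ID are invented for illustration):

#include <stdio.h>

static int enable_id(unsigned id)
{
	printf("enable %u\n", id);
	return id == 3 ? -1 : 0; /* inject a failure for the demo */
}

static void disable_id(unsigned id)
{
	printf("disable %u\n", id);
}

static int attach_ids(const unsigned *ids, unsigned count)
{
	unsigned index;
	int err;

	for (index = 0; index < count; index++) {
		err = enable_id(ids[index]);
		if (err)
			goto disable;
	}

	return 0;

disable:
	/* Unwind only the IDs that were successfully enabled. */
	while (index--)
		disable_id(ids[index]);

	return err;
}

int main(void)
{
	const unsigned ids[] = { 1, 2, 3, 4 };

	return attach_ids(ids, 4) ? 1 : 0;
}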
546 static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova, in tegra_smmu_set_pde() argument
550 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_set_pde()
551 u32 *pd = page_address(as->pd); in tegra_smmu_set_pde()
558 dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset, in tegra_smmu_set_pde()
562 smmu_flush_ptc(smmu, as->pd_dma, offset); in tegra_smmu_set_pde()
563 smmu_flush_tlb_section(smmu, as->id, iova); in tegra_smmu_set_pde()
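tegra_smmu_set_pde() is careful about ordering: write the PDE in memory, sync that range out to the device (line 558), then invalidate the page-table cache and the TLB section (lines 562-563), so the SMMU can never refetch a stale descriptor after the flushes complete. A stub-based sketch of the sequence (all three helpers are printf stand-ins for the driver's primitives, not real APIs):

#include <stdio.h>
#include <stdint.h>

static void sync_for_device(unsigned offset)
{
	printf("sync PD bytes at offset %u\n", offset);
}

static void flush_ptc(unsigned offset)
{
	printf("flush PTC entry at offset %u\n", offset);
}

static void flush_tlb_section(uint32_t iova)
{
	printf("flush TLB section for iova %#x\n", (unsigned)iova);
}

/* Order matters: make the new PDE visible to the device before
 * invalidating the caches that might still hold the stale one. */
static void set_pde(uint32_t *pd, unsigned pde, uint32_t value, uint32_t iova)
{
	pd[pde] = value;                              /* 1. update the in-memory PDE   */
	sync_for_device((unsigned)(pde * sizeof(*pd))); /* 2. write the cache line back  */
	flush_ptc((unsigned)(pde * sizeof(*pd)));       /* 3. drop the stale PTC entry   */
	flush_tlb_section(iova);                      /* 4. drop the stale translation */
}

int main(void)
{
	uint32_t pd[1024] = { 0 };

	set_pde(pd, 1, 0xdeadbeefu, 1u << 22); /* values are illustrative */
	return 0;
}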
574 static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, in tegra_smmu_pte_lookup() argument
578 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_pte_lookup()
582 pt_page = as->pts[pd_index]; in tegra_smmu_pte_lookup()
586 pd = page_address(as->pd); in tegra_smmu_pte_lookup()
592 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, in as_get_pte() argument
596 struct tegra_smmu *smmu = as->smmu; in as_get_pte()
598 if (!as->pts[pde]) { in as_get_pte()
615 as->pts[pde] = page; in as_get_pte()
617 tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR | in as_get_pte()
622 u32 *pd = page_address(as->pd); in as_get_pte()
627 return tegra_smmu_pte_offset(as->pts[pde], iova); in as_get_pte()
630 static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova) in tegra_smmu_pte_get_use() argument
634 as->count[pd_index]++; in tegra_smmu_pte_get_use()
637 static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) in tegra_smmu_pte_put_use() argument
640 struct page *page = as->pts[pde]; in tegra_smmu_pte_put_use()
646 if (--as->count[pde] == 0) { in tegra_smmu_pte_put_use()
647 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_pte_put_use()
648 u32 *pd = page_address(as->pd); in tegra_smmu_pte_put_use()
651 tegra_smmu_set_pde(as, iova, 0); in tegra_smmu_pte_put_use()
655 as->pts[pde] = NULL; in tegra_smmu_pte_put_use()
659 static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova, in tegra_smmu_set_pte() argument
662 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_set_pte()
670 smmu_flush_tlb_group(smmu, as->id, iova); in tegra_smmu_set_pte()
674 static struct page *as_get_pde_page(struct tegra_smmu_as *as, in as_get_pde_page() argument
679 struct page *page = as->pts[pde]; in as_get_pde_page()
691 spin_unlock_irqrestore(&as->lock, *flags); in as_get_pde_page()
696 spin_lock_irqsave(&as->lock, *flags); in as_get_pde_page()
703 if (as->pts[pde]) { in as_get_pde_page()
707 page = as->pts[pde]; in as_get_pde_page()
718 struct tegra_smmu_as *as = to_smmu_as(domain); in __tegra_smmu_map() local
724 page = as_get_pde_page(as, iova, gfp, flags); in __tegra_smmu_map()
728 pte = as_get_pte(as, iova, &pte_dma, page); in __tegra_smmu_map()
734 tegra_smmu_pte_get_use(as, iova); in __tegra_smmu_map()
744 tegra_smmu_set_pte(as, iova, pte, pte_dma, in __tegra_smmu_map()
754 struct tegra_smmu_as *as = to_smmu_as(domain); in __tegra_smmu_unmap() local
758 pte = tegra_smmu_pte_lookup(as, iova, &pte_dma); in __tegra_smmu_unmap()
762 tegra_smmu_set_pte(as, iova, pte, pte_dma, 0); in __tegra_smmu_unmap()
763 tegra_smmu_pte_put_use(as, iova); in __tegra_smmu_unmap()
772 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_map() local
776 spin_lock_irqsave(&as->lock, flags); in tegra_smmu_map()
778 spin_unlock_irqrestore(&as->lock, flags); in tegra_smmu_map()
789 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_unmap() local
792 spin_lock_irqsave(&as->lock, flags); in tegra_smmu_unmap()
794 spin_unlock_irqrestore(&as->lock, flags); in tegra_smmu_unmap()
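__tegra_smmu_map() and __tegra_smmu_unmap() do the real work (get the PDE page, look up or install the PTE, adjust the per-slot use count, write the PTE), while tegra_smmu_map() and tegra_smmu_unmap() are thin wrappers that take as->lock with interrupts disabled around them. A sketch of that wrapper split, again with a pthread mutex standing in for spin_lock_irqsave() and the workers' bodies reduced to placeholders (in user C the reserved __ prefix is avoided, hence the _locked suffix):

#include <pthread.h>

static pthread_mutex_t as_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlocked workers: callers must hold as_lock. */
static int map_locked(unsigned long iova)   { (void)iova; return 0; }
static int unmap_locked(unsigned long iova) { (void)iova; return 0; }

/* Public entry points only add the locking. */
static int map(unsigned long iova)
{
	int ret;

	pthread_mutex_lock(&as_lock);
	ret = map_locked(iova);
	pthread_mutex_unlock(&as_lock);

	return ret;
}

static int unmap(unsigned long iova)
{
	int ret;

	pthread_mutex_lock(&as_lock);
	ret = unmap_locked(iova);
	pthread_mutex_unlock(&as_lock);

	return ret;
}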
802 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_iova_to_phys() local
807 pte = tegra_smmu_pte_lookup(as, iova, &pte_dma); in tegra_smmu_iova_to_phys()
811 pfn = *pte & as->smmu->pfn_mask; in tegra_smmu_iova_to_phys()
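tegra_smmu_iova_to_phys() recovers the physical address from the PTE: mask the PFN out of the entry with the SoC's pfn_mask (line 811), shift it back up to a page address, and add the offset within the page. A tiny arithmetic sketch (the mask and shift values are illustrative, not the Tegra register layout):

#include <stdint.h>
#include <stdio.h>

#define PFN_MASK   0x000fffffu
#define PAGE_SHIFT 12
#define PAGE_MASK  ((1u << PAGE_SHIFT) - 1)

/* phys = (pte & pfn_mask) << page_shift | (iova & page_mask) */
static uint64_t iova_to_phys(uint32_t pte, uint32_t iova)
{
	uint64_t pfn = pte & PFN_MASK;

	return (pfn << PAGE_SHIFT) | (iova & PAGE_MASK);
}

int main(void)
{
	/* 0x00012345 -> page 0x12345; offset 0xabc -> 0x12345abc */
	printf("%#llx\n",
	       (unsigned long long)iova_to_phys(0x00012345, 0x00400abc));
	return 0;
}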
974 * the SMMU parent device is the same as the MC, so the reference count in tegra_smmu_of_xlate()