Lines Matching +full:protection +full:- +full:domain

1 // SPDX-License-Identifier: GPL-2.0-only
12 #include <linux/dma-mapping.h>
25 #include "iommu-pages.h"
40 #define SECT_MASK (~(SECT_SIZE - 1))
41 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
42 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
57 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
64 static short PG_ENT_SHIFT = -1;
100 #define section_offs(iova) (iova & (SECT_SIZE - 1))
102 #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
104 #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
116 return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); in lv2ent_offset()
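
These matches appear to come from the Exynos SysMMU driver (drivers/iommu/exynos-iommu.c in the Linux tree). The mask and offset macros above carve a 32-bit IOVA into a 1 MiB section, a 64 KiB large page, or a 4 KiB small page, and lv2ent_offset() picks the slot inside a section's second-level table. A minimal standalone sketch of that arithmetic; the shift values (20/16/12) and the 256-entry second level are assumptions consistent with the macros shown, not quoted from this listing:

#include <stdint.h>
#include <stdio.h>

#define SECT_ORDER	20		/* assumed: 1 MiB section */
#define LPAGE_ORDER	16		/* assumed: 64 KiB large page */
#define SPAGE_ORDER	12		/* assumed: 4 KiB small page */

#define SECT_SIZE	(1u << SECT_ORDER)
#define LPAGE_SIZE	(1u << LPAGE_ORDER)
#define SPAGE_SIZE	(1u << SPAGE_ORDER)
#define NUM_LV2ENTRIES	(SECT_SIZE / SPAGE_SIZE)	/* 256 slots per section */

int main(void)
{
	uint32_t iova = 0x12345678;

	printf("lv1 index:      %u\n", iova >> SECT_ORDER);
	printf("lv2 index:      %u\n", (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1));
	printf("section offset: %#x\n", iova & (SECT_SIZE - 1));
	printf("lpage offset:   %#x\n", iova & (LPAGE_SIZE - 1));
	printf("spage offset:   %#x\n", iova & (SPAGE_SIZE - 1));
	return 0;
}

Compiled on its own this prints the first-level index (one slot per 1 MiB), the second-level index (one slot per 4 KiB within the section), and the three possible in-page offsets for the same IOVA.
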
154 #define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
158 /* v1.x - v3.x registers */
205 { REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
206 { REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
208 { REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
209 { REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
210 { REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
211 { REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
218 "MULTI-HIT",
219 "ACCESS PROTECTION",
220 "SECURITY PROTECTION"
226 "ACCESS PROTECTION",
231 * This structure is attached to dev->iommu->priv of the master device
238 struct iommu_domain *domain; /* domain this device is attached */ member
245 * been attached to this domain and page tables of IO address space defined by
246 * it. It is usually referenced by 'domain' pointer.
254 struct iommu_domain domain; /* generic domain data structure */ member
297 struct exynos_iommu_domain *domain; /* domain we belong to */ member
298 struct list_head domain_node; /* node for domain clients list */
310 #define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
319 return -ENXIO; in exynos_sysmmu_v1_get_fault_info()
322 fault->addr = readl(data->sfrbase + finfo->addr_reg); in exynos_sysmmu_v1_get_fault_info()
323 fault->name = finfo->name; in exynos_sysmmu_v1_get_fault_info()
324 fault->type = finfo->type; in exynos_sysmmu_v1_get_fault_info()
336 fault->type = IOMMU_FAULT_READ; in exynos_sysmmu_v5_get_fault_info()
339 fault->type = IOMMU_FAULT_WRITE; in exynos_sysmmu_v5_get_fault_info()
341 itype -= 16; in exynos_sysmmu_v5_get_fault_info()
343 return -ENXIO; in exynos_sysmmu_v5_get_fault_info()
346 fault->name = sysmmu_v5_fault_names[itype]; in exynos_sysmmu_v5_get_fault_info()
347 fault->addr = readl(data->sfrbase + addr_reg); in exynos_sysmmu_v5_get_fault_info()
358 fault->addr = readl(SYSMMU_REG(data, fault_va)); in exynos_sysmmu_v7_get_fault_info()
359 fault->name = sysmmu_v7_fault_names[itype % 4]; in exynos_sysmmu_v7_get_fault_info()
360 fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ; in exynos_sysmmu_v7_get_fault_info()
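
From v5 on, the direction is encoded in the interrupt bit position rather than in a table: low bit indices are read faults, indices from 16 up are write faults folded back into the same name array (itype -= 16), and anything else is rejected with -ENXIO. A self-contained model of that decode; only the tail of the name array ("MULTI-HIT", "ACCESS PROTECTION", "SECURITY PROTECTION") is visible above, so the leading entries and the exact bit ranges are assumptions:

#include <errno.h>
#include <stdio.h>

enum fault_type { FAULT_READ, FAULT_WRITE };

static const char * const v5_fault_names[] = {
	"PTW ACCESS", "PAGE", "MULTI-HIT", "ACCESS PROTECTION", "SECURITY PROTECTION",
};
#define NUM_V5_FAULTS (sizeof(v5_fault_names) / sizeof(v5_fault_names[0]))

static int v5_decode_fault(unsigned int itype, const char **name, enum fault_type *type)
{
	if (itype < NUM_V5_FAULTS) {
		*type = FAULT_READ;	/* address would come from the AR fault-VA register */
	} else if (itype >= 16 && itype < 16 + NUM_V5_FAULTS) {
		*type = FAULT_WRITE;	/* address would come from the AW fault-VA register */
		itype -= 16;
	} else {
		return -ENXIO;		/* unknown interrupt bit */
	}
	*name = v5_fault_names[itype];
	return 0;
}

int main(void)
{
	const char *name;
	enum fault_type type;

	if (!v5_decode_fault(18, &name, &type))
		printf("%s %s fault\n", type == FAULT_WRITE ? "WRITE" : "READ", name);
	return 0;
}

The v7 variant (lines 358-360 above) goes one step further and reads the direction from a bit in a fault-info register instead of from the bit position.
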
390 /* SysMMU v7: non-VM capable register layout */
424 return container_of(dom, struct exynos_iommu_domain, domain); in to_exynos_domain()
429 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); in sysmmu_unblock()
436 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); in sysmmu_block()
437 while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1)) in sysmmu_block()
438 --i; in sysmmu_block()
440 if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) { in sysmmu_block()
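
sysmmu_block() writes CTRL_BLOCK and then busy-polls bit 0 of REG_MMU_STATUS for a bounded number of iterations, giving up (and re-enabling) if the hardware never reports the blocked state; sysmmu_unblock() simply rewrites CTRL_ENABLE. A userspace model of that bounded poll, with the iteration budget and the status-register behaviour stubbed out as assumptions:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for readl(data->sfrbase + REG_MMU_STATUS) & 1: pretend the
 * hardware reports "blocked" only after a few status reads. */
static int status_reads;

static bool mmu_status_blocked(void)
{
	return ++status_reads > 3;
}

static bool sysmmu_block(void)
{
	int i = 120;	/* iteration budget; the exact value is an assumption */

	/* writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL) would go here */
	while (i > 0 && !mmu_status_blocked())
		--i;

	if (!mmu_status_blocked()) {
		/* the driver re-enables here and reports the failure to the caller */
		return false;
	}
	return true;
}

int main(void)
{
	printf("blocked: %d\n", sysmmu_block());
	return 0;
}
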
458 if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) { in __sysmmu_tlb_invalidate_entry()
466 writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE, in __sysmmu_tlb_invalidate_entry()
476 if (MMU_MAJ_VER(data->version) < 5) in __sysmmu_set_ptbase()
487 BUG_ON(clk_prepare_enable(data->clk_master)); in __sysmmu_enable_clocks()
488 BUG_ON(clk_prepare_enable(data->clk)); in __sysmmu_enable_clocks()
489 BUG_ON(clk_prepare_enable(data->pclk)); in __sysmmu_enable_clocks()
490 BUG_ON(clk_prepare_enable(data->aclk)); in __sysmmu_enable_clocks()
495 clk_disable_unprepare(data->aclk); in __sysmmu_disable_clocks()
496 clk_disable_unprepare(data->pclk); in __sysmmu_disable_clocks()
497 clk_disable_unprepare(data->clk); in __sysmmu_disable_clocks()
498 clk_disable_unprepare(data->clk_master); in __sysmmu_disable_clocks()
503 u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0); in __sysmmu_has_capa1()
510 u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1); in __sysmmu_get_vcr()
512 data->has_vcr = capa1 & CAPA1_VCR_ENABLED; in __sysmmu_get_vcr()
521 ver = readl(data->sfrbase + REG_MMU_VERSION); in __sysmmu_get_version()
525 data->version = MAKE_MMU_VER(1, 0); in __sysmmu_get_version()
527 data->version = MMU_RAW_VER(ver); in __sysmmu_get_version()
529 dev_dbg(data->sysmmu, "hardware version: %d.%d\n", in __sysmmu_get_version()
530 MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version)); in __sysmmu_get_version()
532 if (MMU_MAJ_VER(data->version) < 5) { in __sysmmu_get_version()
533 data->variant = &sysmmu_v1_variant; in __sysmmu_get_version()
534 } else if (MMU_MAJ_VER(data->version) < 7) { in __sysmmu_get_version()
535 data->variant = &sysmmu_v5_variant; in __sysmmu_get_version()
539 if (data->has_vcr) in __sysmmu_get_version()
540 data->variant = &sysmmu_v7_vm_variant; in __sysmmu_get_version()
542 data->variant = &sysmmu_v7_variant; in __sysmmu_get_version()
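
__sysmmu_get_version() reads REG_MMU_VERSION once, extracts the 11-bit raw field with MMU_RAW_VER() (line 154 above), and that value then drives both feature checks (for example version >= 3.3 for the FLPD-cache invalidation) and the choice of register-layout variant. A standalone sketch of the arithmetic; the 4-bit major / 7-bit minor split behind MAKE_MMU_VER() is an assumption consistent with the comparisons in the listing:

#include <stdint.h>
#include <stdio.h>

#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1))	/* 11 bits */
/* Assumed composition/decomposition of the version value. */
#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7f)
#define MAKE_MMU_VER(maj, min)	((((maj) & 0xf) << 7) | ((min) & 0x7f))

static const char *pick_variant(unsigned int version, int has_vcr)
{
	if (MMU_MAJ_VER(version) < 5)
		return "sysmmu_v1_variant";
	if (MMU_MAJ_VER(version) < 7)
		return "sysmmu_v5_variant";
	return has_vcr ? "sysmmu_v7_vm_variant" : "sysmmu_v7_variant";
}

int main(void)
{
	uint32_t reg = 7u << (21 + 7);		/* fabricate "7.0" in the raw register */
	unsigned int version = MMU_RAW_VER(reg);

	printf("hardware version: %u.%u -> %s\n",
	       MMU_MAJ_VER(version), MMU_MIN_VER(version), pick_variant(version, 1));
	printf("at least 3.3: %d\n", version >= MAKE_MMU_VER(3, 3));
	return 0;
}
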
553 dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n", in show_fault_information()
554 dev_name(data->master), in show_fault_information()
555 fault->type == IOMMU_FAULT_READ ? "READ" : "WRITE", in show_fault_information()
556 fault->name, fault->addr); in show_fault_information()
557 dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable); in show_fault_information()
558 ent = section_entry(phys_to_virt(data->pgtable), fault->addr); in show_fault_information()
559 dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent); in show_fault_information()
561 ent = page_entry(ent, fault->addr); in show_fault_information()
562 dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); in show_fault_information()
571 int ret = -ENOSYS; in exynos_sysmmu_irq()
573 WARN_ON(!data->active); in exynos_sysmmu_irq()
575 spin_lock(&data->lock); in exynos_sysmmu_irq()
576 clk_enable(data->clk_master); in exynos_sysmmu_irq()
579 ret = data->variant->get_fault_info(data, itype, &fault); in exynos_sysmmu_irq()
581 dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype); in exynos_sysmmu_irq()
586 if (data->domain) { in exynos_sysmmu_irq()
587 ret = report_iommu_fault(&data->domain->domain, data->master, in exynos_sysmmu_irq()
598 clk_disable(data->clk_master); in exynos_sysmmu_irq()
599 spin_unlock(&data->lock); in exynos_sysmmu_irq()
608 clk_enable(data->clk_master); in __sysmmu_disable()
610 spin_lock_irqsave(&data->lock, flags); in __sysmmu_disable()
611 writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL); in __sysmmu_disable()
612 writel(0, data->sfrbase + REG_MMU_CFG); in __sysmmu_disable()
613 data->active = false; in __sysmmu_disable()
614 spin_unlock_irqrestore(&data->lock, flags); in __sysmmu_disable()
623 if (data->version <= MAKE_MMU_VER(3, 1)) in __sysmmu_init_config()
625 else if (data->version <= MAKE_MMU_VER(3, 2)) in __sysmmu_init_config()
630 cfg |= CFG_EAP; /* enable access protection bits check */ in __sysmmu_init_config()
632 writel(cfg, data->sfrbase + REG_MMU_CFG); in __sysmmu_init_config()
639 if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr) in __sysmmu_enable_vid()
642 ctrl = readl(data->sfrbase + REG_V7_CTRL_VM); in __sysmmu_enable_vid()
644 writel(ctrl, data->sfrbase + REG_V7_CTRL_VM); in __sysmmu_enable_vid()
653 spin_lock_irqsave(&data->lock, flags); in __sysmmu_enable()
654 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); in __sysmmu_enable()
656 __sysmmu_set_ptbase(data, data->pgtable); in __sysmmu_enable()
658 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); in __sysmmu_enable()
659 data->active = true; in __sysmmu_enable()
660 spin_unlock_irqrestore(&data->lock, flags); in __sysmmu_enable()
668 clk_disable(data->clk_master); in __sysmmu_enable()
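
__sysmmu_enable() programs the hardware in a fixed order: block translation, set the page-table base, write the configuration (CFG_EAP enables the access-protection check), set up the v7 VM control register when the VCR capability was detected, then write CTRL_ENABLE and mark the instance active. A sketch of that ordering against a simulated register file; the register indices and control values below are placeholders, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

enum { REG_MMU_CTRL, REG_MMU_CFG, REG_MMU_TTBR, NUM_REGS };
#define CTRL_BLOCK	0x7	/* placeholder values */
#define CTRL_ENABLE	0x5
#define CFG_EAP		(1u << 2)

struct sysmmu {
	uint32_t regs[NUM_REGS];	/* stands in for the sfrbase MMIO window */
	int active;
};

static void sysmmu_enable(struct sysmmu *s, uint32_t pgtable_pa)
{
	s->regs[REG_MMU_CTRL] = CTRL_BLOCK;	/* 1. block translation */
	s->regs[REG_MMU_TTBR] = pgtable_pa;	/* 2. program the page-table base */
	s->regs[REG_MMU_CFG] |= CFG_EAP;	/* 3. init config, protection check on */
	/* 4. a v7 with VCR would also program its VM control register here */
	s->regs[REG_MMU_CTRL] = CTRL_ENABLE;	/* 5. start translating */
	s->active = 1;
}

int main(void)
{
	struct sysmmu s = { 0 };

	sysmmu_enable(&s, 0x40000000);
	printf("ctrl=%#x cfg=%#x ttbr=%#x active=%d\n",
	       s.regs[REG_MMU_CTRL], s.regs[REG_MMU_CFG], s.regs[REG_MMU_TTBR], s.active);
	return 0;
}
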
676 spin_lock_irqsave(&data->lock, flags); in sysmmu_tlb_invalidate_flpdcache()
677 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { in sysmmu_tlb_invalidate_flpdcache()
678 clk_enable(data->clk_master); in sysmmu_tlb_invalidate_flpdcache()
680 if (data->version >= MAKE_MMU_VER(5, 0)) in sysmmu_tlb_invalidate_flpdcache()
686 clk_disable(data->clk_master); in sysmmu_tlb_invalidate_flpdcache()
688 spin_unlock_irqrestore(&data->lock, flags); in sysmmu_tlb_invalidate_flpdcache()
696 spin_lock_irqsave(&data->lock, flags); in sysmmu_tlb_invalidate_entry()
697 if (data->active) { in sysmmu_tlb_invalidate_entry()
700 clk_enable(data->clk_master); in sysmmu_tlb_invalidate_entry()
707 * because it is set-associative TLB in sysmmu_tlb_invalidate_entry()
708 * with 8-way and 64 sets. in sysmmu_tlb_invalidate_entry()
712 if (MMU_MAJ_VER(data->version) == 2) in sysmmu_tlb_invalidate_entry()
719 clk_disable(data->clk_master); in sysmmu_tlb_invalidate_entry()
721 spin_unlock_irqrestore(&data->lock, flags); in sysmmu_tlb_invalidate_entry()
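
The comment above explains the v2 quirk: the TLB is 8-way with 64 sets, so a single mapping can be cached in up to 64 slots, and the driver therefore issues one invalidation per 4 KiB of the unmapped range, capped at 64. v5 and later accept a start/end pair instead, which is what the (iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE write at line 466 computes. A small model of both calculations:

#include <stddef.h>
#include <stdio.h>

#define SPAGE_SIZE	4096u
#define SPAGE_MASK	(~(SPAGE_SIZE - 1))

/* v2: one invalidation per 4 KiB of the range, capped at the 64 TLB sets. */
static unsigned int num_invalidations(size_t size)
{
	size_t n = size / SPAGE_SIZE;

	if (n == 0)
		n = 1;
	return n > 64 ? 64 : (unsigned int)n;
}

int main(void)
{
	unsigned int iova = 0x10010000;
	size_t size = 0x10000;				/* one 64 KiB large page */
	unsigned int num_inv = num_invalidations(size);	/* 16 */
	unsigned int last = (iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE;

	printf("num_inv=%u, v5+ range=[%#x, %#x]\n", num_inv, iova & SPAGE_MASK, last);
	return 0;
}
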
729 struct device *dev = &pdev->dev; in exynos_sysmmu_probe()
735 return -ENOMEM; in exynos_sysmmu_probe()
738 data->sfrbase = devm_ioremap_resource(dev, res); in exynos_sysmmu_probe()
739 if (IS_ERR(data->sfrbase)) in exynos_sysmmu_probe()
740 return PTR_ERR(data->sfrbase); in exynos_sysmmu_probe()
753 data->clk = devm_clk_get_optional(dev, "sysmmu"); in exynos_sysmmu_probe()
754 if (IS_ERR(data->clk)) in exynos_sysmmu_probe()
755 return PTR_ERR(data->clk); in exynos_sysmmu_probe()
757 data->aclk = devm_clk_get_optional(dev, "aclk"); in exynos_sysmmu_probe()
758 if (IS_ERR(data->aclk)) in exynos_sysmmu_probe()
759 return PTR_ERR(data->aclk); in exynos_sysmmu_probe()
761 data->pclk = devm_clk_get_optional(dev, "pclk"); in exynos_sysmmu_probe()
762 if (IS_ERR(data->pclk)) in exynos_sysmmu_probe()
763 return PTR_ERR(data->pclk); in exynos_sysmmu_probe()
765 if (!data->clk && (!data->aclk || !data->pclk)) { in exynos_sysmmu_probe()
767 return -ENOSYS; in exynos_sysmmu_probe()
770 data->clk_master = devm_clk_get_optional(dev, "master"); in exynos_sysmmu_probe()
771 if (IS_ERR(data->clk_master)) in exynos_sysmmu_probe()
772 return PTR_ERR(data->clk_master); in exynos_sysmmu_probe()
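
All four clocks are requested with devm_clk_get_optional(), which returns NULL rather than an error when the clock simply is not described in the devicetree; the probe then insists on either the single "sysmmu" gate clock or the "aclk" plus "pclk" pair and fails with -ENOSYS otherwise, while "master" stays optional either way. The check reduces to a small predicate, sketched with plain booleans:

#include <stdbool.h>
#include <stdio.h>

/* Usable clock combinations: "sysmmu" alone, or aclk + pclk together. */
static bool clocks_usable(bool have_sysmmu, bool have_aclk, bool have_pclk)
{
	return have_sysmmu || (have_aclk && have_pclk);
}

int main(void)
{
	printf("%d %d %d\n",
	       clocks_usable(true, false, false),	/* ok */
	       clocks_usable(false, true, true),	/* ok */
	       clocks_usable(false, true, false));	/* -ENOSYS in the driver */
	return 0;
}
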
774 data->sysmmu = dev; in exynos_sysmmu_probe()
775 spin_lock_init(&data->lock); in exynos_sysmmu_probe()
779 ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, in exynos_sysmmu_probe()
780 dev_name(data->sysmmu)); in exynos_sysmmu_probe()
787 if (MMU_MAJ_VER(data->version) < 5) { in exynos_sysmmu_probe()
798 if (MMU_MAJ_VER(data->version) >= 5) { in exynos_sysmmu_probe()
811 dma_dev = &pdev->dev; in exynos_sysmmu_probe()
815 ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev); in exynos_sysmmu_probe()
822 iommu_device_sysfs_remove(&data->iommu); in exynos_sysmmu_probe()
829 struct device *master = data->master; in exynos_sysmmu_suspend()
834 mutex_lock(&owner->rpm_lock); in exynos_sysmmu_suspend()
835 if (&data->domain->domain != &exynos_identity_domain) { in exynos_sysmmu_suspend()
836 dev_dbg(data->sysmmu, "saving state\n"); in exynos_sysmmu_suspend()
839 mutex_unlock(&owner->rpm_lock); in exynos_sysmmu_suspend()
847 struct device *master = data->master; in exynos_sysmmu_resume()
852 mutex_lock(&owner->rpm_lock); in exynos_sysmmu_resume()
853 if (&data->domain->domain != &exynos_identity_domain) { in exynos_sysmmu_resume()
854 dev_dbg(data->sysmmu, "restoring state\n"); in exynos_sysmmu_resume()
857 mutex_unlock(&owner->rpm_lock); in exynos_sysmmu_resume()
869 { .compatible = "samsung,exynos-sysmmu", },
876 .name = "exynos-sysmmu",
894 struct exynos_iommu_domain *domain; in exynos_iommu_domain_alloc_paging() local
901 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in exynos_iommu_domain_alloc_paging()
902 if (!domain) in exynos_iommu_domain_alloc_paging()
905 domain->pgtable = iommu_alloc_pages(GFP_KERNEL, 2); in exynos_iommu_domain_alloc_paging()
906 if (!domain->pgtable) in exynos_iommu_domain_alloc_paging()
909 domain->lv2entcnt = iommu_alloc_pages(GFP_KERNEL, 1); in exynos_iommu_domain_alloc_paging()
910 if (!domain->lv2entcnt) in exynos_iommu_domain_alloc_paging()
915 domain->pgtable[i] = ZERO_LV2LINK; in exynos_iommu_domain_alloc_paging()
917 handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE, in exynos_iommu_domain_alloc_paging()
920 BUG_ON(handle != virt_to_phys(domain->pgtable)); in exynos_iommu_domain_alloc_paging()
924 spin_lock_init(&domain->lock); in exynos_iommu_domain_alloc_paging()
925 spin_lock_init(&domain->pgtablelock); in exynos_iommu_domain_alloc_paging()
926 INIT_LIST_HEAD(&domain->clients); in exynos_iommu_domain_alloc_paging()
928 domain->domain.geometry.aperture_start = 0; in exynos_iommu_domain_alloc_paging()
929 domain->domain.geometry.aperture_end = ~0UL; in exynos_iommu_domain_alloc_paging()
930 domain->domain.geometry.force_aperture = true; in exynos_iommu_domain_alloc_paging()
932 return &domain->domain; in exynos_iommu_domain_alloc_paging()
935 iommu_free_pages(domain->lv2entcnt, 1); in exynos_iommu_domain_alloc_paging()
937 iommu_free_pages(domain->pgtable, 2); in exynos_iommu_domain_alloc_paging()
939 kfree(domain); in exynos_iommu_domain_alloc_paging()
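
A paging domain owns a first-level table covering the whole 32-bit IOVA space plus a parallel array of counters tracking how many free slots remain in each slot's second-level table. Every first-level entry starts out as ZERO_LV2LINK, a link to one shared all-zero second-level table rather than an invalid entry, apparently so that prefetched descriptors always resolve to something harmless (the zero_l2_table remark at line 1105 points the same way). A standalone sketch of that layout; the entry counts and the link encoding are assumptions:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint32_t sysmmu_pte_t;

/* Assumed sizes: 4096 x 1 MiB sections = 4 GiB, 256 x 4 KiB pages per section. */
#define NUM_LV1ENTRIES	4096
#define NUM_LV2ENTRIES	256

/* Hypothetical "second-level table link" encoding for the sketch. */
#define LV1_PAGE_FLAG		0x1u
#define mk_lv1ent_page(pa)	((pa) | LV1_PAGE_FLAG)

struct domain_model {
	sysmmu_pte_t *pgtable;	/* first-level table */
	short *lv2entcnt;	/* free entries left in each slot's lv2 table */
};

static int domain_init(struct domain_model *dom, uint32_t zero_lv2_pa)
{
	dom->pgtable = calloc(NUM_LV1ENTRIES, sizeof(*dom->pgtable));
	dom->lv2entcnt = calloc(NUM_LV1ENTRIES, sizeof(*dom->lv2entcnt));
	if (!dom->pgtable || !dom->lv2entcnt)
		return -1;

	/* every slot initially links to one shared, all-zero lv2 table */
	for (int i = 0; i < NUM_LV1ENTRIES; i++)
		dom->pgtable[i] = mk_lv1ent_page(zero_lv2_pa);
	return 0;
}

int main(void)
{
	struct domain_model dom;

	if (domain_init(&dom, 0x40000000) == 0)
		printf("lv1[0] = %#x, free lv2 entries in slot 0: %d\n",
		       dom.pgtable[0], dom.lv2entcnt[0]);
	free(dom.pgtable);
	free(dom.lv2entcnt);
	return 0;
}

In the driver a slot's counter only becomes meaningful once alloc_lv2entry() installs a real second-level table for it.
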
945 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_domain_free() local
950 WARN_ON(!list_empty(&domain->clients)); in exynos_iommu_domain_free()
952 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_domain_free()
954 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_domain_free()
955 spin_lock(&data->lock); in exynos_iommu_domain_free()
957 data->pgtable = 0; in exynos_iommu_domain_free()
958 data->domain = NULL; in exynos_iommu_domain_free()
959 list_del_init(&data->domain_node); in exynos_iommu_domain_free()
960 spin_unlock(&data->lock); in exynos_iommu_domain_free()
963 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_domain_free()
965 dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE, in exynos_iommu_domain_free()
969 if (lv1ent_page(domain->pgtable + i)) { in exynos_iommu_domain_free()
970 phys_addr_t base = lv2table_base(domain->pgtable + i); in exynos_iommu_domain_free()
978 iommu_free_pages(domain->pgtable, 2); in exynos_iommu_domain_free()
979 iommu_free_pages(domain->lv2entcnt, 1); in exynos_iommu_domain_free()
980 kfree(domain); in exynos_iommu_domain_free()
987 struct exynos_iommu_domain *domain; in exynos_iommu_identity_attach() local
992 if (owner->domain == identity_domain) in exynos_iommu_identity_attach()
995 domain = to_exynos_domain(owner->domain); in exynos_iommu_identity_attach()
996 pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_identity_attach()
998 mutex_lock(&owner->rpm_lock); in exynos_iommu_identity_attach()
1000 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_identity_attach()
1001 pm_runtime_get_noresume(data->sysmmu); in exynos_iommu_identity_attach()
1002 if (pm_runtime_active(data->sysmmu)) in exynos_iommu_identity_attach()
1004 pm_runtime_put(data->sysmmu); in exynos_iommu_identity_attach()
1007 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_identity_attach()
1008 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_identity_attach()
1009 spin_lock(&data->lock); in exynos_iommu_identity_attach()
1010 data->pgtable = 0; in exynos_iommu_identity_attach()
1011 data->domain = NULL; in exynos_iommu_identity_attach()
1012 list_del_init(&data->domain_node); in exynos_iommu_identity_attach()
1013 spin_unlock(&data->lock); in exynos_iommu_identity_attach()
1015 owner->domain = identity_domain; in exynos_iommu_identity_attach()
1016 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_identity_attach()
1018 mutex_unlock(&owner->rpm_lock); in exynos_iommu_identity_attach()
1037 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_attach_device() local
1040 phys_addr_t pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_attach_device()
1048 mutex_lock(&owner->rpm_lock); in exynos_iommu_attach_device()
1050 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_attach_device()
1051 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_attach_device()
1052 spin_lock(&data->lock); in exynos_iommu_attach_device()
1053 data->pgtable = pagetable; in exynos_iommu_attach_device()
1054 data->domain = domain; in exynos_iommu_attach_device()
1055 list_add_tail(&data->domain_node, &domain->clients); in exynos_iommu_attach_device()
1056 spin_unlock(&data->lock); in exynos_iommu_attach_device()
1058 owner->domain = iommu_domain; in exynos_iommu_attach_device()
1059 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_attach_device()
1061 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_attach_device()
1062 pm_runtime_get_noresume(data->sysmmu); in exynos_iommu_attach_device()
1063 if (pm_runtime_active(data->sysmmu)) in exynos_iommu_attach_device()
1065 pm_runtime_put(data->sysmmu); in exynos_iommu_attach_device()
1068 mutex_unlock(&owner->rpm_lock); in exynos_iommu_attach_device()
1076 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, in alloc_lv2entry() argument
1081 return ERR_PTR(-EADDRINUSE); in alloc_lv2entry()
1090 BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1)); in alloc_lv2entry()
1092 return ERR_PTR(-ENOMEM); in alloc_lv2entry()
1101 return ERR_PTR(-EADDRINUSE); in alloc_lv2entry()
1105 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, in alloc_lv2entry()
1124 spin_lock(&domain->lock); in alloc_lv2entry()
1125 list_for_each_entry(data, &domain->clients, domain_node) in alloc_lv2entry()
1127 spin_unlock(&domain->lock); in alloc_lv2entry()
1134 static int lv1set_section(struct exynos_iommu_domain *domain, in lv1set_section() argument
1141 return -EADDRINUSE; in lv1set_section()
1148 return -EADDRINUSE; in lv1set_section()
1157 spin_lock(&domain->lock); in lv1set_section()
1164 list_for_each_entry(data, &domain->clients, domain_node) in lv1set_section()
1167 spin_unlock(&domain->lock); in lv1set_section()
1177 return -EADDRINUSE; in lv2set_page()
1180 *pgcnt -= 1; in lv2set_page()
1191 memset(pent - i, 0, sizeof(*pent) * i); in lv2set_page()
1192 return -EADDRINUSE; in lv2set_page()
1200 *pgcnt -= SPAGES_PER_LPAGE; in lv2set_page()
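
lv2set_page() covers both second-level sizes: a 4 KiB mapping writes a single entry and consumes one free slot, while a 64 KiB mapping must write SPAGES_PER_LPAGE (16) identical entries and, if it finds one already in use, rolls back whatever it wrote (the memset at line 1191) and returns -EADDRINUSE. A simplified standalone model; the PTE encodings are invented for the sketch, and the real function also deals with protection bits and cache maintenance:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t sysmmu_pte_t;

#define SPAGE_SIZE		4096u
#define SPAGES_PER_LPAGE	16	/* 64 KiB / 4 KiB */
/* Hypothetical encodings, just to make "valid" and "small/large" visible. */
#define ENT_VALID		0x1u
#define mk_lv2ent_spage(pa)	((pa) | ENT_VALID)
#define mk_lv2ent_lpage(pa)	((pa) | ENT_VALID | 0x2u)
#define lv2ent_fault(ent)	(((ent) & ENT_VALID) == 0)

static int lv2set_page(sysmmu_pte_t *pent, uint32_t paddr, size_t size, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (!lv2ent_fault(*pent))
			return -EADDRINUSE;
		*pent = mk_lv2ent_spage(paddr);
		*pgcnt -= 1;
		return 0;
	}

	/* 64 KiB large page: 16 consecutive identical entries */
	for (int i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
		if (!lv2ent_fault(*pent)) {
			memset(pent - i, 0, sizeof(*pent) * i);	/* undo partial write */
			return -EADDRINUSE;
		}
		*pent = mk_lv2ent_lpage(paddr);
	}
	*pgcnt -= SPAGES_PER_LPAGE;
	return 0;
}

int main(void)
{
	sysmmu_pte_t lv2[256] = { 0 };
	short freecnt = 256;
	int ret = lv2set_page(lv2, 0x40010000, 0x10000, &freecnt);

	printf("map 64K: %d, free entries left: %d\n", ret, freecnt);
	return 0;
}
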
1207 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
1228 * - Any two consecutive I/O virtual regions must have a hole of size larger
1230 * - Start address of an I/O virtual region must be aligned by 128KiB.
1236 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_map() local
1240 int ret = -ENOMEM; in exynos_iommu_map()
1242 BUG_ON(domain->pgtable == NULL); in exynos_iommu_map()
1245 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_map()
1247 entry = section_entry(domain->pgtable, iova); in exynos_iommu_map()
1250 ret = lv1set_section(domain, entry, iova, paddr, prot, in exynos_iommu_map()
1251 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1255 pent = alloc_lv2entry(domain, entry, iova, in exynos_iommu_map()
1256 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1262 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1271 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_map()
1276 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain, in exynos_iommu_tlb_invalidate_entry() argument
1282 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1284 list_for_each_entry(data, &domain->clients, domain_node) in exynos_iommu_tlb_invalidate_entry()
1287 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1294 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_unmap() local
1300 BUG_ON(domain->pgtable == NULL); in exynos_iommu_unmap()
1302 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1304 ent = section_entry(domain->pgtable, iova); in exynos_iommu_unmap()
1336 domain->lv2entcnt[lv1ent_offset(iova)] += 1; in exynos_iommu_unmap()
1354 domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; in exynos_iommu_unmap()
1356 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1358 exynos_iommu_tlb_invalidate_entry(domain, iova, size); in exynos_iommu_unmap()
1362 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1373 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_iova_to_phys() local
1378 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
1380 entry = section_entry(domain->pgtable, iova); in exynos_iommu_iova_to_phys()
1393 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
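
exynos_iommu_iova_to_phys() is a plain two-level software walk taken under pgtablelock: a section entry resolves the IOVA directly within its 1 MiB frame, a page-link entry descends into the second-level table, and there the entry is either a 64 KiB large page or a 4 KiB small page. A self-contained model of the walk; the entry encodings are hypothetical, and where the hardware descriptor stores the second-level table's physical base, the sketch uses a parallel pointer array instead:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t sysmmu_pte_t;

/* Hypothetical two-bit type field for the sketch. */
#define ENT_PAGE_LINK	0x1u	/* lv1: points to an lv2 table */
#define ENT_SECTION	0x2u	/* lv1: 1 MiB section */
#define ENT_LPAGE	0x1u	/* lv2: 64 KiB large page */
#define ENT_SPAGE	0x2u	/* lv2: 4 KiB small page */
#define TYPE(ent)	((ent) & 0x3u)

#define SECT_SIZE	0x100000u
#define LPAGE_SIZE	0x10000u
#define SPAGE_SIZE	0x1000u

static uint64_t iova_to_phys(const sysmmu_pte_t *lv1, uint32_t iova,
			     const sysmmu_pte_t *lv2tables[])
{
	sysmmu_pte_t ent = lv1[iova >> 20];

	if (TYPE(ent) == ENT_SECTION)
		return (ent & ~(SECT_SIZE - 1)) | (iova & (SECT_SIZE - 1));

	if (TYPE(ent) == ENT_PAGE_LINK) {
		sysmmu_pte_t pent = lv2tables[iova >> 20][(iova >> 12) & 0xff];

		if (TYPE(pent) == ENT_LPAGE)
			return (pent & ~(LPAGE_SIZE - 1)) | (iova & (LPAGE_SIZE - 1));
		if (TYPE(pent) == ENT_SPAGE)
			return (pent & ~(SPAGE_SIZE - 1)) | (iova & (SPAGE_SIZE - 1));
	}
	return 0;	/* unmapped */
}

int main(void)
{
	static sysmmu_pte_t lv2_0[256];
	static sysmmu_pte_t lv1[4096];
	static const sysmmu_pte_t *lv2tables[4096];

	lv1[0] = ENT_PAGE_LINK;			/* real hw stores lv2's physical base here */
	lv2tables[0] = lv2_0;
	lv2_0[5] = 0x40005000u | ENT_SPAGE;	/* 4 KiB page backing IOVA 0x5000 */

	printf("0x5123 -> %#llx\n",
	       (unsigned long long)iova_to_phys(lv1, 0x5123, lv2tables));
	return 0;
}
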
1404 return ERR_PTR(-ENODEV); in exynos_iommu_probe_device()
1406 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_probe_device()
1412 data->link = device_link_add(dev, data->sysmmu, in exynos_iommu_probe_device()
1418 data = list_first_entry(&owner->controllers, in exynos_iommu_probe_device()
1421 return &data->iommu; in exynos_iommu_probe_device()
1431 list_for_each_entry(data, &owner->controllers, owner_node) in exynos_iommu_release_device()
1432 device_link_del(data->link); in exynos_iommu_release_device()
1438 struct platform_device *sysmmu = of_find_device_by_node(spec->np); in exynos_iommu_of_xlate()
1443 return -ENODEV; in exynos_iommu_of_xlate()
1447 put_device(&sysmmu->dev); in exynos_iommu_of_xlate()
1448 return -ENODEV; in exynos_iommu_of_xlate()
1454 put_device(&sysmmu->dev); in exynos_iommu_of_xlate()
1455 return -ENOMEM; in exynos_iommu_of_xlate()
1458 INIT_LIST_HEAD(&owner->controllers); in exynos_iommu_of_xlate()
1459 mutex_init(&owner->rpm_lock); in exynos_iommu_of_xlate()
1460 owner->domain = &exynos_identity_domain; in exynos_iommu_of_xlate()
1464 list_for_each_entry(entry, &owner->controllers, owner_node) in exynos_iommu_of_xlate()
1468 list_add_tail(&data->owner_node, &owner->controllers); in exynos_iommu_of_xlate()
1469 data->master = dev; in exynos_iommu_of_xlate()
1502 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", in exynos_iommu_init()
1506 return -ENOMEM; in exynos_iommu_init()
1513 ret = -ENOMEM; in exynos_iommu_init()