Lines matching "smmu" in the NVIDIA Tegra SMMU driver (tegra-smmu.c), grouped by the definition that contains each match. Non-matching lines elided from the source are marked "...".

struct members:
    struct tegra_smmu *smmu;    /* back-pointer to the SMMU, kept in two of the driver's internal structs */
smmu_writel(), smmu_readl(): 32-bit accessors for the SMMU register window
    static inline void smmu_writel(struct tegra_smmu *smmu, u32 value, unsigned long offset)
        writel(value, smmu->regs + offset);
    static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
        return readl(smmu->regs + offset);
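These accessors bake the ioremap()ed register base into every access, so each register touch goes through one pair of functions. A minimal sketch of the same pattern for a hypothetical device (the mydev names are illustrative, not from the driver):

    #include <linux/io.h>
    #include <linux/types.h>

    struct mydev {
        void __iomem *regs;    /* ioremap()ed register window */
    };

    /* Write a 32-bit register at a byte offset from the base. */
    static inline void mydev_writel(struct mydev *mydev, u32 value,
                                    unsigned long offset)
    {
        writel(value, mydev->regs + offset);
    }

    /* Read a 32-bit register; writel()/readl() already include the
     * required I/O ordering barriers, so no explicit fencing is needed. */
    static inline u32 mydev_readl(struct mydev *mydev, unsigned long offset)
    {
        return readl(mydev->regs + offset);
    }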
SMMU_TLB_CONFIG_ACTIVE_LINES():
    #define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
        ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
smmu_dma_addr_valid(): reject DMA addresses whose page-frame number the hardware cannot store (addr has already been shifted down to a PFN by this point)
    static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
        return (addr & smmu->pfn_mask) == addr;

smmu_pde_to_dma(): recover a page table's DMA address from its page-directory entry
    static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
        return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
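pfn_mask bounds the page-frame numbers a PDE or PTE can express. As a worked example (the 34-bit figure is illustrative, not from the listing): with 34 address bits and 4 KiB pages, the mask is (1 << (34 - 12)) - 1 = 0x3fffff, a 22-bit PFN. A small sketch of the arithmetic:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define EXAMPLE_ADDRESS_BITS    34    /* illustrative SoC limit */
    #define SMMU_PAGE_SHIFT         12    /* 4 KiB SMMU pages */

    /* 0x3fffff for the example values: the largest storable PFN. */
    static unsigned long example_pfn_mask(void)
    {
        return BIT_MASK(EXAMPLE_ADDRESS_BITS - SMMU_PAGE_SHIFT) - 1;
    }

    /* Mirrors smmu_dma_addr_valid(): the PFN must survive the mask. */
    static bool pfn_fits(dma_addr_t addr, unsigned long pfn_mask)
    {
        unsigned long pfn = addr >> SMMU_PAGE_SHIFT;

        return (pfn & pfn_mask) == pfn;
    }

    /* Mirrors smmu_pde_to_dma(): the low PDE bits hold the table's PFN. */
    static dma_addr_t pde_to_dma(u32 pde, unsigned long pfn_mask)
    {
        return (dma_addr_t)(pde & pfn_mask) << SMMU_PAGE_SHIFT;
    }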
smmu_flush_ptc_all(): invalidate the entire page-table cache
    smmu_writel(smmu, SMMU_PTC_FLUSH_TYPE_ALL, SMMU_PTC_FLUSH);

smmu_flush_ptc(): invalidate only the PTC line holding one page-table entry
    offset &= ~(smmu->mc->soc->atom_size - 1);
    if (smmu->mc->soc->num_address_bits > 32) {
        ...
        smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
    }
    ...
    smmu_writel(smmu, value, SMMU_PTC_FLUSH);
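A plausible reconstruction of the address-matching flush around the fragments above. The alignment to atom_size and the split across the FLUSH_HI/FLUSH registers come straight from the listing; the SMMU_PTC_FLUSH_HI_MASK and SMMU_PTC_FLUSH_TYPE_ADR names are my assumptions:

    /* Sketch: flush the PTC line covering the descriptor at dma + offset. */
    static void ptc_flush_addr(struct tegra_smmu *smmu, dma_addr_t dma,
                               unsigned long offset)
    {
        u32 value;

        /* The PTC caches whole memory-controller atoms; align down. */
        offset &= ~(smmu->mc->soc->atom_size - 1);

        /* On SoCs with more than 32 address bits, program the high word
         * of the target address first. */
        if (smmu->mc->soc->num_address_bits > 32) {
            value = (dma >> 32) & SMMU_PTC_FLUSH_HI_MASK;
            smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
        }

        /* Low word: aligned address plus an address-match flush type. */
        value = ((u32)dma + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
    }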
smmu_flush_tlb(): invalidate the whole TLB
    smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);

smmu_flush_tlb_asid(), smmu_flush_tlb_section(), smmu_flush_tlb_group(): narrower invalidations sharing one shape; SoCs with only four ASIDs place the ASID field at a different bit position
    if (smmu->soc->num_asids == 4)
        ...
    smmu_writel(smmu, value, SMMU_TLB_FLUSH);

smmu_flush(): dummy read-back of SMMU_PTB_ASID, forcing all preceding register writes to complete
    smmu_readl(smmu, SMMU_PTB_ASID);
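The three narrow flushes share one shape: an ASID field whose position depends on the SoC generation, OR'ed with a match type, written to SMMU_TLB_FLUSH. A sketch of the ASID variant; the bit positions and the SMMU_TLB_FLUSH_ASID_MATCH name are assumptions built around the four-ASID special case visible above:

    static void tlb_flush_asid(struct tegra_smmu *smmu, unsigned int asid)
    {
        u32 value;

        /* Early SoCs expose only four ASIDs; the field sits higher up. */
        if (smmu->soc->num_asids == 4)
            value = (asid & 0x3) << 29;    /* assumed field position */
        else
            value = (asid & 0x7f) << 24;   /* assumed field position */

        value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_MATCH_ALL;
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);

        /* Post the write before the caller depends on the flush:
         * reading any SMMU register back does the job. */
        smmu_readl(smmu, SMMU_PTB_ASID);
    }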
tegra_smmu_alloc_asid(): claim the first free ASID from the bitmap (callers serialize on smmu->lock)
    id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
    if (id >= smmu->soc->num_asids)
        ...
    set_bit(id, smmu->asids);

tegra_smmu_free_asid():
    clear_bit(id, smmu->asids);
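ASID allocation is a plain first-fit bitmap scan. A self-contained sketch; -ENOSPC for the exhaustion case is my assumption, and pairing find_first_zero_bit() with set_bit() is only safe because callers hold the lock guarding the bitmap:

    #include <linux/bitmap.h>
    #include <linux/errno.h>

    /* Claim the lowest free ID; the caller must hold the lock that
     * guards the bitmap, since the scan and the set are not atomic
     * as a pair. */
    static int id_alloc(unsigned long *bitmap, unsigned int nbits,
                        unsigned int *idp)
    {
        unsigned int id = find_first_zero_bit(bitmap, nbits);

        if (id >= nbits)
            return -ENOSPC;    /* assumed error value */

        set_bit(id, bitmap);
        *idp = id;

        return 0;
    }

    static void id_free(unsigned long *bitmap, unsigned int id)
    {
        clear_bit(id, bitmap);
    }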
tegra_smmu_find_swgroup(): linear search of the SoC's swgroup table
    for (i = 0; i < smmu->soc->num_swgroups; i++) {
        if (smmu->soc->swgroups[i].swgroup == swgroup) {
            group = &smmu->soc->swgroups[i];
            ...
tegra_smmu_enable(): program the swgroup's register, then set the SMMU-enable bit for each memory-controller client in that swgroup
    group = tegra_smmu_find_swgroup(smmu, swgroup);
    value = smmu_readl(smmu, group->reg);
    ...
    smmu_writel(smmu, value, group->reg);
    ...
    for (i = 0; i < smmu->soc->num_clients; i++) {
        const struct tegra_mc_client *client = &smmu->soc->clients[i];
        ...
        value = smmu_readl(smmu, client->regs.smmu.reg);
        value |= BIT(client->regs.smmu.bit);
        smmu_writel(smmu, value, client->regs.smmu.reg);

tegra_smmu_disable(): the mirror image, clearing the same per-client bits
    group = tegra_smmu_find_swgroup(smmu, swgroup);
    value = smmu_readl(smmu, group->reg);
    ...
    smmu_writel(smmu, value, group->reg);
    for (i = 0; i < smmu->soc->num_clients; i++) {
        const struct tegra_mc_client *client = &smmu->soc->clients[i];
        ...
        value = smmu_readl(smmu, client->regs.smmu.reg);
        value &= ~BIT(client->regs.smmu.bit);
        smmu_writel(smmu, value, client->regs.smmu.reg);
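Several clients share one register, so both paths are read-modify-write on a single bit. A condensed sketch folding the two loops into one helper (the bool parameter is my refactoring, not the driver's structure):

    static void set_client_translation(struct tegra_smmu *smmu,
                                       const struct tegra_mc_client *client,
                                       bool enable)
    {
        u32 value = smmu_readl(smmu, client->regs.smmu.reg);

        if (enable)
            value |= BIT(client->regs.smmu.bit);    /* route through SMMU */
        else
            value &= ~BIT(client->regs.smmu.bit);   /* bypass SMMU */

        smmu_writel(smmu, value, client->regs.smmu.reg);
    }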
tegra_smmu_as_prepare(): first attach of an address space: DMA-map its page directory, validate the address, allocate an ASID, and program the page-table base
    mutex_lock(&smmu->lock);
    ...
    as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD, DMA_TO_DEVICE);
    if (dma_mapping_error(smmu->dev, as->pd_dma)) {
        ...
    if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
        ...
    err = tegra_smmu_alloc_asid(smmu, &as->id);
    ...
    smmu_flush_ptc(smmu, as->pd_dma, 0);
    smmu_flush_tlb_asid(smmu, as->id);
    smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
    ...
    smmu_writel(smmu, value, SMMU_PTB_DATA);
    smmu_flush(smmu);
    as->smmu = smmu;
    ...
    mutex_unlock(&smmu->lock);
    /* error path: */
    dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
    mutex_unlock(&smmu->lock);

tegra_smmu_as_unprepare(): last detach: release the ASID and unmap the page directory
    mutex_lock(&smmu->lock);
    ...
    mutex_unlock(&smmu->lock);    /* early exit while the AS is still in use */
    ...
    tegra_smmu_free_asid(smmu, as->id);
    ...
    dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
    as->smmu = NULL;
    ...
    mutex_unlock(&smmu->lock);
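The prepare path acquires resources in order and releases them in reverse on failure. A trimmed sketch of its control flow: the use-count handling is elided, and SMMU_PTB_DATA_VALUE(), as->attr, and the exact error codes are assumptions rather than lines from the listing.

    static int as_prepare(struct tegra_smmu *smmu, struct tegra_smmu_as *as)
    {
        int err;

        mutex_lock(&smmu->lock);

        /* 1: make the page directory visible to the SMMU. */
        as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(smmu->dev, as->pd_dma)) {
            err = -ENOMEM;
            goto err_unlock;
        }

        /* 2: the SMMU cannot reach PFNs above its mask. */
        if (!smmu_dma_addr_valid(smmu, as->pd_dma)) {
            err = -ENOMEM;
            goto err_unmap;
        }

        /* 3: claim a hardware context. */
        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
            goto err_unmap;

        /* 4: drop stale cached state, then program the context. */
        smmu_flush_ptc(smmu, as->pd_dma, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
        smmu_writel(smmu, SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr),
                    SMMU_PTB_DATA);
        smmu_flush(smmu);

        as->smmu = smmu;

        mutex_unlock(&smmu->lock);
        return 0;

    err_unmap:
        dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE);
    err_unlock:
        mutex_unlock(&smmu->lock);
        return err;
    }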
tegra_smmu_attach_dev(): prepare the address space once per SWGROUP ID and enable translation for each
    struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
    ...
    err = tegra_smmu_as_prepare(smmu, as);
    ...
    tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
    /* unwind on failure: */
    tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
    tegra_smmu_as_unprepare(smmu, as);

tegra_smmu_identity_attach(): detach from the current domain by undoing the same two steps
    struct tegra_smmu *smmu;
    ...
    smmu = as->smmu;
    tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
    tegra_smmu_as_unprepare(smmu, as);
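Attach iterates over every SWGROUP ID the firmware gave the device and, on failure, unwinds only the IDs already enabled. A sketch of that loop shape (fwspec handling follows the standard dev_iommu_fwspec_get() API; the surrounding domain lookup is elided):

    static int attach_ids(struct tegra_smmu *smmu, struct tegra_smmu_as *as,
                          struct device *dev)
    {
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        unsigned int index;
        int err;

        for (index = 0; index < fwspec->num_ids; index++) {
            err = tegra_smmu_as_prepare(smmu, as);
            if (err)
                goto disable;

            tegra_smmu_enable(smmu, fwspec->ids[index], as->id);
        }

        return 0;

    disable:
        /* Undo exactly the iterations that completed. */
        while (index--) {
            tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
            tegra_smmu_as_unprepare(smmu, as);
        }

        return err;
    }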
tegra_smmu_set_pde(): write a page-directory entry, then flush it all the way out
    struct tegra_smmu *smmu = as->smmu;
    ...
    dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset, ...);
    ...
    smmu_flush_ptc(smmu, as->pd_dma, offset);
    smmu_flush_tlb_section(smmu, as->id, iova);
    smmu_flush(smmu);
tegra_smmu_pte_lookup(): walk the page directory to the PTE backing an IOVA
    struct tegra_smmu *smmu = as->smmu;
    ...
    *dmap = smmu_pde_to_dma(smmu, pd[pd_index]);

as_get_pte(): allocate and DMA-map a page table on first use of a directory slot
    struct tegra_smmu *smmu = as->smmu;
    ...
    dma = dma_map_page(smmu->dev, page, 0, SMMU_SIZE_PT, DMA_TO_DEVICE);
    if (dma_mapping_error(smmu->dev, dma)) {
        ...
    if (!smmu_dma_addr_valid(smmu, dma)) {
        dma_unmap_page(smmu->dev, dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
        ...
    *dmap = smmu_pde_to_dma(smmu, pd[pde]);

tegra_smmu_pte_put_use(): tear down a page table once its last PTE is cleared
    struct tegra_smmu *smmu = as->smmu;
    dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
    ...
    dma_unmap_page(smmu->dev, pte_dma, SMMU_SIZE_PT, DMA_TO_DEVICE);
tegra_smmu_set_pte(): same write-sync-flush sequence as set_pde, at page granularity
    struct tegra_smmu *smmu = as->smmu;
    ...
    dma_sync_single_range_for_device(smmu->dev, pte_dma, offset, ...);
    smmu_flush_ptc(smmu, pte_dma, offset);
    smmu_flush_tlb_group(smmu, as->id, iova);
    smmu_flush(smmu);
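set_pde() and set_pte() share a strict five-step ordering: CPU write, DMA sync so the device sees it, PTC flush to evict the cached descriptor, TLB flush for the affected range, read-back to post everything. A condensed sketch of that skeleton (the helper and its offset computation are mine; descriptors are assumed to be 32-bit):

    static void update_and_flush(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as, u32 *entry,
                                 u32 new_value, dma_addr_t table_dma,
                                 unsigned long iova)
    {
        unsigned long offset = (unsigned long)entry & ~PAGE_MASK;

        *entry = new_value;                             /* 1: CPU write */

        dma_sync_single_range_for_device(smmu->dev, table_dma, offset,
                                         sizeof(*entry),
                                         DMA_TO_DEVICE); /* 2: device-visible */
        smmu_flush_ptc(smmu, table_dma, offset);        /* 3: evict descriptor */
        smmu_flush_tlb_group(smmu, as->id, iova);       /* 4: evict translations */
        smmu_flush(smmu);                               /* 5: post the writes */
    }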
tegra_smmu_iova_to_phys(): extract the physical page-frame number from the PTE
    pfn = *pte & as->smmu->pfn_mask;

tegra_smmu_find(): resolve a device-tree node to its SMMU through the memory controller
    return mc->smmu;
tegra_smmu_configure(): initialize the device's IOMMU firmware spec against this SMMU
    const struct iommu_ops *ops = smmu->iommu.ops;
    ...
    err = iommu_fwspec_init(dev, dev_fwnode(smmu->dev));
    ...

tegra_smmu_probe_device(): walk the device's "iommus" phandles, configuring against each matching SMMU
    struct tegra_smmu *smmu = NULL;
    ...
    smmu = tegra_smmu_find(args.np);
    if (smmu) {
        err = tegra_smmu_configure(smmu, dev, &args);
        ...
    smmu = dev_iommu_priv_get(dev);
    if (!smmu)
        ...
    return &smmu->iommu;
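probe_device() has to cope with devices listing several IOMMU phandles. A sketch of the walk using the standard of_parse_phandle_with_args() idiom (the loop structure and -ENODEV are reconstructed around the matched lines, not copied from the driver):

    static struct iommu_device *probe_ids(struct device *dev)
    {
        struct of_phandle_args args;
        struct tegra_smmu *smmu;
        unsigned int index = 0;

        while (!of_parse_phandle_with_args(dev->of_node, "iommus",
                                           "#iommu-cells", index, &args)) {
            smmu = tegra_smmu_find(args.np);
            if (smmu) {
                int err = tegra_smmu_configure(smmu, dev, &args);

                if (err < 0) {
                    of_node_put(args.np);
                    return ERR_PTR(err);
                }
            }

            of_node_put(args.np);
            index++;
        }

        /* configure() stashes the SMMU in the device's IOMMU data. */
        smmu = dev_iommu_priv_get(dev);
        if (!smmu)
            return ERR_PTR(-ENODEV);

        return &smmu->iommu;
    }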
tegra_smmu_find_group(): nested scan of the SoC's group tables for a swgroup
    for (i = 0; i < smmu->soc->num_groups; i++)
        for (j = 0; j < smmu->soc->groups[i].num_swgroups; j++)
            if (smmu->soc->groups[i].swgroups[j] == swgroup)
                return &smmu->soc->groups[i];
tegra_smmu_group_release(): drop the group from the SMMU's list under the lock
    struct tegra_smmu *smmu = group->smmu;
    mutex_lock(&smmu->lock);
    ...
    mutex_unlock(&smmu->lock);

tegra_smmu_device_group(): return the existing iommu_group for the device's swgroup, or create one
    struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
    ...
    soc = tegra_smmu_find_group(smmu, swgroup);
    ...
    mutex_lock(&smmu->lock);
    /* reuse path: */
    list_for_each_entry(group, &smmu->groups, list)
        ...
    mutex_unlock(&smmu->lock);
    /* create path: */
    group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
    if (!group) {
        mutex_unlock(&smmu->lock);
        ...
    group->smmu = smmu;
    ...
    /* error path: */
    devm_kfree(smmu->dev, group);
    mutex_unlock(&smmu->lock);
    ...
    list_add_tail(&group->list, &smmu->groups);
    mutex_unlock(&smmu->lock);
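device_group() is a lookup-or-create guarded by one mutex, so concurrent probes cannot create duplicate groups for a swgroup. A sketch with error reporting trimmed; iommu_group_ref_get() and iommu_group_alloc() are the standard IOMMU core API, but the exact field names here are assumptions:

    static struct iommu_group *lookup_or_create(struct tegra_smmu *smmu,
                                                unsigned int swgroup)
    {
        struct tegra_smmu_group *group;

        mutex_lock(&smmu->lock);

        /* Reuse: all devices behind one swgroup share a group. */
        list_for_each_entry(group, &smmu->groups, list) {
            if (group->swgroup == swgroup) {
                struct iommu_group *grp =
                    iommu_group_ref_get(group->group);

                mutex_unlock(&smmu->lock);
                return grp;
            }
        }

        /* Create: allocate, fill in, and publish under the lock. */
        group = devm_kzalloc(smmu->dev, sizeof(*group), GFP_KERNEL);
        if (!group)
            goto unlock;

        group->swgroup = swgroup;
        group->smmu = smmu;

        group->group = iommu_group_alloc();
        if (IS_ERR(group->group)) {
            devm_kfree(smmu->dev, group);
            group = NULL;
            goto unlock;
        }

        list_add_tail(&group->list, &smmu->groups);
        mutex_unlock(&smmu->lock);
        return group->group;

    unlock:
        mutex_unlock(&smmu->lock);
        return NULL;
    }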
tegra_smmu_of_xlate():
    /* ... the SMMU parent device is the same as the MC, so the reference count ... */
    dev_iommu_priv_set(dev, mc->smmu);
tegra_smmu_swgroups_show(): debugfs view of each swgroup's register
    struct tegra_smmu *smmu = s->private;
    ...
    for (i = 0; i < smmu->soc->num_swgroups; i++) {
        const struct tegra_smmu_swgroup *group = &smmu->soc->swgroups[i];
        ...
        value = smmu_readl(smmu, group->reg);

tegra_smmu_clients_show(): debugfs view of each client's SMMU-enable bit
    struct tegra_smmu *smmu = s->private;
    ...
    for (i = 0; i < smmu->soc->num_clients; i++) {
        const struct tegra_mc_client *client = &smmu->soc->clients[i];
        ...
        value = smmu_readl(smmu, client->regs.smmu.reg);
        ...
        if (value & BIT(client->regs.smmu.bit))

tegra_smmu_debugfs_init(), tegra_smmu_debugfs_exit():
    smmu->debugfs = debugfs_create_dir("smmu", NULL);
    debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu, ...);
    debugfs_create_file("clients", S_IRUGO, smmu->debugfs, smmu, ...);
    debugfs_remove_recursive(smmu->debugfs);
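The two show() functions above are plain seq_file callbacks wired up with debugfs_create_file(). A self-contained sketch of the same wiring for a hypothetical device (mydev and its fields are illustrative); DEFINE_SHOW_ATTRIBUTE() generates the read-only fops for a single-show file:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    struct mydev {
        struct dentry *debugfs;
        int state;    /* whatever the file should report */
    };

    /* s->private carries the pointer passed to debugfs_create_file(). */
    static int mydev_state_show(struct seq_file *s, void *data)
    {
        struct mydev *mydev = s->private;

        seq_printf(s, "state: %d\n", mydev->state);
        return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(mydev_state);    /* generates mydev_state_fops */

    static void mydev_debugfs_init(struct mydev *mydev)
    {
        mydev->debugfs = debugfs_create_dir("mydev", NULL);
        debugfs_create_file("state", 0444, mydev->debugfs, mydev,
                            &mydev_state_fops);
    }

    static void mydev_debugfs_exit(struct mydev *mydev)
    {
        debugfs_remove_recursive(mydev->debugfs);
    }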
tegra_smmu_probe(): allocate and initialize driver state, program the PTC/TLB configuration, enable the SMMU, and register with the IOMMU core
    struct tegra_smmu *smmu;
    ...
    smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
    if (!smmu)
        ...
    mc->smmu = smmu;
    ...
    smmu->asids = devm_bitmap_zalloc(dev, soc->num_asids, GFP_KERNEL);
    if (!smmu->asids)
        ...
    INIT_LIST_HEAD(&smmu->groups);
    mutex_init(&smmu->lock);
    smmu->regs = mc->regs;
    smmu->soc = soc;
    smmu->dev = dev;
    smmu->mc = mc;
    smmu->pfn_mask = ...;
    dev_dbg(dev, ..., mc->soc->num_address_bits, smmu->pfn_mask);
    smmu->tlb_mask = (1 << fls(smmu->soc->num_tlb_lines)) - 1;
    dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
            smmu->tlb_mask);
    ...
    smmu_writel(smmu, value, SMMU_PTC_CONFIG);
    value = ... | SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
    ...
    smmu_writel(smmu, value, SMMU_TLB_CONFIG);
    smmu_flush_ptc_all(smmu);
    smmu_flush_tlb(smmu);
    smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
    smmu_flush(smmu);
    ...
    err = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, dev_name(dev));
    ...
    err = iommu_device_register(&smmu->iommu, &tegra_smmu_ops, dev);
    /* on failure: */
    iommu_device_sysfs_remove(&smmu->iommu);
    ...
    tegra_smmu_debugfs_init(smmu);
    ...
    return smmu;
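Two derived masks are computed once at probe time. tlb_mask rounds num_tlb_lines up to a full power-of-two mask, so SMMU_TLB_CONFIG_ACTIVE_LINES() can pass the declared line count through unclipped. As a worked example (48 lines is illustrative): fls(48) == 6, so the mask is (1 << 6) - 1 == 0x3f, and 48 & 0x3f == 48.

    #include <linux/bitops.h>

    /* With num_tlb_lines == 48: fls(48) == 6, mask == 0x3f,
     * and 48 & 0x3f == 48, so no active lines are lost. */
    static unsigned long example_tlb_mask(unsigned int num_tlb_lines)
    {
        return (1UL << fls(num_tlb_lines)) - 1;
    }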
tegra_smmu_remove(): unregister and tear down in reverse of probe
    iommu_device_unregister(&smmu->iommu);
    iommu_device_sysfs_remove(&smmu->iommu);
    ...
    tegra_smmu_debugfs_exit(smmu);