Lines Matching +full:mc +full:- +full:sid

1 // SPDX-License-Identifier: GPL-2.0-only
10 * - SMMUv1 and v2 implementations
11 * - Stream-matching and stream-indexing
12 * - v7/v8 long-descriptor format
13 * - Non-secure access to the SMMU
14 * - Context fault reporting
15 * - Extended Stream ID (16 bit)
18 #define pr_fmt(fmt) "arm-smmu: " fmt
24 #include <linux/dma-mapping.h>
38 #include <linux/fsl/mc.h>
40 #include "arm-smmu.h"
41 #include "../../dma-iommu.h"
50 #define QCOM_DUMMY_VAL -1
73 if (pm_runtime_enabled(smmu->dev))
74 return pm_runtime_resume_and_get(smmu->dev);
81 if (pm_runtime_enabled(smmu->dev))
82 pm_runtime_put_autosuspend(smmu->dev);
95 * to 5-10sec worth of reprogramming the context bank, while
98 pm_runtime_set_autosuspend_delay(smmu->dev, 20);
99 pm_runtime_use_autosuspend(smmu->dev);
114 struct pci_bus *bus = to_pci_dev(dev)->bus;
117 bus = bus->parent;
118 return of_node_get(bus->bridge->parent->of_node);
121 return of_node_get(dev->of_node);
133 struct device_node *np = it->node;
136 of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
137 "#stream-id-cells", -1)
138 if (it->node == np) {
142 it->node = np;
143 return err == -ENOENT ? 0 : err;
158 if (!np || !of_property_present(np, "#stream-id-cells")) {
160 return -ENODEV;
169 return -ENODEV;
174 /* "mmu-masters" assumes Stream ID == Requester ID */
187 return -ENOMEM;
199 return -ENODEV;
215 if (smmu->impl && unlikely(smmu->impl->tlb_sync))
216 return smmu->impl->tlb_sync(smmu, page, sync, status);
220 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
228 dev_err_ratelimited(smmu->dev,
229 "TLB sync timed out -- SMMU may be deadlocked\n");
236 spin_lock_irqsave(&smmu->global_sync_lock, flags);
239 spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
244 struct arm_smmu_device *smmu = smmu_domain->smmu;
247 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
248 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
250 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
261 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
262 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
269 struct arm_smmu_device *smmu = smmu_domain->smmu;
273 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
281 struct arm_smmu_device *smmu = smmu_domain->smmu;
282 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
283 int idx = cfg->cbndx;
285 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
288 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
290 iova |= cfg->asid;
294 } while (size -= granule);
297 iova |= (u64)cfg->asid << 48;
301 } while (size -= granule);
309 struct arm_smmu_device *smmu = smmu_domain->smmu;
310 int idx = smmu_domain->cfg.cbndx;
312 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
317 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
322 } while (size -= granule);
329 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
331 if (cfg->flush_walk_prefer_tlbiasid) {
370 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
373 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
381 struct arm_smmu_device *smmu = smmu_domain->smmu;
383 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
386 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
411 cfi->iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
412 cfi->fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
413 cfi->fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
414 cfi->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
420 dev_err(smmu->dev,
422 cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx);
424 dev_err(smmu->dev, "FSR = %08x [%s%sFormat=%u%s%s%s%s%s%s%s%s], SID=0x%x\n",
425 cfi->fsr,
426 (cfi->fsr & ARM_SMMU_CB_FSR_MULTI) ? "MULTI " : "",
427 (cfi->fsr & ARM_SMMU_CB_FSR_SS) ? "SS " : "",
428 (u32)FIELD_GET(ARM_SMMU_CB_FSR_FORMAT, cfi->fsr),
429 (cfi->fsr & ARM_SMMU_CB_FSR_UUT) ? " UUT" : "",
430 (cfi->fsr & ARM_SMMU_CB_FSR_ASF) ? " ASF" : "",
431 (cfi->fsr & ARM_SMMU_CB_FSR_TLBLKF) ? " TLBLKF" : "",
432 (cfi->fsr & ARM_SMMU_CB_FSR_TLBMCF) ? " TLBMCF" : "",
433 (cfi->fsr & ARM_SMMU_CB_FSR_EF) ? " EF" : "",
434 (cfi->fsr & ARM_SMMU_CB_FSR_PF) ? " PF" : "",
435 (cfi->fsr & ARM_SMMU_CB_FSR_AFF) ? " AFF" : "",
436 (cfi->fsr & ARM_SMMU_CB_FSR_TF) ? " TF" : "",
437 cfi->cbfrsynra);
439 dev_err(smmu->dev, "FSYNR0 = %08x [S1CBNDX=%u%s%s%s%s%s%s PLVL=%u]\n",
440 cfi->fsynr,
441 (u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_S1CBNDX, cfi->fsynr),
442 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_AFR) ? " AFR" : "",
443 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_PTWF) ? " PTWF" : "",
444 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_NSATTR) ? " NSATTR" : "",
445 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_IND) ? " IND" : "",
446 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_PNU) ? " PNU" : "",
447 (cfi->fsynr & ARM_SMMU_CB_FSYNR0_WNR) ? " WNR" : "",
448 (u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_PLVL, cfi->fsynr));
455 struct arm_smmu_device *smmu = smmu_domain->smmu;
458 int idx = smmu_domain->cfg.cbndx;
466 ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova,
469 if (ret == -ENOSYS && __ratelimit(&rs))
494 dev_err(smmu->dev,
495 "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
498 dev_err(smmu->dev,
500 dev_err(smmu->dev,
512 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
513 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
514 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
516 cb->cfg = cfg;
520 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
521 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
523 cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg);
524 cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg);
525 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
526 cb->tcr[1] |= ARM_SMMU_TCR2_AS;
528 cb->tcr[0] |= ARM_SMMU_TCR_EAE;
531 cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg);
536 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
537 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr;
538 cb->ttbr[1] = 0;
540 cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
541 cfg->asid);
542 cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID,
543 cfg->asid);
545 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
546 cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
548 cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
551 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
554 /* MAIRs (stage-1 only) */
556 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
557 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
558 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
560 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair;
561 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32;
570 struct arm_smmu_cb *cb = &smmu->cbs[idx];
571 struct arm_smmu_cfg *cfg = cb->cfg;
579 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
582 if (smmu->version > ARM_SMMU_V1) {
583 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
587 /* 16-bit VMIDs live in CBA2R */
588 if (smmu->features & ARM_SMMU_FEAT_VMID16)
589 reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid);
595 reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar);
596 if (smmu->version < ARM_SMMU_V2)
597 reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx);
608 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
609 /* 8-bit VMIDs live in CBAR */
610 reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
619 if (stage1 && smmu->version > ARM_SMMU_V1)
620 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
621 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
624 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
625 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
626 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
627 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
629 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
632 cb->ttbr[1]);
635 /* MAIRs (stage-1 only) */
637 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
638 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
649 if (smmu->impl && smmu->impl->write_sctlr)
650 smmu->impl->write_sctlr(smmu, idx, reg);
659 if (smmu->impl && smmu->impl->alloc_context_bank)
660 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start);
662 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks);
674 struct iommu_domain *domain = &smmu_domain->domain;
675 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
678 mutex_lock(&smmu_domain->init_mutex);
679 if (smmu_domain->smmu)
698 * Note that you can't actually request stage-2 mappings.
700 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
701 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
702 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
703 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
708 * the decision into the io-pgtable code where it arguably belongs,
713 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
714 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
717 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
718 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
719 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
720 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
721 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
724 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
726 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
727 ret = -EINVAL;
731 switch (smmu_domain->stage) {
733 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
734 start = smmu->num_s2_context_banks;
735 ias = smmu->va_size;
736 oas = smmu->ipa_size;
737 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
739 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
748 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
756 cfg->cbar = CBAR_TYPE_S2_TRANS;
758 ias = smmu->ipa_size;
759 oas = smmu->pa_size;
760 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
767 if (smmu->version == ARM_SMMU_V2)
768 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
770 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
773 ret = -EINVAL;
782 smmu_domain->smmu = smmu;
784 cfg->cbndx = ret;
785 if (smmu->version < ARM_SMMU_V2) {
786 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
787 cfg->irptndx %= smmu->num_context_irqs;
789 cfg->irptndx = cfg->cbndx;
792 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
793 cfg->vmid = cfg->cbndx + 1;
795 cfg->asid = cfg->cbndx;
798 .pgsize_bitmap = smmu->pgsize_bitmap,
801 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
802 .tlb = smmu_domain->flush_ops,
803 .iommu_dev = smmu->dev,
806 if (smmu->impl && smmu->impl->init_context) {
807 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev);
812 if (smmu_domain->pgtbl_quirks)
813 pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks;
817 ret = -ENOMEM;
822 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
825 domain->geometry.aperture_start = ~0UL << ias;
826 domain->geometry.aperture_end = ~0UL;
828 domain->geometry.aperture_end = (1UL << ias) - 1;
831 domain->geometry.force_aperture = true;
835 arm_smmu_write_context_bank(smmu, cfg->cbndx);
839 * handler seeing a half-initialised domain state.
841 irq = smmu->irqs[cfg->irptndx];
843 if (smmu->impl && smmu->impl->context_fault)
844 context_fault = smmu->impl->context_fault;
848 if (smmu->impl && smmu->impl->context_fault_needs_threaded_irq)
849 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
852 "arm-smmu-context-fault",
855 ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED,
856 "arm-smmu-context-fault", smmu_domain);
859 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
860 cfg->irptndx, irq);
861 cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
864 mutex_unlock(&smmu_domain->init_mutex);
867 smmu_domain->pgtbl_ops = pgtbl_ops;
871 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
872 smmu_domain->smmu = NULL;
874 mutex_unlock(&smmu_domain->init_mutex);
880 struct arm_smmu_device *smmu = smmu_domain->smmu;
881 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
895 smmu->cbs[cfg->cbndx].cfg = NULL;
896 arm_smmu_write_context_bank(smmu, cfg->cbndx);
898 if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
899 irq = smmu->irqs[cfg->irptndx];
900 devm_free_irq(smmu->dev, irq, smmu_domain);
903 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
904 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
922 mutex_init(&smmu_domain->init_mutex);
923 spin_lock_init(&smmu_domain->cb_lock);
925 return &smmu_domain->domain;
942 struct arm_smmu_smr *smr = smmu->smrs + idx;
943 u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) |
944 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask);
946 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
953 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
956 if (smmu->impl && smmu->impl->write_s2cr) {
957 smmu->impl->write_s2cr(smmu, idx);
961 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
962 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
963 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
965 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
966 smmu->smrs[idx].valid)
974 if (smmu->smrs)
987 if (!smmu->smrs)
997 for (i = 0; i < smmu->num_mapping_groups; i++)
998 if (!smmu->smrs[i].valid)
1007 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
1010 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
1012 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask);
1015 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
1020 struct arm_smmu_smr *smrs = smmu->smrs;
1021 int i, free_idx = -ENOSPC;
1028 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1054 return -EINVAL;
1062 if (--smmu->s2crs[idx].count)
1065 smmu->s2crs[idx] = s2cr_init_val;
1066 if (smmu->smrs)
1067 smmu->smrs[idx].valid = false;
1076 struct arm_smmu_device *smmu = cfg->smmu;
1077 struct arm_smmu_smr *smrs = smmu->smrs;
1080 mutex_lock(&smmu->stream_map_mutex);
1083 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1084 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1087 ret = -EEXIST;
1091 ret = arm_smmu_find_sme(smmu, sid, mask);
1096 if (smrs && smmu->s2crs[idx].count == 0) {
1097 smrs[idx].id = sid;
1101 smmu->s2crs[idx].count++;
1102 cfg->smendx[i] = (s16)idx;
1109 mutex_unlock(&smmu->stream_map_mutex);
1113 while (i--) {
1114 arm_smmu_free_sme(smmu, cfg->smendx[i]);
1115 cfg->smendx[i] = INVALID_SMENDX;
1117 mutex_unlock(&smmu->stream_map_mutex);
1124 struct arm_smmu_device *smmu = cfg->smmu;
1127 mutex_lock(&smmu->stream_map_mutex);
1131 cfg->smendx[i] = INVALID_SMENDX;
1133 mutex_unlock(&smmu->stream_map_mutex);
1140 struct arm_smmu_device *smmu = cfg->smmu;
1141 struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1165 * domains between of_xlate() and probe_device() - we have no way to cope
1172 return -ENODEV;
1174 smmu = cfg->smmu;
1189 if (smmu_domain->smmu != smmu) {
1190 ret = -EINVAL;
1196 smmu_domain->cfg.cbndx, fwspec);
1212 return -ENODEV;
1213 smmu = cfg->smmu;
1259 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1260 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1264 return -ENODEV;
1267 ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped);
1277 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1278 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
1285 ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather);
1294 struct arm_smmu_device *smmu = smmu_domain->smmu;
1296 if (smmu_domain->flush_ops) {
1298 smmu_domain->flush_ops->tlb_flush_all(smmu_domain);
1307 struct arm_smmu_device *smmu = smmu_domain->smmu;
1313 if (smmu->version == ARM_SMMU_V2 ||
1314 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1325 struct arm_smmu_device *smmu = smmu_domain->smmu;
1326 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1327 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1328 struct device *dev = smmu->dev;
1333 int ret, idx = cfg->cbndx;
1340 spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1342 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
1350 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1355 return ops->iova_to_phys(ops, iova);
1359 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1377 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1382 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1383 smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1386 return ops->iova_to_phys(ops, iova);
1401 return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
1431 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master()
1439 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1442 ret = -EINVAL;
1443 for (i = 0; i < fwspec->num_ids; i++) {
1444 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
1445 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
1447 if (sid & ~smmu->streamid_mask) {
1449 sid, smmu->streamid_mask);
1452 if (mask & ~smmu->smr_mask_mask) {
1454 mask, smmu->smr_mask_mask);
1459 ret = -ENOMEM;
1465 cfg->smmu = smmu;
1467 while (i--)
1468 cfg->smendx[i] = INVALID_SMENDX;
1480 device_link_add(dev, smmu->dev,
1483 return &smmu->iommu;
1498 ret = arm_smmu_rpm_get(cfg->smmu);
1504 arm_smmu_rpm_put(cfg->smmu);
1515 smmu = cfg->smmu;
1517 if (smmu->impl && smmu->impl->probe_finalize)
1518 smmu->impl->probe_finalize(smmu, dev);
1525 struct arm_smmu_device *smmu = cfg->smmu;
1529 mutex_lock(&smmu->stream_map_mutex);
1531 if (group && smmu->s2crs[idx].group &&
1532 group != smmu->s2crs[idx].group) {
1533 mutex_unlock(&smmu->stream_map_mutex);
1534 return ERR_PTR(-EINVAL);
1537 group = smmu->s2crs[idx].group;
1541 mutex_unlock(&smmu->stream_map_mutex);
1555 smmu->s2crs[idx].group = group;
1557 mutex_unlock(&smmu->stream_map_mutex);
1566 mutex_lock(&smmu_domain->init_mutex);
1567 if (smmu_domain->smmu)
1568 ret = -EPERM;
1570 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1571 mutex_unlock(&smmu_domain->init_mutex);
1582 mutex_lock(&smmu_domain->init_mutex);
1583 if (smmu_domain->smmu)
1584 ret = -EPERM;
1586 smmu_domain->pgtbl_quirks = quirks;
1587 mutex_unlock(&smmu_domain->init_mutex);
1597 if (args->args_count > 0)
1598 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]);
1600 if (args->args_count > 1)
1601 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]);
1602 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1619 list_add_tail(&region->list, head);
1627 const struct arm_smmu_impl *impl = cfg->smmu->impl;
1632 if (impl && impl->def_domain_type)
1633 return impl->def_domain_type(dev);
1650 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1678 for (i = 0; i < smmu->num_mapping_groups; ++i)
1682 for (i = 0; i < smmu->num_context_banks; ++i) {
1713 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1716 if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1719 if (smmu->impl && smmu->impl->reset)
1720 smmu->impl->reset(smmu);
1750 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1753 dev_notice(smmu->dev, "probing hardware configuration...\n");
1754 dev_notice(smmu->dev, "SMMUv%d with:\n",
1755 smmu->version == ARM_SMMU_V2 ? 2 : 1);
1767 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1768 dev_notice(smmu->dev, "\tstage 1 translation\n");
1772 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1773 dev_notice(smmu->dev, "\tstage 2 translation\n");
1777 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1778 dev_notice(smmu->dev, "\tnested translation\n");
1781 if (!(smmu->features &
1783 dev_err(smmu->dev, "\tno translation support!\n");
1784 return -ENODEV;
1788 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) {
1789 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1790 dev_notice(smmu->dev, "\taddress translation ops\n");
1801 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1802 cttw_fw ? "" : "non-");
1804 dev_notice(smmu->dev,
1808 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
1809 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1814 smmu->streamid_mask = size - 1;
1816 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1819 dev_err(smmu->dev,
1820 "stream-matching supported, but no SMRs present!\n");
1821 return -ENODEV;
1824 /* Zero-initialised to mark as invalid */
1825 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1827 if (!smmu->smrs)
1828 return -ENOMEM;
1830 dev_notice(smmu->dev,
1833 /* s2cr->type == 0 means translation, so initialise explicitly */
1834 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1836 if (!smmu->s2crs)
1837 return -ENOMEM;
1839 smmu->s2crs[i] = s2cr_init_val;
1841 smmu->num_mapping_groups = size;
1842 mutex_init(&smmu->stream_map_mutex);
1843 spin_lock_init(&smmu->global_sync_lock);
1845 if (smmu->version < ARM_SMMU_V2 ||
1847 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1849 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1854 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12;
1858 if (smmu->numpage != 2 * size << smmu->pgshift)
1859 dev_warn(smmu->dev,
1861 2 * size << smmu->pgshift, smmu->numpage);
1863 smmu->numpage = size;
1865 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id);
1866 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id);
1867 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1868 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1869 return -ENODEV;
1871 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1872 smmu->num_context_banks, smmu->num_s2_context_banks);
1873 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks,
1874 sizeof(*smmu->cbs), GFP_KERNEL);
1875 if (!smmu->cbs)
1876 return -ENOMEM;
1881 smmu->ipa_size = size;
1885 smmu->pa_size = size;
1888 smmu->features |= ARM_SMMU_FEAT_VMID16;
1895 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1896 dev_warn(smmu->dev,
1899 if (smmu->version < ARM_SMMU_V2) {
1900 smmu->va_size = smmu->ipa_size;
1901 if (smmu->version == ARM_SMMU_V1_64K)
1902 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1905 smmu->va_size = arm_smmu_id_size_to_bits(size);
1907 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
1909 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
1911 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
1914 if (smmu->impl && smmu->impl->cfg_probe) {
1915 ret = smmu->impl->cfg_probe(smmu);
1921 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1922 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1923 if (smmu->features &
1925 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
1926 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1927 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
1928 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1929 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
1931 if (arm_smmu_ops.pgsize_bitmap == -1UL)
1932 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
1934 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
1935 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
1936 smmu->pgsize_bitmap);
1939 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1940 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1941 smmu->va_size, smmu->ipa_size);
1943 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1944 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1945 smmu->ipa_size, smmu->pa_size);
1966 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1967 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1968 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1969 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1970 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1971 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1972 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1973 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1986 smmu->version = ARM_SMMU_V1;
1987 smmu->model = GENERIC_SMMU;
1990 smmu->version = ARM_SMMU_V1_64K;
1991 smmu->model = GENERIC_SMMU;
1994 smmu->version = ARM_SMMU_V2;
1995 smmu->model = GENERIC_SMMU;
1998 smmu->version = ARM_SMMU_V2;
1999 smmu->model = ARM_MMU500;
2002 smmu->version = ARM_SMMU_V2;
2003 smmu->model = CAVIUM_SMMUV2;
2006 ret = -ENODEV;
2015 struct device *dev = smmu->dev;
2022 iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2024 ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2032 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2033 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2041 return -ENODEV;
2049 struct device *dev = smmu->dev;
2052 if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
2053 return dev_err_probe(dev, -ENODEV,
2054 "missing #global-interrupts property\n");
2058 smmu->version = data->version;
2059 smmu->model = data->model;
2061 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2064 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
2072 return -ENODEV;
2075 if (of_dma_is_coherent(dev->of_node))
2076 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2089 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2106 for (i = 0; i < rmr->num_sids; i++) {
2107 idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0);
2111 if (smmu->s2crs[idx].count == 0) {
2112 smmu->smrs[idx].id = rmr->sids[i];
2113 smmu->smrs[idx].mask = 0;
2114 smmu->smrs[idx].valid = true;
2116 smmu->s2crs[idx].count++;
2117 smmu->s2crs[idx].type = S2CR_TYPE_BYPASS;
2118 smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
2124 dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt,
2126 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list);
2133 struct device *dev = &pdev->dev;
2141 return -ENOMEM;
2143 smmu->dev = dev;
2145 if (dev->of_node)
2152 smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
2153 if (IS_ERR(smmu->base))
2154 return PTR_ERR(smmu->base);
2155 smmu->ioaddr = res->start;
2161 smmu->numpage = resource_size(res);
2169 smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
2170 if (smmu->num_context_irqs <= 0)
2171 return dev_err_probe(dev, -ENODEV,
2175 smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
2176 sizeof(*smmu->irqs), GFP_KERNEL);
2177 if (!smmu->irqs)
2178 return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
2179 smmu->num_context_irqs);
2181 for (i = 0; i < smmu->num_context_irqs; i++) {
2186 smmu->irqs[i] = irq;
2189 err = devm_clk_bulk_get_all(dev, &smmu->clks);
2194 smmu->num_clks = err;
2196 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks);
2204 if (smmu->version == ARM_SMMU_V2) {
2205 if (smmu->num_context_banks > smmu->num_context_irqs) {
2208 smmu->num_context_irqs, smmu->num_context_banks);
2209 return -ENODEV;
2213 smmu->num_context_irqs = smmu->num_context_banks;
2216 if (smmu->impl && smmu->impl->global_fault)
2217 global_fault = smmu->impl->global_fault;
2228 "arm-smmu global fault", smmu);
2235 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2236 "smmu.%pa", &smmu->ioaddr);
2242 err = iommu_device_register(&smmu->iommu, &arm_smmu_ops,
2246 iommu_device_sysfs_remove(&smmu->iommu);
2259 * We want to avoid touching dev->power.lock in fastpaths unless
2260 * it's really going to do something useful - pm_runtime_enabled()
2264 if (dev->pm_domain) {
2276 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2277 dev_notice(&pdev->dev, "disabling translation\n");
2284 if (pm_runtime_enabled(smmu->dev))
2285 pm_runtime_force_suspend(smmu->dev);
2287 clk_bulk_disable(smmu->num_clks, smmu->clks);
2289 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2296 iommu_device_unregister(&smmu->iommu);
2297 iommu_device_sysfs_remove(&smmu->iommu);
2307 ret = clk_bulk_enable(smmu->num_clks, smmu->clks);
2320 clk_bulk_disable(smmu->num_clks, smmu->clks);
2330 ret = clk_bulk_prepare(smmu->num_clks, smmu->clks);
2339 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2357 clk_bulk_unprepare(smmu->num_clks, smmu->clks);
2369 .name = "arm-smmu",
2382 MODULE_ALIAS("platform:arm-smmu");