/linux-6.12.1/Documentation/devicetree/bindings/iommu/ |
D | arm,smmu.yaml |
      4  $id: http://devicetree.org/schemas/iommu/arm,smmu.yaml#
     18  The SMMU may also raise interrupts in response to various fault
     26  - description: Qcom SoCs implementing "arm,smmu-v2"
     29  - qcom,msm8996-smmu-v2
     30  - qcom,msm8998-smmu-v2
     31  - qcom,sdm630-smmu-v2
     32  - qcom,sm6375-smmu-v2
     33  - const: qcom,smmu-v2
     35  - description: Qcom SoCs implementing "qcom,smmu-500" and "arm,mmu-500"
     38  - qcom,qcm2290-smmu-500
    [all …]
|
D | arm,smmu-v3.yaml |
      4  $id: http://devicetree.org/schemas/iommu/arm,smmu-v3.yaml#
     23  const: arm,smmu-v3
     53  Present if page table walks made by the SMMU are cache coherent with the
     56  NOTE: this only applies to the SMMU itself, not masters connected
     57  upstream of the SMMU.
     63  description: Avoid sending CMD_PREFETCH_* commands to the SMMU.
     70  doesn't support SMMU page1 register space.
     85  compatible = "arm,smmu-v3";
|
D | nvidia,tegra30-smmu.txt |
      1  NVIDIA Tegra 30 IOMMU H/W, SMMU (System Memory Management Unit)
      4  - compatible : "nvidia,tegra30-smmu"
      6  of the SMMU register blocks.
     10  - nvidia,ahb : phandle to the ahb bus connected to SMMU.
     13  smmu {
     14  compatible = "nvidia,tegra30-smmu";
|
/linux-6.12.1/drivers/iommu/arm/arm-smmu/ |
D | arm-smmu.c |
      3  * IOMMU API for ARM architected SMMU implementations.
     13  * - Non-secure access to the SMMU
     18  #define pr_fmt(fmt) "arm-smmu: " fmt
     40  #include "arm-smmu.h"
     44  * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
     58  …"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' f…
     63  …domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
     71  static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu) in arm_smmu_rpm_get() argument
     73  if (pm_runtime_enabled(smmu->dev)) in arm_smmu_rpm_get()
     74  return pm_runtime_resume_and_get(smmu->dev); in arm_smmu_rpm_get()
    [all …]
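The arm_smmu_rpm_get() fragment above is the usual runtime-PM bracket: take a reference (resuming the device if needed) before touching registers, and drop it afterwards. Below is a minimal sketch of that pattern with hypothetical example_* names; the empty MMIO comment stands in for the real register work.

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    /* Illustrative helpers only; the arm-smmu driver has its own wrappers. */
    static int example_rpm_get(struct device *dev)
    {
            if (pm_runtime_enabled(dev))
                    return pm_runtime_resume_and_get(dev);  /* < 0 on failure */
            return 0;
    }

    static void example_rpm_put(struct device *dev)
    {
            if (pm_runtime_enabled(dev))
                    pm_runtime_put_autosuspend(dev);
    }

    /* Usage: every register access is wrapped by a get/put pair. */
    static int example_poke_hw(struct device *dev)
    {
            int ret = example_rpm_get(dev);

            if (ret < 0)
                    return ret;
            /* ... MMIO access would go here ... */
            example_rpm_put(dev);
            return 0;
    }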
|
D | arm-smmu-qcom.c |
      7  #include <linux/adreno-smmu-priv.h>
     14  #include "arm-smmu.h"
     15  #include "arm-smmu-qcom.h"
     19  static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu) in to_qcom_smmu() argument
     21  return container_of(smmu, struct qcom_smmu, smmu); in to_qcom_smmu()
     24  static void qcom_smmu_tlb_sync(struct arm_smmu_device *smmu, int page, in qcom_smmu_tlb_sync() argument
     30  arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL); in qcom_smmu_tlb_sync()
     33  reg = arm_smmu_readl(smmu, page, status); in qcom_smmu_tlb_sync()
     41  qcom_smmu_tlb_sync_debug(smmu); in qcom_smmu_tlb_sync()
     44  static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx, in qcom_adreno_smmu_write_sctlr() argument
    [all …]
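The qcom_smmu_tlb_sync() fragments show the common shape of a TLB sync on this class of hardware: write a dummy value to the sync register, then poll the status register until the sync completes, handing off to the debug helper on timeout. A hedged sketch of that kick-and-poll step follows; the register offsets, the status bit, and the use of readl_poll_timeout() are placeholders, not the driver's actual implementation.

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>

    #define EX_TLBSYNC      0x0             /* placeholder sync register offset */
    #define EX_TLBSTATUS    0x4             /* placeholder status register offset */
    #define EX_SYNC_ACTIVE  BIT(0)          /* placeholder "sync in flight" bit */

    static int example_tlb_sync(void __iomem *base)
    {
            u32 reg;

            /* Any value works as the kick; the write itself starts the sync. */
            writel_relaxed(0, base + EX_TLBSYNC);

            /* Poll until the active bit clears, giving up after 100 ms. */
            return readl_poll_timeout(base + EX_TLBSTATUS, reg,
                                      !(reg & EX_SYNC_ACTIVE), 10, 100000);
    }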
|
D | arm-smmu-impl.c |
      2  // Miscellaneous Arm SMMU implementation and integration quirks
      5  #define pr_fmt(fmt) "arm-smmu: " fmt
     10  #include "arm-smmu.h"
     28  static u32 arm_smmu_read_ns(struct arm_smmu_device *smmu, int page, in arm_smmu_read_ns() argument
     33  return readl_relaxed(arm_smmu_page(smmu, page) + offset); in arm_smmu_read_ns()
     36  static void arm_smmu_write_ns(struct arm_smmu_device *smmu, int page, in arm_smmu_write_ns() argument
     41  writel_relaxed(val, arm_smmu_page(smmu, page) + offset); in arm_smmu_write_ns()
     52  struct arm_smmu_device smmu; member
     56  static int cavium_cfg_probe(struct arm_smmu_device *smmu) in cavium_cfg_probe() argument
     59  struct cavium_smmu *cs = container_of(smmu, struct cavium_smmu, smmu); in cavium_cfg_probe()
    [all …]
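The cavium_smmu lines illustrate how per-vendor quirks layer on the generic driver: the vendor structure embeds struct arm_smmu_device and recovers itself with container_of() when the core hands back the embedded pointer. Here is a minimal sketch of that embed-and-recover pattern, using illustrative example_* types.

    #include <linux/container_of.h>
    #include <linux/types.h>

    struct example_smmu_device {
            void __iomem *base;                     /* generic state the core knows about */
    };

    struct example_vendor_smmu {
            struct example_smmu_device smmu;        /* embedded generic part */
            u32 id_base;                            /* vendor-specific extra state */
    };

    static struct example_vendor_smmu *to_vendor_smmu(struct example_smmu_device *smmu)
    {
            /* Works because 'smmu' is the member embedded in the wrapper. */
            return container_of(smmu, struct example_vendor_smmu, smmu);
    }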
|
D | arm-smmu-nvidia.c |
     12  #include "arm-smmu.h"
     21  * In addition, the SMMU driver needs to coordinate with the memory controller
     30  * SMMU instance.
     35  struct arm_smmu_device smmu; member
     41  static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu) in to_nvidia_smmu() argument
     43  return container_of(smmu, struct nvidia_smmu, smmu); in to_nvidia_smmu()
     46  static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu, in nvidia_smmu_page() argument
     51  nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu); in nvidia_smmu_page()
     52  return nvidia_smmu->bases[inst] + (page << smmu->pgshift); in nvidia_smmu_page()
     55  static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu, in nvidia_smmu_read_reg() argument
    [all …]
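nvidia_smmu_page() above shows how a multi-instance integration locates a register page: each hardware instance has its own MMIO base, and page n lives at n << pgshift within that base. A small sketch of the address arithmetic, with illustrative names and a fixed two-instance array as an assumption:

    #include <linux/io.h>

    struct example_multi_smmu {
            void __iomem *bases[2];         /* one MMIO base per hardware instance */
            unsigned int pgshift;           /* log2 of the register page size */
    };

    static void __iomem *example_page(struct example_multi_smmu *s,
                                      unsigned int inst, int page)
    {
            return s->bases[inst] + ((unsigned long)page << s->pgshift);
    }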
|
D | arm-smmu-qcom-debug.c |
     19  #include "arm-smmu.h"
     20  #include "arm-smmu-qcom.h"
     59  static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu) in to_qcom_smmu() argument
     61  return container_of(smmu, struct qcom_smmu, smmu); in to_qcom_smmu()
     64  void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) in qcom_smmu_tlb_sync_debug() argument
     68  struct qcom_smmu *qsmmu = container_of(smmu, struct qcom_smmu, smmu); in qcom_smmu_tlb_sync_debug()
     74  dev_err(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n"); in qcom_smmu_tlb_sync_debug()
     80  ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_TBU_PWR_STATUS], in qcom_smmu_tlb_sync_debug()
     83  dev_err(smmu->dev, in qcom_smmu_tlb_sync_debug()
     86  ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK], in qcom_smmu_tlb_sync_debug()
    [all …]
|
D | arm-smmu.h |
      3  * IOMMU API for ARM architected SMMU implementations.
    252  /* Maximum number of context banks per SMMU */
    377  struct arm_smmu_device *smmu; member
    383  struct mutex init_mutex; /* Protects smmu pointer */
    389  struct arm_smmu_device *smmu; member
    434  u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset);
    435  void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset,
    437  u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset);
    438  void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset,
    440  int (*cfg_probe)(struct arm_smmu_device *smmu);
    [all …]
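The read_reg/write_reg/cfg_probe members above are the implementation hook points: a platform quirk can override register accessors, and the core falls back to plain MMIO when no hook is installed. A hedged sketch of that indirection, with illustrative example_* names rather than the header's real types:

    #include <linux/io.h>
    #include <linux/types.h>

    struct example_smmu;

    struct example_impl_ops {
            u32 (*read_reg)(struct example_smmu *s, int page, int offset);
            void (*write_reg)(struct example_smmu *s, int page, int offset, u32 val);
            int (*cfg_probe)(struct example_smmu *s);
    };

    struct example_smmu {
            void __iomem *base;
            unsigned int pgshift;
            const struct example_impl_ops *impl;    /* NULL for a plain implementation */
    };

    static u32 example_readl(struct example_smmu *s, int page, int offset)
    {
            if (s->impl && s->impl->read_reg)
                    return s->impl->read_reg(s, page, offset);
            return readl_relaxed(s->base + ((unsigned long)page << s->pgshift) + offset);
    }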
|
D | Makefile |
      4  arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o
      5  arm_smmu-$(CONFIG_ARM_SMMU_QCOM) += arm-smmu-qcom.o
      6  arm_smmu-$(CONFIG_ARM_SMMU_QCOM_DEBUG) += arm-smmu-qcom-debug.o
|
/linux-6.12.1/drivers/iommu/ |
D | tegra-smmu.c |
     26  struct tegra_smmu *smmu; member
     56  struct tegra_smmu *smmu; member
     72  static inline void smmu_writel(struct tegra_smmu *smmu, u32 value, in smmu_writel() argument
     75  writel(value, smmu->regs + offset); in smmu_writel()
     78  static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) in smmu_readl() argument
     80  return readl(smmu->regs + offset); in smmu_readl()
     89  #define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \ argument
     90  ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
    168  static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr) in smmu_dma_addr_valid() argument
    171  return (addr & smmu->pfn_mask) == addr; in smmu_dma_addr_valid()
    [all …]
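smmu_dma_addr_valid() above gates mappings on the hardware's addressable range: an address is acceptable only if masking it with the supported PFN mask leaves it unchanged, i.e. no bits are set beyond what the SMMU can translate. A one-line sketch of that check; pfn_mask is a placeholder for however the driver derives the usable range.

    #include <linux/types.h>

    static bool example_dma_addr_valid(dma_addr_t addr, dma_addr_t pfn_mask)
    {
            return (addr & pfn_mask) == addr;       /* no bits above the supported range */
    }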
|
D | Kconfig |
    248  bool "NVIDIA Tegra SMMU Support"
    254  This driver supports the IOMMU hardware (SMMU) found on NVIDIA Tegra
    319  tristate "ARM Ltd. System MMU (SMMU) Support"
    330  the ARM SMMU architecture.
    338  to the SMMU but does not provide any support via the DMA API.
    345  bool "Default to disabling bypass on ARM SMMU v1 and v2"
    352  will not be allowed to pass through the SMMU.
    366  'arm-smmu.disable_bypass' will continue to override this
    375  of the ARM SMMU, this needs to be built into the SMMU driver.
    378  bool "ARM SMMU QCOM implementation defined debug support"
    [all …]
|
/linux-6.12.1/drivers/iommu/arm/arm-smmu-v3/ |
D | arm-smmu-v3.c |
     32  #include "arm-smmu-v3.h"
     87  struct arm_smmu_device *smmu, u32 flags);
     90  static void parse_driver_options(struct arm_smmu_device *smmu) in parse_driver_options() argument
     95  if (of_property_read_bool(smmu->dev->of_node, in parse_driver_options()
     97  smmu->options |= arm_smmu_options[i].opt; in parse_driver_options()
     98  dev_notice(smmu->dev, "option %s\n", in parse_driver_options()
    193  static void queue_poll_init(struct arm_smmu_device *smmu, in queue_poll_init() argument
    198  qp->wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV); in queue_poll_init()
    349  static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu, in arm_smmu_get_cmdq() argument
    354  if (smmu->impl_ops && smmu->impl_ops->get_secondary_cmdq) in arm_smmu_get_cmdq()
    [all …]
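parse_driver_options() above is a table-driven scan of the SMMU's DT node: each table entry pairs a property name with an option bit, and every property that is present gets OR'ed into an options mask. A minimal sketch of that loop follows; the table contents and names are illustrative, not the driver's actual option table.

    #include <linux/bits.h>
    #include <linux/of.h>
    #include <linux/types.h>

    struct example_option {
            u32 opt;
            const char *prop;
    };

    static const struct example_option example_options[] = {
            { BIT(0), "hisilicon,broken-prefetch-cmd" },            /* example entry */
            { BIT(1), "cavium,cn9900-broken-page1-regspace" },      /* example entry */
            { 0, NULL },
    };

    static u32 example_parse_options(struct device_node *np)
    {
            u32 options = 0;
            int i;

            for (i = 0; example_options[i].prop; i++)
                    if (of_property_read_bool(np, example_options[i].prop))
                            options |= example_options[i].opt;
            return options;
    }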
|
D | tegra241-cmdqv.c |
     15  #include "arm-smmu-v3.h"
    109  "This allows to disable CMDQV HW and use default SMMU internal CMDQ.");
    164  * @smmu: SMMUv3 device
    175  struct arm_smmu_device smmu; member
    268  __arm_smmu_cmdq_skip_err(&vintf->cmdqv->smmu, &vcmdq->cmdq); in tegra241_vintf0_handle_error()
    319  tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu, in tegra241_cmdqv_get_cmdq() argument
    323  container_of(smmu, struct tegra241_cmdqv, smmu); in tegra241_cmdqv_get_cmdq()
    331  /* Use SMMU CMDQ if VINTF0 is uninitialized */ in tegra241_cmdqv_get_cmdq()
    347  /* Unsupported CMD goes for smmu->cmdq pathway */ in tegra241_cmdqv_get_cmdq()
    458  static int tegra241_cmdqv_hw_reset(struct arm_smmu_device *smmu) in tegra241_cmdqv_hw_reset() argument
    [all …]
|
D | arm-smmu-v3-sva.c |
     13  #include "arm-smmu-v3.h"
    105  if (!(master->smmu->features & ARM_SMMU_FEAT_STALL_FORCE)) in arm_smmu_make_sva_cd()
    121  * command queue with an address-space TLBI command, when SMMU w/o a range
    142  if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) { in arm_smmu_mm_arch_invalidate_secondary_tlbs()
    151  arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid); in arm_smmu_mm_arch_invalidate_secondary_tlbs()
    187  arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid); in arm_smmu_mm_release()
    202  bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) in arm_smmu_sva_supported() argument
    212  if ((smmu->features & feat_mask) != feat_mask) in arm_smmu_sva_supported()
    215  if (!(smmu->pgsize_bitmap & PAGE_SIZE)) in arm_smmu_sva_supported()
    226  if (smmu->oas < oas) in arm_smmu_sva_supported()
    [all …]
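arm_smmu_sva_supported() above gates shared virtual addressing on three independent checks: every required feature bit must be present, the CPU page size must appear in the SMMU's supported page-size bitmap, and the SMMU's output address size must cover what the CPUs use. A hedged sketch of that gating, with illustrative feature bits standing in for the real ARM_SMMU_FEAT_* flags:

    #include <linux/bits.h>
    #include <linux/mm.h>
    #include <linux/types.h>

    #define EX_FEAT_A       BIT(0)          /* placeholder required features */
    #define EX_FEAT_B       BIT(1)

    static bool example_sva_supported(u32 features, unsigned long pgsize_bitmap,
                                      unsigned int smmu_oas, unsigned int cpu_oas)
    {
            const u32 feat_mask = EX_FEAT_A | EX_FEAT_B;

            if ((features & feat_mask) != feat_mask)
                    return false;                   /* missing a required feature */
            if (!(pgsize_bitmap & PAGE_SIZE))
                    return false;                   /* CPU page size not supported */
            return smmu_oas >= cpu_oas;             /* output address range must cover the CPU's */
    }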
|
/linux-6.12.1/drivers/memory/tegra/ |
D | tegra210.c |
    .smmu = {    (matches at lines 20, 36, 52, 68, 84, 100, 116, 132, 148, 164)
    [all …]
|
D | tegra114.c |
    .smmu = {    (matches at lines 31, 47, 63, 79, 95, 111, 127, 143, 159, 175)
    [all …]
|
D | tegra124.c |
    .smmu = {    (matches at lines 32, 48, 64, 80, 96, 112, 128, 144, 160, 176)
    [all …]
|
D | tegra30.c |
    .smmu = {    (matches at lines 54, 71, 88, 105, 122, 139, 156, 173, 190, 207)
    [all …]
|
/linux-6.12.1/drivers/acpi/arm64/ |
D | iort.c |
    412  struct acpi_iort_smmu_v3 *smmu; in iort_get_id_mapping_index() local
    424  smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in iort_get_id_mapping_index()
    430  if (smmu->event_gsiv && smmu->pri_gsiv && in iort_get_id_mapping_index()
    431  smmu->gerr_gsiv && smmu->sync_gsiv) in iort_get_id_mapping_index()
    433  } else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) { in iort_get_id_mapping_index()
    437  if (smmu->id_mapping_index >= node->mapping_count) { in iort_get_id_mapping_index()
    443  return smmu->id_mapping_index; in iort_get_id_mapping_index()
    536  * as NC (named component) -> SMMU -> ITS. If the type is matched, in iort_node_map_platform_id()
    556  * device (such as SMMU, PMCG),its iort node already cached in iort_find_dev_node()
    888  struct acpi_iort_node *smmu, in iort_get_rmrs() argument
    [all …]
|
/linux-6.12.1/arch/arm64/boot/dts/marvell/ |
D | armada-8040.dtsi |
     20  <0x0 &smmu 0x480 0x20>,
     21  <0x100 &smmu 0x4a0 0x20>,
     22  <0x200 &smmu 0x4c0 0x20>;
     36  iommus = <&smmu 0x444>;
     40  iommus = <&smmu 0x445>;
     44  iommus = <&smmu 0x440>;
     48  iommus = <&smmu 0x441>;
     52  iommus = <&smmu 0x454>;
     56  iommus = <&smmu 0x450>;
     60  iommus = <&smmu 0x451>;
|
D | armada-7040.dtsi |
     20  <0x0 &smmu 0x480 0x20>,
     21  <0x100 &smmu 0x4a0 0x20>,
     22  <0x200 &smmu 0x4c0 0x20>;
     27  iommus = <&smmu 0x444>;
     31  iommus = <&smmu 0x445>;
     35  iommus = <&smmu 0x440>;
     39  iommus = <&smmu 0x441>;
|
/linux-6.12.1/Documentation/devicetree/bindings/display/ |
D | arm,komeda.yaml |
    102  iommus = <&smmu 0>, <&smmu 1>, <&smmu 2>, <&smmu 3>,
    103  <&smmu 8>,
    104  <&smmu 4>, <&smmu 5>, <&smmu 6>, <&smmu 7>,
    105  <&smmu 9>;
|
/linux-6.12.1/arch/arm64/boot/dts/freescale/ |
D | imx8qm-ss-conn.dtsi |
      9  iommus = <&smmu 0x12 0x7f80>;
     14  iommus = <&smmu 0x12 0x7f80>;
     19  iommus = <&smmu 0x11 0x7f80>;
     24  iommus = <&smmu 0x11 0x7f80>;
     29  iommus = <&smmu 0x11 0x7f80>;
|
/linux-6.12.1/include/linux/ |
D | adreno-smmu-priv.h |
     38  * struct adreno_smmu_priv - private interface between adreno-smmu and GPU
     40  * @cookie: An opque token provided by adreno-smmu and passed
     54  * The GPU driver (drm/msm) and adreno-smmu work together for controlling
     55  * the GPU's SMMU instance. This is by necessity, as the GPU is directly
     56  * updating the SMMU for context switches, while on the other hand we do
     57  * not want to duplicate all of the initial setup logic from arm-smmu.
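The kerneldoc above describes an opaque-cookie contract: the SMMU side hands the GPU driver a token plus callbacks that take it back, so the GPU can drive context switches without duplicating arm-smmu's setup logic. Below is a deliberately simplified, hypothetical sketch of that shape; apart from the cookie, the field names are illustrative and do not reflect the header's actual layout.

    #include <linux/types.h>

    struct example_gpu_smmu_priv {
            const void *cookie;     /* opaque token owned by the SMMU side */
            /* Hypothetical callbacks, named for illustration only: */
            int (*set_pagetable)(const void *cookie, u64 ttbr, u32 asid);
            void (*resume_translation)(const void *cookie, bool terminate);
    };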
|