Lines matching "smmu" in arm-smmu-nvidia.c, the NVIDIA Tegra implementation hooks for the Arm SMMU driver (listed by source line number).
12 #include "arm-smmu.h"
In the file-top comment:
21 * In addition, the SMMU driver needs to coordinate with the memory controller
30 * SMMU instance.
In struct nvidia_smmu:
35 struct arm_smmu_device smmu;

In to_nvidia_smmu():
41 static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu)
43 return container_of(smmu, struct nvidia_smmu, smmu);
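These two fragments are the standard arm-smmu "impl" subclassing pattern: a wrapper structure embeds struct arm_smmu_device, and container_of() recovers the wrapper from a pointer to the embedded member. A minimal sketch of the wrapper's likely shape, reconstructed from the matched lines; the instance count, the num_instances field and the memory-controller pointer are assumptions, not the verbatim upstream definition. This and the later sketches assume the usual kernel headers plus "arm-smmu.h" and are reconstructions, not excerpts.

#include <linux/io.h>

#include "arm-smmu.h"

struct tegra_mc;

#define MAX_SMMU_INSTANCES 2			/* assumed: two mirrored instances */

struct nvidia_smmu {
	struct arm_smmu_device smmu;		/* embedded base device (line 35) */
	void __iomem *bases[MAX_SMMU_INSTANCES];/* per-instance MMIO bases */
	unsigned int num_instances;		/* assumed field name */
	struct tegra_mc *mc;			/* memory controller handle (assumed) */
};

static inline struct nvidia_smmu *to_nvidia_smmu(struct arm_smmu_device *smmu)
{
	/* Recover the wrapper from the embedded arm_smmu_device. */
	return container_of(smmu, struct nvidia_smmu, smmu);
}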
In nvidia_smmu_page():
46 static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
51 nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu);
52 return nvidia_smmu->bases[inst] + (page << smmu->pgshift);
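Line 52 is the whole addressing trick: register page 'page' of instance 'inst' sits at that instance's MMIO base plus the page index shifted by the SMMU's page shift. Reassembled from lines 46, 51 and 52; the middle of the parameter list is inferred from the call sites, not visible in the listing.

static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
					     unsigned int inst, int page)
{
	struct nvidia_smmu *nvidia_smmu;

	nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu);
	/* Page N of instance 'inst' lives at bases[inst] + N * (1 << pgshift). */
	return nvidia_smmu->bases[inst] + (page << smmu->pgshift);
}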
In nvidia_smmu_read_reg():
55 static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
58 void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;

In nvidia_smmu_write_reg():
63 static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
66 struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
70 void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
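Register reads are served from instance 0 only (line 58): since the instances are programmed identically, one answer is as good as the other. Writes, by contrast, are fanned out to every instance (line 70 sits inside a loop) so the mirrored SMMUs never drift apart. A sketch of the pair, assuming the num_instances field from the wrapper sketch above.

static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu,
				int page, int offset)
{
	/* Instances are programmed identically, so reading one is enough. */
	void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;

	return readl_relaxed(reg);
}

static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
				  int page, int offset, u32 val)
{
	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
	unsigned int i;

	/* Mirror every write so all instances stay in lockstep. */
	for (i = 0; i < nvidia->num_instances; i++) {
		void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;

		writel_relaxed(val, reg);
	}
}

The 64-bit variants listed next follow the same shape, presumably with the readq_relaxed()/writeq_relaxed() accessors.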
In nvidia_smmu_read_reg64():
76 static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu,
79 void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset;

In nvidia_smmu_write_reg64():
84 static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu,
87 struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
91 void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset;
In nvidia_smmu_tlb_sync():
97 static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
100 struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
103 arm_smmu_writel(smmu, page, sync, 0);
115 reg = nvidia_smmu_page(smmu, i, page) + status;
128 dev_err_ratelimited(smmu->dev,
129 "TLB sync timed out -- SMMU may be deadlocked\n");
In nvidia_smmu_reset():
132 static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
134 struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
139 void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
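Line 139 builds a GR0-relative address per instance; the trailing '+' shows the actual register offset continues on the next, unmatched line. A plausible reading, hedged because that register and the surrounding accesses are not visible in the listing, is that reset clears each instance's global fault status individually: a write-one-to-clear register has to be cleared with its own instance's value, which the mirrored write path cannot provide.

static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
{
	u32 val;
	unsigned int i;
	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);

	for (i = 0; i < nvidia->num_instances; i++) {
		/* Assumed: the '+' on line 139 continues with ARM_SMMU_GR0_sGFSR. */
		void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
				    ARM_SMMU_GR0_sGFSR;

		/* Clear this instance's global FSR with its own value (W1C). */
		val = readl_relaxed(reg);
		writel_relaxed(val, reg);
	}

	return 0;
}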
In nvidia_smmu_global_fault_inst():
151 struct arm_smmu_device *smmu,
155 void __iomem *gr0_base = nvidia_smmu_page(smmu, inst, 0);
165 dev_err_ratelimited(smmu->dev,
167 dev_err_ratelimited(smmu->dev,

In nvidia_smmu_global_fault():
179 struct arm_smmu_device *smmu = dev;
180 struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
185 irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
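The global fault interrupt is shared between the instances, so the top-level handler simply tries each one: nvidia_smmu_global_fault_inst() inspects a single instance's GR0 page (line 155) and reports the fault syndrome via dev_err_ratelimited() (lines 165, 167), while the wrapper at lines 179-185 loops and merges the results. A sketch of the fan-out, treating the per-instance helper as given.

static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
{
	unsigned int inst;
	irqreturn_t ret = IRQ_NONE;
	struct arm_smmu_device *smmu = dev;
	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);

	for (inst = 0; inst < nvidia->num_instances; inst++) {
		irqreturn_t irq_ret;

		/* Any of the mirrored instances may have raised the shared IRQ. */
		irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst);
		if (irq_ret == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}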
In nvidia_smmu_context_fault_bank():
194 struct arm_smmu_device *smmu,
199 void __iomem *gr1_base = nvidia_smmu_page(smmu, inst, 1);
200 void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);
210 dev_err_ratelimited(smmu->dev,

In nvidia_smmu_context_fault():
223 struct arm_smmu_device *smmu;
227 smmu = smmu_domain->smmu;
228 nvidia = to_nvidia_smmu(smmu);
237 for (idx = 0; idx < smmu->num_context_banks; idx++) {
238 irq_ret = nvidia_smmu_context_fault_bank(irq, smmu,
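Context faults get the same treatment, but two levels deep: for every instance and every context bank the handler reads that bank's registers from the instance's own context-bank page (line 200 locates bank idx at page numpage + idx) and reports/clears the fault. A sketch of the outer handler; how the irq cookie maps to the domain and the helper's argument order after 'smmu' are not visible in the listing, so both are assumptions.

static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
{
	int idx;
	unsigned int inst;
	irqreturn_t ret = IRQ_NONE;
	struct nvidia_smmu *nvidia;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = dev;	/* assumed irq cookie */

	smmu = smmu_domain->smmu;
	nvidia = to_nvidia_smmu(smmu);

	for (inst = 0; inst < nvidia->num_instances; inst++) {
		/*
		 * The line is shared and the faulting bank is unknown up
		 * front, so scan every context bank of every instance.
		 */
		for (idx = 0; idx < smmu->num_context_banks; idx++) {
			irqreturn_t irq_ret;

			/* Argument order after 'smmu' is assumed. */
			irq_ret = nvidia_smmu_context_fault_bank(irq, smmu,
								 idx, inst);
			if (irq_ret == IRQ_HANDLED)
				ret = IRQ_HANDLED;
		}
	}

	return ret;
}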
In nvidia_smmu_probe_finalize():
248 static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct device *dev)
250 struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
255 dev_err(smmu->dev, "memory controller probe failed for %s: %d\n",
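probe_finalize is where the coordination mentioned in the line-21 comment happens: once the core driver has set up a master, the implementation notifies the Tegra memory controller so the right stream-ID override can be programmed for that client. A sketch under the assumption that the wrapper's mc pointer and the tegra_mc_probe_device() helper from soc/tegra/mc.h are what sit behind the error message on line 255; the continuation of that dev_err() is also assumed.

#include <soc/tegra/mc.h>

static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu, struct device *dev)
{
	struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
	int err;

	/* Let the memory controller program the SID override for this client. */
	err = tegra_mc_probe_device(nvidia->mc, dev);
	if (err < 0)
		dev_err(smmu->dev, "memory controller probe failed for %s: %d\n",
			dev_name(dev), err);
}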
In nvidia_smmu_init_context():
263 struct arm_smmu_device *smmu = smmu_domain->smmu;
264 const struct device_node *np = smmu->dev->of_node;
278 if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
279 of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
280 smmu->pgsize_bitmap &= GENMASK(PAGE_SHIFT, 0);
281 pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
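Lines 278-281 restrict the supported mapping sizes on Tegra194 and Tegra234: GENMASK(PAGE_SHIFT, 0) keeps only the bits at or below the CPU page size, so larger block mappings are masked out of the SMMU's pgsize_bitmap. A small worked example of the masking; the helper and the initial bitmap value are illustrative, not part of the driver.

#include <linux/bits.h>

/*
 * Illustrative only: with 4 KiB CPU pages (PAGE_SHIFT == 12),
 * GENMASK(PAGE_SHIFT, 0) == 0x1fff, so a bitmap of SZ_4K | SZ_2M | SZ_1G
 * (0x40201000) collapses to SZ_4K (0x1000) -- only mapping sizes no larger
 * than PAGE_SIZE survive.
 */
static unsigned long restrict_to_page_size(unsigned long pgsize_bitmap)
{
	return pgsize_bitmap & GENMASK(PAGE_SHIFT, 0);
}

Line 281 then copies the restricted bitmap into pgtbl_cfg, so the io-pgtable code and the SMMU agree on the allowed sizes.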
In nvidia_smmu_impl_init():
305 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
308 struct device *dev = smmu->dev;
313 nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
321 /* Instance 0 is ioremapped by arm-smmu.c. */
322 nvidia_smmu->bases[0] = smmu->base;
338 nvidia_smmu->smmu.impl = &nvidia_smmu_single_impl;
340 nvidia_smmu->smmu.impl = &nvidia_smmu_impl;
342 return &nvidia_smmu->smmu;
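Finally, nvidia_smmu_impl_init() turns the already allocated arm_smmu_device into the larger wrapper with devm_krealloc() (line 313), reuses the base the core driver already ioremapped as instance 0 (lines 321-322), and picks between a single-instance and a multi-instance ops table (lines 338/340), both assumed to be defined earlier in the file. The sketch below compresses the middle of the function; the memory-controller lookup and the extra-instance mapping loop are assumptions based on the rest of the listing.

struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
{
	unsigned int i;
	struct nvidia_smmu *nvidia_smmu;
	struct device *dev = smmu->dev;
	struct platform_device *pdev = to_platform_device(dev);

	/* Grow the existing device-managed allocation into the wrapper (line 313). */
	nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
	if (!nvidia_smmu)
		return ERR_PTR(-ENOMEM);

	/* Assumed: look up the Tegra memory controller for probe_finalize(). */
	nvidia_smmu->mc = devm_tegra_memory_controller_get(dev);
	if (IS_ERR(nvidia_smmu->mc))
		return ERR_CAST(nvidia_smmu->mc);

	/* Instance 0 is ioremapped by arm-smmu.c (lines 321-322). */
	nvidia_smmu->bases[0] = smmu->base;
	nvidia_smmu->num_instances = 1;

	/* Assumed: further instances come from extra MMIO resources in DT. */
	for (i = 1; i < MAX_SMMU_INSTANCES; i++) {
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			break;

		nvidia_smmu->bases[i] = devm_ioremap_resource(dev, res);
		if (IS_ERR(nvidia_smmu->bases[i]))
			return ERR_CAST(nvidia_smmu->bases[i]);

		nvidia_smmu->num_instances++;
	}

	/* Pick the single- or multi-instance ops table (lines 338/340). */
	if (nvidia_smmu->num_instances == 1)
		nvidia_smmu->smmu.impl = &nvidia_smmu_single_impl;
	else
		nvidia_smmu->smmu.impl = &nvidia_smmu_impl;

	return &nvidia_smmu->smmu;
}

Returning the embedded &nvidia_smmu->smmu lets the core arm-smmu code keep treating the object as a plain struct arm_smmu_device while devres continues to manage its lifetime; devm_krealloc() may move the allocation, which is why the resized pointer, not the original smmu, is used from this point on.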