Lines matching "smmu" (whole-word matches)
 * IOMMU API for ARM architected SMMU implementations.

/* Maximum number of context banks per SMMU */
	struct arm_smmu_device		*smmu;

	struct mutex			init_mutex; /* Protects smmu pointer */

	struct arm_smmu_device		*smmu;
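The init_mutex noted above serialises changes to the domain's smmu pointer while a domain is first bound to a device. A minimal sketch of that locking pattern, assuming the arm_smmu_domain members shown here; example_attach() is a made-up name, not the driver's actual attach path:

/* Illustrative only: take init_mutex before reading or publishing the
 * domain's smmu pointer, as the comment on the member suggests. */
static int example_attach(struct arm_smmu_domain *smmu_domain,
			  struct arm_smmu_device *smmu)
{
	int ret = 0;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu && smmu_domain->smmu != smmu)
		ret = -EINVAL;	/* already bound to a different SMMU */
	else
		smmu_domain->smmu = smmu;
	mutex_unlock(&smmu_domain->init_mutex);

	return ret;
}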
	u32 (*read_reg)(struct arm_smmu_device *smmu, int page, int offset);
	void (*write_reg)(struct arm_smmu_device *smmu, int page, int offset,
			  u32 val);
	u64 (*read_reg64)(struct arm_smmu_device *smmu, int page, int offset);
	void (*write_reg64)(struct arm_smmu_device *smmu, int page, int offset,
			    u64 val);
	int (*cfg_probe)(struct arm_smmu_device *smmu);
	int (*reset)(struct arm_smmu_device *smmu);
	void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync,
			 int status);
	int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
				  struct arm_smmu_device *smmu,
				  struct device *dev, int start);
	void (*write_s2cr)(struct arm_smmu_device *smmu, int idx);
	void (*write_sctlr)(struct arm_smmu_device *smmu, int idx, u32 reg);
	void (*probe_finalize)(struct arm_smmu_device *smmu, struct device *dev);
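The function pointers above make up the per-implementation hook table (struct arm_smmu_impl); the core driver calls a hook when an integration provides one and otherwise falls back to the generic path. A minimal sketch, assuming a made-up quirk that needs every 32-bit register write followed by a read-back; example_smmu_impl and example_write_reg are illustrative names:

/* Hypothetical integration quirk: post each 32-bit register write with a
 * read-back.  Only the hooks that deviate from the default are filled in. */
static void example_write_reg(struct arm_smmu_device *smmu, int page,
			      int offset, u32 val)
{
	writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
	readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static const struct arm_smmu_impl example_smmu_impl = {
	.write_reg	= example_write_reg,
	.reset		= arm_mmu500_reset,	/* reuse the stock MMU-500 reset */
};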
static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n)
{
	return smmu->base + (n << smmu->pgshift);
}

static inline u32 arm_smmu_readl(struct arm_smmu_device *smmu, int page, int offset)
{
	if (smmu->impl && unlikely(smmu->impl->read_reg))
		return smmu->impl->read_reg(smmu, page, offset);
	return readl_relaxed(arm_smmu_page(smmu, page) + offset);
}

static inline void arm_smmu_writel(struct arm_smmu_device *smmu, int page,
				   int offset, u32 val)
{
	if (smmu->impl && unlikely(smmu->impl->write_reg))
		smmu->impl->write_reg(smmu, page, offset, val);
	else
		writel_relaxed(val, arm_smmu_page(smmu, page) + offset);
}

static inline u64 arm_smmu_readq(struct arm_smmu_device *smmu, int page, int offset)
{
	if (smmu->impl && unlikely(smmu->impl->read_reg64))
		return smmu->impl->read_reg64(smmu, page, offset);
	return readq_relaxed(arm_smmu_page(smmu, page) + offset);
}

static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
				   int offset, u64 val)
{
	if (smmu->impl && unlikely(smmu->impl->write_reg64))
		smmu->impl->write_reg64(smmu, page, offset, val);
	else
		writeq_relaxed(val, arm_smmu_page(smmu, page) + offset);
}
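In these accessors, page selects one (1 << smmu->pgshift)-byte register page inside the SMMU's MMIO region, and the impl hooks get first refusal before the plain relaxed MMIO fallback. A usage sketch; ARM_SMMU_GR0, ARM_SMMU_CB(), ARM_SMMU_GR0_sCR0 and ARM_SMMU_CB_FSR are taken to match this header's register definitions, and example_poke() is a made-up helper:

/* Sketch: reading a global register and touching a context-bank register
 * through the accessors above. */
static void example_poke(struct arm_smmu_device *smmu, int idx)
{
	u32 scr0, fsr;

	/* Global register group 0 lives in page 0. */
	scr0 = arm_smmu_readl(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sCR0);

	/* Context bank registers live in their own pages after the global
	 * pages; ARM_SMMU_CB() computes the page index for bank idx. */
	fsr = arm_smmu_readl(smmu, ARM_SMMU_CB(smmu, idx), ARM_SMMU_CB_FSR);
	arm_smmu_writel(smmu, ARM_SMMU_CB(smmu, idx), ARM_SMMU_CB_FSR, fsr);

	(void)scr0;
}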
struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu);
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
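arm_smmu_impl_init() is the probe-time hand-off point where one of the vendor back-ends (or none at all) gets attached to the device. A rough sketch of the dispatch shape, not a copy of the real function; the compatible strings are only examples and example_impl_init() is a made-up name:

/* Rough sketch of impl selection during probe; the real code keys off DT
 * compatibles and smmu->model, and may return a replaced arm_smmu_device. */
struct arm_smmu_device *example_impl_init(struct arm_smmu_device *smmu)
{
	struct device_node *np = smmu->dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra194-smmu"))
		return nvidia_smmu_impl_init(smmu);

	if (of_device_is_compatible(np, "qcom,sdm845-smmu-500"))
		return qcom_smmu_impl_init(smmu);

	return smmu;	/* no quirks: keep the default register accessors */
}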
void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx);
int arm_mmu500_reset(struct arm_smmu_device *smmu);
void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx,
				      struct arm_smmu_context_fault_info *cfi);

void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx,
				       const struct arm_smmu_context_fault_info *cfi);
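These two helpers factor out reading and reporting a context bank's fault registers. A sketch of how a context-fault handler might use them; the arm_smmu_context_fault_info fields, ARM_SMMU_CB_FSR_FAULT and the cfg.cbndx member are assumptions based on this header, and example_context_fault() is illustrative:

/* Sketch of a context-fault IRQ path built on the two helpers above. */
static irqreturn_t example_context_fault(int irq, void *dev)
{
	struct arm_smmu_domain *smmu_domain = dev;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;
	struct arm_smmu_context_fault_info cfi;

	arm_smmu_read_context_fault_info(smmu, idx, &cfi);
	if (!(cfi.fsr & ARM_SMMU_CB_FSR_FAULT))
		return IRQ_NONE;

	arm_smmu_print_context_fault_info(smmu, idx, &cfi);

	/* FSR is write-one-to-clear; ack the fault before returning. */
	arm_smmu_writel(smmu, ARM_SMMU_CB(smmu, idx), ARM_SMMU_CB_FSR, cfi.fsr);
	return IRQ_HANDLED;
}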