Lines matching full:smmu (all hits below are in drivers/acpi/arm64/iort.c)

412 	struct acpi_iort_smmu_v3 *smmu;  in iort_get_id_mapping_index()  local
424 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in iort_get_id_mapping_index()
430 if (smmu->event_gsiv && smmu->pri_gsiv && in iort_get_id_mapping_index()
431 smmu->gerr_gsiv && smmu->sync_gsiv) in iort_get_id_mapping_index()
433 } else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) { in iort_get_id_mapping_index()
437 if (smmu->id_mapping_index >= node->mapping_count) { in iort_get_id_mapping_index()
443 return smmu->id_mapping_index; in iort_get_id_mapping_index()
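
Taken together, the matches at 412-443 are the SMMUv3 branch of iort_get_id_mapping_index(), which decides whether the node carries a dedicated device-ID mapping and, if so, returns its index. A paraphrased sketch of that branch, reconstructed from the lines above; the revision cutoff name SMMU_V3_DEVICEID_VALID_REV is a hypothetical placeholder and the error reporting is simplified:

	case ACPI_IORT_NODE_SMMU_V3:
		smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

		/*
		 * Older table revisions have no DEVICEID_VALID flag: the ID
		 * mapping index is taken to be valid unless every interrupt
		 * is wired (all four GSIVs non-zero), in which case the SMMU
		 * needs no device ID of its own.
		 */
		if (node->revision < SMMU_V3_DEVICEID_VALID_REV) {	/* placeholder cutoff */
			if (smmu->event_gsiv && smmu->pri_gsiv &&
			    smmu->gerr_gsiv && smmu->sync_gsiv)
				return -EINVAL;
		} else if (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) {
			return -EINVAL;
		}

		/* A valid index must fall inside this node's mapping array */
		if (smmu->id_mapping_index >= node->mapping_count)
			return -EINVAL;

		return smmu->id_mapping_index;
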
536 * as NC (named component) -> SMMU -> ITS. If the type is matched, in iort_node_map_platform_id()
556 * device (such as SMMU, PMCG),its iort node already cached in iort_find_dev_node()
888 struct acpi_iort_node *smmu, in iort_get_rmrs() argument
981 struct acpi_iort_node *smmu = NULL; in iort_node_get_rmr_info() local
1002 * Go through the ID mappings and see if we have a match for SMMU in iort_node_get_rmr_info()
1032 iort_get_rmrs(node, smmu, sids, num_sids, head); in iort_node_get_rmr_info()
1093 struct acpi_iort_smmu_v3 *smmu; in iort_get_msi_resv_iommu() local
1095 smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data; in iort_get_msi_resv_iommu()
1096 if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X) in iort_get_msi_resv_iommu()
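
The Hi161X test at 1093-1096 is the heart of iort_get_msi_resv_iommu(): the device's IOMMU node is returned only when it is an SMMUv3 of that HiSilicon model, which needs an MSI doorbell region reserved in its IOVA space. A condensed sketch, assuming the IORT node for the device's IOMMU has already been looked up into 'iommu' (that lookup is elided here):

	if (iommu && iommu->type == ACPI_IORT_NODE_SMMU_V3) {
		struct acpi_iort_smmu_v3 *smmu;

		smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
		/* Only the HiSilicon Hi161x SMMUv3 requires the reservation */
		if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
			return iommu;
	}
	return NULL;
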
1208 pr_warn("IORT node type %u does not describe an SMMU\n", type); in iort_iommu_driver_enabled()
1226 /* If there's no SMMU driver at all, give up now */ in iort_iommu_xlate()
1235 * If the SMMU drivers are enabled but not loaded/probed in iort_iommu_xlate()
1444 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_count_resources() local
1449 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_count_resources()
1451 if (smmu->event_gsiv) in arm_smmu_v3_count_resources()
1454 if (smmu->pri_gsiv) in arm_smmu_v3_count_resources()
1457 if (smmu->gerr_gsiv) in arm_smmu_v3_count_resources()
1460 if (smmu->sync_gsiv) in arm_smmu_v3_count_resources()
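
The checks at 1444-1460 make up arm_smmu_v3_count_resources(): one MMIO resource is always needed, and each wired interrupt (non-zero GSIV) adds an IRQ resource. A sketch consistent with the matched lines:

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 1;	/* the MMIO register window is always present */

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	/* Each wired interrupt (GSIV != 0) contributes one IRQ resource */
	if (smmu->event_gsiv)
		num_res++;
	if (smmu->pri_gsiv)
		num_res++;
	if (smmu->gerr_gsiv)
		num_res++;
	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}
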
1466 static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu) in arm_smmu_v3_is_combined_irq() argument
1472 if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) in arm_smmu_v3_is_combined_irq()
1476 * ThunderX2 doesn't support MSIs from the SMMU, so we're checking in arm_smmu_v3_is_combined_irq()
1479 return smmu->event_gsiv == smmu->pri_gsiv && in arm_smmu_v3_is_combined_irq()
1480 smmu->event_gsiv == smmu->gerr_gsiv && in arm_smmu_v3_is_combined_irq()
1481 smmu->event_gsiv == smmu->sync_gsiv; in arm_smmu_v3_is_combined_irq()
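
Lines 1466-1481 are a complete helper: only the Cavium CN99XX (ThunderX2) SMMUv3 model routes all of its interrupts through one line, and since that part cannot use MSIs the comparison is between SPI numbers. Reconstructed for readability:

static bool __init arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
	/* Only the Cavium CN99XX (ThunderX2) model uses a combined IRQ */
	if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return false;

	/*
	 * ThunderX2 doesn't support MSIs from the SMMU, so these GSIVs are
	 * SPI numbers; identical values mean the firmware described a single
	 * combined interrupt line.
	 */
	return smmu->event_gsiv == smmu->pri_gsiv &&
	       smmu->event_gsiv == smmu->gerr_gsiv &&
	       smmu->event_gsiv == smmu->sync_gsiv;
}
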
1484 static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu) in arm_smmu_v3_resource_size() argument
1488 * which doesn't support the page 1 SMMU register space. in arm_smmu_v3_resource_size()
1490 if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) in arm_smmu_v3_resource_size()
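
The helper matched at 1484-1490 sizes the MMIO window: a generic SMMUv3 exposes two 64K register pages (page 0 and page 1), while the Cavium CN99XX implementation has no page 1, so only the first 64K page is claimed. A sketch of the whole helper:

static unsigned long __init arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
	/* CN99XX has no page 1 register space: map only the 64K page 0 */
	if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
		return SZ_64K;

	/* Otherwise cover both 64K register pages */
	return SZ_128K;
}
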
1499 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_init_resources() local
1503 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_init_resources()
1505 res[num_res].start = smmu->base_address; in arm_smmu_v3_init_resources()
1506 res[num_res].end = smmu->base_address + in arm_smmu_v3_init_resources()
1507 arm_smmu_v3_resource_size(smmu) - 1; in arm_smmu_v3_init_resources()
1511 if (arm_smmu_v3_is_combined_irq(smmu)) { in arm_smmu_v3_init_resources()
1512 if (smmu->event_gsiv) in arm_smmu_v3_init_resources()
1513 acpi_iort_register_irq(smmu->event_gsiv, "combined", in arm_smmu_v3_init_resources()
1518 if (smmu->event_gsiv) in arm_smmu_v3_init_resources()
1519 acpi_iort_register_irq(smmu->event_gsiv, "eventq", in arm_smmu_v3_init_resources()
1523 if (smmu->pri_gsiv) in arm_smmu_v3_init_resources()
1524 acpi_iort_register_irq(smmu->pri_gsiv, "priq", in arm_smmu_v3_init_resources()
1528 if (smmu->gerr_gsiv) in arm_smmu_v3_init_resources()
1529 acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", in arm_smmu_v3_init_resources()
1533 if (smmu->sync_gsiv) in arm_smmu_v3_init_resources()
1534 acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", in arm_smmu_v3_init_resources()
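
The matches at 1499-1534 trace arm_smmu_v3_init_resources(): the MMIO region is filled in first, then either one "combined" IRQ (the ThunderX2 case) or the separate eventq/priq/gerror/cmdq-sync interrupts are registered, one resource slot per wired GSIV. A reconstruction of the function; the resource-slot argument passed to acpi_iort_register_irq() and the edge trigger type follow the surrounding iort.c code and should be read as approximations:

static void __init arm_smmu_v3_init_resources(struct resource *res,
					      struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	int num_res = 0;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	/* MMIO register window, sized per the CN99XX quirk above */
	res[num_res].start = smmu->base_address;
	res[num_res].end   = smmu->base_address +
			     arm_smmu_v3_resource_size(smmu) - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	if (arm_smmu_v3_is_combined_irq(smmu)) {
		/* ThunderX2: a single SPI serves every interrupt source */
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "combined",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	} else {
		/* One IRQ resource per wired interrupt */
		if (smmu->event_gsiv)
			acpi_iort_register_irq(smmu->event_gsiv, "eventq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
		if (smmu->pri_gsiv)
			acpi_iort_register_irq(smmu->pri_gsiv, "priq",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
		if (smmu->gerr_gsiv)
			acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
		if (smmu->sync_gsiv)
			acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
					       ACPI_EDGE_SENSITIVE,
					       &res[num_res++]);
	}
}
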
1543 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_dma_configure() local
1547 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_dma_configure()
1549 attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ? in arm_smmu_v3_dma_configure()
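
The COHACC_OVERRIDE flag decides whether the SMMUv3's own page-table walks are treated as cache coherent when its platform device has DMA configured. A sketch of arm_smmu_v3_dma_configure() around the matched lines; the dma_mask assignment and the final acpi_dma_configure() call follow the usual iort.c pattern and are approximated:

static void __init arm_smmu_v3_dma_configure(struct device *dev,
					     struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	enum dev_dma_attr attr;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	/* COHACC override => the SMMU's table walker is cache coherent */
	attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
			DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

	/* The DMA and coherent DMA masks are expected to be equivalent */
	dev->dma_mask = &dev->coherent_dma_mask;

	/* Configure DMA for the SMMU platform device itself */
	acpi_dma_configure(dev, attr);
}

The SMMUv1/v2 variant further down (lines 1641-1650) applies the same pattern, keyed on ACPI_IORT_SMMU_COHERENT_WALK instead.
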
1566 struct acpi_iort_smmu_v3 *smmu; in arm_smmu_v3_set_proximity() local
1568 smmu = (struct acpi_iort_smmu_v3 *)node->node_data; in arm_smmu_v3_set_proximity()
1569 if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { in arm_smmu_v3_set_proximity()
1570 int dev_node = pxm_to_node(smmu->pxm); in arm_smmu_v3_set_proximity()
1576 pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", in arm_smmu_v3_set_proximity()
1577 smmu->base_address, in arm_smmu_v3_set_proximity()
1578 smmu->pxm); in arm_smmu_v3_set_proximity()
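
When ACPI_IORT_SMMU_V3_PXM_VALID is set, the SMMUv3 platform device is bound to the NUMA node that the firmware's proximity domain maps to. A sketch of the matched function; the offline-node check approximates error handling not shown in the listing:

static int __init arm_smmu_v3_set_proximity(struct device *dev,
					    struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;

	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
	if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
		int dev_node = pxm_to_node(smmu->pxm);

		/* Reject a proximity domain that maps to an offline node */
		if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
			return -EINVAL;

		set_dev_node(dev, dev_node);
		pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
			smmu->base_address, smmu->pxm);
	}
	return 0;
}
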
1588 struct acpi_iort_smmu *smmu; in arm_smmu_count_resources() local
1590 /* Retrieve SMMU specific data */ in arm_smmu_count_resources()
1591 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_count_resources()
1601 return smmu->context_interrupt_count + 2; in arm_smmu_count_resources()
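
For SMMUv1/v2 the resource count is simpler: the MMIO region and the global fault interrupt are always present (hence the +2), and every context-bank interrupt adds one more. A sketch of the full helper:

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/*
	 * MMIO window + global fault interrupt are always present, so add
	 * them on top of the per-context interrupt count.
	 */
	return smmu->context_interrupt_count + 2;
}
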
1607 struct acpi_iort_smmu *smmu; in arm_smmu_init_resources() local
1611 /* Retrieve SMMU specific data */ in arm_smmu_init_resources()
1612 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_init_resources()
1614 res[num_res].start = smmu->base_address; in arm_smmu_init_resources()
1615 res[num_res].end = smmu->base_address + smmu->span - 1; in arm_smmu_init_resources()
1619 glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset); in arm_smmu_init_resources()
1624 acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger, in arm_smmu_init_resources()
1628 ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset); in arm_smmu_init_resources()
1629 for (i = 0; i < smmu->context_interrupt_count; i++) { in arm_smmu_init_resources()
1633 acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger, in arm_smmu_init_resources()
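
Here the SMMUv1/v2 interrupt lists are read out of the IORT node itself: the global and context interrupt arrays are u64 entries located at offsets stored in the node, each packing a GSIV and its trigger mode. A reconstruction of arm_smmu_init_resources(); the IORT_IRQ_MASK()/IORT_IRQ_TRIGGER_MASK() accessors stand for the local unpacking helpers iort.c uses and should be read as approximations:

static void __init arm_smmu_init_resources(struct resource *res,
					   struct acpi_iort_node *node)
{
	struct acpi_iort_smmu *smmu;
	int i, hw_irq, trigger, num_res = 0;
	u64 *ctx_irq, *glb_irq;

	/* Retrieve SMMU specific data */
	smmu = (struct acpi_iort_smmu *)node->node_data;

	/* MMIO window covering the whole SMMU register span */
	res[num_res].start = smmu->base_address;
	res[num_res].end   = smmu->base_address + smmu->span - 1;
	res[num_res].flags = IORESOURCE_MEM;
	num_res++;

	/* Global fault interrupt: a u64 entry at global_interrupt_offset */
	glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
	hw_irq  = IORT_IRQ_MASK(glb_irq[0]);		/* low bits: GSIV     */
	trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);	/* high bits: trigger */
	acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
			       &res[num_res++]);

	/* One entry per context-bank interrupt */
	ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
	for (i = 0; i < smmu->context_interrupt_count; i++) {
		hw_irq  = IORT_IRQ_MASK(ctx_irq[i]);
		trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);
		acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
				       &res[num_res++]);
	}
}
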
1641 struct acpi_iort_smmu *smmu; in arm_smmu_dma_configure() local
1644 /* Retrieve SMMU specific data */ in arm_smmu_dma_configure()
1645 smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_dma_configure()
1647 attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ? in arm_smmu_dma_configure()
1650 /* We expect the dma masks to be equivalent for SMMU set-ups */ in arm_smmu_dma_configure()
1706 /* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
1744 .name = "arm-smmu-v3",
1752 .name = "arm-smmu",
1759 .name = "arm-smmu-v3-pmcg",
1890 * If we detect a RC->SMMU mapping, make sure in iort_enable_acs()