Lines Matching refs:pci_seg

109 u16 pci_seg; member
146 u16 pci_seg; member
232 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg);
295 iommu->index, iommu->pci_seg->id, in get_global_efr()
413 u32 dev_table_size = iommu->pci_seg->dev_table_size; in iommu_set_device_table()
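
Note on line 413: dev_table_size is consumed when the per-segment device table is programmed into the hardware. A minimal sketch of iommu_set_device_table(), following the mainline driver (register offset and helper names assumed from that source; details may vary by kernel version):

    static void iommu_set_device_table(struct amd_iommu *iommu)
    {
        u32 dev_table_size = iommu->pci_seg->dev_table_size;
        void *dev_table = (void *)get_dev_table(iommu);
        u64 entry;

        BUG_ON(iommu->mmio_base == NULL);

        /* Base address plus size (in 4K units, minus one) in one register. */
        entry = iommu_virt_to_phys(dev_table);
        entry |= (dev_table_size >> 12) - 1;
        memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
                    &entry, sizeof(entry));
    }
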
616 static int __init find_last_devid_acpi(struct acpi_table_header *table, u16 pci_seg) in find_last_devid_acpi() argument
627 if (h->pci_seg == pci_seg && in find_last_devid_acpi()
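
Lines 616-627: find_last_devid_acpi() filters IVHD blocks by segment while sizing the tables. A sketch of the surrounding scan loop, assuming the IVRS layout helpers (IVRS_HEADER_LENGTH, find_last_devid_from_ivhd()) from the mainline driver:

    u8 *p = (u8 *)table + IVRS_HEADER_LENGTH;
    u8 *end = (u8 *)table + table->length;

    while (p < end) {
        struct ivhd_header *h = (struct ivhd_header *)p;

        /* Only evaluate IVHD blocks that belong to this PCI segment. */
        if (h->pci_seg == pci_seg &&
            h->type == amd_iommu_target_ivhd_type) {
            int ret = find_last_devid_from_ivhd(h);

            if (ret)
                return ret;
        }
        p += h->length;
    }
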
653 static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg) in alloc_dev_table() argument
655 pci_seg->dev_table = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32, in alloc_dev_table()
656 get_order(pci_seg->dev_table_size)); in alloc_dev_table()
657 if (!pci_seg->dev_table) in alloc_dev_table()
663 static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg) in free_dev_table() argument
665 iommu_free_pages(pci_seg->dev_table, in free_dev_table()
666 get_order(pci_seg->dev_table_size)); in free_dev_table()
667 pci_seg->dev_table = NULL; in free_dev_table()
671 static inline int __init alloc_rlookup_table(struct amd_iommu_pci_seg *pci_seg) in alloc_rlookup_table() argument
673 pci_seg->rlookup_table = iommu_alloc_pages(GFP_KERNEL, in alloc_rlookup_table()
674 get_order(pci_seg->rlookup_table_size)); in alloc_rlookup_table()
675 if (pci_seg->rlookup_table == NULL) in alloc_rlookup_table()
681 static inline void free_rlookup_table(struct amd_iommu_pci_seg *pci_seg) in free_rlookup_table() argument
683 iommu_free_pages(pci_seg->rlookup_table, in free_rlookup_table()
684 get_order(pci_seg->rlookup_table_size)); in free_rlookup_table()
685 pci_seg->rlookup_table = NULL; in free_rlookup_table()
688 static inline int __init alloc_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) in alloc_irq_lookup_table() argument
690 pci_seg->irq_lookup_table = iommu_alloc_pages(GFP_KERNEL, in alloc_irq_lookup_table()
691 get_order(pci_seg->rlookup_table_size)); in alloc_irq_lookup_table()
692 kmemleak_alloc(pci_seg->irq_lookup_table, in alloc_irq_lookup_table()
693 pci_seg->rlookup_table_size, 1, GFP_KERNEL); in alloc_irq_lookup_table()
694 if (pci_seg->irq_lookup_table == NULL) in alloc_irq_lookup_table()
700 static inline void free_irq_lookup_table(struct amd_iommu_pci_seg *pci_seg) in free_irq_lookup_table() argument
702 kmemleak_free(pci_seg->irq_lookup_table); in free_irq_lookup_table()
703 iommu_free_pages(pci_seg->irq_lookup_table, in free_irq_lookup_table()
704 get_order(pci_seg->rlookup_table_size)); in free_irq_lookup_table()
705 pci_seg->irq_lookup_table = NULL; in free_irq_lookup_table()
708 static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg) in alloc_alias_table() argument
712 pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL, in alloc_alias_table()
713 get_order(pci_seg->alias_table_size)); in alloc_alias_table()
714 if (!pci_seg->alias_table) in alloc_alias_table()
720 for (i = 0; i <= pci_seg->last_bdf; ++i) in alloc_alias_table()
721 pci_seg->alias_table[i] = i; in alloc_alias_table()
726 static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg) in free_alias_table() argument
728 iommu_free_pages(pci_seg->alias_table, in free_alias_table()
729 get_order(pci_seg->alias_table_size)); in free_alias_table()
730 pci_seg->alias_table = NULL; in free_alias_table()
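
Lines 653-730 are four instances of one allocation pattern: each per-segment table is allocated with iommu_alloc_pages()/get_order() and released with iommu_free_pages(), NULLing the pointer afterwards. The IRQ lookup table is additionally registered with kmemleak because raw page allocations are invisible to it, and the alias table is the only one with extra setup. Reconstructed from the listed lines, the alias case in full:

    static int __init alloc_alias_table(struct amd_iommu_pci_seg *pci_seg)
    {
        int i;

        pci_seg->alias_table = iommu_alloc_pages(GFP_KERNEL,
                                get_order(pci_seg->alias_table_size));
        if (!pci_seg->alias_table)
            return -ENOMEM;

        /* Identity map: every device ID starts out as its own alias;
         * IVHD alias entries overwrite individual slots later. */
        for (i = 0; i <= pci_seg->last_bdf; ++i)
            pci_seg->alias_table[i] = i;

        return 0;
    }
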
1031 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in __copy_device_table() local
1044 if (old_devtb_size != pci_seg->dev_table_size) { in __copy_device_table()
1063 pci_seg->dev_table_size) in __copy_device_table()
1064 : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB); in __copy_device_table()
1069 pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32, in __copy_device_table()
1070 get_order(pci_seg->dev_table_size)); in __copy_device_table()
1071 if (pci_seg->old_dev_tbl_cpy == NULL) { in __copy_device_table()
1077 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { in __copy_device_table()
1078 pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid]; in __copy_device_table()
1083 pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; in __copy_device_table()
1084 pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; in __copy_device_table()
1090 pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp; in __copy_device_table()
1093 pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp; in __copy_device_table()
1108 pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; in __copy_device_table()
1119 struct amd_iommu_pci_seg *pci_seg; in copy_device_table() local
1130 for_each_pci_segment(pci_seg) { in copy_device_table()
1132 if (pci_seg->id != iommu->pci_seg->id) in copy_device_table()
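
Lines 1031-1132: on a kdump kernel the crashed kernel's device table must stay valid while devices may still be doing DMA, so __copy_device_table() duplicates it and copy_device_table() invokes it once per segment (the pci_seg->id check skips IOMMUs whose segment has already been handled). A simplified sketch of the copy itself; the real function also validates the old table's size and sanitizes IRQ-table and paging-mode bits, which is elided here:

    old_devtb = memremap(old_devtb_phys, pci_seg->dev_table_size,
                         MEMREMAP_WB);
    if (!old_devtb)
        return false;

    /* GFP_DMA32 matches how the live device table is allocated (line 655). */
    pci_seg->old_dev_tbl_cpy = iommu_alloc_pages(GFP_KERNEL | GFP_DMA32,
                                get_order(pci_seg->dev_table_size));
    if (pci_seg->old_dev_tbl_cpy == NULL)
        return false;

    for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
        pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
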
1301 struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg; in init_iommu_from_acpi() local
1333 seg_id = pci_seg->id; in init_iommu_from_acpi()
1340 for (dev_i = 0; dev_i <= pci_seg->last_bdf; ++dev_i) in init_iommu_from_acpi()
1385 pci_seg->alias_table[devid] = devid_to; in init_iommu_from_acpi()
1443 pci_seg->alias_table[dev_i] = devid_to; in init_iommu_from_acpi()
1567 struct amd_iommu_pci_seg *pci_seg; in alloc_pci_segment() local
1579 pci_seg = kzalloc(sizeof(struct amd_iommu_pci_seg), GFP_KERNEL); in alloc_pci_segment()
1580 if (pci_seg == NULL) in alloc_pci_segment()
1583 pci_seg->last_bdf = last_bdf; in alloc_pci_segment()
1585 pci_seg->dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE, last_bdf); in alloc_pci_segment()
1586 pci_seg->alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE, last_bdf); in alloc_pci_segment()
1587 pci_seg->rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE, last_bdf); in alloc_pci_segment()
1589 pci_seg->id = id; in alloc_pci_segment()
1590 init_llist_head(&pci_seg->dev_data_list); in alloc_pci_segment()
1591 INIT_LIST_HEAD(&pci_seg->unity_map); in alloc_pci_segment()
1592 list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list); in alloc_pci_segment()
1594 if (alloc_dev_table(pci_seg)) in alloc_pci_segment()
1596 if (alloc_alias_table(pci_seg)) in alloc_pci_segment()
1598 if (alloc_rlookup_table(pci_seg)) in alloc_pci_segment()
1601 return pci_seg; in alloc_pci_segment()
1607 struct amd_iommu_pci_seg *pci_seg; in get_pci_segment() local
1609 for_each_pci_segment(pci_seg) { in get_pci_segment()
1610 if (pci_seg->id == id) in get_pci_segment()
1611 return pci_seg; in get_pci_segment()
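
Lines 1567-1611: segment bookkeeping is a find-or-create pair. get_pci_segment() scans the global segment list and falls back to alloc_pci_segment(), which sizes the three tables from last_bdf and registers the new segment. Reconstructed from the listed lines:

    static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
                            struct acpi_table_header *ivrs_base)
    {
        struct amd_iommu_pci_seg *pci_seg;

        /* Reuse an already-registered segment if the ID matches. */
        for_each_pci_segment(pci_seg) {
            if (pci_seg->id == id)
                return pci_seg;
        }

        /* First reference to this segment: allocate and register it. */
        return alloc_pci_segment(id, ivrs_base);
    }
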
1619 struct amd_iommu_pci_seg *pci_seg, *next; in free_pci_segments() local
1621 for_each_pci_segment_safe(pci_seg, next) { in free_pci_segments()
1622 list_del(&pci_seg->list); in free_pci_segments()
1623 free_irq_lookup_table(pci_seg); in free_pci_segments()
1624 free_rlookup_table(pci_seg); in free_pci_segments()
1625 free_alias_table(pci_seg); in free_pci_segments()
1626 free_dev_table(pci_seg); in free_pci_segments()
1627 kfree(pci_seg); in free_pci_segments()
1728 struct amd_iommu_pci_seg *pci_seg; in init_iommu_one() local
1730 pci_seg = get_pci_segment(h->pci_seg, ivrs_base); in init_iommu_one()
1731 if (pci_seg == NULL) in init_iommu_one()
1733 iommu->pci_seg = pci_seg; in init_iommu_one()
1847 iommu->pci_seg->rlookup_table[iommu->devid] = NULL; in init_iommu_one_late()
1899 h->pci_seg, PCI_BUS_NUM(h->devid), in init_iommu_all()
2023 iommu->dev = pci_get_domain_bus_and_slot(iommu->pci_seg->id, in iommu_init_pci()
2085 pci_get_domain_bus_and_slot(iommu->pci_seg->id, in iommu_init_pci()
2172 struct amd_iommu_pci_seg *pci_seg; in amd_iommu_init_pci() local
2196 for_each_pci_segment(pci_seg) in amd_iommu_init_pci()
2197 init_device_table_dma(pci_seg); in amd_iommu_init_pci()
2485 struct amd_iommu_pci_seg *p, *pci_seg; in free_unity_maps() local
2487 for_each_pci_segment_safe(pci_seg, p) { in free_unity_maps()
2488 list_for_each_entry_safe(entry, next, &pci_seg->unity_map, list) { in free_unity_maps()
2500 struct amd_iommu_pci_seg *pci_seg; in init_unity_map_range() local
2503 pci_seg = get_pci_segment(m->pci_seg, ivrs_base); in init_unity_map_range()
2504 if (pci_seg == NULL) in init_unity_map_range()
2522 e->devid_end = pci_seg->last_bdf; in init_unity_map_range()
2546 " flags: %x\n", s, m->pci_seg, in init_unity_map_range()
2548 PCI_FUNC(e->devid_start), m->pci_seg, in init_unity_map_range()
2553 list_add_tail(&e->list, &pci_seg->unity_map); in init_unity_map_range()
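
Lines 2485-2553: each IVMD entry becomes a unity-map record on the owning segment's unity_map list; free_unity_maps() walks the same lists on teardown. A hedged sketch of the tail of init_unity_map_range() (field names and the flags shift per the mainline driver; the devid_start/devid_end setup depends on the IVMD type, see line 2522 for the TYPE_ALL case):

    e = kzalloc(sizeof(*e), GFP_KERNEL);
    if (e == NULL)
        return -ENOMEM;

    /* Page-align the window; bit 0 of the IVMD flags marks a unity
     * range, the remaining bits carry the protection. */
    e->address_start = PAGE_ALIGN(m->range_start);
    e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
    e->prot = m->flags >> 1;

    list_add_tail(&e->list, &pci_seg->unity_map);
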
2581 static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg) in init_device_table_dma() argument
2584 struct dev_table_entry *dev_table = pci_seg->dev_table; in init_device_table_dma()
2589 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { in init_device_table_dma()
2596 static void __init uninit_device_table_dma(struct amd_iommu_pci_seg *pci_seg) in uninit_device_table_dma() argument
2599 struct dev_table_entry *dev_table = pci_seg->dev_table; in uninit_device_table_dma()
2604 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { in uninit_device_table_dma()
2612 struct amd_iommu_pci_seg *pci_seg; in init_device_table() local
2618 for_each_pci_segment(pci_seg) { in init_device_table()
2619 for (devid = 0; devid <= pci_seg->last_bdf; ++devid) in init_device_table()
2620 __set_dev_entry_bit(pci_seg->dev_table, in init_device_table()
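
Lines 2612-2620: when interrupt remapping is active, every possible device ID in every segment gets its IRQ-table-enable bit set up front. Reconstructed from the listed lines (the DEV_ENTRY_IRQ_TBL_EN constant is assumed from the mainline driver):

    static void __init init_device_table(void)
    {
        struct amd_iommu_pci_seg *pci_seg;
        u32 devid;

        if (!amd_iommu_irq_remap)
            return;

        for_each_pci_segment(pci_seg) {
            for (devid = 0; devid <= pci_seg->last_bdf; ++devid)
                __set_dev_entry_bit(pci_seg->dev_table,
                                    devid, DEV_ENTRY_IRQ_TBL_EN);
        }
    }
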
2765 struct amd_iommu_pci_seg *pci_seg; in early_enable_iommus() local
2776 for_each_pci_segment(pci_seg) { in early_enable_iommus()
2777 if (pci_seg->old_dev_tbl_cpy != NULL) { in early_enable_iommus()
2778 iommu_free_pages(pci_seg->old_dev_tbl_cpy, in early_enable_iommus()
2779 get_order(pci_seg->dev_table_size)); in early_enable_iommus()
2780 pci_seg->old_dev_tbl_cpy = NULL; in early_enable_iommus()
2791 for_each_pci_segment(pci_seg) { in early_enable_iommus()
2792 iommu_free_pages(pci_seg->dev_table, in early_enable_iommus()
2793 get_order(pci_seg->dev_table_size)); in early_enable_iommus()
2794 pci_seg->dev_table = pci_seg->old_dev_tbl_cpy; in early_enable_iommus()
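
Lines 2765-2794: the two per-segment loops in early_enable_iommus() are the two outcomes of the kdump copy. If copy_device_table() failed, any partial copies are dropped and the IOMMUs come up with fresh tables; if it succeeded, the fresh tables are freed and the copies installed so devices keep their pre-kexec translations. A sketch of just that branch structure (the per-IOMMU enable/disable calls inside each branch are omitted):

    if (!copy_device_table()) {
        /* Copy failed: fall back to the freshly allocated tables. */
        for_each_pci_segment(pci_seg) {
            if (pci_seg->old_dev_tbl_cpy != NULL) {
                iommu_free_pages(pci_seg->old_dev_tbl_cpy,
                        get_order(pci_seg->dev_table_size));
                pci_seg->old_dev_tbl_cpy = NULL;
            }
        }
    } else {
        /* Copy succeeded: install the copied tables instead. */
        for_each_pci_segment(pci_seg) {
            iommu_free_pages(pci_seg->dev_table,
                    get_order(pci_seg->dev_table_size));
            pci_seg->dev_table = pci_seg->old_dev_tbl_cpy;
        }
    }
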
3102 struct amd_iommu_pci_seg *pci_seg; in early_amd_iommu_init() local
3119 for_each_pci_segment(pci_seg) { in early_amd_iommu_init()
3120 if (alloc_irq_lookup_table(pci_seg)) in early_amd_iommu_init()
3299 struct amd_iommu_pci_seg *pci_seg; in state_next() local
3301 for_each_pci_segment(pci_seg) in state_next()
3302 uninit_device_table_dma(pci_seg); in state_next()