1 // SPDX-License-Identifier: GPL-2.0-or-later
18 #include <asm/pnv-pci.h>
89 #define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off)
90 #define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off)
161 dev_info(&dev->dev, "dump_cxl_config_space\n"); in dump_cxl_config_space()
164 dev_info(&dev->dev, "BAR0: %#.8x\n", val); in dump_cxl_config_space()
166 dev_info(&dev->dev, "BAR1: %#.8x\n", val); in dump_cxl_config_space()
168 dev_info(&dev->dev, "BAR2: %#.8x\n", val); in dump_cxl_config_space()
170 dev_info(&dev->dev, "BAR3: %#.8x\n", val); in dump_cxl_config_space()
172 dev_info(&dev->dev, "BAR4: %#.8x\n", val); in dump_cxl_config_space()
174 dev_info(&dev->dev, "BAR5: %#.8x\n", val); in dump_cxl_config_space()
176 dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n", in dump_cxl_config_space()
178 dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n", in dump_cxl_config_space()
180 dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n", in dump_cxl_config_space()
187 dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what) in dump_cxl_config_space()
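/*
 * The BAR0..BAR5 values printed above are ordinary PCI config-space dwords;
 * the pci_read_config_dword() calls that fill "val" are on lines not shown
 * here. A minimal sketch of that read pattern (hypothetical helper, assumes
 * <linux/pci.h>):
 */
static void dump_bars_sketch(struct pci_dev *dev)
{
	u32 val;
	int i;

	for (i = 0; i < 6; i++) {
		pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, &val);
		dev_info(&dev->dev, "BAR%d: %#.8x\n", i, val);
	}
}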
265 dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what) in dump_afu_descriptor()
282 show_reg("Reserved", (val >> (63-7)) & 0xff); in dump_afu_descriptor()
291 show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff); in dump_afu_descriptor()
298 show_reg("Reserved", (val >> (63-7)) & 0xff); in dump_afu_descriptor()
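/*
 * The (63-n) shifts and (val >> (63-n)) & mask extractions used throughout
 * this file follow the IBM MSB-0 convention: bit 0 is the most significant
 * bit of the 64-bit register and bit 63 the least significant. A standalone
 * sketch of the idiom (helper names are illustrative, not the driver's):
 */
#define MSB0_BIT(b)	(1ULL << (63 - (b)))	/* set MSB-0 bit b */

static inline u64 msb0_field(u64 val, int last_bit, u64 mask)
{
	/* msb0_field(val, 7, 0xff) returns MSB-0 bits 0..7, i.e. the top
	 * byte, as in the "Reserved" and "PerProcessPSA_control" reads above */
	return (val >> (63 - last_bit)) & mask;
}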
319 if (of_property_read_u32(np, "ibm,phb-index", phb_index)) in get_phb_index()
320 return -ENODEV; in get_phb_index()
328 * - For chips other than POWER8NVL, we only have CAPP 0, in get_capp_unit_id()
330 * - For POWER8NVL, assume CAPP 0 is attached to PHB0 and in get_capp_unit_id()
347 * PEC1 (PHB1 - PHB2). No capi mode in get_capp_unit_id()
348 * PEC2 (PHB3 - PHB4 - PHB5): Capi mode on PHB3 only. Capp ID = CAPP1 (0b1110_0000) in get_capp_unit_id()
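/*
 * Sketch of the POWER9 routing the comments above describe: CAPI mode is
 * only available on PHB0 (PEC0, served by CAPP0) and PHB3 (PEC2, served by
 * CAPP1, encoded 0b1110_0000). The CAPP0 encoding below is an assumption
 * for illustration; only CAPP1's value appears in the comments above.
 */
static u64 p9_capp_unit_id_sketch(u32 phb_index)
{
	if (phb_index == 0)
		return 0xc0;	/* CAPP0 (value assumed)         */
	if (phb_index == 3)
		return 0xe0;	/* CAPP1 = 0b1110_0000           */
	return 0;		/* PEC1 and others: no CAPI mode */
}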
369 return -ENODEV; in cxl_calc_capp_routing()
371 while (np && of_property_read_u32(np, "ibm,chip-id", &id)) in cxl_calc_capp_routing()
374 return -ENODEV; in cxl_calc_capp_routing()
388 …pr_err("cxl: No capp unit found for PHB[%lld,%d]. Make sure the adapter is on a capi-compatible sl… in cxl_calc_capp_routing()
390 return -ENODEV; in cxl_calc_capp_routing()
408 return -ENODEV; in get_phb_indications()
411 if (of_property_read_u32_array(np, "ibm,phb-indications", val, 3)) { in get_phb_indications()
432 * bit 61:60 MSI bits --> 0 in cxl_get_xsl9_dsnctl()
433 * bit 59 TVT selector --> 0 in cxl_get_xsl9_dsnctl()
436 return -ENODEV; in cxl_get_xsl9_dsnctl()
442 xsl_dsnctl = (capiind << (63-15)); /* Bit 57 */ in cxl_get_xsl9_dsnctl()
443 xsl_dsnctl |= (capp_unit_id << (63-15)); in cxl_get_xsl9_dsnctl()
446 xsl_dsnctl |= ((u64)0x09 << (63-28)); in cxl_get_xsl9_dsnctl()
450 * the Non-Blocking queues by the PHB. This field should match in cxl_get_xsl9_dsnctl()
455 xsl_dsnctl |= (nbwind << (63-55)); in cxl_get_xsl9_dsnctl()
489 psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ in init_implementation_adapter_regs_psl9()
490 psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ in init_implementation_adapter_regs_psl9()
494 /* Setup the PSL to transmit packets on the PCIe before the in init_implementation_adapter_regs_psl9()
513 /* disable machines 31-47 and 20-27 for DMA */ in init_implementation_adapter_regs_psl9()
524 * Check if PSL has data-cache. We need to flush adapter datacache in init_implementation_adapter_regs_psl9()
529 dev_dbg(&dev->dev, "No data-cache present\n"); in init_implementation_adapter_regs_psl9()
530 adapter->native->no_data_cache = true; in init_implementation_adapter_regs_psl9()
549 psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */ in init_implementation_adapter_regs_psl8()
551 psl_dsnctl |= (chipid << (63-5)); in init_implementation_adapter_regs_psl8()
552 psl_dsnctl |= (capp_unit_id << (63-13)); in init_implementation_adapter_regs_psl8()
559 psl_fircntl = (0x2ULL << (63-3)); /* ce_report */ in init_implementation_adapter_regs_psl8()
560 psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */ in init_implementation_adapter_regs_psl8()
570 #define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
571 #define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
595 adapter->psl_timebase_synced = false; in cxl_setup_psl_timebase()
602 if (!of_property_present(np, "ibm,capp-timebase-sync")) { in cxl_setup_psl_timebase()
604 dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n"); in cxl_setup_psl_timebase()
613 if (adapter->native->sl_ops->write_timebase_ctrl) in cxl_setup_psl_timebase()
614 adapter->native->sl_ops->write_timebase_ctrl(adapter); in cxl_setup_psl_timebase()
644 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_setup_irq()
651 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_update_image_control()
657 dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); in cxl_update_image_control()
658 return -ENODEV; in cxl_update_image_control()
662 dev_err(&dev->dev, "failed to read image state: %i\n", rc); in cxl_update_image_control()
666 if (adapter->perst_loads_image) in cxl_update_image_control()
671 if (adapter->perst_select_user) in cxl_update_image_control()
677 dev_err(&dev->dev, "failed to update image control: %i\n", rc); in cxl_update_image_control()
686 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_alloc_one_irq()
693 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_release_one_irq()
701 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_alloc_irq_ranges()
709 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_release_irq_ranges()
719 dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n"); in setup_cxl_bars()
720 return -ENODEV; in setup_cxl_bars()
734 /* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
741 dev_info(&dev->dev, "switch card to CXL\n"); in switch_card_to_cxl()
744 dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); in switch_card_to_cxl()
745 return -ENODEV; in switch_card_to_cxl()
749 dev_err(&dev->dev, "failed to read current mode control: %i", rc); in switch_card_to_cxl()
755 dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc); in switch_card_to_cxl()
759 * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states in switch_card_to_cxl()
761 * PCIe config space. in switch_card_to_cxl()
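/*
 * Sketch of the mode switch the messages above report on: read the VSEC mode
 * control byte, set the CXL-protocol-enable bit, write it back, then back off
 * before issuing further config cycles. The register offset, enable bit and
 * delay below are illustrative assumptions, not taken from the driver.
 */
static int switch_to_cxl_sketch(struct pci_dev *dev, int vsec)
{
	u8 mode;
	int rc;

	rc = pci_read_config_byte(dev, vsec + 0xa, &mode);	/* offset assumed */
	if (rc)
		return rc;
	mode |= 0x1;						/* enable bit assumed */
	rc = pci_write_config_byte(dev, vsec + 0xa, mode);
	if (rc)
		return rc;
	msleep(100);	/* settle before touching PCIe config space again */
	return 0;
}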
774 p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size); in pci_map_slice_regs()
775 p2n_base = p2_base(dev) + (afu->slice * p2n_size); in pci_map_slice_regs()
776 afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size)); in pci_map_slice_regs()
777 afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size); in pci_map_slice_regs()
779 if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size))) in pci_map_slice_regs()
781 if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size))) in pci_map_slice_regs()
784 if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size))) in pci_map_slice_regs()
790 iounmap(afu->p2n_mmio); in pci_map_slice_regs()
792 iounmap(afu->native->p1n_mmio); in pci_map_slice_regs()
794 dev_err(&afu->dev, "Error mapping AFU MMIO regions\n"); in pci_map_slice_regs()
795 return -ENOMEM; in pci_map_slice_regs()
800 if (afu->p2n_mmio) { in pci_unmap_slice_regs()
801 iounmap(afu->p2n_mmio); in pci_unmap_slice_regs()
802 afu->p2n_mmio = NULL; in pci_unmap_slice_regs()
804 if (afu->native->p1n_mmio) { in pci_unmap_slice_regs()
805 iounmap(afu->native->p1n_mmio); in pci_unmap_slice_regs()
806 afu->native->p1n_mmio = NULL; in pci_unmap_slice_regs()
808 if (afu->native->afu_desc_mmio) { in pci_unmap_slice_regs()
809 iounmap(afu->native->afu_desc_mmio); in pci_unmap_slice_regs()
810 afu->native->afu_desc_mmio = NULL; in pci_unmap_slice_regs()
820 idr_destroy(&afu->contexts_idr); in cxl_pci_release_afu()
823 kfree(afu->native); in cxl_pci_release_afu()
833 afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val); in cxl_read_afu_descriptor()
834 afu->max_procs_virtualised = AFUD_NUM_PROCS(val); in cxl_read_afu_descriptor()
835 afu->crs_num = AFUD_NUM_CRS(val); in cxl_read_afu_descriptor()
838 afu->modes_supported |= CXL_MODE_DIRECTED; in cxl_read_afu_descriptor()
840 afu->modes_supported |= CXL_MODE_DEDICATED; in cxl_read_afu_descriptor()
842 afu->modes_supported |= CXL_MODE_TIME_SLICED; in cxl_read_afu_descriptor()
845 afu->pp_size = AFUD_PPPSA_LEN(val) * 4096; in cxl_read_afu_descriptor()
846 afu->psa = AFUD_PPPSA_PSA(val); in cxl_read_afu_descriptor()
847 if ((afu->pp_psa = AFUD_PPPSA_PP(val))) in cxl_read_afu_descriptor()
848 afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu); in cxl_read_afu_descriptor()
851 afu->crs_len = AFUD_CR_LEN(val) * 256; in cxl_read_afu_descriptor()
852 afu->crs_offset = AFUD_READ_CR_OFF(afu); in cxl_read_afu_descriptor()
856 afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096; in cxl_read_afu_descriptor()
857 afu->eb_offset = AFUD_READ_EB_OFF(afu); in cxl_read_afu_descriptor()
860 if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) { in cxl_read_afu_descriptor()
861 dev_warn(&afu->dev, in cxl_read_afu_descriptor()
863 afu->eb_offset); in cxl_read_afu_descriptor()
864 dev_info(&afu->dev, in cxl_read_afu_descriptor()
867 afu->eb_len = 0; in cxl_read_afu_descriptor()
878 if (afu->psa && afu->adapter->ps_size < in cxl_afu_descriptor_looks_ok()
879 (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) { in cxl_afu_descriptor_looks_ok()
880 dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n"); in cxl_afu_descriptor_looks_ok()
881 return -ENODEV; in cxl_afu_descriptor_looks_ok()
884 if (afu->pp_psa && (afu->pp_size < PAGE_SIZE)) in cxl_afu_descriptor_looks_ok()
885 dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size); in cxl_afu_descriptor_looks_ok()
887 for (i = 0; i < afu->crs_num; i++) { in cxl_afu_descriptor_looks_ok()
888 rc = cxl_ops->afu_cr_read32(afu, i, 0, &val); in cxl_afu_descriptor_looks_ok()
890 dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i); in cxl_afu_descriptor_looks_ok()
891 return -EINVAL; in cxl_afu_descriptor_looks_ok()
895 if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) { in cxl_afu_descriptor_looks_ok()
906 dev_err(&afu->dev, "AFU does not support any processes\n"); in cxl_afu_descriptor_looks_ok()
907 return -EINVAL; in cxl_afu_descriptor_looks_ok()
924 dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg); in sanitise_afu_regs_psl9()
925 if (cxl_ops->afu_reset(afu)) in sanitise_afu_regs_psl9()
926 return -EIO; in sanitise_afu_regs_psl9()
928 return -EIO; in sanitise_afu_regs_psl9()
930 return -EIO; in sanitise_afu_regs_psl9()
936 dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg); in sanitise_afu_regs_psl9()
942 if (afu->adapter->native->sl_ops->register_serr_irq) { in sanitise_afu_regs_psl9()
946 dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); in sanitise_afu_regs_psl9()
952 dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg); in sanitise_afu_regs_psl9()
970 dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg); in sanitise_afu_regs_psl8()
971 if (cxl_ops->afu_reset(afu)) in sanitise_afu_regs_psl8()
972 return -EIO; in sanitise_afu_regs_psl8()
974 return -EIO; in sanitise_afu_regs_psl8()
976 return -EIO; in sanitise_afu_regs_psl8()
991 dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg); in sanitise_afu_regs_psl8()
997 if (afu->adapter->native->sl_ops->register_serr_irq) { in sanitise_afu_regs_psl8()
1001 dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg); in sanitise_afu_regs_psl8()
1007 dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg); in sanitise_afu_regs_psl8()
1027 const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset; in cxl_pci_afu_read_err_buffer()
1029 if (count == 0 || off < 0 || (size_t)off >= afu->eb_len) in cxl_pci_afu_read_err_buffer()
1033 count = min((size_t)(afu->eb_len - off), count); in cxl_pci_afu_read_err_buffer()
1036 aligned_length = aligned_end - aligned_start; in cxl_pci_afu_read_err_buffer()
1041 count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7); in cxl_pci_afu_read_err_buffer()
1047 return -ENOMEM; in cxl_pci_afu_read_err_buffer()
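/*
 * The aligned_start/aligned_end arithmetic above widens the requested
 * [off, off + count) window to 8-byte boundaries so the error buffer can be
 * copied with aligned MMIO reads into a bounce buffer. Standalone sketch of
 * the window calculation (illustrative helper):
 */
static size_t err_buf_window_sketch(loff_t off, size_t count,
				    loff_t *aligned_start)
{
	loff_t aligned_end = (off + count + 7) & ~7ull;	/* round up   */

	*aligned_start = off & ~7ull;			/* round down */
	return aligned_end - *aligned_start;		/* window len */
}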
1065 if (adapter->native->sl_ops->sanitise_afu_regs) { in pci_configure_afu()
1066 rc = adapter->native->sl_ops->sanitise_afu_regs(afu); in pci_configure_afu()
1072 if ((rc = cxl_ops->afu_reset(afu))) in pci_configure_afu()
1084 if (adapter->native->sl_ops->afu_regs_init) in pci_configure_afu()
1085 if ((rc = adapter->native->sl_ops->afu_regs_init(afu))) in pci_configure_afu()
1088 if (adapter->native->sl_ops->register_serr_irq) in pci_configure_afu()
1089 if ((rc = adapter->native->sl_ops->register_serr_irq(afu))) in pci_configure_afu()
1095 atomic_set(&afu->configured_state, 0); in pci_configure_afu()
1099 if (adapter->native->sl_ops->release_serr_irq) in pci_configure_afu()
1100 adapter->native->sl_ops->release_serr_irq(afu); in pci_configure_afu()
1112 if (atomic_read(&afu->configured_state) != -1) { in pci_deconfigure_afu()
1113 while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1) in pci_deconfigure_afu()
1117 if (afu->adapter->native->sl_ops->release_serr_irq) in pci_deconfigure_afu()
1118 afu->adapter->native->sl_ops->release_serr_irq(afu); in pci_deconfigure_afu()
1125 int rc = -ENOMEM; in pci_init_afu()
1129 return -ENOMEM; in pci_init_afu()
1131 afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL); in pci_init_afu()
1132 if (!afu->native) in pci_init_afu()
1135 mutex_init(&afu->native->spa_mutex); in pci_init_afu()
1137 rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice); in pci_init_afu()
1158 adapter->afu[afu->slice] = afu; in pci_init_afu()
1161 dev_info(&afu->dev, "Can't register vPHB\n"); in pci_init_afu()
1166 device_del(&afu->dev); in pci_init_afu()
1170 put_device(&afu->dev); in pci_init_afu()
1174 kfree(afu->native); in pci_init_afu()
1192 spin_lock(&afu->adapter->afu_list_lock); in cxl_pci_remove_afu()
1193 afu->adapter->afu[afu->slice] = NULL; in cxl_pci_remove_afu()
1194 spin_unlock(&afu->adapter->afu_list_lock); in cxl_pci_remove_afu()
1197 cxl_ops->afu_deactivate_mode(afu, afu->current_mode); in cxl_pci_remove_afu()
1200 device_unregister(&afu->dev); in cxl_pci_remove_afu()
1205 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_pci_reset()
1208 if (adapter->perst_same_image) { in cxl_pci_reset()
1209 dev_warn(&dev->dev, in cxl_pci_reset()
1211 return -EINVAL; in cxl_pci_reset()
1214 dev_info(&dev->dev, "CXL reset\n"); in cxl_pci_reset()
1225 dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n"); in cxl_pci_reset()
1242 if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev)))) in cxl_map_adapter_regs()
1245 if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev)))) in cxl_map_adapter_regs()
1251 iounmap(adapter->native->p1_mmio); in cxl_map_adapter_regs()
1252 adapter->native->p1_mmio = NULL; in cxl_map_adapter_regs()
1258 return -ENOMEM; in cxl_map_adapter_regs()
1263 if (adapter->native->p1_mmio) { in cxl_unmap_adapter_regs()
1264 iounmap(adapter->native->p1_mmio); in cxl_unmap_adapter_regs()
1265 adapter->native->p1_mmio = NULL; in cxl_unmap_adapter_regs()
1266 pci_release_region(to_pci_dev(adapter->dev.parent), 2); in cxl_unmap_adapter_regs()
1268 if (adapter->native->p2_mmio) { in cxl_unmap_adapter_regs()
1269 iounmap(adapter->native->p2_mmio); in cxl_unmap_adapter_regs()
1270 adapter->native->p2_mmio = NULL; in cxl_unmap_adapter_regs()
1271 pci_release_region(to_pci_dev(adapter->dev.parent), 0); in cxl_unmap_adapter_regs()
1284 dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n"); in cxl_read_vsec()
1285 return -ENODEV; in cxl_read_vsec()
1290 dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n"); in cxl_read_vsec()
1291 return -EINVAL; in cxl_read_vsec()
1294 CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status); in cxl_read_vsec()
1295 CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev); in cxl_read_vsec()
1296 CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major); in cxl_read_vsec()
1297 CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor); in cxl_read_vsec()
1298 CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image); in cxl_read_vsec()
1300 adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED); in cxl_read_vsec()
1301 adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED); in cxl_read_vsec()
1302 adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE); in cxl_read_vsec()
1304 CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices); in cxl_read_vsec()
1311 * code a month later and forget what units these are in ;-) */ in cxl_read_vsec()
1312 adapter->native->ps_off = ps_off * 64 * 1024; in cxl_read_vsec()
1313 adapter->ps_size = ps_size * 64 * 1024; in cxl_read_vsec()
1314 adapter->native->afu_desc_off = afu_desc_off * 64 * 1024; in cxl_read_vsec()
1315 adapter->native->afu_desc_size = afu_desc_size * 64 * 1024; in cxl_read_vsec()
1317 /* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */ in cxl_read_vsec()
1318 adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices; in cxl_read_vsec()
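/*
 * Worked example of the interrupt budget above (numbers are hypothetical):
 * a card whose PHB exposes 2048 interrupts and which carries 2 AFUs reserves
 * 1 interrupt for the PSL error and 2 per AFU (slice error + DSI), leaving
 * 2048 - 1 - 2*2 = 2043 interrupts for user contexts.
 */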
1324 * Workaround a PCIe Host Bridge defect on some cards, that can cause
1336 if (adapter->psl_rev & 0xf000) in cxl_fixup_malformed_tlp()
1351 if (cxl_is_power8() && (adapter->caia_major == 1)) in cxl_compatible_caia_version()
1354 if (cxl_is_power9() && (adapter->caia_major == 2)) in cxl_compatible_caia_version()
1362 if (adapter->vsec_status & CXL_STATUS_SECOND_PORT) in cxl_vsec_looks_ok()
1363 return -EBUSY; in cxl_vsec_looks_ok()
1365 if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) { in cxl_vsec_looks_ok()
1366 dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n"); in cxl_vsec_looks_ok()
1367 return -EINVAL; in cxl_vsec_looks_ok()
1371 dev_info(&dev->dev, "Ignoring card. PSL type is not supported (caia version: %d)\n", in cxl_vsec_looks_ok()
1372 adapter->caia_major); in cxl_vsec_looks_ok()
1373 return -ENODEV; in cxl_vsec_looks_ok()
1376 if (!adapter->slices) { in cxl_vsec_looks_ok()
1379 dev_err(&dev->dev, "ABORTING: Device has no AFUs\n"); in cxl_vsec_looks_ok()
1380 return -EINVAL; in cxl_vsec_looks_ok()
1383 if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) { in cxl_vsec_looks_ok()
1384 dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n"); in cxl_vsec_looks_ok()
1385 return -EINVAL; in cxl_vsec_looks_ok()
1388 if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) { in cxl_vsec_looks_ok()
1389 dev_err(&dev->dev, "ABORTING: Problem state size larger than " in cxl_vsec_looks_ok()
1391 adapter->ps_size, p2_size(dev) - adapter->native->ps_off); in cxl_vsec_looks_ok()
1392 return -EINVAL; in cxl_vsec_looks_ok()
1400 return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf); in cxl_pci_read_adapter_vpd()
1411 kfree(adapter->native); in cxl_release_adapter()
1415 #define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
1424 if (adapter->native->sl_ops->invalidate_all) { in sanitise_adapter_regs()
1426 if (cxl_is_power9() && (adapter->perst_loads_image)) in sanitise_adapter_regs()
1428 rc = adapter->native->sl_ops->invalidate_all(adapter); in sanitise_adapter_regs()
1441 adapter->dev.parent = &dev->dev; in cxl_configure_adapter()
1442 adapter->dev.release = cxl_release_adapter; in cxl_configure_adapter()
1447 dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc); in cxl_configure_adapter()
1474 if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev))) in cxl_configure_adapter()
1480 adapter->tunneled_ops_supported = false; in cxl_configure_adapter()
1484 dev_info(&dev->dev, "Tunneled operations unsupported\n"); in cxl_configure_adapter()
1486 adapter->tunneled_ops_supported = true; in cxl_configure_adapter()
1489 if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode))) in cxl_configure_adapter()
1493 * In the non-recovery case this has no effect */ in cxl_configure_adapter()
1513 struct pci_dev *pdev = to_pci_dev(adapter->dev.parent); in cxl_deconfigure_adapter()
1528 struct pci_dev *dev = to_pci_dev(adapter->dev.parent); in cxl_stop_trace_psl9()
1533 trace_mask = (0x3ULL << (62 - traceid * 2)); in cxl_stop_trace_psl9()
1534 trace_state = (trace_state & trace_mask) >> (62 - traceid * 2); in cxl_stop_trace_psl9()
1535 dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n", in cxl_stop_trace_psl9()
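/*
 * The trace state above is packed two bits per trace engine, most significant
 * first: traceid 0 occupies MSB-0 bits 0..1, traceid 1 bits 2..3, and so on,
 * hence the 0x3 mask shifted by (62 - traceid * 2). Equivalent extraction as
 * a one-liner (illustrative helper):
 */
static inline u64 traceid_state_sketch(u64 trace_cfg, int traceid)
{
	return (trace_cfg >> (62 - traceid * 2)) & 0x3;
}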
1553 spin_lock(&adapter->afu_list_lock); in cxl_stop_trace_psl8()
1554 for (slice = 0; slice < adapter->slices; slice++) { in cxl_stop_trace_psl8()
1555 if (adapter->afu[slice]) in cxl_stop_trace_psl8()
1556 cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE, in cxl_stop_trace_psl8()
1559 spin_unlock(&adapter->afu_list_lock); in cxl_stop_trace_psl8()
1612 dev_info(&dev->dev, "Device uses a PSL8\n"); in set_sl_ops()
1613 adapter->native->sl_ops = &psl8_ops; in set_sl_ops()
1615 dev_info(&dev->dev, "Device uses a PSL9\n"); in set_sl_ops()
1616 adapter->native->sl_ops = &psl9_ops; in set_sl_ops()
1628 return ERR_PTR(-ENOMEM); in cxl_pci_init_adapter()
1630 adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL); in cxl_pci_init_adapter()
1631 if (!adapter->native) { in cxl_pci_init_adapter()
1632 rc = -ENOMEM; in cxl_pci_init_adapter()
1641 adapter->perst_loads_image = true; in cxl_pci_init_adapter()
1642 adapter->perst_same_image = false; in cxl_pci_init_adapter()
1669 device_del(&adapter->dev); in cxl_pci_init_adapter()
1671 /* This should mirror cxl_remove_adapter, except without the in cxl_pci_init_adapter()
1676 put_device(&adapter->dev); in cxl_pci_init_adapter()
1680 cxl_release_adapter(&adapter->dev); in cxl_pci_init_adapter()
1698 device_unregister(&adapter->dev); in cxl_pci_remove_adapter()
1710 return -ENODEV; in cxl_slot_is_switched()
1730 dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n"); in cxl_probe()
1731 return -ENODEV; in cxl_probe()
1735 dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n"); in cxl_probe()
1736 return -ENODEV; in cxl_probe()
1740 dev_info(&dev->dev, "Only Radix mode supported\n"); in cxl_probe()
1741 return -ENODEV; in cxl_probe()
1749 dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter)); in cxl_probe()
1753 for (slice = 0; slice < adapter->slices; slice++) { in cxl_probe()
1755 dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc); in cxl_probe()
1759 rc = cxl_afu_select_best_mode(adapter->afu[slice]); in cxl_probe()
1761 dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc); in cxl_probe()
1777 for (i = 0; i < adapter->slices; i++) { in cxl_remove()
1778 afu = adapter->afu[i]; in cxl_remove()
1796 if (afu == NULL || afu->phb == NULL) in cxl_vphb_error_detected()
1799 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { in cxl_vphb_error_detected()
1800 afu_drv = to_pci_driver(afu_dev->dev.driver); in cxl_vphb_error_detected()
1804 afu_dev->error_state = state; in cxl_vphb_error_detected()
1806 err_handler = afu_drv->err_handler; in cxl_vphb_error_detected()
1808 afu_result = err_handler->error_detected(afu_dev, in cxl_vphb_error_detected()
1837 spin_lock(&adapter->afu_list_lock); in cxl_pci_error_detected()
1838 for (i = 0; i < adapter->slices; i++) { in cxl_pci_error_detected()
1839 afu = adapter->afu[i]; in cxl_pci_error_detected()
1846 spin_unlock(&adapter->afu_list_lock); in cxl_pci_error_detected()
1853 * different, including a non-CAPI card. As such, by default in cxl_pci_error_detected()
1855 * the slot re-probed. (TODO: check EEH doesn't blindly rebind in cxl_pci_error_detected()
1860 * order to get back to a more reliable known-good state. in cxl_pci_error_detected()
1863 * trust that we'll come back the same - we could have a new in cxl_pci_error_detected()
1866 * back the same - for example a regular EEH event. in cxl_pci_error_detected()
1872 if (adapter->perst_loads_image && !adapter->perst_same_image) { in cxl_pci_error_detected()
1874 dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n"); in cxl_pci_error_detected()
1883 * - We send the driver, if bound, an error_detected callback. in cxl_pci_error_detected()
1888 * - We detach all contexts associated with the AFU. This in cxl_pci_error_detected()
1894 * - We clean up our side: releasing and unmapping resources we hold in cxl_pci_error_detected()
1899 * - Any contexts you create in your kernel driver (except in cxl_pci_error_detected()
1904 * - We will take responsibility for re-initialising the in cxl_pci_error_detected()
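/*
 * The recovery contract described in the comments above is the standard PCI
 * error-handling interface, delivered to drivers bound on the cxl vPHB. A
 * minimal sketch of an AFU driver's side of it (names and return-value
 * choices are illustrative, not from any real driver):
 */
static pci_ers_result_t afu_drv_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	/* stop using the AFU; cxl detaches the contexts behind the scenes */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t afu_drv_slot_reset(struct pci_dev *pdev)
{
	/* the card has been re-initialised: re-create contexts, reload state */
	return PCI_ERS_RESULT_RECOVERED;
}

static void afu_drv_resume(struct pci_dev *pdev)
{
	/* normal operation may restart */
}

static const struct pci_error_handlers afu_drv_err_handlers = {
	.error_detected	= afu_drv_error_detected,
	.slot_reset	= afu_drv_slot_reset,
	.resume		= afu_drv_resume,
};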
1930 spin_lock(&adapter->afu_list_lock); in cxl_pci_error_detected()
1932 for (i = 0; i < adapter->slices; i++) { in cxl_pci_error_detected()
1933 afu = adapter->afu[i]; in cxl_pci_error_detected()
1940 cxl_ops->afu_deactivate_mode(afu, afu->current_mode); in cxl_pci_error_detected()
1950 spin_unlock(&adapter->afu_list_lock); in cxl_pci_error_detected()
1954 dev_warn(&adapter->dev, in cxl_pci_error_detected()
1955 "Couldn't take context lock with %d active-contexts\n", in cxl_pci_error_detected()
1956 atomic_read(&adapter->contexts_num)); in cxl_pci_error_detected()
1985 spin_lock(&adapter->afu_list_lock); in cxl_pci_slot_reset()
1986 for (i = 0; i < adapter->slices; i++) { in cxl_pci_slot_reset()
1987 afu = adapter->afu[i]; in cxl_pci_slot_reset()
1998 if (afu->phb == NULL) in cxl_pci_slot_reset()
2001 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { in cxl_pci_slot_reset()
2014 afu_dev->dev.archdata.cxl_ctx = ctx; in cxl_pci_slot_reset()
2016 if (cxl_ops->afu_check_and_enable(afu)) in cxl_pci_slot_reset()
2019 afu_dev->error_state = pci_channel_io_normal; in cxl_pci_slot_reset()
2027 afu_drv = to_pci_driver(afu_dev->dev.driver); in cxl_pci_slot_reset()
2031 err_handler = afu_drv->err_handler; in cxl_pci_slot_reset()
2032 if (err_handler && err_handler->slot_reset) in cxl_pci_slot_reset()
2033 afu_result = err_handler->slot_reset(afu_dev); in cxl_pci_slot_reset()
2040 spin_unlock(&adapter->afu_list_lock); in cxl_pci_slot_reset()
2044 spin_unlock(&adapter->afu_list_lock); in cxl_pci_slot_reset()
2051 dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n"); in cxl_pci_slot_reset()
2068 spin_lock(&adapter->afu_list_lock); in cxl_pci_resume()
2069 for (i = 0; i < adapter->slices; i++) { in cxl_pci_resume()
2070 afu = adapter->afu[i]; in cxl_pci_resume()
2072 if (afu == NULL || afu->phb == NULL) in cxl_pci_resume()
2075 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) { in cxl_pci_resume()
2076 afu_drv = to_pci_driver(afu_dev->dev.driver); in cxl_pci_resume()
2080 err_handler = afu_drv->err_handler; in cxl_pci_resume()
2081 if (err_handler && err_handler->resume) in cxl_pci_resume()
2082 err_handler->resume(afu_dev); in cxl_pci_resume()
2085 spin_unlock(&adapter->afu_list_lock); in cxl_pci_resume()
2095 .name = "cxl-pci",