Lines Matching +full:host +full:- +full:port

1 // SPDX-License-Identifier: GPL-2.0-only
26 * cross-device interleave coordination. The CXL core also establishes and
42 int cxl_num_decoders_committed(struct cxl_port *port) in cxl_num_decoders_committed() argument
46 return port->commit_end + 1; in cxl_num_decoders_committed()
52 return sysfs_emit(buf, "%s\n", dev->type->name); in devtype_show()
58 if (dev->type == &cxl_nvdimm_bridge_type) in cxl_device_id()
60 if (dev->type == &cxl_nvdimm_type) in cxl_device_id()
62 if (dev->type == CXL_PMEM_REGION_TYPE()) in cxl_device_id()
64 if (dev->type == CXL_DAX_REGION_TYPE()) in cxl_device_id()
73 if (dev->type == CXL_REGION_TYPE()) in cxl_device_id()
75 if (dev->type == &cxl_pmu_type) in cxl_device_id()
102 return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start); in start_show()
111 return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range)); in size_show()
122 (cxld->flags & (flag)) ? "1" : "0"); \
137 switch (cxld->target_type) { in target_type_show()
143 return -ENXIO; in target_type_show()
149 struct cxl_decoder *cxld = &cxlsd->cxld; in emit_target_list()
153 for (i = 0; i < cxld->interleave_ways; i++) { in emit_target_list()
154 struct cxl_dport *dport = cxlsd->target[i]; in emit_target_list()
160 if (i + 1 < cxld->interleave_ways) in emit_target_list()
161 next = cxlsd->target[i + 1]; in emit_target_list()
162 rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id, in emit_target_list()
198 return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxled->mode)); in mode_show()
213 return -EINVAL; in mode_store()
254 return -EINVAL; in dpa_size_store()
277 return sysfs_emit(buf, "%d\n", cxld->interleave_granularity); in interleave_granularity_show()
287 return sysfs_emit(buf, "%d\n", cxld->interleave_ways); in interleave_ways_show()
297 return sysfs_emit(buf, "%d\n", cxlrd->qos_class); in qos_class_show()
331 return (cxlrd->cxlsd.cxld.flags & flags) == flags; in can_create_pmem()
338 return (cxlrd->cxlsd.cxld.flags & flags) == flags; in can_create_ram()
356 return a->mode; in cxl_root_decoder_visible()
411 struct cxl_port *port = to_cxl_port(cxld->dev.parent); in __cxl_decoder_release() local
413 ida_free(&port->decoder_ida, cxld->id); in __cxl_decoder_release()
414 put_device(&port->dev); in __cxl_decoder_release()
421 __cxl_decoder_release(&cxled->cxld); in cxl_endpoint_decoder_release()
429 __cxl_decoder_release(&cxlsd->cxld); in cxl_switch_decoder_release()
446 if (atomic_read(&cxlrd->region_id) >= 0) in cxl_root_decoder_release()
447 memregion_free(atomic_read(&cxlrd->region_id)); in cxl_root_decoder_release()
448 __cxl_decoder_release(&cxlrd->cxlsd.cxld); in cxl_root_decoder_release()
472 return dev->type == &cxl_decoder_endpoint_type; in is_endpoint_decoder()
478 return dev->type == &cxl_decoder_root_type; in is_root_decoder()
484 return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type; in is_switch_decoder()
518 put_device(ep->ep); in cxl_ep_release()
522 static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep) in cxl_ep_remove() argument
526 xa_erase(&port->endpoints, (unsigned long) ep->ep); in cxl_ep_remove()
532 struct cxl_port *port = to_cxl_port(dev); in cxl_port_release() local
536 xa_for_each(&port->endpoints, index, ep) in cxl_port_release()
537 cxl_ep_remove(port, ep); in cxl_port_release()
538 xa_destroy(&port->endpoints); in cxl_port_release()
539 xa_destroy(&port->dports); in cxl_port_release()
540 xa_destroy(&port->regions); in cxl_port_release()
541 ida_free(&cxl_port_ida, port->id); in cxl_port_release()
542 if (is_cxl_root(port)) in cxl_port_release()
543 kfree(to_cxl_root(port)); in cxl_port_release()
545 kfree(port); in cxl_port_release()
551 struct cxl_port *port = to_cxl_port(dev); in decoders_committed_show() local
555 rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port)); in decoders_committed_show()
586 return dev->type == &cxl_port_type; in is_cxl_port()
592 if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type, in to_cxl_port()
601 struct cxl_port *port = _port; in unregister_port() local
605 if (is_cxl_root(port)) in unregister_port()
608 parent = to_cxl_port(port->dev.parent); in unregister_port()
611 * The CXL root port and the first level of ports are unregistered in unregister_port()
613 * unregistered while holding their parent port lock. in unregister_port()
616 lock_dev = port->uport_dev; in unregister_port()
618 lock_dev = parent->uport_dev; in unregister_port()
620 lock_dev = &parent->dev; in unregister_port()
623 port->dead = true; in unregister_port()
624 device_unregister(&port->dev); in unregister_port()
629 struct cxl_port *port = _port; in cxl_unlink_uport() local
631 sysfs_remove_link(&port->dev.kobj, "uport"); in cxl_unlink_uport()
634 static int devm_cxl_link_uport(struct device *host, struct cxl_port *port) in devm_cxl_link_uport() argument
638 rc = sysfs_create_link(&port->dev.kobj, &port->uport_dev->kobj, in devm_cxl_link_uport()
642 return devm_add_action_or_reset(host, cxl_unlink_uport, port); in devm_cxl_link_uport()
647 struct cxl_port *port = _port; in cxl_unlink_parent_dport() local
649 sysfs_remove_link(&port->dev.kobj, "parent_dport"); in cxl_unlink_parent_dport()
652 static int devm_cxl_link_parent_dport(struct device *host, in devm_cxl_link_parent_dport() argument
653 struct cxl_port *port, in devm_cxl_link_parent_dport() argument
661 rc = sysfs_create_link(&port->dev.kobj, &parent_dport->dport_dev->kobj, in devm_cxl_link_parent_dport()
665 return devm_add_action_or_reset(host, cxl_unlink_parent_dport, port); in devm_cxl_link_parent_dport()
674 struct cxl_port *port, *_port __free(kfree) = NULL; in cxl_port_alloc() local
682 return ERR_PTR(-ENOMEM); in cxl_port_alloc()
684 _port = kzalloc(sizeof(*port), GFP_KERNEL); in cxl_port_alloc()
686 return ERR_PTR(-ENOMEM); in cxl_port_alloc()
694 port = &no_free_ptr(cxl_root)->port; in cxl_port_alloc()
696 port = no_free_ptr(_port); in cxl_port_alloc()
698 port->id = rc; in cxl_port_alloc()
699 port->uport_dev = uport_dev; in cxl_port_alloc()
702 * The top-level cxl_port "cxl_root" does not have a cxl_port as in cxl_port_alloc()
707 dev = &port->dev; in cxl_port_alloc()
709 struct cxl_port *parent_port = parent_dport->port; in cxl_port_alloc()
712 dev->parent = &parent_port->dev; in cxl_port_alloc()
713 port->depth = parent_port->depth + 1; in cxl_port_alloc()
714 port->parent_dport = parent_dport; in cxl_port_alloc()
717 * walk to the host bridge, or the first ancestor that knows in cxl_port_alloc()
718 * the host bridge in cxl_port_alloc()
720 iter = port; in cxl_port_alloc()
721 while (!iter->host_bridge && in cxl_port_alloc()
722 !is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_port_alloc()
723 iter = to_cxl_port(iter->dev.parent); in cxl_port_alloc()
724 if (iter->host_bridge) in cxl_port_alloc()
725 port->host_bridge = iter->host_bridge; in cxl_port_alloc()
726 else if (parent_dport->rch) in cxl_port_alloc()
727 port->host_bridge = parent_dport->dport_dev; in cxl_port_alloc()
729 port->host_bridge = iter->uport_dev; in cxl_port_alloc()
730 dev_dbg(uport_dev, "host-bridge: %s\n", in cxl_port_alloc()
731 dev_name(port->host_bridge)); in cxl_port_alloc()
733 dev->parent = uport_dev; in cxl_port_alloc()
735 ida_init(&port->decoder_ida); in cxl_port_alloc()
736 port->hdm_end = -1; in cxl_port_alloc()
737 port->commit_end = -1; in cxl_port_alloc()
738 xa_init(&port->dports); in cxl_port_alloc()
739 xa_init(&port->endpoints); in cxl_port_alloc()
740 xa_init(&port->regions); in cxl_port_alloc()
743 lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth); in cxl_port_alloc()
745 dev->bus = &cxl_bus_type; in cxl_port_alloc()
746 dev->type = &cxl_port_type; in cxl_port_alloc()
748 return port; in cxl_port_alloc()
751 static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map, in cxl_setup_comp_regs() argument
755 .host = host, in cxl_setup_comp_regs()
763 map->reg_type = CXL_REGLOC_RBI_COMPONENT; in cxl_setup_comp_regs()
764 map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE; in cxl_setup_comp_regs()
769 static int cxl_port_setup_regs(struct cxl_port *port, in cxl_port_setup_regs() argument
772 if (dev_is_platform(port->uport_dev)) in cxl_port_setup_regs()
774 return cxl_setup_comp_regs(&port->dev, &port->reg_map, in cxl_port_setup_regs()
778 static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport, in cxl_dport_setup_regs() argument
783 if (dev_is_platform(dport->dport_dev)) in cxl_dport_setup_regs()
787 * use @dport->dport_dev for the context for error messages during in cxl_dport_setup_regs()
788 * register probing, and fixup @host after the fact, since @host may be in cxl_dport_setup_regs()
791 rc = cxl_setup_comp_regs(dport->dport_dev, &dport->reg_map, in cxl_dport_setup_regs()
793 dport->reg_map.host = host; in cxl_dport_setup_regs()
803 if (dport->rch) in cxl_einj_inject()
804 return einj_cxl_inject_rch_error(dport->rcrb.base, type); in cxl_einj_inject()
806 return einj_cxl_inject_error(to_pci_dev(dport->dport_dev), type); in cxl_einj_inject()
819 * dport_dev needs to be a PCIe port for CXL 2.0+ ports because in cxl_debugfs_create_dport_dir()
822 if (!dport->rch && !dev_is_pci(dport->dport_dev)) in cxl_debugfs_create_dport_dir()
825 dir = cxl_debugfs_create_dir(dev_name(dport->dport_dev)); in cxl_debugfs_create_dport_dir()
831 static int cxl_port_add(struct cxl_port *port, in cxl_port_add() argument
835 struct device *dev __free(put_device) = &port->dev; in cxl_port_add()
838 if (is_cxl_memdev(port->uport_dev)) { in cxl_port_add()
839 struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev); in cxl_port_add()
840 struct cxl_dev_state *cxlds = cxlmd->cxlds; in cxl_port_add()
842 rc = dev_set_name(dev, "endpoint%d", port->id); in cxl_port_add()
851 port->reg_map = cxlds->reg_map; in cxl_port_add()
852 port->reg_map.host = &port->dev; in cxl_port_add()
853 cxlmd->endpoint = port; in cxl_port_add()
855 rc = dev_set_name(dev, "port%d", port->id); in cxl_port_add()
859 rc = cxl_port_setup_regs(port, component_reg_phys); in cxl_port_add()
863 rc = dev_set_name(dev, "root%d", port->id); in cxl_port_add()
877 static struct cxl_port *__devm_cxl_add_port(struct device *host, in __devm_cxl_add_port() argument
882 struct cxl_port *port; in __devm_cxl_add_port() local
885 port = cxl_port_alloc(uport_dev, parent_dport); in __devm_cxl_add_port()
886 if (IS_ERR(port)) in __devm_cxl_add_port()
887 return port; in __devm_cxl_add_port()
889 rc = cxl_port_add(port, component_reg_phys, parent_dport); in __devm_cxl_add_port()
893 rc = devm_add_action_or_reset(host, unregister_port, port); in __devm_cxl_add_port()
897 rc = devm_cxl_link_uport(host, port); in __devm_cxl_add_port()
901 rc = devm_cxl_link_parent_dport(host, port, parent_dport); in __devm_cxl_add_port()
906 port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev)); in __devm_cxl_add_port()
908 return port; in __devm_cxl_add_port()
912 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
913 * @host: host device for devm operations
914 * @uport_dev: "physical" device implementing this upstream port
918 struct cxl_port *devm_cxl_add_port(struct device *host, in devm_cxl_add_port() argument
923 struct cxl_port *port, *parent_port; in devm_cxl_add_port() local
925 port = __devm_cxl_add_port(host, uport_dev, component_reg_phys, in devm_cxl_add_port()
928 parent_port = parent_dport ? parent_dport->port : NULL; in devm_cxl_add_port()
929 if (IS_ERR(port)) { in devm_cxl_add_port()
931 parent_port ? " port to " : "", in devm_cxl_add_port()
932 parent_port ? dev_name(&parent_port->dev) : "", in devm_cxl_add_port()
933 parent_port ? "" : " root port", in devm_cxl_add_port()
934 PTR_ERR(port)); in devm_cxl_add_port()
937 dev_name(&port->dev), in devm_cxl_add_port()
939 parent_port ? dev_name(&parent_port->dev) : "", in devm_cxl_add_port()
940 parent_port ? "" : " (root port)"); in devm_cxl_add_port()
943 return port; in devm_cxl_add_port()
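
A minimal sketch of how a caller might use devm_cxl_add_port(); the devices and the helper name are hypothetical, and the CXL_RESOURCE_NONE register base is illustrative:

/*
 * Hedged sketch: register a port for 'bridge_dev' beneath an existing
 * parent dport. The registered port is devm-managed and torn down via
 * unregister_port() above.
 */
static int example_add_bridge_port(struct device *host,
				   struct device *bridge_dev,
				   struct cxl_dport *parent_dport)
{
	struct cxl_port *port;

	port = devm_cxl_add_port(host, bridge_dev, CXL_RESOURCE_NONE,
				 parent_dport);
	return PTR_ERR_OR_ZERO(port);
}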
947 struct cxl_root *devm_cxl_add_root(struct device *host, in devm_cxl_add_root() argument
951 struct cxl_port *port; in devm_cxl_add_root() local
953 port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL); in devm_cxl_add_root()
954 if (IS_ERR(port)) in devm_cxl_add_root()
955 return ERR_CAST(port); in devm_cxl_add_root()
957 cxl_root = to_cxl_root(port); in devm_cxl_add_root()
958 cxl_root->ops = ops; in devm_cxl_add_root()
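
devm_cxl_add_root() is the entry point a platform driver (e.g. cxl_acpi) uses to establish the hierarchy; a hedged sketch, with the ops table left empty since its callbacks are platform specific:

/* Hypothetical platform driver: create the CXL root port */
static const struct cxl_root_ops example_root_ops = {
	/* platform-specific callbacks, e.g. QoS class lookup, go here */
};

static int example_establish_root(struct device *host)
{
	struct cxl_root *cxl_root = devm_cxl_add_root(host, &example_root_ops);

	return PTR_ERR_OR_ZERO(cxl_root);
}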
963 struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port) in cxl_port_to_pci_bus() argument
965 /* There is no pci_bus associated with a CXL platform-root port */ in cxl_port_to_pci_bus()
966 if (is_cxl_root(port)) in cxl_port_to_pci_bus()
969 if (dev_is_pci(port->uport_dev)) { in cxl_port_to_pci_bus()
970 struct pci_dev *pdev = to_pci_dev(port->uport_dev); in cxl_port_to_pci_bus()
972 return pdev->subordinate; in cxl_port_to_pci_bus()
975 return xa_load(&cxl_root_buses, (unsigned long)port->uport_dev); in cxl_port_to_pci_bus()
984 int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev, in devm_cxl_register_pci_bus() argument
990 return -EINVAL; in devm_cxl_register_pci_bus()
996 return devm_add_action_or_reset(host, unregister_pci_bus, uport_dev); in devm_cxl_register_pci_bus()
1002 struct cxl_port *port, *parent; in dev_is_cxl_root_child() local
1007 port = to_cxl_port(dev); in dev_is_cxl_root_child()
1008 if (is_cxl_root(port)) in dev_is_cxl_root_child()
1011 parent = to_cxl_port(port->dev.parent); in dev_is_cxl_root_child()
1018 struct cxl_root *find_cxl_root(struct cxl_port *port) in find_cxl_root() argument
1020 struct cxl_port *iter = port; in find_cxl_root()
1023 iter = to_cxl_port(iter->dev.parent); in find_cxl_root()
1027 get_device(&iter->dev); in find_cxl_root()
1037 put_device(&cxl_root->port.dev); in put_cxl_root()
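
A short usage sketch for the find_cxl_root() / put_cxl_root() pair, using the same __free() scope-based cleanup pattern this file applies to ports (the caller is hypothetical):

/* Hedged sketch: the reference from find_cxl_root() drops automatically */
static int example_use_root(struct cxl_port *port)
{
	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);

	if (!cxl_root)
		return -ENODEV;
	/* ... consult cxl_root->ops ... */
	return 0;
}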
1041 static struct cxl_dport *find_dport(struct cxl_port *port, int id) in find_dport() argument
1046 device_lock_assert(&port->dev); in find_dport()
1047 xa_for_each(&port->dports, index, dport) in find_dport()
1048 if (dport->port_id == id) in find_dport()
1053 static int add_dport(struct cxl_port *port, struct cxl_dport *dport) in add_dport() argument
1058 device_lock_assert(&port->dev); in add_dport()
1059 dup = find_dport(port, dport->port_id); in add_dport()
1061 dev_err(&port->dev, in add_dport()
1062 "unable to add dport%d-%s non-unique port id (%s)\n", in add_dport()
1063 dport->port_id, dev_name(dport->dport_dev), in add_dport()
1064 dev_name(dup->dport_dev)); in add_dport()
1065 return -EBUSY; in add_dport()
1068 rc = xa_insert(&port->dports, (unsigned long)dport->dport_dev, dport, in add_dport()
1073 port->nr_dports++; in add_dport()
1078 * Since root-level CXL dports cannot be enumerated by PCI they are not
1079 * enumerated by the common port driver that acquires the port lock over
1082 * port lock in that case.
1084 static void cond_cxl_root_lock(struct cxl_port *port) in cond_cxl_root_lock() argument
1086 if (is_cxl_root(port)) in cond_cxl_root_lock()
1087 device_lock(&port->dev); in cond_cxl_root_lock()
1090 static void cond_cxl_root_unlock(struct cxl_port *port) in cond_cxl_root_unlock() argument
1092 if (is_cxl_root(port)) in cond_cxl_root_unlock()
1093 device_unlock(&port->dev); in cond_cxl_root_unlock()
1099 struct cxl_port *port = dport->port; in cxl_dport_remove() local
1101 xa_erase(&port->dports, (unsigned long) dport->dport_dev); in cxl_dport_remove()
1102 put_device(dport->dport_dev); in cxl_dport_remove()
1108 struct cxl_port *port = dport->port; in cxl_dport_unlink() local
1111 sprintf(link_name, "dport%d", dport->port_id); in cxl_dport_unlink()
1112 sysfs_remove_link(&port->dev.kobj, link_name); in cxl_dport_unlink()
1116 __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev, in __devm_cxl_add_dport() argument
1122 struct device *host; in __devm_cxl_add_dport() local
1125 if (is_cxl_root(port)) in __devm_cxl_add_dport()
1126 host = port->uport_dev; in __devm_cxl_add_dport()
1128 host = &port->dev; in __devm_cxl_add_dport()
1130 if (!host->driver) { in __devm_cxl_add_dport()
1131 dev_WARN_ONCE(&port->dev, 1, "dport:%s bad devm context\n", in __devm_cxl_add_dport()
1133 return ERR_PTR(-ENXIO); in __devm_cxl_add_dport()
1138 return ERR_PTR(-EINVAL); in __devm_cxl_add_dport()
1140 dport = devm_kzalloc(host, sizeof(*dport), GFP_KERNEL); in __devm_cxl_add_dport()
1142 return ERR_PTR(-ENOMEM); in __devm_cxl_add_dport()
1144 dport->dport_dev = dport_dev; in __devm_cxl_add_dport()
1145 dport->port_id = port_id; in __devm_cxl_add_dport()
1146 dport->port = port; in __devm_cxl_add_dport()
1149 rc = cxl_dport_setup_regs(&port->dev, dport, in __devm_cxl_add_dport()
1154 dport->rcrb.base = rcrb; in __devm_cxl_add_dport()
1155 component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb, in __devm_cxl_add_dport()
1159 return ERR_PTR(-ENXIO); in __devm_cxl_add_dport()
1170 dport->rch = true; in __devm_cxl_add_dport()
1177 cond_cxl_root_lock(port); in __devm_cxl_add_dport()
1178 rc = add_dport(port, dport); in __devm_cxl_add_dport()
1179 cond_cxl_root_unlock(port); in __devm_cxl_add_dport()
1184 rc = devm_add_action_or_reset(host, cxl_dport_remove, dport); in __devm_cxl_add_dport()
1188 rc = sysfs_create_link(&port->dev.kobj, &dport_dev->kobj, link_name); in __devm_cxl_add_dport()
1192 rc = devm_add_action_or_reset(host, cxl_dport_unlink, dport); in __devm_cxl_add_dport()
1197 dport->link_latency = cxl_pci_get_latency(to_pci_dev(dport_dev)); in __devm_cxl_add_dport()
1205 * devm_cxl_add_dport - append VH downstream port data to a cxl_port
1206 * @port: the cxl_port that references this dport
1212 * either the port's host (for root ports), or the port itself (for
1215 struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, in devm_cxl_add_dport() argument
1221 dport = __devm_cxl_add_dport(port, dport_dev, port_id, in devm_cxl_add_dport()
1225 dev_name(&port->dev), PTR_ERR(dport)); in devm_cxl_add_dport()
1228 dev_name(&port->dev)); in devm_cxl_add_dport()
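
A sketch of the VH usage described above; the port_id value and the caller are illustrative:

/* Hypothetical host-bridge or switch driver registering one dport */
static int example_add_dport(struct cxl_port *port, struct device *dport_dev,
			     int port_id)
{
	struct cxl_dport *dport;

	dport = devm_cxl_add_dport(port, dport_dev, port_id,
				   CXL_RESOURCE_NONE);
	return PTR_ERR_OR_ZERO(dport);
}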
1236 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
1237 * @port: the cxl_port that references this dport
1244 struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port, in devm_cxl_add_rch_dport() argument
1251 dev_dbg(&port->dev, "failed to add RCH dport, missing RCRB\n"); in devm_cxl_add_rch_dport()
1252 return ERR_PTR(-EINVAL); in devm_cxl_add_rch_dport()
1255 dport = __devm_cxl_add_dport(port, dport_dev, port_id, in devm_cxl_add_rch_dport()
1259 dev_name(&port->dev), PTR_ERR(dport)); in devm_cxl_add_rch_dport()
1262 dev_name(&port->dev)); in devm_cxl_add_rch_dport()
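
The RCH variant takes the RCRB base instead of a component register base; a hedged sketch where the address and port id come from platform firmware:

/* Hypothetical RCH caller: rcrb_phys discovered from firmware tables */
static struct cxl_dport *example_add_rch_dport(struct cxl_port *root_port,
					       struct device *dport_dev,
					       resource_size_t rcrb_phys)
{
	return devm_cxl_add_rch_dport(root_port, dport_dev, /* port_id */ 0,
				      rcrb_phys);
}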
1271 struct cxl_port *port = new->dport->port; in add_ep() local
1273 guard(device)(&port->dev); in add_ep()
1274 if (port->dead) in add_ep()
1275 return -ENXIO; in add_ep()
1277 return xa_insert(&port->endpoints, (unsigned long)new->ep, in add_ep()
1282 * cxl_add_ep - register an endpoint's interest in a port
1287 * When those endpoints depart the port can be destroyed once all
1288 * endpoints that care about that port have been removed.
1297 return -ENOMEM; in cxl_add_ep()
1299 ep->ep = get_device(ep_dev); in cxl_add_ep()
1300 ep->dport = dport; in cxl_add_ep()
1318 struct cxl_port *port; in match_port_by_dport() local
1322 if (ctx->parent_port && dev->parent != &ctx->parent_port->dev) in match_port_by_dport()
1325 port = to_cxl_port(dev); in match_port_by_dport()
1326 dport = cxl_find_dport_by_dev(port, ctx->dport_dev); in match_port_by_dport()
1327 if (ctx->dport) in match_port_by_dport()
1328 *ctx->dport = dport; in match_port_by_dport()
1336 if (!ctx->dport_dev) in __find_cxl_port()
1352 struct cxl_port *port; in find_cxl_port() local
1354 port = __find_cxl_port(&ctx); in find_cxl_port()
1355 return port; in find_cxl_port()
1367 struct cxl_port *port; in find_cxl_port_at() local
1369 port = __find_cxl_port(&ctx); in find_cxl_port_at()
1370 return port; in find_cxl_port_at()
1374 * All users of grandparent() are using it to walk PCIe-like switch port
1376 * upstream switch port and N bridges representing downstream switch ports. When
1377 * bridges stack the grand-parent of a downstream switch port is another
1378 * downstream switch port in the immediate ancestor switch.
1382 if (dev && dev->parent) in grandparent()
1383 return dev->parent->parent; in grandparent()
1389 struct cxl_port *port = to_cxl_port(endpoint->dev.parent); in endpoint_host() local
1391 if (is_cxl_root(port)) in endpoint_host()
1392 return port->uport_dev; in endpoint_host()
1393 return &port->dev; in endpoint_host()
1399 struct cxl_port *endpoint = cxlmd->endpoint; in delete_endpoint()
1400 struct device *host = endpoint_host(endpoint); in delete_endpoint() local
1402 scoped_guard(device, host) { in delete_endpoint()
1403 if (host->driver && !endpoint->dead) { in delete_endpoint()
1404 devm_release_action(host, cxl_unlink_parent_dport, endpoint); in delete_endpoint()
1405 devm_release_action(host, cxl_unlink_uport, endpoint); in delete_endpoint()
1406 devm_release_action(host, unregister_port, endpoint); in delete_endpoint()
1408 cxlmd->endpoint = NULL; in delete_endpoint()
1410 put_device(&endpoint->dev); in delete_endpoint()
1411 put_device(host); in delete_endpoint()
1416 struct device *host = endpoint_host(endpoint); in cxl_endpoint_autoremove() local
1417 struct device *dev = &cxlmd->dev; in cxl_endpoint_autoremove()
1419 get_device(host); in cxl_endpoint_autoremove()
1420 get_device(&endpoint->dev); in cxl_endpoint_autoremove()
1421 cxlmd->depth = endpoint->depth; in cxl_endpoint_autoremove()
1427 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
1428 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
1429 * for a port to be unregistered is when all memdevs beneath that port have gone
1430 * through ->remove(). This "bottom-up" removal selectively removes individual
1435 static void delete_switch_port(struct cxl_port *port) in delete_switch_port() argument
1437 devm_release_action(port->dev.parent, cxl_unlink_parent_dport, port); in delete_switch_port()
1438 devm_release_action(port->dev.parent, cxl_unlink_uport, port); in delete_switch_port()
1439 devm_release_action(port->dev.parent, unregister_port, port); in delete_switch_port()
1442 static void reap_dports(struct cxl_port *port) in reap_dports() argument
1447 device_lock_assert(&port->dev); in reap_dports()
1449 xa_for_each(&port->dports, index, dport) { in reap_dports()
1450 devm_release_action(&port->dev, cxl_dport_unlink, dport); in reap_dports()
1451 devm_release_action(&port->dev, cxl_dport_remove, dport); in reap_dports()
1452 devm_kfree(&port->dev, dport); in reap_dports()
1464 struct cxl_port *port; in port_has_memdev() local
1469 port = to_cxl_port(dev); in port_has_memdev()
1470 if (port->depth != ctx->depth) in port_has_memdev()
1473 return !!cxl_ep_load(port, ctx->cxlmd); in port_has_memdev()
1480 for (int i = cxlmd->depth - 1; i >= 1; i--) { in cxl_detach_ep()
1481 struct cxl_port *port, *parent_port; in cxl_detach_ep() local
1493 port = to_cxl_port(dev); in cxl_detach_ep()
1495 parent_port = to_cxl_port(port->dev.parent); in cxl_detach_ep()
1496 device_lock(&parent_port->dev); in cxl_detach_ep()
1497 device_lock(&port->dev); in cxl_detach_ep()
1498 ep = cxl_ep_load(port, cxlmd); in cxl_detach_ep()
1499 dev_dbg(&cxlmd->dev, "disconnect %s from %s\n", in cxl_detach_ep()
1500 ep ? dev_name(ep->ep) : "", dev_name(&port->dev)); in cxl_detach_ep()
1501 cxl_ep_remove(port, ep); in cxl_detach_ep()
1502 if (ep && !port->dead && xa_empty(&port->endpoints) && in cxl_detach_ep()
1503 !is_cxl_root(parent_port) && parent_port->dev.driver) { in cxl_detach_ep()
1506 * enumerated port. Block new cxl_add_ep() and garbage in cxl_detach_ep()
1507 * collect the port. in cxl_detach_ep()
1510 port->dead = true; in cxl_detach_ep()
1511 reap_dports(port); in cxl_detach_ep()
1513 device_unlock(&port->dev); in cxl_detach_ep()
1516 dev_dbg(&cxlmd->dev, "delete %s\n", in cxl_detach_ep()
1517 dev_name(&port->dev)); in cxl_detach_ep()
1518 delete_switch_port(port); in cxl_detach_ep()
1520 device_unlock(&parent_port->dev); in cxl_detach_ep()
1531 * non-PCI device, in practice, only cxl_test hits this case. in find_component_registers()
1554 * CXL-root 'cxl_port' on a previous iteration, fail for now to in add_port_attach_ep()
1555 * be re-probed after platform driver attaches. in add_port_attach_ep()
1557 dev_dbg(&cxlmd->dev, "%s is a root dport\n", in add_port_attach_ep()
1559 return -ENXIO; in add_port_attach_ep()
1566 return -EAGAIN; in add_port_attach_ep()
1571 * dereferencing the port's device before the parent_port is released. in add_port_attach_ep()
1573 struct cxl_port *port __free(put_cxl_port) = NULL; in add_port_attach_ep()
1574 scoped_guard(device, &parent_port->dev) { in add_port_attach_ep()
1575 if (!parent_port->dev.driver) { in add_port_attach_ep()
1576 dev_warn(&cxlmd->dev, in add_port_attach_ep()
1577 "port %s:%s disabled, failed to enumerate CXL.mem\n", in add_port_attach_ep()
1578 dev_name(&parent_port->dev), dev_name(uport_dev)); in add_port_attach_ep()
1579 return -ENXIO; in add_port_attach_ep()
1582 port = find_cxl_port_at(parent_port, dport_dev, &dport); in add_port_attach_ep()
1583 if (!port) { in add_port_attach_ep()
1585 port = devm_cxl_add_port(&parent_port->dev, uport_dev, in add_port_attach_ep()
1587 if (IS_ERR(port)) in add_port_attach_ep()
1588 return PTR_ERR(port); in add_port_attach_ep()
1591 port = find_cxl_port_at(parent_port, dport_dev, &dport); in add_port_attach_ep()
1592 if (!port) in add_port_attach_ep()
1593 return -ENXIO; in add_port_attach_ep()
1597 dev_dbg(&cxlmd->dev, "add to new port %s:%s\n", in add_port_attach_ep()
1598 dev_name(&port->dev), dev_name(port->uport_dev)); in add_port_attach_ep()
1599 rc = cxl_add_ep(dport, &cxlmd->dev); in add_port_attach_ep()
1600 if (rc == -EBUSY) { in add_port_attach_ep()
1605 rc = -ENXIO; in add_port_attach_ep()
1613 struct device *dev = &cxlmd->dev; in devm_cxl_enumerate_ports()
1618 * Skip intermediate port enumeration in the RCH case, there in devm_cxl_enumerate_ports()
1619 * are no ports in between a host bridge and an endpoint. in devm_cxl_enumerate_ports()
1621 if (cxlmd->cxlds->rcd) in devm_cxl_enumerate_ports()
1624 rc = devm_add_action_or_reset(&cxlmd->dev, cxl_detach_ep, cxlmd); in devm_cxl_enumerate_ports()
1630 * Repeat until no more ports are added. Abort if a port add in devm_cxl_enumerate_ports()
1646 uport_dev = dport_dev->parent; in devm_cxl_enumerate_ports()
1650 return -ENXIO; in devm_cxl_enumerate_ports()
1656 struct cxl_port *port __free(put_cxl_port) = in devm_cxl_enumerate_ports()
1658 if (port) { in devm_cxl_enumerate_ports()
1659 dev_dbg(&cxlmd->dev, in devm_cxl_enumerate_ports()
1660 "found already registered port %s:%s\n", in devm_cxl_enumerate_ports()
1661 dev_name(&port->dev), in devm_cxl_enumerate_ports()
1662 dev_name(port->uport_dev)); in devm_cxl_enumerate_ports()
1663 rc = cxl_add_ep(dport, &cxlmd->dev); in devm_cxl_enumerate_ports()
1666 * If the endpoint already exists in the port's list, in devm_cxl_enumerate_ports()
1669 * the parent_port lock as the current port may be being in devm_cxl_enumerate_ports()
1672 if (rc && rc != -EBUSY) in devm_cxl_enumerate_ports()
1676 if (!dev_is_cxl_root_child(&port->dev)) in devm_cxl_enumerate_ports()
1683 /* port missing, try to add parent */ in devm_cxl_enumerate_ports()
1684 if (rc == -EAGAIN) in devm_cxl_enumerate_ports()
1686 /* failed to add ep or port */ in devm_cxl_enumerate_ports()
1689 /* port added, new descendants possible, start over */ in devm_cxl_enumerate_ports()
1700 return find_cxl_port(pdev->dev.parent, dport); in cxl_pci_find_port()
1707 return find_cxl_port(grandparent(&cxlmd->dev), dport); in cxl_mem_find_port()
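
A usage sketch for cxl_mem_find_port(), paired with the put_cxl_port cleanup this file already relies on (the caller is hypothetical):

/* Hedged sketch: locate the port a memdev attaches through */
static int example_find_port(struct cxl_memdev *cxlmd)
{
	struct cxl_dport *dport;
	struct cxl_port *port __free(put_cxl_port) =
		cxl_mem_find_port(cxlmd, &dport);

	if (!port)
		return -ENXIO;
	dev_dbg(&cxlmd->dev, "attached via %s\n", dev_name(dport->dport_dev));
	return 0;
}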
1712 struct cxl_port *port, int *target_map) in decoder_populate_targets() argument
1719 device_lock_assert(&port->dev); in decoder_populate_targets()
1721 if (xa_empty(&port->dports)) in decoder_populate_targets()
1722 return -EINVAL; in decoder_populate_targets()
1725 for (i = 0; i < cxlsd->cxld.interleave_ways; i++) { in decoder_populate_targets()
1726 struct cxl_dport *dport = find_dport(port, target_map[i]); in decoder_populate_targets()
1729 return -ENXIO; in decoder_populate_targets()
1730 cxlsd->target[i] = dport; in decoder_populate_targets()
1739 * cxl_decoder_init - Common decoder setup / initialization
1740 * @port: owning port of this decoder
1743 * A port may contain one or more decoders. Each of those decoders
1748 static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld) in cxl_decoder_init() argument
1753 rc = ida_alloc(&port->decoder_ida, GFP_KERNEL); in cxl_decoder_init()
1758 get_device(&port->dev); in cxl_decoder_init()
1759 cxld->id = rc; in cxl_decoder_init()
1761 dev = &cxld->dev; in cxl_decoder_init()
1763 lockdep_set_class(&dev->mutex, &cxl_decoder_key); in cxl_decoder_init()
1765 dev->parent = &port->dev; in cxl_decoder_init()
1766 dev->bus = &cxl_bus_type; in cxl_decoder_init()
1769 cxld->interleave_ways = 1; in cxl_decoder_init()
1770 cxld->interleave_granularity = PAGE_SIZE; in cxl_decoder_init()
1771 cxld->target_type = CXL_DECODER_HOSTONLYMEM; in cxl_decoder_init()
1772 cxld->hpa_range = (struct range) { in cxl_decoder_init()
1774 .end = -1, in cxl_decoder_init()
1780 static int cxl_switch_decoder_init(struct cxl_port *port, in cxl_switch_decoder_init() argument
1785 return -EINVAL; in cxl_switch_decoder_init()
1787 cxlsd->nr_targets = nr_targets; in cxl_switch_decoder_init()
1788 return cxl_decoder_init(port, &cxlsd->cxld); in cxl_switch_decoder_init()
1792 * cxl_root_decoder_alloc - Allocate a root level decoder
1793 * @port: owning CXL root of this decoder
1797 * 'CXL root' decoder is one that decodes from a top-level / static platform
1801 struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, in cxl_root_decoder_alloc() argument
1809 if (!is_cxl_root(port)) in cxl_root_decoder_alloc()
1810 return ERR_PTR(-EINVAL); in cxl_root_decoder_alloc()
1815 return ERR_PTR(-ENOMEM); in cxl_root_decoder_alloc()
1817 cxlsd = &cxlrd->cxlsd; in cxl_root_decoder_alloc()
1818 rc = cxl_switch_decoder_init(port, cxlsd, nr_targets); in cxl_root_decoder_alloc()
1824 mutex_init(&cxlrd->range_lock); in cxl_root_decoder_alloc()
1826 cxld = &cxlsd->cxld; in cxl_root_decoder_alloc()
1827 cxld->dev.type = &cxl_decoder_root_type; in cxl_root_decoder_alloc()
1832 atomic_set(&cxlrd->region_id, -1); in cxl_root_decoder_alloc()
1835 put_device(&cxld->dev); in cxl_root_decoder_alloc()
1839 atomic_set(&cxlrd->region_id, rc); in cxl_root_decoder_alloc()
1840 cxlrd->qos_class = CXL_QOS_CLASS_INVALID; in cxl_root_decoder_alloc()
1846 * cxl_switch_decoder_alloc - Allocate a switch level decoder
1847 * @port: owning CXL switch port of this decoder
1854 * Host Bridges / Root Ports.
1856 struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, in cxl_switch_decoder_alloc() argument
1863 if (is_cxl_root(port) || is_cxl_endpoint(port)) in cxl_switch_decoder_alloc()
1864 return ERR_PTR(-EINVAL); in cxl_switch_decoder_alloc()
1868 return ERR_PTR(-ENOMEM); in cxl_switch_decoder_alloc()
1870 rc = cxl_switch_decoder_init(port, cxlsd, nr_targets); in cxl_switch_decoder_alloc()
1876 cxld = &cxlsd->cxld; in cxl_switch_decoder_alloc()
1877 cxld->dev.type = &cxl_decoder_switch_type; in cxl_switch_decoder_alloc()
1883 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
1884 * @port: owning port of this decoder
1888 struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port) in cxl_endpoint_decoder_alloc() argument
1894 if (!is_cxl_endpoint(port)) in cxl_endpoint_decoder_alloc()
1895 return ERR_PTR(-EINVAL); in cxl_endpoint_decoder_alloc()
1899 return ERR_PTR(-ENOMEM); in cxl_endpoint_decoder_alloc()
1901 cxled->pos = -1; in cxl_endpoint_decoder_alloc()
1902 cxld = &cxled->cxld; in cxl_endpoint_decoder_alloc()
1903 rc = cxl_decoder_init(port, cxld); in cxl_endpoint_decoder_alloc()
1909 cxld->dev.type = &cxl_decoder_endpoint_type; in cxl_endpoint_decoder_alloc()
1915 * cxl_decoder_add_locked - Add a decoder with targets
1918 * traffic to. These numbers should correspond with the port number
1927 * Context: Process context. Expects the device lock of the port that owns the
1935 struct cxl_port *port; in cxl_decoder_add_locked() local
1940 return -EINVAL; in cxl_decoder_add_locked()
1945 if (cxld->interleave_ways < 1) in cxl_decoder_add_locked()
1946 return -EINVAL; in cxl_decoder_add_locked()
1948 dev = &cxld->dev; in cxl_decoder_add_locked()
1950 port = to_cxl_port(cxld->dev.parent); in cxl_decoder_add_locked()
1954 rc = decoder_populate_targets(cxlsd, port, target_map); in cxl_decoder_add_locked()
1955 if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) { in cxl_decoder_add_locked()
1956 dev_err(&port->dev, in cxl_decoder_add_locked()
1962 rc = dev_set_name(dev, "decoder%d.%d", port->id, cxld->id); in cxl_decoder_add_locked()
1971 * cxl_decoder_add - Add a decoder with targets
1974 * traffic to. These numbers should correspond with the port number
1980 * Context: Process context. Takes and releases the device lock of the port that
1985 struct cxl_port *port; in cxl_decoder_add() local
1988 return -EINVAL; in cxl_decoder_add()
1993 port = to_cxl_port(cxld->dev.parent); in cxl_decoder_add()
1995 guard(device)(&port->dev); in cxl_decoder_add()
2012 int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld) in cxl_decoder_autoremove() argument
2014 return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev); in cxl_decoder_autoremove()
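
Putting the decoder helpers together, a hedged sketch of allocating, programming, and registering a switch decoder; the interleave values and target ids are illustrative:

/* Hypothetical port-driver context */
static int example_add_switch_decoder(struct device *host,
				      struct cxl_port *port)
{
	int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0, 1 };
	struct cxl_switch_decoder *cxlsd;
	int rc;

	cxlsd = cxl_switch_decoder_alloc(port, 2);
	if (IS_ERR(cxlsd))
		return PTR_ERR(cxlsd);

	cxlsd->cxld.interleave_ways = 2;
	rc = cxl_decoder_add(&cxlsd->cxld, target_map);
	if (rc) {
		/* not yet registered: drop the device reference directly */
		put_device(&cxlsd->cxld.dev);
		return rc;
	}
	return cxl_decoder_autoremove(host, &cxlsd->cxld);
}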
2019 * __cxl_driver_register - register a driver for the cxl bus
2027 if (!cxl_drv->probe) { in __cxl_driver_register()
2028 pr_debug("%s ->probe() must be specified\n", modname); in __cxl_driver_register()
2029 return -EINVAL; in __cxl_driver_register()
2032 if (!cxl_drv->name) { in __cxl_driver_register()
2033 pr_debug("%s ->name must be specified\n", modname); in __cxl_driver_register()
2034 return -EINVAL; in __cxl_driver_register()
2037 if (!cxl_drv->id) { in __cxl_driver_register()
2038 pr_debug("%s ->id must be specified\n", modname); in __cxl_driver_register()
2039 return -EINVAL; in __cxl_driver_register()
2042 cxl_drv->drv.bus = &cxl_bus_type; in __cxl_driver_register()
2043 cxl_drv->drv.owner = owner; in __cxl_driver_register()
2044 cxl_drv->drv.mod_name = modname; in __cxl_driver_register()
2045 cxl_drv->drv.name = cxl_drv->name; in __cxl_driver_register()
2047 return driver_register(&cxl_drv->drv); in __cxl_driver_register()
2053 driver_unregister(&cxl_drv->drv); in cxl_driver_unregister()
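
A minimal sketch of a driver for this bus that satisfies the ->probe/->name/->id checks enforced above (the driver itself is hypothetical):

static int example_probe(struct device *dev)
{
	dev_dbg(dev, "example cxl driver bound\n");
	return 0;
}

static struct cxl_driver example_driver = {
	.name = "cxl_example",
	.probe = example_probe,
	.id = CXL_DEVICE_PORT,	/* matched via cxl_device_id() */
};
module_cxl_driver(example_driver);
MODULE_IMPORT_NS(CXL);
MODULE_LICENSE("GPL");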
2065 return cxl_device_id(dev) == to_cxl_drv(drv)->id; in cxl_bus_match()
2072 rc = to_cxl_drv(dev->driver)->probe(dev); in cxl_bus_probe()
2079 struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver); in cxl_bus_remove()
2081 if (cxl_drv->remove) in cxl_bus_remove()
2082 cxl_drv->remove(dev); in cxl_bus_remove()
2117 return queue_work(cxl_bus_wq, &cxlmd->detach_work); in schedule_cxl_memdev_detach()
2156 static bool parent_port_is_cxl_root(struct cxl_port *port) in parent_port_is_cxl_root() argument
2158 return is_cxl_root(to_cxl_port(port->dev.parent)); in parent_port_is_cxl_root()
2162 * cxl_endpoint_get_perf_coordinates - Retrieve performance numbers stored in dports
2164 * @port: endpoint cxl_port
2169 int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, in cxl_endpoint_get_perf_coordinates() argument
2172 struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev); in cxl_endpoint_get_perf_coordinates()
2183 struct cxl_port *iter = port; in cxl_endpoint_get_perf_coordinates()
2190 if (!is_cxl_endpoint(port)) in cxl_endpoint_get_perf_coordinates()
2191 return -EINVAL; in cxl_endpoint_get_perf_coordinates()
2197 if (cxlmd->cxlds->rcd) in cxl_endpoint_get_perf_coordinates()
2201 * Exit the loop when the parent port of the current iter port is cxl in cxl_endpoint_get_perf_coordinates()
2203 * latency of the CXL link from the current device/port to the connected in cxl_endpoint_get_perf_coordinates()
2204 * downstream port each iteration. in cxl_endpoint_get_perf_coordinates()
2207 dport = iter->parent_dport; in cxl_endpoint_get_perf_coordinates()
2208 iter = to_cxl_port(iter->dev.parent); in cxl_endpoint_get_perf_coordinates()
2212 * There's no valid access_coordinate for a root port since RPs do not in cxl_endpoint_get_perf_coordinates()
2216 if (!coordinates_valid(dport->coord)) in cxl_endpoint_get_perf_coordinates()
2217 return -EINVAL; in cxl_endpoint_get_perf_coordinates()
2218 cxl_coordinates_combine(c, c, dport->coord); in cxl_endpoint_get_perf_coordinates()
2220 add_latency(c, dport->link_latency); in cxl_endpoint_get_perf_coordinates()
2223 dport = iter->parent_dport; in cxl_endpoint_get_perf_coordinates()
2225 if (!coordinates_valid(dport->coord)) in cxl_endpoint_get_perf_coordinates()
2226 return -EINVAL; in cxl_endpoint_get_perf_coordinates()
2227 cxl_coordinates_combine(c, c, dport->coord); in cxl_endpoint_get_perf_coordinates()
2229 dev = port->uport_dev->parent; in cxl_endpoint_get_perf_coordinates()
2231 return -ENODEV; in cxl_endpoint_get_perf_coordinates()
2237 return -ENXIO; in cxl_endpoint_get_perf_coordinates()
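
A hedged sketch of consuming the combined coordinates; the memdev and its endpoint port are assumed to be fully enumerated:

/* Hypothetical caller: read back CPU-relative access coordinates */
static int example_get_perf(struct cxl_memdev *cxlmd)
{
	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
	int rc;

	rc = cxl_endpoint_get_perf_coordinates(cxlmd->endpoint, coord);
	if (rc)
		return rc;

	dev_dbg(&cxlmd->dev, "read latency %u ns, read bandwidth %u MB/s\n",
		coord[ACCESS_COORDINATE_CPU].read_latency,
		coord[ACCESS_COORDINATE_CPU].read_bandwidth);
	return 0;
}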
2247 int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port, in cxl_port_get_switch_dport_bandwidth() argument
2250 struct cxl_dport *dport = port->parent_dport; in cxl_port_get_switch_dport_bandwidth()
2252 /* Check this port is connected to a switch DSP and not an RP */ in cxl_port_get_switch_dport_bandwidth()
2253 if (parent_port_is_cxl_root(to_cxl_port(port->dev.parent))) in cxl_port_get_switch_dport_bandwidth()
2254 return -ENODEV; in cxl_port_get_switch_dport_bandwidth()
2256 if (!coordinates_valid(dport->coord)) in cxl_port_get_switch_dport_bandwidth()
2257 return -EINVAL; in cxl_port_get_switch_dport_bandwidth()
2260 c[i].read_bandwidth = dport->coord[i].read_bandwidth; in cxl_port_get_switch_dport_bandwidth()
2261 c[i].write_bandwidth = dport->coord[i].write_bandwidth; in cxl_port_get_switch_dport_bandwidth()
2267 /* for user tooling to ensure port disable work has completed */
2275 return -EINVAL; in flush_store()
2330 rc = -ENOMEM; in cxl_core_init()