Lines Matching +full:interleave +full:- +full:mode

1 // SPDX-License-Identifier: GPL-2.0-only
12 #include <linux/memory-tiers.h>
28 * 1. Interleave granularity
29 * 2. Interleave size
36 .attr = { .name = __stringify(_name), .mode = 0444 }, \
50 if (cxlr->coord[level].attrib == 0) \
51 return -ENOENT; \
53 return sysfs_emit(buf, "%u\n", cxlr->coord[level].attrib); \
94 cxlr->coord[level].read_latency == 0) \
98 cxlr->coord[level].write_latency == 0) \
102 cxlr->coord[level].read_bandwidth == 0) \
106 cxlr->coord[level].write_bandwidth == 0) \
109 return a->mode; \
141 struct cxl_region_params *p = &cxlr->params; in uuid_show()
147 if (cxlr->mode != CXL_DECODER_PMEM) in uuid_show()
150 rc = sysfs_emit(buf, "%pUb\n", &p->uuid); in uuid_show()
167 p = &cxlr->params; in is_dup()
169 if (uuid_equal(&p->uuid, uuid)) { in is_dup()
171 return -EBUSY; in is_dup()
181 struct cxl_region_params *p = &cxlr->params; in uuid_store()
186 return -EINVAL; in uuid_store()
193 return -EINVAL; in uuid_store()
199 if (uuid_equal(&p->uuid, &temp)) in uuid_store()
202 rc = -EBUSY; in uuid_store()
203 if (p->state >= CXL_CONFIG_ACTIVE) in uuid_store()
210 uuid_copy(&p->uuid, &temp); in uuid_store()
223 return xa_load(&port->regions, (unsigned long)cxlr); in cxl_rr_load()
231 &cxlr->dev, in cxl_region_invalidate_memregion()
235 dev_WARN(&cxlr->dev, in cxl_region_invalidate_memregion()
237 return -ENXIO; in cxl_region_invalidate_memregion()
247 struct cxl_region_params *p = &cxlr->params; in cxl_region_decode_reset()
257 for (i = count - 1; i >= 0; i--) { in cxl_region_decode_reset()
258 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_decode_reset()
261 struct cxl_dev_state *cxlds = cxlmd->cxlds; in cxl_region_decode_reset()
264 if (cxlds->rcd) in cxl_region_decode_reset()
267 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_decode_reset()
268 iter = to_cxl_port(iter->dev.parent); in cxl_region_decode_reset()
271 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_decode_reset()
276 cxld = cxl_rr->decoder; in cxl_region_decode_reset()
277 if (cxld->reset) in cxl_region_decode_reset()
278 cxld->reset(cxld); in cxl_region_decode_reset()
279 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); in cxl_region_decode_reset()
283 cxled->cxld.reset(&cxled->cxld); in cxl_region_decode_reset()
284 set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); in cxl_region_decode_reset()
288 clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags); in cxl_region_decode_reset()
295 if (cxld->commit) in commit_decoder()
296 return cxld->commit(cxld); in commit_decoder()
298 if (is_switch_decoder(&cxld->dev)) in commit_decoder()
299 cxlsd = to_cxl_switch_decoder(&cxld->dev); in commit_decoder()
301 if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1, in commit_decoder()
302 "->commit() is required\n")) in commit_decoder()
303 return -ENXIO; in commit_decoder()
309 struct cxl_region_params *p = &cxlr->params; in cxl_region_decode_commit()
312 for (i = 0; i < p->nr_targets; i++) { in cxl_region_decode_commit()
313 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_decode_commit()
322 iter = to_cxl_port(iter->dev.parent)) { in cxl_region_decode_commit()
324 cxld = cxl_rr->decoder; in cxl_region_decode_commit()
333 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_decode_commit()
335 cxld = cxl_rr->decoder; in cxl_region_decode_commit()
336 if (cxld->reset) in cxl_region_decode_commit()
337 cxld->reset(cxld); in cxl_region_decode_commit()
340 cxled->cxld.reset(&cxled->cxld); in cxl_region_decode_commit()
357 struct cxl_region_params *p = &cxlr->params; in commit_store()
370 if (commit && p->state >= CXL_CONFIG_COMMIT) in commit_store()
372 if (!commit && p->state < CXL_CONFIG_COMMIT) in commit_store()
376 if (commit && p->state < CXL_CONFIG_ACTIVE) { in commit_store()
377 rc = -ENXIO; in commit_store()
392 p->state = CXL_CONFIG_COMMIT; in commit_store()
394 p->state = CXL_CONFIG_RESET_PENDING; in commit_store()
396 device_release_driver(&cxlr->dev); in commit_store()
403 if (p->state == CXL_CONFIG_RESET_PENDING) { in commit_store()
404 cxl_region_decode_reset(cxlr, p->interleave_ways); in commit_store()
405 p->state = CXL_CONFIG_ACTIVE; in commit_store()
421 struct cxl_region_params *p = &cxlr->params; in commit_show()
427 rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT); in commit_show()
442 * regions regardless of mode. in cxl_region_visible()
444 if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM) in cxl_region_visible()
446 return a->mode; in cxl_region_visible()
453 struct cxl_region_params *p = &cxlr->params; in interleave_ways_show()
459 rc = sysfs_emit(buf, "%d\n", p->interleave_ways); in interleave_ways_show()
471 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in interleave_ways_store()
472 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in interleave_ways_store()
474 struct cxl_region_params *p = &cxlr->params; in interleave_ways_store()
488 * Even for x3, x6, and x12 interleaves the region interleave must be a in interleave_ways_store()
489 * power of 2 multiple of the host bridge interleave. in interleave_ways_store()
491 if (!is_power_of_2(val / cxld->interleave_ways) || in interleave_ways_store()
492 (val % cxld->interleave_ways)) { in interleave_ways_store()
493 dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val); in interleave_ways_store()
494 return -EINVAL; in interleave_ways_store()
500 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { in interleave_ways_store()
501 rc = -EBUSY; in interleave_ways_store()
505 save = p->interleave_ways; in interleave_ways_store()
506 p->interleave_ways = val; in interleave_ways_store()
507 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); in interleave_ways_store()
509 p->interleave_ways = save; in interleave_ways_store()
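
Note: the power-of-2 rule quoted above from interleave_ways_store() means the requested region ways must be the root-decoder (host-bridge interleave) ways times a power of two. A minimal standalone sketch of the same predicate, with illustrative values (the helper name and userspace form are not part of region.c, which uses is_power_of_2() against cxld->interleave_ways as shown above):

    #include <stdbool.h>

    /* same test as interleave_ways_store(): val / root_iw must be an exact
     * power-of-2 multiple */
    static bool region_ways_valid(unsigned int val, unsigned int root_iw)
    {
            unsigned int q;

            if (!root_iw || val % root_iw)
                    return false;
            q = val / root_iw;
            return q && !(q & (q - 1));
    }

    /* root decoder interleaving 3 host bridges: x3, x6, x12 pass; x9 fails */
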
523 struct cxl_region_params *p = &cxlr->params; in interleave_granularity_show()
529 rc = sysfs_emit(buf, "%d\n", p->interleave_granularity); in interleave_granularity_show()
539 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in interleave_granularity_store()
540 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in interleave_granularity_store()
542 struct cxl_region_params *p = &cxlr->params; in interleave_granularity_store()
555 * When the host-bridge is interleaved, disallow region granularity != in interleave_granularity_store()
557 * interleave result in needing multiple endpoints to support a single in interleave_granularity_store()
558 * slot in the interleave (possible to support in the future). Regions in interleave_granularity_store()
559 * with a granularity greater than the root interleave result in invalid in interleave_granularity_store()
562 if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity) in interleave_granularity_store()
563 return -EINVAL; in interleave_granularity_store()
568 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { in interleave_granularity_store()
569 rc = -EBUSY; in interleave_granularity_store()
573 p->interleave_granularity = val; in interleave_granularity_store()
586 struct cxl_region_params *p = &cxlr->params; in resource_show()
587 u64 resource = -1ULL; in resource_show()
593 if (p->res) in resource_show()
594 resource = p->res->start; in resource_show()
607 return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode)); in mode_show()
609 static DEVICE_ATTR_RO(mode);
613 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in alloc_hpa()
614 struct cxl_region_params *p = &cxlr->params; in alloc_hpa()
621 if (p->res && resource_size(p->res) == size) in alloc_hpa()
625 if (p->res) in alloc_hpa()
626 return -EBUSY; in alloc_hpa()
628 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) in alloc_hpa()
629 return -EBUSY; in alloc_hpa()
632 if (!p->interleave_ways || !p->interleave_granularity || in alloc_hpa()
633 (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid))) in alloc_hpa()
634 return -ENXIO; in alloc_hpa()
636 div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder); in alloc_hpa()
638 return -EINVAL; in alloc_hpa()
640 res = alloc_free_mem_region(cxlrd->res, size, SZ_256M, in alloc_hpa()
641 dev_name(&cxlr->dev)); in alloc_hpa()
643 dev_dbg(&cxlr->dev, in alloc_hpa()
645 PTR_ERR(res), &size, cxlrd->res->name, cxlrd->res); in alloc_hpa()
649 p->res = res; in alloc_hpa()
650 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; in alloc_hpa()
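
Note on the div64_u64_rem() check above: alloc_hpa() only accepts sizes that are a whole multiple of SZ_256M times the interleave ways, so every endpoint contributes complete 256 MiB chunks. Worked example (illustrative values):

    ways = 4: size = 2 GiB   -> 2 GiB   % (4 * 256 MiB) = 0       -> accepted, 512 MiB per endpoint
              size = 1.5 GiB -> 1.5 GiB % (4 * 256 MiB) = 512 MiB -> rejected with -EINVAL
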
657 struct cxl_region_params *p = &cxlr->params; in cxl_region_iomem_release()
659 if (device_is_registered(&cxlr->dev)) in cxl_region_iomem_release()
661 if (p->res) { in cxl_region_iomem_release()
666 if (p->res->parent) in cxl_region_iomem_release()
667 remove_resource(p->res); in cxl_region_iomem_release()
668 kfree(p->res); in cxl_region_iomem_release()
669 p->res = NULL; in cxl_region_iomem_release()
675 struct cxl_region_params *p = &cxlr->params; in free_hpa()
679 if (!p->res) in free_hpa()
682 if (p->state >= CXL_CONFIG_ACTIVE) in free_hpa()
683 return -EBUSY; in free_hpa()
686 p->state = CXL_CONFIG_IDLE; in free_hpa()
721 struct cxl_region_params *p = &cxlr->params; in size_show()
728 if (p->res) in size_show()
729 size = resource_size(p->res); in size_show()
755 struct cxl_region_params *p = &cxlr->params; in show_targetN()
763 if (pos >= p->interleave_ways) { in show_targetN()
764 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in show_targetN()
765 p->interleave_ways); in show_targetN()
766 rc = -ENXIO; in show_targetN()
770 cxled = p->targets[pos]; in show_targetN()
774 rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev)); in show_targetN()
786 * if port->commit_end is not the only free decoder, then out of in check_commit_order()
790 if (((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) in check_commit_order()
791 return -EBUSY; in check_commit_order()
797 struct cxl_port *port = to_cxl_port(dev->parent); in match_free_decoder()
806 if (cxld->id != port->commit_end + 1) in match_free_decoder()
809 if (cxld->region) { in match_free_decoder()
810 dev_dbg(dev->parent, in match_free_decoder()
812 dev_name(dev), dev_name(&cxld->region->dev)); in match_free_decoder()
816 rc = device_for_each_child_reverse_from(dev->parent, dev, NULL, in match_free_decoder()
819 dev_dbg(dev->parent, in match_free_decoder()
837 r = &cxld->hpa_range; in match_auto_decoder()
839 if (p->res && p->res->start == r->start && p->res->end == r->end) in match_auto_decoder()
853 return &cxled->cxld; in cxl_region_find_decoder()
855 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) in cxl_region_find_decoder()
856 dev = device_find_child(&port->dev, &cxlr->params, in cxl_region_find_decoder()
859 dev = device_find_child(&port->dev, NULL, match_free_decoder); in cxl_region_find_decoder()
876 struct cxl_decoder *cxld_iter = rr->decoder; in auto_order_ok()
879 * Allow the out of order assembly of auto-discovered regions. in auto_order_ok()
884 dev_dbg(&cxld->dev, "check for HPA violation %s:%d < %s:%d\n", in auto_order_ok()
885 dev_name(&cxld->dev), cxld->id, in auto_order_ok()
886 dev_name(&cxld_iter->dev), cxld_iter->id); in auto_order_ok()
888 if (cxld_iter->id > cxld->id) in auto_order_ok()
898 struct cxl_region_params *p = &cxlr->params; in alloc_region_ref()
903 xa_for_each(&port->regions, index, iter) { in alloc_region_ref()
904 struct cxl_region_params *ip = &iter->region->params; in alloc_region_ref()
906 if (!ip->res || ip->res->start < p->res->start) in alloc_region_ref()
909 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in alloc_region_ref()
913 if (auto_order_ok(port, iter->region, cxld)) in alloc_region_ref()
916 dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n", in alloc_region_ref()
917 dev_name(&port->dev), in alloc_region_ref()
918 dev_name(&iter->region->dev), ip->res, p->res); in alloc_region_ref()
920 return ERR_PTR(-EBUSY); in alloc_region_ref()
925 return ERR_PTR(-ENOMEM); in alloc_region_ref()
926 cxl_rr->port = port; in alloc_region_ref()
927 cxl_rr->region = cxlr; in alloc_region_ref()
928 cxl_rr->nr_targets = 1; in alloc_region_ref()
929 xa_init(&cxl_rr->endpoints); in alloc_region_ref()
931 rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL); in alloc_region_ref()
933 dev_dbg(&cxlr->dev, in alloc_region_ref()
935 dev_name(&port->dev), rc); in alloc_region_ref()
945 struct cxl_region *cxlr = cxl_rr->region; in cxl_rr_free_decoder()
946 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_rr_free_decoder()
951 dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n"); in cxl_rr_free_decoder()
952 if (cxld->region == cxlr) { in cxl_rr_free_decoder()
953 cxld->region = NULL; in cxl_rr_free_decoder()
954 put_device(&cxlr->dev); in cxl_rr_free_decoder()
960 struct cxl_port *port = cxl_rr->port; in free_region_ref()
961 struct cxl_region *cxlr = cxl_rr->region; in free_region_ref()
964 xa_erase(&port->regions, (unsigned long)cxlr); in free_region_ref()
965 xa_destroy(&cxl_rr->endpoints); in free_region_ref()
973 struct cxl_port *port = cxl_rr->port; in cxl_rr_ep_add()
974 struct cxl_region *cxlr = cxl_rr->region; in cxl_rr_ep_add()
975 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_rr_ep_add()
979 rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep, in cxl_rr_ep_add()
984 cxl_rr->nr_eps++; in cxl_rr_ep_add()
986 if (!cxld->region) { in cxl_rr_ep_add()
987 cxld->region = cxlr; in cxl_rr_ep_add()
988 get_device(&cxlr->dev); in cxl_rr_ep_add()
1002 dev_dbg(&cxlr->dev, "%s: no decoder available\n", in cxl_rr_alloc_decoder()
1003 dev_name(&port->dev)); in cxl_rr_alloc_decoder()
1004 return -EBUSY; in cxl_rr_alloc_decoder()
1007 if (cxld->region) { in cxl_rr_alloc_decoder()
1008 dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n", in cxl_rr_alloc_decoder()
1009 dev_name(&port->dev), dev_name(&cxld->dev), in cxl_rr_alloc_decoder()
1010 dev_name(&cxld->region->dev)); in cxl_rr_alloc_decoder()
1011 return -EBUSY; in cxl_rr_alloc_decoder()
1016 * assumption with an assertion. Switch-decoders change mapping-type in cxl_rr_alloc_decoder()
1019 dev_WARN_ONCE(&cxlr->dev, in cxl_rr_alloc_decoder()
1021 cxld->target_type != cxlr->type, in cxl_rr_alloc_decoder()
1022 "%s:%s mismatch decoder type %d -> %d\n", in cxl_rr_alloc_decoder()
1023 dev_name(&cxled_to_memdev(cxled)->dev), in cxl_rr_alloc_decoder()
1024 dev_name(&cxld->dev), cxld->target_type, cxlr->type); in cxl_rr_alloc_decoder()
1025 cxld->target_type = cxlr->type; in cxl_rr_alloc_decoder()
1026 cxl_rr->decoder = cxld; in cxl_rr_alloc_decoder()
1031 * cxl_port_attach_region() - track a region's interest in a port by endpoint
1035 * @pos: interleave position of @cxled in @cxlr
1043 * - validate that there are no other regions with a higher HPA already
1045 * - establish a region reference if one is not already present
1047 * - additionally allocate a decoder instance that will host @cxlr on
1050 * - pin the region reference by the endpoint
1051 * - account for how many entries in @port's target list are needed to
1064 int rc = -EBUSY; in cxl_port_attach_region()
1079 xa_for_each(&cxl_rr->endpoints, index, ep_iter) { in cxl_port_attach_region()
1082 if (ep_iter->next == ep->next) { in cxl_port_attach_region()
1092 if (!found || !ep->next) { in cxl_port_attach_region()
1093 cxl_rr->nr_targets++; in cxl_port_attach_region()
1099 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1101 dev_name(&port->dev)); in cxl_port_attach_region()
1110 cxld = cxl_rr->decoder; in cxl_port_attach_region()
1116 if (is_switch_decoder(&cxld->dev)) { in cxl_port_attach_region()
1119 cxlsd = to_cxl_switch_decoder(&cxld->dev); in cxl_port_attach_region()
1120 if (cxl_rr->nr_targets > cxlsd->nr_targets) { in cxl_port_attach_region()
1121 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1123 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_attach_region()
1124 dev_name(&cxld->dev), dev_name(&cxlmd->dev), in cxl_port_attach_region()
1125 dev_name(&cxled->cxld.dev), pos, in cxl_port_attach_region()
1126 cxlsd->nr_targets); in cxl_port_attach_region()
1127 rc = -ENXIO; in cxl_port_attach_region()
1134 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1136 dev_name(&port->dev), dev_name(&cxlmd->dev), in cxl_port_attach_region()
1137 dev_name(&cxld->dev)); in cxl_port_attach_region()
1141 dev_dbg(&cxlr->dev, in cxl_port_attach_region()
1143 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_attach_region()
1144 dev_name(&cxld->dev), dev_name(&cxlmd->dev), in cxl_port_attach_region()
1145 dev_name(&cxled->cxld.dev), pos, in cxl_port_attach_region()
1146 ep ? ep->next ? dev_name(ep->next->uport_dev) : in cxl_port_attach_region()
1147 dev_name(&cxlmd->dev) : in cxl_port_attach_region()
1149 cxl_rr->nr_eps, cxl_rr->nr_targets); in cxl_port_attach_region()
1154 cxl_rr->nr_targets--; in cxl_port_attach_region()
1155 if (cxl_rr->nr_eps == 0) in cxl_port_attach_region()
1177 if (cxl_rr->decoder == &cxled->cxld) in cxl_port_detach_region()
1178 cxl_rr->nr_eps--; in cxl_port_detach_region()
1180 ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled); in cxl_port_detach_region()
1186 cxl_rr->nr_eps--; in cxl_port_detach_region()
1187 xa_for_each(&cxl_rr->endpoints, index, ep_iter) { in cxl_port_detach_region()
1188 if (ep_iter->next == ep->next) { in cxl_port_detach_region()
1194 cxl_rr->nr_targets--; in cxl_port_detach_region()
1197 if (cxl_rr->nr_eps == 0) in cxl_port_detach_region()
1206 struct cxl_region *cxlr = cxl_rr->region; in check_last_peer()
1207 struct cxl_region_params *p = &cxlr->params; in check_last_peer()
1209 struct cxl_port *port = cxl_rr->port; in check_last_peer()
1212 int pos = cxled->pos; in check_last_peer()
1216 * then that endpoint, at index 'position - distance', must also be in check_last_peer()
1220 dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n", in check_last_peer()
1221 dev_name(port->uport_dev), dev_name(&port->dev), in check_last_peer()
1222 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in check_last_peer()
1223 return -ENXIO; in check_last_peer()
1225 cxled_peer = p->targets[pos - distance]; in check_last_peer()
1228 if (ep->dport != ep_peer->dport) { in check_last_peer()
1229 dev_dbg(&cxlr->dev, in check_last_peer()
1231 dev_name(port->uport_dev), dev_name(&port->dev), in check_last_peer()
1232 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos, in check_last_peer()
1233 dev_name(&cxlmd_peer->dev), in check_last_peer()
1234 dev_name(&cxled_peer->cxld.dev)); in check_last_peer()
1235 return -ENXIO; in check_last_peer()
1243 struct cxl_port *port = to_cxl_port(cxld->dev.parent); in check_interleave_cap()
1244 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); in check_interleave_cap()
1250 if (!test_bit(iw, &cxlhdm->iw_cap_mask)) in check_interleave_cap()
1251 return -ENXIO; in check_interleave_cap()
1259 * interleave bits are none. in check_interleave_cap()
1266 * interleave bits are none. in check_interleave_cap()
1274 high_pos = eiw + eig - 1; in check_interleave_cap()
1279 if (interleave_mask & ~cxlhdm->interleave_mask) in check_interleave_cap()
1280 return -ENXIO; in check_interleave_cap()
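
Note on check_interleave_cap(): the `high_pos = eiw + eig - 1` line quoted above is the 3/6/12-way branch; for power-of-2 ways the selector span ends at eiw + eig + 7 (an assumption here, consistent with the CXL decode rule that HPA[eig + 8 + eiw - 1 : eig + 8] selects the interleave target). A hedged worked example:

    /* illustrative: 4-way (eiw = 2) at 256 B granularity (eig = 0) */
    interleave_mask = GENMASK(eiw + eig + 7, eig + 8);   /* GENMASK(9, 8) = 0x300 */

A decoder whose advertised interleave address mask lacks HPA bit 8 or 9 cannot host that configuration, and the function returns -ENXIO as shown above.
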
1289 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in cxl_port_setup_targets()
1290 int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos; in cxl_port_setup_targets()
1291 struct cxl_port *parent_port = to_cxl_port(port->dev.parent); in cxl_port_setup_targets()
1295 struct cxl_region_params *p = &cxlr->params; in cxl_port_setup_targets()
1296 struct cxl_decoder *cxld = cxl_rr->decoder; in cxl_port_setup_targets()
1305 if (!is_power_of_2(cxl_rr->nr_targets)) { in cxl_port_setup_targets()
1306 dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n", in cxl_port_setup_targets()
1307 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1308 cxl_rr->nr_targets); in cxl_port_setup_targets()
1309 return -EINVAL; in cxl_port_setup_targets()
1312 cxlsd = to_cxl_switch_decoder(&cxld->dev); in cxl_port_setup_targets()
1313 if (cxl_rr->nr_targets_set) { in cxl_port_setup_targets()
1320 if (cxl_rr->nr_targets == 1) in cxl_port_setup_targets()
1323 distance = p->nr_targets / cxl_rr->nr_targets; in cxl_port_setup_targets()
1324 for (i = 0; i < cxl_rr->nr_targets_set; i++) in cxl_port_setup_targets()
1325 if (ep->dport == cxlsd->target[i]) { in cxl_port_setup_targets()
1340 * does not allow interleaved host-bridges with in cxl_port_setup_targets()
1343 parent_ig = p->interleave_granularity; in cxl_port_setup_targets()
1344 parent_iw = cxlrd->cxlsd.cxld.interleave_ways; in cxl_port_setup_targets()
1346 * For purposes of address bit routing, use power-of-2 math for in cxl_port_setup_targets()
1356 parent_cxld = parent_rr->decoder; in cxl_port_setup_targets()
1357 parent_ig = parent_cxld->interleave_granularity; in cxl_port_setup_targets()
1358 parent_iw = parent_cxld->interleave_ways; in cxl_port_setup_targets()
1363 dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n", in cxl_port_setup_targets()
1364 dev_name(parent_port->uport_dev), in cxl_port_setup_targets()
1365 dev_name(&parent_port->dev), parent_ig); in cxl_port_setup_targets()
1371 dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n", in cxl_port_setup_targets()
1372 dev_name(parent_port->uport_dev), in cxl_port_setup_targets()
1373 dev_name(&parent_port->dev), parent_iw); in cxl_port_setup_targets()
1377 iw = cxl_rr->nr_targets; in cxl_port_setup_targets()
1380 dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n", in cxl_port_setup_targets()
1381 dev_name(port->uport_dev), dev_name(&port->dev), iw); in cxl_port_setup_targets()
1386 * Interleave granularity is a multiple of @parent_port granularity. in cxl_port_setup_targets()
1387 * Multiplier is the parent port interleave ways. in cxl_port_setup_targets()
1391 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1393 dev_name(&parent_port->dev), parent_ig, parent_iw); in cxl_port_setup_targets()
1399 dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n", in cxl_port_setup_targets()
1400 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1405 if (iw > 8 || iw > cxlsd->nr_targets) { in cxl_port_setup_targets()
1406 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1408 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1409 dev_name(&cxld->dev), iw, cxlsd->nr_targets); in cxl_port_setup_targets()
1410 return -ENXIO; in cxl_port_setup_targets()
1413 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in cxl_port_setup_targets()
1414 if (cxld->interleave_ways != iw || in cxl_port_setup_targets()
1415 cxld->interleave_granularity != ig || in cxl_port_setup_targets()
1416 cxld->hpa_range.start != p->res->start || in cxl_port_setup_targets()
1417 cxld->hpa_range.end != p->res->end || in cxl_port_setup_targets()
1418 ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) { in cxl_port_setup_targets()
1419 dev_err(&cxlr->dev, in cxl_port_setup_targets()
1421 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1422 __func__, iw, ig, p->res); in cxl_port_setup_targets()
1423 dev_err(&cxlr->dev, in cxl_port_setup_targets()
1425 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1426 __func__, cxld->interleave_ways, in cxl_port_setup_targets()
1427 cxld->interleave_granularity, in cxl_port_setup_targets()
1428 (cxld->flags & CXL_DECODER_F_ENABLE) ? in cxl_port_setup_targets()
1431 cxld->hpa_range.start, cxld->hpa_range.end); in cxl_port_setup_targets()
1432 return -ENXIO; in cxl_port_setup_targets()
1437 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1439 dev_name(port->uport_dev), in cxl_port_setup_targets()
1440 dev_name(&port->dev), iw, ig); in cxl_port_setup_targets()
1444 cxld->interleave_ways = iw; in cxl_port_setup_targets()
1445 cxld->interleave_granularity = ig; in cxl_port_setup_targets()
1446 cxld->hpa_range = (struct range) { in cxl_port_setup_targets()
1447 .start = p->res->start, in cxl_port_setup_targets()
1448 .end = p->res->end, in cxl_port_setup_targets()
1451 dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev), in cxl_port_setup_targets()
1452 dev_name(&port->dev), iw, ig); in cxl_port_setup_targets()
1454 if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) { in cxl_port_setup_targets()
1455 dev_dbg(&cxlr->dev, in cxl_port_setup_targets()
1457 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1458 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in cxl_port_setup_targets()
1459 return -ENXIO; in cxl_port_setup_targets()
1461 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in cxl_port_setup_targets()
1462 if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) { in cxl_port_setup_targets()
1463 dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n", in cxl_port_setup_targets()
1464 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1465 dev_name(&cxlsd->cxld.dev), in cxl_port_setup_targets()
1466 dev_name(ep->dport->dport_dev), in cxl_port_setup_targets()
1467 cxl_rr->nr_targets_set); in cxl_port_setup_targets()
1468 return -ENXIO; in cxl_port_setup_targets()
1471 cxlsd->target[cxl_rr->nr_targets_set] = ep->dport; in cxl_port_setup_targets()
1474 cxl_rr->nr_targets_set += inc; in cxl_port_setup_targets()
1475 dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n", in cxl_port_setup_targets()
1476 dev_name(port->uport_dev), dev_name(&port->dev), in cxl_port_setup_targets()
1477 cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev), in cxl_port_setup_targets()
1478 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); in cxl_port_setup_targets()
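
To make the "granularity is a multiple of @parent_port granularity, multiplier is the parent port interleave ways" comment above concrete, a worked example with illustrative numbers: a 4-way, 256 B region spread across two host bridges with two endpoints each decodes as

    endpoint decoders:     iw = 4, ig = 256   (the region's own settings)
    root level (as used by this calculation): iw = 2, ig = 256   (parent_ig is the region granularity, per the assignment above)
    host-bridge decoders:  iw = 2, ig = parent_ig * parent_iw = 256 * 2 = 512

so the two levels together reproduce the endpoint view of a 4-way, 256 B interleave.
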
1495 cxl_rr->nr_targets_set = 0; in cxl_port_reset_targets()
1497 cxld = cxl_rr->decoder; in cxl_port_reset_targets()
1498 cxld->hpa_range = (struct range) { in cxl_port_reset_targets()
1500 .end = -1, in cxl_port_reset_targets()
1506 struct cxl_region_params *p = &cxlr->params; in cxl_region_teardown_targets()
1515 * In the auto-discovery case skip automatic teardown since the in cxl_region_teardown_targets()
1518 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) in cxl_region_teardown_targets()
1521 for (i = 0; i < p->nr_targets; i++) { in cxl_region_teardown_targets()
1522 cxled = p->targets[i]; in cxl_region_teardown_targets()
1524 cxlds = cxlmd->cxlds; in cxl_region_teardown_targets()
1526 if (cxlds->rcd) in cxl_region_teardown_targets()
1530 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_teardown_targets()
1531 iter = to_cxl_port(iter->dev.parent); in cxl_region_teardown_targets()
1534 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) in cxl_region_teardown_targets()
1541 struct cxl_region_params *p = &cxlr->params; in cxl_region_setup_targets()
1549 for (i = 0; i < p->nr_targets; i++) { in cxl_region_setup_targets()
1550 cxled = p->targets[i]; in cxl_region_setup_targets()
1552 cxlds = cxlmd->cxlds; in cxl_region_setup_targets()
1555 if (!cxlds->rcd) { in cxl_region_setup_targets()
1563 while (!is_cxl_root(to_cxl_port(iter->dev.parent))) in cxl_region_setup_targets()
1564 iter = to_cxl_port(iter->dev.parent); in cxl_region_setup_targets()
1571 iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { in cxl_region_setup_targets()
1581 dev_err(&cxlr->dev, "mismatched CXL topologies detected\n"); in cxl_region_setup_targets()
1583 return -ENXIO; in cxl_region_setup_targets()
1594 struct cxl_region_params *p = &cxlr->params; in cxl_region_validate_position()
1597 if (pos < 0 || pos >= p->interleave_ways) { in cxl_region_validate_position()
1598 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in cxl_region_validate_position()
1599 p->interleave_ways); in cxl_region_validate_position()
1600 return -ENXIO; in cxl_region_validate_position()
1603 if (p->targets[pos] == cxled) in cxl_region_validate_position()
1606 if (p->targets[pos]) { in cxl_region_validate_position()
1607 struct cxl_endpoint_decoder *cxled_target = p->targets[pos]; in cxl_region_validate_position()
1610 dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n", in cxl_region_validate_position()
1611 pos, dev_name(&cxlmd_target->dev), in cxl_region_validate_position()
1612 dev_name(&cxled_target->cxld.dev)); in cxl_region_validate_position()
1613 return -EBUSY; in cxl_region_validate_position()
1616 for (i = 0; i < p->interleave_ways; i++) { in cxl_region_validate_position()
1620 cxled_target = p->targets[i]; in cxl_region_validate_position()
1626 dev_dbg(&cxlr->dev, in cxl_region_validate_position()
1628 dev_name(&cxlmd->dev), pos, in cxl_region_validate_position()
1629 dev_name(&cxled_target->cxld.dev)); in cxl_region_validate_position()
1630 return -EBUSY; in cxl_region_validate_position()
1643 struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; in cxl_region_attach_position()
1644 struct cxl_decoder *cxld = &cxlsd->cxld; in cxl_region_attach_position()
1645 int iw = cxld->interleave_ways; in cxl_region_attach_position()
1649 if (dport != cxlrd->cxlsd.target[pos % iw]) { in cxl_region_attach_position()
1650 dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n", in cxl_region_attach_position()
1651 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach_position()
1652 dev_name(&cxlrd->cxlsd.cxld.dev)); in cxl_region_attach_position()
1653 return -ENXIO; in cxl_region_attach_position()
1657 iter = to_cxl_port(iter->dev.parent)) { in cxl_region_attach_position()
1667 iter = to_cxl_port(iter->dev.parent)) in cxl_region_attach_position()
1675 struct cxl_region_params *p = &cxlr->params; in cxl_region_attach_auto()
1677 if (cxled->state != CXL_DECODER_STATE_AUTO) { in cxl_region_attach_auto()
1678 dev_err(&cxlr->dev, in cxl_region_attach_auto()
1680 dev_name(&cxled->cxld.dev)); in cxl_region_attach_auto()
1681 return -EINVAL; in cxl_region_attach_auto()
1685 dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n", in cxl_region_attach_auto()
1686 dev_name(&cxled->cxld.dev), pos); in cxl_region_attach_auto()
1687 return -EINVAL; in cxl_region_attach_auto()
1690 if (p->nr_targets >= p->interleave_ways) { in cxl_region_attach_auto()
1691 dev_err(&cxlr->dev, "%s: no more target slots available\n", in cxl_region_attach_auto()
1692 dev_name(&cxled->cxld.dev)); in cxl_region_attach_auto()
1693 return -ENXIO; in cxl_region_attach_auto()
1702 pos = p->nr_targets; in cxl_region_attach_auto()
1703 p->targets[pos] = cxled; in cxl_region_attach_auto()
1704 cxled->pos = pos; in cxl_region_attach_auto()
1705 p->nr_targets++; in cxl_region_attach_auto()
1715 return cxled_a->pos - cxled_b->pos; in cmp_interleave_pos()
1720 if (!port->parent_dport) in next_port()
1722 return port->parent_dport->port; in next_port()
1734 r1 = &cxlsd->cxld.hpa_range; in match_switch_decoder_by_range()
1738 return (r1->start == r2->start && r1->end == r2->end); in match_switch_decoder_by_range()
1747 int rc = -ENXIO; in find_pos_and_ways()
1753 dev = device_find_child(&parent->dev, range, in find_pos_and_ways()
1756 dev_err(port->uport_dev, in find_pos_and_ways()
1757 "failed to find decoder mapping %#llx-%#llx\n", in find_pos_and_ways()
1758 range->start, range->end); in find_pos_and_ways()
1762 *ways = cxlsd->cxld.interleave_ways; in find_pos_and_ways()
1765 if (cxlsd->target[i] == port->parent_dport) { in find_pos_and_ways()
1777 * cxl_calc_interleave_pos() - calculate an endpoint position in a region
1789 * -ENXIO on failure
1795 struct range *range = &cxled->cxld.hpa_range; in cxl_calc_interleave_pos()
1800 * Example: the expected interleave order of the 4-way region shown in cxl_calc_interleave_pos()
1810 * uses the mem position in the host-bridge and the ways of the host- in cxl_calc_interleave_pos()
1812 * iteration uses the host-bridge position in the root_port and the ways in cxl_calc_interleave_pos()
1840 dev_dbg(&cxlmd->dev, in cxl_calc_interleave_pos()
1841 "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n", in cxl_calc_interleave_pos()
1842 dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent), in cxl_calc_interleave_pos()
1843 dev_name(&port->dev), range->start, range->end, pos); in cxl_calc_interleave_pos()
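
A short reading of how the two iterations described in the comment above compose (a sketch of the algorithm, not the function body): each hop from the endpoint toward the root folds the position found among the parent's decoder targets into a running total,

    pos = pos * parent_ways + parent_pos;

For mem2 in the 4-way example: hop 1 finds it at position 0 of 2 behind host_bridge_1 (pos = 0 * 2 + 0 = 0); hop 2 finds host_bridge_1 at position 1 of 2 in the root decoder (pos = 0 * 2 + 1 = 1), matching the expected order mem0, mem2, mem1, mem3.
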
1850 struct cxl_region_params *p = &cxlr->params; in cxl_region_sort_targets()
1853 for (i = 0; i < p->nr_targets; i++) { in cxl_region_sort_targets()
1854 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_sort_targets()
1856 cxled->pos = cxl_calc_interleave_pos(cxled); in cxl_region_sort_targets()
1859 * cxled->pos so that follow-on code paths can reliably in cxl_region_sort_targets()
1860 * do p->targets[cxled->pos] to self-reference their entry. in cxl_region_sort_targets()
1862 if (cxled->pos < 0) in cxl_region_sort_targets()
1863 rc = -ENXIO; in cxl_region_sort_targets()
1865 /* Keep the cxlr target list in interleave position order */ in cxl_region_sort_targets()
1866 sort(p->targets, p->nr_targets, sizeof(p->targets[0]), in cxl_region_sort_targets()
1869 dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful"); in cxl_region_sort_targets()
1876 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in cxl_region_attach()
1878 struct cxl_region_params *p = &cxlr->params; in cxl_region_attach()
1881 int rc = -ENXIO; in cxl_region_attach()
1883 rc = check_interleave_cap(&cxled->cxld, p->interleave_ways, in cxl_region_attach()
1884 p->interleave_granularity); in cxl_region_attach()
1886 dev_dbg(&cxlr->dev, "%s iw: %d ig: %d is not supported\n", in cxl_region_attach()
1887 dev_name(&cxled->cxld.dev), p->interleave_ways, in cxl_region_attach()
1888 p->interleave_granularity); in cxl_region_attach()
1892 if (cxled->mode != cxlr->mode) { in cxl_region_attach()
1893 dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n", in cxl_region_attach()
1894 dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode); in cxl_region_attach()
1895 return -EINVAL; in cxl_region_attach()
1898 if (cxled->mode == CXL_DECODER_DEAD) { in cxl_region_attach()
1899 dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev)); in cxl_region_attach()
1900 return -ENODEV; in cxl_region_attach()
1903 /* all full of members, or interleave config not established? */ in cxl_region_attach()
1904 if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) { in cxl_region_attach()
1905 dev_dbg(&cxlr->dev, "region already active\n"); in cxl_region_attach()
1906 return -EBUSY; in cxl_region_attach()
1907 } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) { in cxl_region_attach()
1908 dev_dbg(&cxlr->dev, "interleave config missing\n"); in cxl_region_attach()
1909 return -ENXIO; in cxl_region_attach()
1912 if (p->nr_targets >= p->interleave_ways) { in cxl_region_attach()
1913 dev_dbg(&cxlr->dev, "region already has %d endpoints\n", in cxl_region_attach()
1914 p->nr_targets); in cxl_region_attach()
1915 return -EINVAL; in cxl_region_attach()
1920 dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge); in cxl_region_attach()
1922 dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n", in cxl_region_attach()
1923 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1924 dev_name(cxlr->dev.parent)); in cxl_region_attach()
1925 return -ENXIO; in cxl_region_attach()
1928 if (cxled->cxld.target_type != cxlr->type) { in cxl_region_attach()
1929 dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n", in cxl_region_attach()
1930 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1931 cxled->cxld.target_type, cxlr->type); in cxl_region_attach()
1932 return -ENXIO; in cxl_region_attach()
1935 if (!cxled->dpa_res) { in cxl_region_attach()
1936 dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n", in cxl_region_attach()
1937 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev)); in cxl_region_attach()
1938 return -ENXIO; in cxl_region_attach()
1941 if (resource_size(cxled->dpa_res) * p->interleave_ways != in cxl_region_attach()
1942 resource_size(p->res)) { in cxl_region_attach()
1943 dev_dbg(&cxlr->dev, in cxl_region_attach()
1944 "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n", in cxl_region_attach()
1945 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_attach()
1946 (u64)resource_size(cxled->dpa_res), p->interleave_ways, in cxl_region_attach()
1947 (u64)resource_size(p->res)); in cxl_region_attach()
1948 return -EINVAL; in cxl_region_attach()
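
Worked example of the size check above (illustrative numbers): for a 4-way, 1 GiB region each endpoint decoder must carry a 256 MiB DPA allocation, since resource_size(cxled->dpa_res) * interleave_ways must equal resource_size(p->res); a 512 MiB allocation would trip the decoder-size * ways != region-size message shown.
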
1953 if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) { in cxl_region_attach()
1961 if (p->nr_targets < p->interleave_ways) in cxl_region_attach()
1973 for (i = 0; i < p->nr_targets; i++) { in cxl_region_attach()
1974 cxled = p->targets[i]; in cxl_region_attach()
1977 ep_port->host_bridge); in cxl_region_attach()
1992 p->state = CXL_CONFIG_COMMIT; in cxl_region_attach()
2006 p->targets[pos] = cxled; in cxl_region_attach()
2007 cxled->pos = pos; in cxl_region_attach()
2008 p->nr_targets++; in cxl_region_attach()
2010 if (p->nr_targets == p->interleave_ways) { in cxl_region_attach()
2014 p->state = CXL_CONFIG_ACTIVE; in cxl_region_attach()
2018 cxled->cxld.interleave_ways = p->interleave_ways; in cxl_region_attach()
2019 cxled->cxld.interleave_granularity = p->interleave_granularity; in cxl_region_attach()
2020 cxled->cxld.hpa_range = (struct range) { in cxl_region_attach()
2021 .start = p->res->start, in cxl_region_attach()
2022 .end = p->res->end, in cxl_region_attach()
2025 if (p->nr_targets != p->interleave_ways) in cxl_region_attach()
2029 * Test the auto-discovery position calculator function in cxl_region_attach()
2030 * against this successfully created user-defined region. in cxl_region_attach()
2031 * A fail message here means that this interleave config in cxl_region_attach()
2034 for (int i = 0; i < p->nr_targets; i++) { in cxl_region_attach()
2035 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_region_attach()
2039 dev_dbg(&cxled->cxld.dev, in cxl_region_attach()
2040 "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n", in cxl_region_attach()
2041 (test_pos == cxled->pos) ? "success" : "fail", in cxl_region_attach()
2042 test_pos, cxled->pos); in cxl_region_attach()
2051 struct cxl_region *cxlr = cxled->cxld.region; in cxl_region_detach()
2060 p = &cxlr->params; in cxl_region_detach()
2061 get_device(&cxlr->dev); in cxl_region_detach()
2063 if (p->state > CXL_CONFIG_ACTIVE) { in cxl_region_detach()
2064 cxl_region_decode_reset(cxlr, p->interleave_ways); in cxl_region_detach()
2065 p->state = CXL_CONFIG_ACTIVE; in cxl_region_detach()
2069 iter = to_cxl_port(iter->dev.parent)) in cxl_region_detach()
2072 if (cxled->pos < 0 || cxled->pos >= p->interleave_ways || in cxl_region_detach()
2073 p->targets[cxled->pos] != cxled) { in cxl_region_detach()
2076 dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n", in cxl_region_detach()
2077 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in cxl_region_detach()
2078 cxled->pos); in cxl_region_detach()
2082 if (p->state == CXL_CONFIG_ACTIVE) { in cxl_region_detach()
2083 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; in cxl_region_detach()
2086 p->targets[cxled->pos] = NULL; in cxl_region_detach()
2087 p->nr_targets--; in cxl_region_detach()
2088 cxled->cxld.hpa_range = (struct range) { in cxl_region_detach()
2090 .end = -1, in cxl_region_detach()
2095 device_release_driver(&cxlr->dev); in cxl_region_detach()
2098 put_device(&cxlr->dev); in cxl_region_detach()
2105 cxled->mode = CXL_DECODER_DEAD; in cxl_decoder_kill_region()
2132 struct cxl_region_params *p = &cxlr->params; in detach_target()
2139 if (pos >= p->interleave_ways) { in detach_target()
2140 dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, in detach_target()
2141 p->interleave_ways); in detach_target()
2142 rc = -ENXIO; in detach_target()
2146 if (!p->targets[pos]) { in detach_target()
2151 rc = cxl_region_detach(p->targets[pos]); in detach_target()
2169 return -ENODEV; in store_targetN()
2172 rc = -EINVAL; in store_targetN()
2243 struct cxl_region_params *p = &cxlr->params; in cxl_region_target_visible()
2245 if (n < p->interleave_ways) in cxl_region_target_visible()
2246 return a->mode; in cxl_region_target_visible()
2271 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); in cxl_region_release()
2273 int id = atomic_read(&cxlrd->region_id); in cxl_region_release()
2280 if (cxlr->id < id) in cxl_region_release()
2281 if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) { in cxl_region_release()
2286 memregion_free(cxlr->id); in cxl_region_release()
2288 put_device(dev->parent); in cxl_region_release()
2300 return dev->type == &cxl_region_type; in is_cxl_region()
2306 if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type, in to_cxl_region()
2316 struct cxl_region_params *p = &cxlr->params; in unregister_region()
2319 device_del(&cxlr->dev); in unregister_region()
2323 * read-only, so no need to hold the region rwsem to access the in unregister_region()
2326 for (i = 0; i < p->interleave_ways; i++) in unregister_region()
2330 put_device(&cxlr->dev); in unregister_region()
2343 return ERR_PTR(-ENOMEM); in cxl_region_alloc()
2346 dev = &cxlr->dev; in cxl_region_alloc()
2348 lockdep_set_class(&dev->mutex, &cxl_region_key); in cxl_region_alloc()
2349 dev->parent = &cxlrd->cxlsd.cxld.dev; in cxl_region_alloc()
2354 get_device(dev->parent); in cxl_region_alloc()
2356 dev->bus = &cxl_bus_type; in cxl_region_alloc()
2357 dev->type = &cxl_region_type; in cxl_region_alloc()
2358 cxlr->id = id; in cxl_region_alloc()
2369 if (cxlr->coord[i].read_bandwidth) { in cxl_region_update_coordinates()
2372 node_set_perf_attrs(nid, &cxlr->coord[i], i); in cxl_region_update_coordinates()
2384 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access0_group()); in cxl_region_update_coordinates()
2386 dev_dbg(&cxlr->dev, "Failed to update access0 group\n"); in cxl_region_update_coordinates()
2388 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access1_group()); in cxl_region_update_coordinates()
2390 dev_dbg(&cxlr->dev, "Failed to update access1 group\n"); in cxl_region_update_coordinates()
2401 int nid = mnb->status_change_nid; in cxl_region_perf_attrs_callback()
2411 region_nid = phys_to_target_node(cxlr->params.res->start); in cxl_region_perf_attrs_callback()
2434 region_nid = phys_to_target_node(cxlr->params.res->start); in cxl_region_calculate_adistance()
2438 perf = &cxlr->coord[ACCESS_COORDINATE_CPU]; in cxl_region_calculate_adistance()
2447 * devm_cxl_add_region - Adds a region to a decoder
2450 * @mode: mode for the endpoint decoders of this region
2451 * @type: select whether this is an expander or accelerator (type-2 or type-3)
2461 enum cxl_decoder_mode mode, in devm_cxl_add_region() argument
2464 struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent); in devm_cxl_add_region()
2472 cxlr->mode = mode; in devm_cxl_add_region()
2473 cxlr->type = type; in devm_cxl_add_region()
2475 dev = &cxlr->dev; in devm_cxl_add_region()
2484 rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr); in devm_cxl_add_region()
2488 dev_dbg(port->uport_dev, "%s: created %s\n", in devm_cxl_add_region()
2489 dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev)); in devm_cxl_add_region()
2499 return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id)); in __create_region_show()
2515 enum cxl_decoder_mode mode, int id) in __create_region() argument
2519 switch (mode) { in __create_region()
2524 dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode); in __create_region()
2525 return ERR_PTR(-EINVAL); in __create_region()
2532 if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) { in __create_region()
2534 return ERR_PTR(-EBUSY); in __create_region()
2537 return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM); in __create_region()
2550 return -EINVAL; in create_pmem_region_store()
2570 return -EINVAL; in create_ram_region_store()
2590 if (cxld->region) in region_show()
2591 rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev)); in region_show()
2603 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in cxl_find_region_by_name()
2606 region_dev = device_find_child_by_name(&cxld->dev, name); in cxl_find_region_by_name()
2608 return ERR_PTR(-ENODEV); in cxl_find_region_by_name()
2618 struct cxl_port *port = to_cxl_port(dev->parent); in delete_region_store()
2625 devm_release_action(port->uport_dev, unregister_region, cxlr); in delete_region_store()
2626 put_device(&cxlr->dev); in delete_region_store()
2637 for (i = 0; i < cxlr_pmem->nr_mappings; i++) { in cxl_pmem_region_release()
2638 struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd; in cxl_pmem_region_release()
2640 put_device(&cxlmd->dev); in cxl_pmem_region_release()
2659 return dev->type == &cxl_pmem_region_type; in is_cxl_pmem_region()
2674 enum cxl_decoder_mode mode; member
2681 struct cxl_dev_state *cxlds = cxlmd->cxlds; in cxl_get_poison_unmapped()
2690 * for unmapped resources based on the last decoder's mode: in cxl_get_poison_unmapped()
2695 if (ctx->mode == CXL_DECODER_RAM) { in cxl_get_poison_unmapped()
2696 offset = ctx->offset; in cxl_get_poison_unmapped()
2697 length = resource_size(&cxlds->ram_res) - offset; in cxl_get_poison_unmapped()
2699 if (rc == -EFAULT) in cxl_get_poison_unmapped()
2704 if (ctx->mode == CXL_DECODER_PMEM) { in cxl_get_poison_unmapped()
2705 offset = ctx->offset; in cxl_get_poison_unmapped()
2706 length = resource_size(&cxlds->dpa_res) - offset; in cxl_get_poison_unmapped()
2709 } else if (resource_size(&cxlds->pmem_res)) { in cxl_get_poison_unmapped()
2710 offset = cxlds->pmem_res.start; in cxl_get_poison_unmapped()
2711 length = resource_size(&cxlds->pmem_res); in cxl_get_poison_unmapped()
2731 if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) in poison_by_decoder()
2735 * Regions are only created with single mode decoders: pmem or ram. in poison_by_decoder()
2736 * Linux does not support mixed mode decoders. This means that in poison_by_decoder()
2741 if (cxled->mode == CXL_DECODER_MIXED) { in poison_by_decoder()
2742 dev_dbg(dev, "poison list read unsupported in mixed mode\n"); in poison_by_decoder()
2747 if (cxled->skip) { in poison_by_decoder()
2748 offset = cxled->dpa_res->start - cxled->skip; in poison_by_decoder()
2749 length = cxled->skip; in poison_by_decoder()
2751 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) in poison_by_decoder()
2757 offset = cxled->dpa_res->start; in poison_by_decoder()
2758 length = cxled->dpa_res->end - offset + 1; in poison_by_decoder()
2759 rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region); in poison_by_decoder()
2760 if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM) in poison_by_decoder()
2766 if (cxled->cxld.id == ctx->port->commit_end) { in poison_by_decoder()
2767 ctx->offset = cxled->dpa_res->end + 1; in poison_by_decoder()
2768 ctx->mode = cxled->mode; in poison_by_decoder()
2784 rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder); in cxl_get_poison_by_endpoint()
2786 rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev), in cxl_get_poison_by_endpoint()
2802 u64 dpa = ctx->dpa; in __cxl_dpa_to_region()
2808 if (!cxled || !cxled->dpa_res || !resource_size(cxled->dpa_res)) in __cxl_dpa_to_region()
2811 if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) in __cxl_dpa_to_region()
2819 cxlr = cxled->cxld.region; in __cxl_dpa_to_region()
2822 dev_name(&cxlr->dev)); in __cxl_dpa_to_region()
2827 ctx->cxlr = cxlr; in __cxl_dpa_to_region()
2840 port = cxlmd->endpoint; in cxl_dpa_to_region()
2842 device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); in cxl_dpa_to_region()
2849 struct cxl_region_params *p = &cxlr->params; in cxl_is_hpa_in_chunk()
2850 int gran = p->interleave_granularity; in cxl_is_hpa_in_chunk()
2851 int ways = p->interleave_ways; in cxl_is_hpa_in_chunk()
2854 /* Is the hpa in an expected chunk for its pos(-ition) */ in cxl_is_hpa_in_chunk()
2855 offset = hpa - p->res->start; in cxl_is_hpa_in_chunk()
2860 dev_dbg(&cxlr->dev, in cxl_is_hpa_in_chunk()
2869 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); in cxl_dpa_to_hpa()
2871 struct cxl_region_params *p = &cxlr->params; in cxl_dpa_to_hpa()
2877 for (int i = 0; i < p->nr_targets; i++) { in cxl_dpa_to_hpa()
2878 cxled = p->targets[i]; in cxl_dpa_to_hpa()
2885 pos = cxled->pos; in cxl_dpa_to_hpa()
2886 ways_to_eiw(p->interleave_ways, &eiw); in cxl_dpa_to_hpa()
2887 granularity_to_eig(p->interleave_granularity, &eig); in cxl_dpa_to_hpa()
2890 * The device position in the region interleave set was removed in cxl_dpa_to_hpa()
2891 * from the offset at HPA->DPA translation. To reconstruct the in cxl_dpa_to_hpa()
2894 * The placement of 'pos' in the HPA is determined by interleave in cxl_dpa_to_hpa()
2900 dpa_offset = dpa - cxl_dpa_resource_start(cxled); in cxl_dpa_to_hpa()
2910 hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); in cxl_dpa_to_hpa()
2917 hpa = hpa_offset + p->res->start; in cxl_dpa_to_hpa()
2920 if (cxlrd->hpa_to_spa) in cxl_dpa_to_hpa()
2921 hpa = cxlrd->hpa_to_spa(cxlrd, hpa); in cxl_dpa_to_hpa()
2923 if (hpa < p->res->start || hpa > p->res->end) { in cxl_dpa_to_hpa()
2924 dev_dbg(&cxlr->dev, in cxl_dpa_to_hpa()
2930 if (!cxlrd->hpa_to_spa && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos))) in cxl_dpa_to_hpa()
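
A worked example of the reconstruction described in the comment above, for the power-of-2 case (the quoted line with `bits_upper << (eiw - 8)` is the 3/6/12-way branch; the numbers below are illustrative): take a 4-way region (eiw = 2) at 256 B granularity (eig = 0), endpoint position pos = 2, and dpa_offset = 0x100 (the device's second 256 B chunk):

    upper bits: dpa_offset above bit eig + 7, shifted up by eiw  -> 0x100 << 2 = 0x400
    pos field:  pos << (eig + 8)                                 -> 2 << 8     = 0x200
    low bits:   dpa_offset & GENMASK(eig + 7, 0)                 -> 0x0
    hpa_offset = 0x400 | 0x200 | 0x0 = 0x600

Cross-check: HPA offset 0x600 is the region's chunk 6; 6 mod 4 = 2 recovers pos, and 6 / 4 = 1 recovers the device's chunk 1 (DPA offset 0x100), so the translation round-trips.
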
2940 struct cxl_region_params *p = &cxlr->params; in cxl_pmem_region_alloc()
2946 if (p->state != CXL_CONFIG_COMMIT) in cxl_pmem_region_alloc()
2947 return -ENXIO; in cxl_pmem_region_alloc()
2950 kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL); in cxl_pmem_region_alloc()
2952 return -ENOMEM; in cxl_pmem_region_alloc()
2954 cxlr_pmem->hpa_range.start = p->res->start; in cxl_pmem_region_alloc()
2955 cxlr_pmem->hpa_range.end = p->res->end; in cxl_pmem_region_alloc()
2958 cxlr_pmem->nr_mappings = p->nr_targets; in cxl_pmem_region_alloc()
2959 for (i = 0; i < p->nr_targets; i++) { in cxl_pmem_region_alloc()
2960 struct cxl_endpoint_decoder *cxled = p->targets[i]; in cxl_pmem_region_alloc()
2962 struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; in cxl_pmem_region_alloc()
2969 cxl_nvb = cxl_find_nvdimm_bridge(cxlmd->endpoint); in cxl_pmem_region_alloc()
2971 return -ENODEV; in cxl_pmem_region_alloc()
2972 cxlr->cxl_nvb = cxl_nvb; in cxl_pmem_region_alloc()
2974 m->cxlmd = cxlmd; in cxl_pmem_region_alloc()
2975 get_device(&cxlmd->dev); in cxl_pmem_region_alloc()
2976 m->start = cxled->dpa_res->start; in cxl_pmem_region_alloc()
2977 m->size = resource_size(cxled->dpa_res); in cxl_pmem_region_alloc()
2978 m->position = i; in cxl_pmem_region_alloc()
2981 dev = &cxlr_pmem->dev; in cxl_pmem_region_alloc()
2983 lockdep_set_class(&dev->mutex, &cxl_pmem_region_key); in cxl_pmem_region_alloc()
2985 dev->parent = &cxlr->dev; in cxl_pmem_region_alloc()
2986 dev->bus = &cxl_bus_type; in cxl_pmem_region_alloc()
2987 dev->type = &cxl_pmem_region_type; in cxl_pmem_region_alloc()
2988 cxlr_pmem->cxlr = cxlr; in cxl_pmem_region_alloc()
2989 cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem); in cxl_pmem_region_alloc()
3014 return dev->type == &cxl_dax_region_type; in is_cxl_dax_region()
3030 struct cxl_region_params *p = &cxlr->params; in cxl_dax_region_alloc()
3035 if (p->state != CXL_CONFIG_COMMIT) { in cxl_dax_region_alloc()
3036 cxlr_dax = ERR_PTR(-ENXIO); in cxl_dax_region_alloc()
3042 cxlr_dax = ERR_PTR(-ENOMEM); in cxl_dax_region_alloc()
3046 cxlr_dax->hpa_range.start = p->res->start; in cxl_dax_region_alloc()
3047 cxlr_dax->hpa_range.end = p->res->end; in cxl_dax_region_alloc()
3049 dev = &cxlr_dax->dev; in cxl_dax_region_alloc()
3050 cxlr_dax->cxlr = cxlr; in cxl_dax_region_alloc()
3052 lockdep_set_class(&dev->mutex, &cxl_dax_region_key); in cxl_dax_region_alloc()
3054 dev->parent = &cxlr->dev; in cxl_dax_region_alloc()
3055 dev->bus = &cxl_bus_type; in cxl_dax_region_alloc()
3056 dev->type = &cxl_dax_region_type; in cxl_dax_region_alloc()
3066 struct cxl_region *cxlr = cxlr_pmem->cxlr; in cxlr_pmem_unregister()
3067 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; in cxlr_pmem_unregister()
3070 * Either the bridge is in ->remove() context under the device_lock(), in cxlr_pmem_unregister()
3075 device_lock_assert(&cxl_nvb->dev); in cxlr_pmem_unregister()
3076 cxlr->cxlr_pmem = NULL; in cxlr_pmem_unregister()
3077 cxlr_pmem->cxlr = NULL; in cxlr_pmem_unregister()
3078 device_unregister(&cxlr_pmem->dev); in cxlr_pmem_unregister()
3084 struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb; in cxlr_release_nvdimm()
3086 scoped_guard(device, &cxl_nvb->dev) { in cxlr_release_nvdimm()
3087 if (cxlr->cxlr_pmem) in cxlr_release_nvdimm()
3088 devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister, in cxlr_release_nvdimm()
3089 cxlr->cxlr_pmem); in cxlr_release_nvdimm()
3091 cxlr->cxl_nvb = NULL; in cxlr_release_nvdimm()
3092 put_device(&cxl_nvb->dev); in cxlr_release_nvdimm()
3096 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
3111 cxlr_pmem = cxlr->cxlr_pmem; in devm_cxl_add_pmem_region()
3112 cxl_nvb = cxlr->cxl_nvb; in devm_cxl_add_pmem_region()
3114 dev = &cxlr_pmem->dev; in devm_cxl_add_pmem_region()
3115 rc = dev_set_name(dev, "pmem_region%d", cxlr->id); in devm_cxl_add_pmem_region()
3123 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), in devm_cxl_add_pmem_region()
3126 scoped_guard(device, &cxl_nvb->dev) { in devm_cxl_add_pmem_region()
3127 if (cxl_nvb->dev.driver) in devm_cxl_add_pmem_region()
3128 rc = devm_add_action_or_reset(&cxl_nvb->dev, in devm_cxl_add_pmem_region()
3132 rc = -ENXIO; in devm_cxl_add_pmem_region()
3139 return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr); in devm_cxl_add_pmem_region()
3144 put_device(&cxl_nvb->dev); in devm_cxl_add_pmem_region()
3145 cxlr->cxl_nvb = NULL; in devm_cxl_add_pmem_region()
3153 device_unregister(&cxlr_dax->dev); in cxlr_dax_unregister()
3166 dev = &cxlr_dax->dev; in devm_cxl_add_dax_region()
3167 rc = dev_set_name(dev, "dax_region%d", cxlr->id); in devm_cxl_add_dax_region()
3175 dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), in devm_cxl_add_dax_region()
3178 return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister, in devm_cxl_add_dax_region()
3194 r1 = &cxlrd->cxlsd.cxld.hpa_range; in match_root_decoder_by_range()
3209 p = &cxlr->params; in match_region_by_range()
3212 if (p->res && p->res->start == r->start && p->res->end == r->end) in match_region_by_range()
3225 struct range *hpa = &cxled->cxld.hpa_range; in construct_region()
3232 cxlr = __create_region(cxlrd, cxled->mode, in construct_region()
3233 atomic_read(&cxlrd->region_id)); in construct_region()
3234 } while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY); in construct_region()
3237 dev_err(cxlmd->dev.parent, in construct_region()
3239 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in construct_region()
3245 p = &cxlr->params; in construct_region()
3246 if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { in construct_region()
3247 dev_err(cxlmd->dev.parent, in construct_region()
3249 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in construct_region()
3251 rc = -EBUSY; in construct_region()
3255 set_bit(CXL_REGION_F_AUTO, &cxlr->flags); in construct_region()
3259 rc = -ENOMEM; in construct_region()
3263 *res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa), in construct_region()
3264 dev_name(&cxlr->dev)); in construct_region()
3265 rc = insert_resource(cxlrd->res, res); in construct_region()
3268 * Platform-firmware may not have split resources like "System in construct_region()
3271 dev_warn(cxlmd->dev.parent, in construct_region()
3273 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), in construct_region()
3274 __func__, dev_name(&cxlr->dev)); in construct_region()
3277 p->res = res; in construct_region()
3278 p->interleave_ways = cxled->cxld.interleave_ways; in construct_region()
3279 p->interleave_granularity = cxled->cxld.interleave_granularity; in construct_region()
3280 p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; in construct_region()
3282 rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); in construct_region()
3286 dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n", in construct_region()
3287 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__, in construct_region()
3288 dev_name(&cxlr->dev), p->res, p->interleave_ways, in construct_region()
3289 p->interleave_granularity); in construct_region()
3292 get_device(&cxlr->dev); in construct_region()
3299 devm_release_action(port->uport_dev, unregister_region, cxlr); in construct_region()
3306 struct range *hpa = &cxled->cxld.hpa_range; in cxl_add_to_region()
3307 struct cxl_decoder *cxld = &cxled->cxld; in cxl_add_to_region()
3315 cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range, in cxl_add_to_region()
3318 dev_err(cxlmd->dev.parent, in cxl_add_to_region()
3320 dev_name(&cxlmd->dev), dev_name(&cxld->dev), in cxl_add_to_region()
3321 cxld->hpa_range.start, cxld->hpa_range.end); in cxl_add_to_region()
3322 return -ENXIO; in cxl_add_to_region()
3331 mutex_lock(&cxlrd->range_lock); in cxl_add_to_region()
3332 region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa, in cxl_add_to_region()
3336 region_dev = &cxlr->dev; in cxl_add_to_region()
3339 mutex_unlock(&cxlrd->range_lock); in cxl_add_to_region()
3345 attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE); in cxl_add_to_region()
3348 p = &cxlr->params; in cxl_add_to_region()
3349 attach = p->state == CXL_CONFIG_COMMIT; in cxl_add_to_region()
3355 * the platform-firmware memory map, otherwise the driver for in cxl_add_to_region()
3358 if (device_attach(&cxlr->dev) < 0) in cxl_add_to_region()
3359 dev_err(&cxlr->dev, "failed to enable, range: %pr\n", in cxl_add_to_region()
3360 p->res); in cxl_add_to_region()
3373 struct cxl_region_params *p = &cxlr->params; in is_system_ram()
3375 dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res); in is_system_ram()
3383 unregister_memory_notifier(&cxlr->memory_notifier); in shutdown_notifiers()
3384 unregister_mt_adistance_algorithm(&cxlr->adist_notifier); in shutdown_notifiers()
3390 struct cxl_region_params *p = &cxlr->params; in cxl_region_probe()
3395 dev_dbg(&cxlr->dev, "probe interrupted\n"); in cxl_region_probe()
3399 if (p->state < CXL_CONFIG_COMMIT) { in cxl_region_probe()
3400 dev_dbg(&cxlr->dev, "config state: %d\n", p->state); in cxl_region_probe()
3401 rc = -ENXIO; in cxl_region_probe()
3405 if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) { in cxl_region_probe()
3406 dev_err(&cxlr->dev, in cxl_region_probe()
3407 "failed to activate, re-commit region and retry\n"); in cxl_region_probe()
3408 rc = -ENXIO; in cxl_region_probe()
3422 cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback; in cxl_region_probe()
3423 cxlr->memory_notifier.priority = CXL_CALLBACK_PRI; in cxl_region_probe()
3424 register_memory_notifier(&cxlr->memory_notifier); in cxl_region_probe()
3426 cxlr->adist_notifier.notifier_call = cxl_region_calculate_adistance; in cxl_region_probe()
3427 cxlr->adist_notifier.priority = 100; in cxl_region_probe()
3428 register_mt_adistance_algorithm(&cxlr->adist_notifier); in cxl_region_probe()
3430 rc = devm_add_action_or_reset(&cxlr->dev, shutdown_notifiers, cxlr); in cxl_region_probe()
3434 switch (cxlr->mode) { in cxl_region_probe()
3444 p->res->start, p->res->end, cxlr, in cxl_region_probe()
3449 dev_dbg(&cxlr->dev, "unsupported region mode: %d\n", in cxl_region_probe()
3450 cxlr->mode); in cxl_region_probe()
3451 return -ENXIO; in cxl_region_probe()