// SPDX-License-Identifier: GPL-2.0-only

/* from the file-header comment:
 * instances per CXL port and per CXL endpoint. Define common helpers
 */

static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                put_device(&cxld->dev);
                dev_err(&port->dev, "Failed to add decoder\n");
        rc = cxl_decoder_autoremove(&port->dev, cxld);
        dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));

/* from the kernel-doc for devm_cxl_add_passthrough_decoder():
 * single-ported host bridges need not publish a decoder capability when a
 */
int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        cxlhdm->interleave_mask = ~0U;
        cxlhdm->iw_cap_mask = ~0UL;
        cxlsd = cxl_switch_decoder_alloc(port, 1);
        device_lock_assert(&port->dev);
        xa_for_each(&port->dports, index, dport)
        single_port_map[0] = dport->port_id;
        return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
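
/*
 * Editorial sketch (not from this file): how a port driver might fall back
 * to the passthrough decoder when no HDM decoder capability is mapped.
 * The function name and error handling are illustrative only.
 */
static int example_setup_port_decoders(struct cxl_port *port)
{
        struct cxl_hdm *cxlhdm;

        cxlhdm = devm_cxl_setup_hdm(port, NULL);
        if (!IS_ERR(cxlhdm))
                return devm_cxl_enumerate_decoders(cxlhdm, NULL);

        /* no decoder capability published: assume passthrough decode */
        if (PTR_ERR(cxlhdm) == -ENODEV)
                return devm_cxl_add_passthrough_decoder(port);

        return PTR_ERR(cxlhdm);
}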

/* in parse_hdm_decoder_caps() */
        hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET);
        cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap);
        cxlhdm->target_count =
                cxlhdm->interleave_mask |= GENMASK(11, 8);
                cxlhdm->interleave_mask |= GENMASK(14, 12);
        cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8);
                cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12);
                cxlhdm->iw_cap_mask |= BIT(16);
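
/*
 * Editorial sketch: iw_cap_mask is built above with BIT(n) set when n-way
 * host interleave is supported, so a capability check reduces to a bit
 * test. The helper name is hypothetical.
 */
static bool example_iw_supported(struct cxl_hdm *cxlhdm, unsigned int ways)
{
        return ways < BITS_PER_LONG && (cxlhdm->iw_cap_mask & BIT(ways));
}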

/* in should_emulate_decoders() */
        cxlhdm = dev_get_drvdata(&info->port->dev);
        hdm = cxlhdm->regs.hdm_decoder;
        if (!info->mem_enabled)
        for (i = 0; i < cxlhdm->decoder_count; i++) {
                dev_dbg(&info->port->dev,
                        info->port->id, i,

/* kernel-doc:
 * devm_cxl_setup_hdm - map HDM decoder component registers
 * @port: cxl_port to map
 */
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
        struct cxl_register_map *reg_map = &port->reg_map;
        struct device *dev = &port->dev;
                return ERR_PTR(-ENOMEM);
        cxlhdm->port = port;
        if (reg_map->resource == CXL_RESOURCE_NONE) {
                if (!info || !info->mem_enabled) {
                        return ERR_PTR(-ENXIO);
                cxlhdm->decoder_count = info->ranges;
        if (!reg_map->component_map.hdm_decoder.valid) {
                dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
                return ERR_PTR(-ENODEV);
        rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
        if (cxlhdm->decoder_count == 0) {
                return ERR_PTR(-ENXIO);
                dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
                        info->ranges > 1 ? "s" : "");
                cxlhdm->decoder_count = info->ranges;

/* in __cxl_dpa_debug() */
        unsigned long long start = r->start, end = r->end;
        seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end,
                   r->name);

/* in cxl_dpa_debug() */
        for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) {
                for (p2 = p1->child; p2; p2 = p2->sibling)

/* in __cxl_dpa_release(); comment fragment:
 * port ->remove() callback (like an endpoint decoder sysfs attribute)
 */
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct resource *res = cxled->dpa_res;
        skip_start = res->start - cxled->skip;
        __release_region(&cxlds->dpa_res, res->start, resource_size(res));
        if (cxled->skip)
                __release_region(&cxlds->dpa_res, skip_start, cxled->skip);
        cxled->skip = 0;
        cxled->dpa_res = NULL;
        put_device(&cxled->cxld.dev);
        port->hdm_end--;

/* in devm_cxl_dpa_release(); comment fragment:
 * Must be called from context that will not race port device
 */
        struct cxl_port *port = cxled_to_port(cxled);
        devm_remove_action(&port->dev, cxl_dpa_release, cxled);

/* in __cxl_dpa_reserve() */
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &port->dev;
                        port->id, cxled->cxld.id);
                return -EINVAL;
        if (cxled->dpa_res) {
                        port->id, cxled->cxld.id, cxled->dpa_res);
                return -EBUSY;
        if (port->hdm_end + 1 != cxled->cxld.id) {
                dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id,
                        cxled->cxld.id, port->id, port->hdm_end + 1);
                return -EBUSY;
                res = __request_region(&cxlds->dpa_res, base - skipped, skipped,
                                       dev_name(&cxled->cxld.dev), 0);
                                port->id, cxled->cxld.id);
                        return -EBUSY;
        res = __request_region(&cxlds->dpa_res, base, len,
                               dev_name(&cxled->cxld.dev), 0);
                        port->id, cxled->cxld.id);
                        __release_region(&cxlds->dpa_res, base - skipped,
                return -EBUSY;
        cxled->dpa_res = res;
        cxled->skip = skipped;
        if (resource_contains(&cxlds->pmem_res, res))
                cxled->mode = CXL_DECODER_PMEM;
        else if (resource_contains(&cxlds->ram_res, res))
                cxled->mode = CXL_DECODER_RAM;
                        port->id, cxled->cxld.id, cxled->dpa_res);
                cxled->mode = CXL_DECODER_MIXED;
        port->hdm_end++;
        get_device(&cxled->cxld.dev);

/* in devm_cxl_dpa_reserve() */
        struct cxl_port *port = cxled_to_port(cxled);
        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
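
/*
 * Editorial sketch: a caller reserving 256MB of DPA at device address 0
 * with no skipped span. The wrapper is hypothetical; the devm-managed
 * release action is registered by devm_cxl_dpa_reserve() itself.
 */
static int example_reserve_dpa(struct cxl_endpoint_decoder *cxled)
{
        return devm_cxl_dpa_reserve(cxled, 0, SZ_256M, 0);
}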

/* in cxl_dpa_size() */
        if (cxled->dpa_res)
                size = resource_size(cxled->dpa_res);

/* in cxl_dpa_resource_start() */
        resource_size_t base = -1;
        if (cxled->dpa_res)
                base = cxled->dpa_res->start;

/* in cxl_dpa_free() */
        struct cxl_port *port = cxled_to_port(cxled);
        struct device *dev = &cxled->cxld.dev;
        if (!cxled->dpa_res) {
        if (cxled->cxld.region) {
                        dev_name(&cxled->cxld.region->dev));
                rc = -EBUSY;
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                rc = -EBUSY;
        if (cxled->cxld.id != port->hdm_end) {
                dev_dbg(dev, "expected decoder%d.%d\n", port->id,
                        port->hdm_end);
                rc = -EBUSY;

/* in cxl_dpa_set_mode() */
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
                return -EINVAL;
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                rc = -EBUSY;
        if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
                rc = -ENXIO;
        if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
                rc = -ENXIO;
        cxled->mode = mode;

/* in cxl_dpa_alloc() */
        struct cxl_port *port = cxled_to_port(cxled);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct device *dev = &cxled->cxld.dev;
        if (cxled->cxld.region) {
                        dev_name(&cxled->cxld.region->dev));
                rc = -EBUSY;
        if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
                rc = -EBUSY;
        for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling)
                free_ram_start = last->end + 1;
                free_ram_start = cxlds->ram_res.start;
        for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling)
                free_pmem_start = last->end + 1;
                free_pmem_start = cxlds->pmem_res.start;
        if (cxled->mode == CXL_DECODER_RAM) {
                avail = cxlds->ram_res.end - start + 1;
        } else if (cxled->mode == CXL_DECODER_PMEM) {
                avail = cxlds->pmem_res.end - start + 1;
                if (cxlds->pmem_res.child &&
                    skip_start == cxlds->pmem_res.child->start)
                        skip_end = skip_start - 1;
                        skip_end = start - 1;
                skip = skip_end - skip_start + 1;
                rc = -EINVAL;
                        cxl_decoder_mode_name(cxled->mode), &avail);
                rc = -ENOSPC;
        return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled);
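
/*
 * Worked example (editorial): with ram_res spanning [0, 1G) and a single
 * existing child allocation ending at 256M - 1, the scan above leaves
 * 'last' at that child, so free_ram_start = 256M and a CXL_DECODER_RAM
 * allocation sees avail = 1G - 256M = 768M.
 */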

/* in cxld_set_interleave() */
        if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw),
                      "invalid interleave_ways: %d\n", cxld->interleave_ways))
        if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig),
                      cxld->interleave_granularity))
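
/*
 * For reference (CXL spec encodings implemented by ways_to_eiw() and
 * granularity_to_eig()): interleave ways 1, 2, 4, 8, 16 encode as
 * eiw 0-4 and the 3-way family 3, 6, 12 as eiw 8-10; interleave
 * granularity 256B through 16KB encodes as eig = log2(granularity) - 8.
 */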

/* in cxld_set_type() */
                          !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM),

/* in cxlsd_set_targets() */
        struct cxl_dport **t = &cxlsd->target[0];
        int ways = cxlsd->cxld.interleave_ways;
        *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
                *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
                *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id);
                *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id);
                *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id);
                *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id);
                *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
                *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
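
/*
 * Editorial sketch: the unrolled FIELD_PREP() chain above packs one dport
 * id per byte of the 64-bit target list, lowest byte first; an equivalent
 * loop form (hypothetical helper) for illustration:
 */
static u64 example_pack_targets(struct cxl_switch_decoder *cxlsd)
{
        int i, ways = cxlsd->cxld.interleave_ways;
        u64 tgt = 0;

        for (i = 0; i < ways && i < 8; i++)
                tgt |= (u64)cxlsd->target[i]->port_id << (i * 8);
        return tgt;
}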

static int cxld_await_commit(void __iomem *hdm, int id)
                ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
                        return -EIO;
        return -ETIMEDOUT;

/* in cxl_decoder_commit() */
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id, rc;
        if (cxld->flags & CXL_DECODER_F_ENABLE)
        if (cxl_num_decoders_committed(port) != id) {
                dev_dbg(&port->dev,
                        dev_name(&cxld->dev), port->id,
                        cxl_num_decoders_committed(port));
                return -EBUSY;
         * support the sanitize operation, make sure sanitize is not in-flight.
        if (is_endpoint_decoder(&cxld->dev)) {
                        to_cxl_endpoint_decoder(&cxld->dev);
                        to_cxl_memdev_state(cxlmd->cxlds);
                if (mds && mds->security.sanitize_active) {
                        dev_dbg(&cxlmd->dev,
                                dev_name(&cxld->dev));
                        return -EBUSY;
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
        base = cxld->hpa_range.start;
        size = range_len(&cxld->hpa_range);
        writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
        if (is_switch_decoder(&cxld->dev)) {
                        to_cxl_switch_decoder(&cxld->dev);
                void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id);
                void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
                        to_cxl_endpoint_decoder(&cxld->dev);
                void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id);
                void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id);
                writel(upper_32_bits(cxled->skip), sk_hi);
                writel(lower_32_bits(cxled->skip), sk_lo);
        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        port->commit_end++;
        rc = cxld_await_commit(hdm, cxld->id);
                dev_dbg(&port->dev, "%s: error %d committing decoder\n",
                        dev_name(&cxld->dev), rc);
                cxld->reset(cxld);
        cxld->flags |= CXL_DECODER_F_ENABLE;
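
/*
 * Editorial note on the ordering above: base/size (plus the target list
 * for switch decoders or the DPA skip for endpoint decoders) are
 * programmed before CTRL is written with the commit bit, and the decoder
 * is only marked CXL_DECODER_F_ENABLE once cxld_await_commit() sees the
 * committed status rather than an error.
 */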

/* in commit_reap() */
        struct cxl_port *port = to_cxl_port(dev->parent);
        if (port->commit_end == cxld->id &&
            ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
                port->commit_end--;
                dev_dbg(&port->dev, "reap: %s commit_end: %d\n",
                        dev_name(&cxld->dev), port->commit_end);

/* in cxl_port_commit_reap() */
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
         * decoders that were left pinned by an out-of-order release.
        port->commit_end--;
        dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev),
                port->commit_end);
        device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL,

/* in cxl_decoder_reset() */
        struct cxl_port *port = to_cxl_port(cxld->dev.parent);
        struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        int id = cxld->id;
        if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
        if (port->commit_end == id)
        dev_dbg(&port->dev,
                dev_name(&cxld->dev), port->id, port->commit_end);
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id));
        writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id));
        cxld->flags &= ~CXL_DECODER_F_ENABLE;
        if (is_endpoint_decoder(&cxld->dev)) {
                cxled = to_cxl_endpoint_decoder(&cxld->dev);
                cxled->state = CXL_DECODER_STATE_MANUAL;

/* in cxl_setup_hdm_decoder_from_dvsec() */
                struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base,
        if (!is_cxl_endpoint(port))
                return -EOPNOTSUPP;
        cxled = to_cxl_endpoint_decoder(&cxld->dev);
        len = range_len(&info->dvsec_range[which]);
                return -ENOENT;
        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
        cxld->commit = NULL;
        cxld->reset = NULL;
        cxld->hpa_range = info->dvsec_range[which];
        cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK;
        port->commit_end = cxld->id;
                dev_err(&port->dev,
                        "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
                        port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc);
        cxled->state = CXL_DECODER_STATE_AUTO;

static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base,
        cxld->commit = cxl_decoder_commit;
        cxld->reset = cxl_decoder_reset;
                dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n",
                         port->id, cxld->id);
                return -ENXIO;
        cxled = to_cxl_endpoint_decoder(&cxld->dev);
        cxld->hpa_range = (struct range) {
                .end = base + size - 1,
                cxld->flags |= CXL_DECODER_F_ENABLE;
                cxld->flags |= CXL_DECODER_F_LOCK;
                cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                cxld->target_type = CXL_DECODER_DEVMEM;
        if (cxld->id != cxl_num_decoders_committed(port)) {
                dev_warn(&port->dev,
                         port->id, cxld->id);
                return -ENXIO;
                dev_warn(&port->dev,
                         port->id, cxld->id);
                return -ENXIO;
        port->commit_end = cxld->id;
                struct cxl_dev_state *cxlds = cxlmd->cxlds;
                if (cxlds->type == CXL_DEVTYPE_CLASSMEM)
                        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                        cxld->target_type = CXL_DECODER_DEVMEM;
                cxld->target_type = CXL_DECODER_HOSTONLYMEM;
            cxld->target_type == CXL_DECODER_HOSTONLYMEM) {
                        &cxld->interleave_ways);
                dev_warn(&port->dev,
                         port->id, cxld->id, ctrl);
                        &cxld->interleave_granularity);
                dev_warn(&port->dev,
                         port->id, cxld->id, ctrl);
        dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
                port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
                cxld->interleave_ways, cxld->interleave_granularity);
        for (i = 0; i < cxld->interleave_ways; i++)
        dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder);
                dev_err(&port->dev,
                        port->id, cxld->id, size, cxld->interleave_ways);
                return -ENXIO;
                dev_err(&port->dev,
                        "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx (%d)\n",
                        port->id, cxld->id, *dpa_base,
                        *dpa_base + dpa_size + skip - 1, rc);
        cxled->state = CXL_DECODER_STATE_AUTO;
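
/*
 * Editorial note: endpoint enumeration walks DPA space in decoder order;
 * after a successful reservation the caller-provided *dpa_base is
 * expected to advance by dpa_size plus any skip, so the next decoder's
 * range (as in the error message above) starts where this one ended.
 */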

/* in cxl_settle_decoders() */
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
         * be careful about trusting the "not-committed" status until the commit
        for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) {
        if (committed != cxlhdm->decoder_count)

/* kernel-doc:
 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
 */
        void __iomem *hdm = cxlhdm->regs.hdm_decoder;
        struct cxl_port *port = cxlhdm->port;
        for (i = 0; i < cxlhdm->decoder_count; i++) {
                int rc, target_count = cxlhdm->target_count;
                if (is_cxl_endpoint(port)) {
                        cxled = cxl_endpoint_decoder_alloc(port);
                                dev_warn(&port->dev,
                                         port->id, i);
                        cxld = &cxled->cxld;
                        cxlsd = cxl_switch_decoder_alloc(port, target_count);
                                dev_warn(&port->dev,
                                         port->id, i);
                        cxld = &cxlsd->cxld;
                rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
                        dev_warn(&port->dev,
                                 port->id, i);
                        put_device(&cxld->dev);
                rc = add_hdm_decoder(port, cxld, target_map);
                        dev_warn(&port->dev,
                                 "Failed to add decoder%d.%d\n", port->id, i);