Lines Matching +full:add +full:- +full:pmem
1 // SPDX-License-Identifier: GPL-2.0-only
28 put_device(&cxld->dev); in add_hdm_decoder()
29 dev_err(&port->dev, "Failed to add decoder\n"); in add_hdm_decoder()
33 rc = cxl_decoder_autoremove(&port->dev, cxld); in add_hdm_decoder()
37 dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev)); in add_hdm_decoder()
44 * single-ported host bridges need not publish a decoder capability when a
60 device_lock_assert(&port->dev); in devm_cxl_add_passthrough_decoder()
62 xa_for_each(&port->dports, index, dport) in devm_cxl_add_passthrough_decoder()
64 single_port_map[0] = dport->port_id; in devm_cxl_add_passthrough_decoder()
66 return add_hdm_decoder(port, &cxlsd->cxld, single_port_map); in devm_cxl_add_passthrough_decoder()
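A single-dport host bridge contributes exactly one entry to the pass-through decoder's target map. A minimal userspace model of that mapping (the struct and names are illustrative, not the kernel API):

#include <stdio.h>

struct dport { int port_id; };

int main(void)
{
	/* the xa_for_each() above visits the lone downstream port */
	struct dport only = { .port_id = 3 };
	int single_port_map[1];

	single_port_map[0] = only.port_id;
	printf("pass-through target[0] = %d\n", single_port_map[0]);
	return 0;
}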
74 hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET); in parse_hdm_decoder_caps()
75 cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap); in parse_hdm_decoder_caps()
76 cxlhdm->target_count = in parse_hdm_decoder_caps()
79 cxlhdm->interleave_mask |= GENMASK(11, 8); in parse_hdm_decoder_caps()
81 cxlhdm->interleave_mask |= GENMASK(14, 12); in parse_hdm_decoder_caps()
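parse_hdm_decoder_caps() widens interleave_mask only when the capability register advertises interleave support for address bits [11:8] and [14:12]. A self-contained model of that widening; GENMASK() is re-derived locally and the capability flag positions are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

/* assumed capability flag positions, illustration only */
#define CAP_INTERLEAVE_11_8	(1u << 8)
#define CAP_INTERLEAVE_14_12	(1u << 9)

int main(void)
{
	uint32_t hdm_cap = CAP_INTERLEAVE_11_8 | CAP_INTERLEAVE_14_12;
	uint32_t interleave_mask = 0;

	if (hdm_cap & CAP_INTERLEAVE_11_8)
		interleave_mask |= GENMASK(11, 8);
	if (hdm_cap & CAP_INTERLEAVE_14_12)
		interleave_mask |= GENMASK(14, 12);
	printf("interleave_mask = %#x\n", interleave_mask);
	return 0;
}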
94 cxlhdm = dev_get_drvdata(&info->port->dev); in should_emulate_decoders()
95 hdm = cxlhdm->regs.hdm_decoder; in should_emulate_decoders()
104 if (!info->mem_enabled) in should_emulate_decoders()
111 for (i = 0; i < cxlhdm->decoder_count; i++) { in should_emulate_decoders()
113 dev_dbg(&info->port->dev, in should_emulate_decoders()
115 info->port->id, i, in should_emulate_decoders()
129 * devm_cxl_setup_hdm - map HDM decoder component registers
136 struct cxl_register_map *reg_map = &port->reg_map; in devm_cxl_setup_hdm()
137 struct device *dev = &port->dev; in devm_cxl_setup_hdm()
143 return ERR_PTR(-ENOMEM); in devm_cxl_setup_hdm()
144 cxlhdm->port = port; in devm_cxl_setup_hdm()
148 if (reg_map->resource == CXL_RESOURCE_NONE) { in devm_cxl_setup_hdm()
149 if (!info || !info->mem_enabled) { in devm_cxl_setup_hdm()
151 return ERR_PTR(-ENXIO); in devm_cxl_setup_hdm()
154 cxlhdm->decoder_count = info->ranges; in devm_cxl_setup_hdm()
158 if (!reg_map->component_map.hdm_decoder.valid) { in devm_cxl_setup_hdm()
159 dev_dbg(&port->dev, "HDM decoder registers not implemented\n"); in devm_cxl_setup_hdm()
161 return ERR_PTR(-ENODEV); in devm_cxl_setup_hdm()
164 rc = cxl_map_component_regs(reg_map, &cxlhdm->regs, in devm_cxl_setup_hdm()
172 if (cxlhdm->decoder_count == 0) { in devm_cxl_setup_hdm()
174 return ERR_PTR(-ENXIO); in devm_cxl_setup_hdm()
182 dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges, in devm_cxl_setup_hdm()
183 info->ranges > 1 ? "s" : ""); in devm_cxl_setup_hdm()
184 cxlhdm->decoder_count = info->ranges; in devm_cxl_setup_hdm()
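The excerpt above encodes a three-way decision: fail with -ENXIO when neither HDM decoder registers nor DVSEC ranges are usable, fall back to emulating info->ranges decoders when the registers are missing or unmappable but mem_enabled is set, and otherwise map the register block. A pure-function sketch of that branch structure, with invented names:

#include <stdbool.h>
#include <stdio.h>

enum hdm_setup { HDM_FAIL, HDM_EMULATE, HDM_MAP_REGS };

/* illustrative stand-in for the branches above */
static enum hdm_setup pick_setup(bool have_regs, bool mem_enabled,
				 int dvsec_ranges)
{
	if (!have_regs)
		return mem_enabled && dvsec_ranges ? HDM_EMULATE : HDM_FAIL;
	return HDM_MAP_REGS;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_setup(false, false, 0),	/* HDM_FAIL (-ENXIO) */
	       pick_setup(false, true, 2),	/* HDM_EMULATE       */
	       pick_setup(true, true, 2));	/* HDM_MAP_REGS      */
	return 0;
}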
193 unsigned long long start = r->start, end = r->end; in __cxl_dpa_debug()
195 seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end, in __cxl_dpa_debug()
196 r->name); in __cxl_dpa_debug()
204 for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) { in cxl_dpa_debug()
206 for (p2 = p1->child; p2; p2 = p2->sibling) in cxl_dpa_debug()
215 * port ->remove() callback (like an endpoint decoder sysfs attribute)
221 struct cxl_dev_state *cxlds = cxlmd->cxlds; in __cxl_dpa_release()
222 struct resource *res = cxled->dpa_res; in __cxl_dpa_release()
228 skip_start = res->start - cxled->skip; in __cxl_dpa_release()
229 __release_region(&cxlds->dpa_res, res->start, resource_size(res)); in __cxl_dpa_release()
230 if (cxled->skip) in __cxl_dpa_release()
231 __release_region(&cxlds->dpa_res, skip_start, cxled->skip); in __cxl_dpa_release()
232 cxled->skip = 0; in __cxl_dpa_release()
233 cxled->dpa_res = NULL; in __cxl_dpa_release()
234 put_device(&cxled->cxld.dev); in __cxl_dpa_release()
235 port->hdm_end--; in __cxl_dpa_release()
254 devm_remove_action(&port->dev, cxl_dpa_release, cxled); in devm_cxl_dpa_release()
264 struct cxl_dev_state *cxlds = cxlmd->cxlds; in __cxl_dpa_reserve()
265 struct device *dev = &port->dev; in __cxl_dpa_reserve()
272 port->id, cxled->cxld.id); in __cxl_dpa_reserve()
273 return -EINVAL; in __cxl_dpa_reserve()
276 if (cxled->dpa_res) { in __cxl_dpa_reserve()
278 port->id, cxled->cxld.id, cxled->dpa_res); in __cxl_dpa_reserve()
279 return -EBUSY; in __cxl_dpa_reserve()
282 if (port->hdm_end + 1 != cxled->cxld.id) { in __cxl_dpa_reserve()
289 dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id, in __cxl_dpa_reserve()
290 cxled->cxld.id, port->id, port->hdm_end + 1); in __cxl_dpa_reserve()
291 return -EBUSY; in __cxl_dpa_reserve()
295 res = __request_region(&cxlds->dpa_res, base - skipped, skipped, in __cxl_dpa_reserve()
296 dev_name(&cxled->cxld.dev), 0); in __cxl_dpa_reserve()
300 port->id, cxled->cxld.id); in __cxl_dpa_reserve()
301 return -EBUSY; in __cxl_dpa_reserve()
304 res = __request_region(&cxlds->dpa_res, base, len, in __cxl_dpa_reserve()
305 dev_name(&cxled->cxld.dev), 0); in __cxl_dpa_reserve()
308 port->id, cxled->cxld.id); in __cxl_dpa_reserve()
310 __release_region(&cxlds->dpa_res, base - skipped, in __cxl_dpa_reserve()
312 return -EBUSY; in __cxl_dpa_reserve()
314 cxled->dpa_res = res; in __cxl_dpa_reserve()
315 cxled->skip = skipped; in __cxl_dpa_reserve()
317 if (resource_contains(&cxlds->pmem_res, res)) in __cxl_dpa_reserve()
318 cxled->mode = CXL_DECODER_PMEM; in __cxl_dpa_reserve()
319 else if (resource_contains(&cxlds->ram_res, res)) in __cxl_dpa_reserve()
320 cxled->mode = CXL_DECODER_RAM; in __cxl_dpa_reserve()
322 dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id, in __cxl_dpa_reserve()
323 cxled->cxld.id, cxled->dpa_res); in __cxl_dpa_reserve()
324 cxled->mode = CXL_DECODER_MIXED; in __cxl_dpa_reserve()
327 port->hdm_end++; in __cxl_dpa_reserve()
328 get_device(&cxled->cxld.dev); in __cxl_dpa_reserve()
346 return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled); in devm_cxl_dpa_reserve()
355 if (cxled->dpa_res) in cxl_dpa_size()
356 size = resource_size(cxled->dpa_res); in cxl_dpa_size()
364 resource_size_t base = -1; in cxl_dpa_resource_start()
367 if (cxled->dpa_res) in cxl_dpa_resource_start()
368 base = cxled->dpa_res->start; in cxl_dpa_resource_start()
376 struct device *dev = &cxled->cxld.dev; in cxl_dpa_free()
380 if (!cxled->dpa_res) { in cxl_dpa_free()
384 if (cxled->cxld.region) { in cxl_dpa_free()
386 dev_name(&cxled->cxld.region->dev)); in cxl_dpa_free()
387 rc = -EBUSY; in cxl_dpa_free()
390 if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { in cxl_dpa_free()
392 rc = -EBUSY; in cxl_dpa_free()
395 if (cxled->cxld.id != port->hdm_end) { in cxl_dpa_free()
396 dev_dbg(dev, "expected decoder%d.%d\n", port->id, in cxl_dpa_free()
397 port->hdm_end); in cxl_dpa_free()
398 rc = -EBUSY; in cxl_dpa_free()
412 struct cxl_dev_state *cxlds = cxlmd->cxlds; in cxl_dpa_set_mode()
413 struct device *dev = &cxled->cxld.dev; in cxl_dpa_set_mode()
422 return -EINVAL; in cxl_dpa_set_mode()
426 if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { in cxl_dpa_set_mode()
427 rc = -EBUSY; in cxl_dpa_set_mode()
435 if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) { in cxl_dpa_set_mode()
436 dev_dbg(dev, "no available pmem capacity\n"); in cxl_dpa_set_mode()
437 rc = -ENXIO; in cxl_dpa_set_mode()
440 if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) { in cxl_dpa_set_mode()
442 rc = -ENXIO; in cxl_dpa_set_mode()
446 cxled->mode = mode; in cxl_dpa_set_mode()
459 struct cxl_dev_state *cxlds = cxlmd->cxlds; in cxl_dpa_alloc()
460 struct device *dev = &cxled->cxld.dev; in cxl_dpa_alloc()
466 if (cxled->cxld.region) { in cxl_dpa_alloc()
468 dev_name(&cxled->cxld.region->dev)); in cxl_dpa_alloc()
469 rc = -EBUSY; in cxl_dpa_alloc()
473 if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { in cxl_dpa_alloc()
475 rc = -EBUSY; in cxl_dpa_alloc()
479 for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling) in cxl_dpa_alloc()
482 free_ram_start = last->end + 1; in cxl_dpa_alloc()
484 free_ram_start = cxlds->ram_res.start; in cxl_dpa_alloc()
486 for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling) in cxl_dpa_alloc()
489 free_pmem_start = last->end + 1; in cxl_dpa_alloc()
491 free_pmem_start = cxlds->pmem_res.start; in cxl_dpa_alloc()
493 if (cxled->mode == CXL_DECODER_RAM) { in cxl_dpa_alloc()
495 avail = cxlds->ram_res.end - start + 1; in cxl_dpa_alloc()
497 } else if (cxled->mode == CXL_DECODER_PMEM) { in cxl_dpa_alloc()
501 avail = cxlds->pmem_res.end - start + 1; in cxl_dpa_alloc()
505 * If some pmem is already allocated, then that allocation in cxl_dpa_alloc()
508 if (cxlds->pmem_res.child && in cxl_dpa_alloc()
509 skip_start == cxlds->pmem_res.child->start) in cxl_dpa_alloc()
510 skip_end = skip_start - 1; in cxl_dpa_alloc()
512 skip_end = start - 1; in cxl_dpa_alloc()
513 skip = skip_end - skip_start + 1; in cxl_dpa_alloc()
516 rc = -EINVAL; in cxl_dpa_alloc()
522 cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem", in cxl_dpa_alloc()
524 rc = -ENOSPC; in cxl_dpa_alloc()
535 return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled); in cxl_dpa_alloc()
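The skip arithmetic above is the crux of cxl_dpa_alloc(): decoders consume DPA in monotonically increasing order, so a pmem allocation must "skip" any free ram tail (and any gap up to an existing pmem child) between the last allocation and its own start. A worked userspace model with made-up capacities:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative layout: ram DPA from 0, pmem DPA from 1G */
	uint64_t ram_used_end = 0x20000000 - 1;	/* 512M of ram allocated */
	uint64_t pmem_start = 0x40000000;

	/* a new pmem allocation starts at the free pmem base ... */
	uint64_t start = pmem_start;
	/* ... and must skip the unallocated ram tail in between */
	uint64_t skip_start = ram_used_end + 1;
	uint64_t skip_end = start - 1;
	uint64_t skip = skip_end - skip_start + 1;

	printf("alloc at %#llx, skip %#llx bytes from %#llx\n",
	       (unsigned long long)start, (unsigned long long)skip,
	       (unsigned long long)skip_start);
	return 0;
}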
547 if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw), in cxld_set_interleave()
548 "invalid interleave_ways: %d\n", cxld->interleave_ways)) in cxld_set_interleave()
550 if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig), in cxld_set_interleave()
552 cxld->interleave_granularity)) in cxld_set_interleave()
563 !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM), in cxld_set_type()
569 struct cxl_dport **t = &cxlsd->target[0]; in cxlsd_set_targets()
570 int ways = cxlsd->cxld.interleave_ways; in cxlsd_set_targets()
572 *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id); in cxlsd_set_targets()
574 *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id); in cxlsd_set_targets()
576 *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id); in cxlsd_set_targets()
578 *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id); in cxlsd_set_targets()
580 *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id); in cxlsd_set_targets()
582 *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id); in cxlsd_set_targets()
584 *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id); in cxlsd_set_targets()
586 *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id); in cxlsd_set_targets()
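The unrolled FIELD_PREP() chain packs one 8-bit port id per interleave way into the 64-bit target list register. The same packing expressed as a loop, in a runnable userspace model:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int port_id[8] = { 4, 1, 6, 3, 0, 2, 7, 5 };	/* illustrative ids */
	int ways = 4;
	uint64_t tgt = 0;

	/* byte i of the register holds the port id for way i */
	for (int i = 0; i < ways; i++)
		tgt |= (uint64_t)(port_id[i] & 0xff) << (8 * i);
	printf("target list register = %#llx\n", (unsigned long long)tgt);
	return 0;
}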
605 return -EIO; in cxld_await_commit()
612 return -ETIMEDOUT; in cxld_await_commit()
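The await helper distinguishes a hardware-reported commit error (-EIO) from a decoder that simply never reports committed (-ETIMEDOUT). A bounded-poll model with a stand-in register read; the bit positions and poll budget are assumptions for illustration:

#include <stdio.h>

#define COMMITTED	(1u << 10)	/* assumed status bits */
#define COMMIT_ERROR	(1u << 11)

static unsigned int read_ctrl(int attempt)
{
	/* stand-in for readl(): pretend hw commits on the 3rd read */
	return attempt >= 3 ? COMMITTED : 0;
}

static int await_commit(void)
{
	for (int i = 0; i < 10; i++) {
		unsigned int ctrl = read_ctrl(i);

		if (ctrl & COMMIT_ERROR)
			return -5;	/* -EIO: hardware refused */
		if (ctrl & COMMITTED)
			return 0;
		/* real code would delay between reads */
	}
	return -110;			/* -ETIMEDOUT: never committed */
}

int main(void)
{
	printf("await_commit() = %d\n", await_commit());
	return 0;
}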
617 struct cxl_port *port = to_cxl_port(cxld->dev.parent); in cxl_decoder_commit()
618 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); in cxl_decoder_commit()
619 void __iomem *hdm = cxlhdm->regs.hdm_decoder; in cxl_decoder_commit()
620 int id = cxld->id, rc; in cxl_decoder_commit()
624 if (cxld->flags & CXL_DECODER_F_ENABLE) in cxl_decoder_commit()
628 dev_dbg(&port->dev, in cxl_decoder_commit()
630 dev_name(&cxld->dev), port->id, in cxl_decoder_commit()
632 return -EBUSY; in cxl_decoder_commit()
637 * support the sanitize operation, make sure sanitize is not in-flight. in cxl_decoder_commit()
639 if (is_endpoint_decoder(&cxld->dev)) { in cxl_decoder_commit()
641 to_cxl_endpoint_decoder(&cxld->dev); in cxl_decoder_commit()
644 to_cxl_memdev_state(cxlmd->cxlds); in cxl_decoder_commit()
646 if (mds && mds->security.sanitize_active) { in cxl_decoder_commit()
647 dev_dbg(&cxlmd->dev, in cxl_decoder_commit()
649 dev_name(&cxld->dev)); in cxl_decoder_commit()
650 return -EBUSY; in cxl_decoder_commit()
656 ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id)); in cxl_decoder_commit()
659 base = cxld->hpa_range.start; in cxl_decoder_commit()
660 size = range_len(&cxld->hpa_range); in cxl_decoder_commit()
667 if (is_switch_decoder(&cxld->dev)) { in cxl_decoder_commit()
669 to_cxl_switch_decoder(&cxld->dev); in cxl_decoder_commit()
679 to_cxl_endpoint_decoder(&cxld->dev); in cxl_decoder_commit()
683 writel(upper_32_bits(cxled->skip), sk_hi); in cxl_decoder_commit()
684 writel(lower_32_bits(cxled->skip), sk_lo); in cxl_decoder_commit()
690 port->commit_end++; in cxl_decoder_commit()
691 rc = cxld_await_commit(hdm, cxld->id); in cxl_decoder_commit()
693 dev_dbg(&port->dev, "%s: error %d committing decoder\n", in cxl_decoder_commit()
694 dev_name(&cxld->dev), rc); in cxl_decoder_commit()
695 cxld->reset(cxld); in cxl_decoder_commit()
698 cxld->flags |= CXL_DECODER_F_ENABLE; in cxl_decoder_commit()
705 struct cxl_port *port = to_cxl_port(cxld->dev.parent); in cxl_decoder_reset()
706 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); in cxl_decoder_reset()
707 void __iomem *hdm = cxlhdm->regs.hdm_decoder; in cxl_decoder_reset()
708 int id = cxld->id; in cxl_decoder_reset()
711 if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) in cxl_decoder_reset()
714 if (port->commit_end != id) { in cxl_decoder_reset()
715 dev_dbg(&port->dev, in cxl_decoder_reset()
717 dev_name(&cxld->dev), port->id, port->commit_end); in cxl_decoder_reset()
718 return -EBUSY; in cxl_decoder_reset()
732 port->commit_end--; in cxl_decoder_reset()
733 cxld->flags &= ~CXL_DECODER_F_ENABLE; in cxl_decoder_reset()
736 if (is_endpoint_decoder(&cxld->dev)) { in cxl_decoder_reset()
739 cxled = to_cxl_endpoint_decoder(&cxld->dev); in cxl_decoder_reset()
740 cxled->state = CXL_DECODER_STATE_MANUAL; in cxl_decoder_reset()
755 return -EOPNOTSUPP; in cxl_setup_hdm_decoder_from_dvsec()
757 cxled = to_cxl_endpoint_decoder(&cxld->dev); in cxl_setup_hdm_decoder_from_dvsec()
758 len = range_len(&info->dvsec_range[which]); in cxl_setup_hdm_decoder_from_dvsec()
760 return -ENOENT; in cxl_setup_hdm_decoder_from_dvsec()
762 cxld->target_type = CXL_DECODER_HOSTONLYMEM; in cxl_setup_hdm_decoder_from_dvsec()
763 cxld->commit = NULL; in cxl_setup_hdm_decoder_from_dvsec()
764 cxld->reset = NULL; in cxl_setup_hdm_decoder_from_dvsec()
765 cxld->hpa_range = info->dvsec_range[which]; in cxl_setup_hdm_decoder_from_dvsec()
771 cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK; in cxl_setup_hdm_decoder_from_dvsec()
772 port->commit_end = cxld->id; in cxl_setup_hdm_decoder_from_dvsec()
776 dev_err(&port->dev, in cxl_setup_hdm_decoder_from_dvsec()
777 "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)", in cxl_setup_hdm_decoder_from_dvsec()
778 port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc); in cxl_setup_hdm_decoder_from_dvsec()
782 cxled->state = CXL_DECODER_STATE_AUTO; in cxl_setup_hdm_decoder_from_dvsec()
814 cxld->commit = cxl_decoder_commit; in init_hdm_decoder()
815 cxld->reset = cxl_decoder_reset; in init_hdm_decoder()
820 dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n", in init_hdm_decoder()
821 port->id, cxld->id); in init_hdm_decoder()
822 return -ENXIO; in init_hdm_decoder()
826 cxled = to_cxl_endpoint_decoder(&cxld->dev); in init_hdm_decoder()
827 cxld->hpa_range = (struct range) { in init_hdm_decoder()
829 .end = base + size - 1, in init_hdm_decoder()
834 cxld->flags |= CXL_DECODER_F_ENABLE; in init_hdm_decoder()
836 cxld->flags |= CXL_DECODER_F_LOCK; in init_hdm_decoder()
838 cxld->target_type = CXL_DECODER_HOSTONLYMEM; in init_hdm_decoder()
840 cxld->target_type = CXL_DECODER_DEVMEM; in init_hdm_decoder()
843 if (cxld->id != cxl_num_decoders_committed(port)) { in init_hdm_decoder()
844 dev_warn(&port->dev, in init_hdm_decoder()
846 port->id, cxld->id); in init_hdm_decoder()
847 return -ENXIO; in init_hdm_decoder()
851 dev_warn(&port->dev, in init_hdm_decoder()
853 port->id, cxld->id); in init_hdm_decoder()
854 return -ENXIO; in init_hdm_decoder()
856 port->commit_end = cxld->id; in init_hdm_decoder()
860 struct cxl_dev_state *cxlds = cxlmd->cxlds; in init_hdm_decoder()
866 if (cxlds->type == CXL_DEVTYPE_CLASSMEM) in init_hdm_decoder()
867 cxld->target_type = CXL_DECODER_HOSTONLYMEM; in init_hdm_decoder()
869 cxld->target_type = CXL_DECODER_DEVMEM; in init_hdm_decoder()
872 cxld->target_type = CXL_DECODER_HOSTONLYMEM; in init_hdm_decoder()
876 cxld->target_type == CXL_DECODER_HOSTONLYMEM) { in init_hdm_decoder()
882 &cxld->interleave_ways); in init_hdm_decoder()
884 dev_warn(&port->dev, in init_hdm_decoder()
886 port->id, cxld->id, ctrl); in init_hdm_decoder()
890 &cxld->interleave_granularity); in init_hdm_decoder()
894 dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n", in init_hdm_decoder()
895 port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end, in init_hdm_decoder()
896 cxld->interleave_ways, cxld->interleave_granularity); in init_hdm_decoder()
902 for (i = 0; i < cxld->interleave_ways; i++) in init_hdm_decoder()
911 dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder); in init_hdm_decoder()
913 dev_err(&port->dev, in init_hdm_decoder()
915 port->id, cxld->id, size, cxld->interleave_ways); in init_hdm_decoder()
916 return -ENXIO; in init_hdm_decoder()
923 dev_err(&port->dev, in init_hdm_decoder()
924 "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)", in init_hdm_decoder()
925 port->id, cxld->id, *dpa_base, in init_hdm_decoder()
926 *dpa_base + dpa_size + skip - 1, rc); in init_hdm_decoder()
931 cxled->state = CXL_DECODER_STATE_AUTO; in init_hdm_decoder()
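div_u64_rem() splits the committed HPA size evenly across the interleave ways to get each endpoint's DPA contribution; a non-zero remainder is rejected with -ENXIO. Worked numbers in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t size = 4ull << 30;	/* 4G committed HPA range */
	int ways = 4;
	uint64_t dpa_size = size / ways;
	uint64_t remainder = size % ways;

	if (remainder)
		printf("range not divisible by %d ways, reject\n", ways);
	else
		printf("each endpoint contributes %#llx bytes of DPA\n",
		       (unsigned long long)dpa_size);
	return 0;
}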
938 void __iomem *hdm = cxlhdm->regs.hdm_decoder; in cxl_settle_decoders()
947 * be careful about trusting the "not-committed" status until the commit in cxl_settle_decoders()
952 for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) { in cxl_settle_decoders()
959 if (committed != cxlhdm->decoder_count) in cxl_settle_decoders()
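The settle pass counts decoders that already report committed and, if any are still pending, waits out one worst-case commit window before trusting a "not committed" reading (CXL 2.0 gives a decoder 10ms to commit; the bit position and delay helper below are stand-ins):

#include <stdio.h>

#define COMMITTED	(1u << 10)	/* assumed status bit */

static void settle_delay(int ms) { (void)ms; /* stand-in for msleep() */ }

int main(void)
{
	unsigned int ctrl[4] = { COMMITTED, COMMITTED, 0, COMMITTED };
	int count = 4, committed = 0;

	for (int i = 0; i < count; i++)
		if (ctrl[i] & COMMITTED)
			committed++;

	if (committed != count)
		settle_delay(10);	/* only now trust "not committed" */
	printf("%d of %d decoders committed\n", committed, count);
	return 0;
}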
964 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
971 void __iomem *hdm = cxlhdm->regs.hdm_decoder; in devm_cxl_enumerate_decoders()
972 struct cxl_port *port = cxlhdm->port; in devm_cxl_enumerate_decoders()
978 for (i = 0; i < cxlhdm->decoder_count; i++) { in devm_cxl_enumerate_decoders()
980 int rc, target_count = cxlhdm->target_count; in devm_cxl_enumerate_decoders()
988 dev_warn(&port->dev, in devm_cxl_enumerate_decoders()
990 port->id, i); in devm_cxl_enumerate_decoders()
993 cxld = &cxled->cxld; in devm_cxl_enumerate_decoders()
999 dev_warn(&port->dev, in devm_cxl_enumerate_decoders()
1001 port->id, i); in devm_cxl_enumerate_decoders()
1004 cxld = &cxlsd->cxld; in devm_cxl_enumerate_decoders()
1010 dev_warn(&port->dev, in devm_cxl_enumerate_decoders()
1012 port->id, i); in devm_cxl_enumerate_decoders()
1013 put_device(&cxld->dev); in devm_cxl_enumerate_decoders()
1018 dev_warn(&port->dev, in devm_cxl_enumerate_decoders()
1019 "Failed to add decoder%d.%d\n", port->id, i); in devm_cxl_enumerate_decoders()