Lines Matching +full:port +full:- +full:id

1 // SPDX-License-Identifier: GPL-2.0-only
15 * instances per CXL port and per CXL endpoint. Define common helpers
21 static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, in add_hdm_decoder() argument
28 put_device(&cxld->dev); in add_hdm_decoder()
29 dev_err(&port->dev, "Failed to add decoder\n"); in add_hdm_decoder()
33 rc = cxl_decoder_autoremove(&port->dev, cxld); in add_hdm_decoder()
37 dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev)); in add_hdm_decoder()
44 * single-ported host bridges need not publish a decoder capability when a
49 int devm_cxl_add_passthrough_decoder(struct cxl_port *port) in devm_cxl_add_passthrough_decoder() argument
55 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); in devm_cxl_add_passthrough_decoder()
61 cxlhdm->interleave_mask = ~0U; in devm_cxl_add_passthrough_decoder()
62 cxlhdm->iw_cap_mask = ~0UL; in devm_cxl_add_passthrough_decoder()
64 cxlsd = cxl_switch_decoder_alloc(port, 1); in devm_cxl_add_passthrough_decoder()
68 device_lock_assert(&port->dev); in devm_cxl_add_passthrough_decoder()
70 xa_for_each(&port->dports, index, dport) in devm_cxl_add_passthrough_decoder()
72 single_port_map[0] = dport->port_id; in devm_cxl_add_passthrough_decoder()
74 return add_hdm_decoder(port, &cxlsd->cxld, single_port_map); in devm_cxl_add_passthrough_decoder()
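devm_cxl_add_passthrough_decoder() is the fallback for a port that exposes exactly one downstream port and no HDM decoder capability (per the comment fragment above). A rough sketch of how a switch-port probe path might use it; the dport-counting step and the error handling are illustrative assumptions, not taken from this listing:

static int example_switch_port_probe(struct cxl_port *port)
{
	struct cxl_hdm *cxlhdm;
	int nr_dports;

	/* assumed helper that registers and counts downstream ports */
	nr_dports = devm_cxl_port_enumerate_dports(port);
	if (nr_dports < 0)
		return nr_dports;

	cxlhdm = devm_cxl_setup_hdm(port, NULL);
	if (!IS_ERR(cxlhdm))
		return devm_cxl_enumerate_decoders(cxlhdm, NULL);

	/* no decoder capability published: a single-dport switch passes through */
	if (nr_dports == 1)
		return devm_cxl_add_passthrough_decoder(port);

	return PTR_ERR(cxlhdm);
}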
82 hdm_cap = readl(cxlhdm->regs.hdm_decoder + CXL_HDM_DECODER_CAP_OFFSET); in parse_hdm_decoder_caps()
83 cxlhdm->decoder_count = cxl_hdm_decoder_count(hdm_cap); in parse_hdm_decoder_caps()
84 cxlhdm->target_count = in parse_hdm_decoder_caps()
87 cxlhdm->interleave_mask |= GENMASK(11, 8); in parse_hdm_decoder_caps()
89 cxlhdm->interleave_mask |= GENMASK(14, 12); in parse_hdm_decoder_caps()
90 cxlhdm->iw_cap_mask = BIT(1) | BIT(2) | BIT(4) | BIT(8); in parse_hdm_decoder_caps()
92 cxlhdm->iw_cap_mask |= BIT(3) | BIT(6) | BIT(12); in parse_hdm_decoder_caps()
94 cxlhdm->iw_cap_mask |= BIT(16); in parse_hdm_decoder_caps()
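parse_hdm_decoder_caps() records the supported interleave-ways values as individual bits in iw_cap_mask, i.e. BIT(n) set means n-way interleave is supported (1/2/4/8 as the baseline, 3/6/12 and 16 when the corresponding capability bits are present). A hypothetical helper, not part of this file, that consumes the mask could look like:

static bool example_iw_supported(const struct cxl_hdm *cxlhdm, unsigned int ways)
{
	if (ways >= BITS_PER_LONG)
		return false;
	return cxlhdm->iw_cap_mask & BIT(ways);
}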
107 cxlhdm = dev_get_drvdata(&info->port->dev); in should_emulate_decoders()
108 hdm = cxlhdm->regs.hdm_decoder; in should_emulate_decoders()
117 if (!info->mem_enabled) in should_emulate_decoders()
124 for (i = 0; i < cxlhdm->decoder_count; i++) { in should_emulate_decoders()
126 dev_dbg(&info->port->dev, in should_emulate_decoders()
128 info->port->id, i, in should_emulate_decoders()
142 * devm_cxl_setup_hdm - map HDM decoder component registers
143 * @port: cxl_port to map
146 struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, in devm_cxl_setup_hdm() argument
149 struct cxl_register_map *reg_map = &port->reg_map; in devm_cxl_setup_hdm()
150 struct device *dev = &port->dev; in devm_cxl_setup_hdm()
156 return ERR_PTR(-ENOMEM); in devm_cxl_setup_hdm()
157 cxlhdm->port = port; in devm_cxl_setup_hdm()
161 if (reg_map->resource == CXL_RESOURCE_NONE) { in devm_cxl_setup_hdm()
162 if (!info || !info->mem_enabled) { in devm_cxl_setup_hdm()
164 return ERR_PTR(-ENXIO); in devm_cxl_setup_hdm()
167 cxlhdm->decoder_count = info->ranges; in devm_cxl_setup_hdm()
171 if (!reg_map->component_map.hdm_decoder.valid) { in devm_cxl_setup_hdm()
172 dev_dbg(&port->dev, "HDM decoder registers not implemented\n"); in devm_cxl_setup_hdm()
174 return ERR_PTR(-ENODEV); in devm_cxl_setup_hdm()
177 rc = cxl_map_component_regs(reg_map, &cxlhdm->regs, in devm_cxl_setup_hdm()
185 if (cxlhdm->decoder_count == 0) { in devm_cxl_setup_hdm()
187 return ERR_PTR(-ENXIO); in devm_cxl_setup_hdm()
195 dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges, in devm_cxl_setup_hdm()
196 info->ranges > 1 ? "s" : ""); in devm_cxl_setup_hdm()
197 cxlhdm->decoder_count = info->ranges; in devm_cxl_setup_hdm()
206 unsigned long long start = r->start, end = r->end; in __cxl_dpa_debug()
208 seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end, in __cxl_dpa_debug()
209 r->name); in __cxl_dpa_debug()
217 for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) { in cxl_dpa_debug()
219 for (p2 = p1->child; p2; p2 = p2->sibling) in cxl_dpa_debug()
225 /* See request_skip() kernel-doc */
231 const resource_size_t skip_end = skip_base + skip_len - 1; in __adjust_skip()
233 for (int i = 0; i < cxlds->nr_partitions; i++) { in __adjust_skip()
234 const struct resource *part_res = &cxlds->part[i].res; in __adjust_skip()
237 adjust_start = max(skip_base, part_res->start); in __adjust_skip()
238 adjust_end = min(skip_end, part_res->end); in __adjust_skip()
243 size = adjust_end - adjust_start + 1; in __adjust_skip()
246 __release_region(&cxlds->dpa_res, adjust_start, size); in __adjust_skip()
247 else if (!__request_region(&cxlds->dpa_res, adjust_start, size, in __adjust_skip()
249 return adjust_start - skip_base; in __adjust_skip()
258 * port ->remove() callback (like an endpoint decoder sysfs attribute)
263 struct cxl_port *port = cxled_to_port(cxled); in __cxl_dpa_release() local
264 struct cxl_dev_state *cxlds = cxlmd->cxlds; in __cxl_dpa_release()
265 struct resource *res = cxled->dpa_res; in __cxl_dpa_release()
271 skip_start = res->start - cxled->skip; in __cxl_dpa_release()
272 __release_region(&cxlds->dpa_res, res->start, resource_size(res)); in __cxl_dpa_release()
273 if (cxled->skip) in __cxl_dpa_release()
274 release_skip(cxlds, skip_start, cxled->skip); in __cxl_dpa_release()
275 cxled->skip = 0; in __cxl_dpa_release()
276 cxled->dpa_res = NULL; in __cxl_dpa_release()
277 put_device(&cxled->cxld.dev); in __cxl_dpa_release()
278 port->hdm_end--; in __cxl_dpa_release()
288 * Must be called from context that will not race port device
293 struct cxl_port *port = cxled_to_port(cxled); in devm_cxl_dpa_release() local
296 devm_remove_action(&port->dev, cxl_dpa_release, cxled); in devm_cxl_dpa_release()
301 * request_skip() - Track DPA 'skip' in @cxlds->dpa_res resource tree
307 * DPA 'skip' arises from out-of-sequence DPA allocation events relative
318 * de-allocated in reverse order (see cxl_dpa_free(), or natural devm
319 * unwind order from forced in-order allocation).
321 * If @cxlds->nr_partitions was guaranteed to be <= 2 then the 'skip'
323 * @cxlds->nr_partitions may be > 2 it results in cases where the 'skip'
325 * all of partition[N-1]" to support allocating from partition[N]. That
327 * within @cxlds->dpa_res whereby 'skip' requests need to be divided by
330 * @cxlds->dpa_res.
338 dev_name(&cxled->cxld.dev)); in request_skip()
343 dev_dbg(cxlds->dev, in request_skip()
345 dev_name(&cxled->cxld.dev), &skip_base, &skip_len, &skipped); in request_skip()
349 return -EBUSY; in request_skip()
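The request_skip() kernel-doc above is the reason a single 'skip' span may need to be split per partition. A self-contained toy sketch (plain C, made-up addresses) that mirrors the max()/min() clamping visible in __adjust_skip() earlier in this listing:

#include <stdio.h>

struct toy_part { unsigned long long start, end; };

static void toy_split_skip(const struct toy_part *parts, int nr,
			   unsigned long long skip_base,
			   unsigned long long skip_end)
{
	for (int i = 0; i < nr; i++) {
		/* clamp the skip span to this partition, as __adjust_skip() does */
		unsigned long long s = skip_base > parts[i].start ? skip_base : parts[i].start;
		unsigned long long e = skip_end < parts[i].end ? skip_end : parts[i].end;

		if (s <= e)
			printf("partition[%d] skip: %#llx-%#llx\n", i, s, e);
	}
}

int main(void)
{
	/* two partitions: ram [0x0-0xfff], pmem [0x1000-0x1fff] */
	struct toy_part parts[] = { { 0x0, 0xfff }, { 0x1000, 0x1fff } };

	/* allocate at 0x1800 after skipping everything from 0x800 onward */
	toy_split_skip(parts, 2, 0x800, 0x17ff);
	return 0;
}

With these values the skip is reported as [0x800, 0xfff] against partition 0 and [0x1000, 0x17ff] against partition 1, which is the per-partition split __adjust_skip() applies to @cxlds->dpa_res.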
357 struct cxl_port *port = cxled_to_port(cxled); in __cxl_dpa_reserve() local
358 struct cxl_dev_state *cxlds = cxlmd->cxlds; in __cxl_dpa_reserve()
359 struct device *dev = &port->dev; in __cxl_dpa_reserve()
367 port->id, cxled->cxld.id); in __cxl_dpa_reserve()
368 return -EINVAL; in __cxl_dpa_reserve()
371 if (cxled->dpa_res) { in __cxl_dpa_reserve()
373 port->id, cxled->cxld.id, cxled->dpa_res); in __cxl_dpa_reserve()
374 return -EBUSY; in __cxl_dpa_reserve()
377 if (port->hdm_end + 1 != cxled->cxld.id) { in __cxl_dpa_reserve()
384 dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id, in __cxl_dpa_reserve()
385 cxled->cxld.id, port->id, port->hdm_end + 1); in __cxl_dpa_reserve()
386 return -EBUSY; in __cxl_dpa_reserve()
390 rc = request_skip(cxlds, cxled, base - skipped, skipped); in __cxl_dpa_reserve()
394 res = __request_region(&cxlds->dpa_res, base, len, in __cxl_dpa_reserve()
395 dev_name(&cxled->cxld.dev), 0); in __cxl_dpa_reserve()
398 port->id, cxled->cxld.id); in __cxl_dpa_reserve()
400 release_skip(cxlds, base - skipped, skipped); in __cxl_dpa_reserve()
401 return -EBUSY; in __cxl_dpa_reserve()
403 cxled->dpa_res = res; in __cxl_dpa_reserve()
404 cxled->skip = skipped; in __cxl_dpa_reserve()
407 * When allocating new capacity, ->part is already set, when in __cxl_dpa_reserve()
408 * discovering decoder settings at initial enumeration, ->part in __cxl_dpa_reserve()
411 if (cxled->part < 0) in __cxl_dpa_reserve()
412 for (int i = 0; i < cxlds->nr_partitions; i++) in __cxl_dpa_reserve()
413 if (resource_contains(&cxlds->part[i].res, res)) { in __cxl_dpa_reserve()
414 cxled->part = i; in __cxl_dpa_reserve()
418 if (cxled->part < 0) in __cxl_dpa_reserve()
420 port->id, cxled->cxld.id, res); in __cxl_dpa_reserve()
422 port->hdm_end++; in __cxl_dpa_reserve()
423 get_device(&cxled->cxld.dev); in __cxl_dpa_reserve()
436 .end = start + size - 1, in add_dpa_res()
440 dev_dbg(dev, "DPA(%s): no capacity\n", res->name); in add_dpa_res()
445 dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name, in add_dpa_res()
450 dev_dbg(dev, "DPA(%s): %pr\n", res->name, res); in add_dpa_res()
470 struct device *dev = cxlds->dev; in cxl_dpa_setup()
474 if (cxlds->nr_partitions) in cxl_dpa_setup()
475 return -EBUSY; in cxl_dpa_setup()
477 if (!info->size || !info->nr_partitions) { in cxl_dpa_setup()
478 cxlds->dpa_res = DEFINE_RES_MEM(0, 0); in cxl_dpa_setup()
479 cxlds->nr_partitions = 0; in cxl_dpa_setup()
483 cxlds->dpa_res = DEFINE_RES_MEM(0, info->size); in cxl_dpa_setup()
485 for (int i = 0; i < info->nr_partitions; i++) { in cxl_dpa_setup()
486 const struct cxl_dpa_part_info *part = &info->part[i]; in cxl_dpa_setup()
489 cxlds->part[i].perf.qos_class = CXL_QOS_CLASS_INVALID; in cxl_dpa_setup()
490 cxlds->part[i].mode = part->mode; in cxl_dpa_setup()
494 const struct cxl_dpa_part_info *prev = &info->part[i - 1]; in cxl_dpa_setup()
496 if (prev->range.end + 1 != part->range.start) in cxl_dpa_setup()
497 return -EINVAL; in cxl_dpa_setup()
499 rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->part[i].res, in cxl_dpa_setup()
500 part->range.start, range_len(&part->range), in cxl_dpa_setup()
501 cxl_mode_name(part->mode)); in cxl_dpa_setup()
504 cxlds->nr_partitions++; in cxl_dpa_setup()
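cxl_dpa_setup() requires the partitions in @info to tile the device's DPA space contiguously and in order (note the prev->range.end + 1 check above). A hedged sketch of a caller-side description for a device with 1GB of volatile plus 1GB of persistent capacity; the struct cxl_dpa_info type name, the CXL_PARTMODE_* mode names, and the cxl_dpa_setup() signature are assumed from context rather than shown in this listing:

	struct cxl_dpa_info info = {
		.size = SZ_2G,
		.nr_partitions = 2,
		.part = {
			/* ranges must be contiguous: prev end + 1 == next start */
			{ .range = { .start = 0,     .end = SZ_1G - 1 }, .mode = CXL_PARTMODE_RAM  },
			{ .range = { .start = SZ_1G, .end = SZ_2G - 1 }, .mode = CXL_PARTMODE_PMEM },
		},
	};

	rc = cxl_dpa_setup(cxlds, &info);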
515 struct cxl_port *port = cxled_to_port(cxled); in devm_cxl_dpa_reserve() local
525 return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled); in devm_cxl_dpa_reserve()
532 if (cxled->dpa_res) in cxl_dpa_size()
533 return resource_size(cxled->dpa_res); in cxl_dpa_size()
540 resource_size_t base = -1; in cxl_dpa_resource_start()
543 if (cxled->dpa_res) in cxl_dpa_resource_start()
544 base = cxled->dpa_res->start; in cxl_dpa_resource_start()
551 struct cxl_port *port = cxled_to_port(cxled); in cxl_dpa_free() local
552 struct device *dev = &cxled->cxld.dev; in cxl_dpa_free()
555 if (!cxled->dpa_res) in cxl_dpa_free()
557 if (cxled->cxld.region) { in cxl_dpa_free()
559 dev_name(&cxled->cxld.region->dev)); in cxl_dpa_free()
560 return -EBUSY; in cxl_dpa_free()
562 if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { in cxl_dpa_free()
564 return -EBUSY; in cxl_dpa_free()
566 if (cxled->cxld.id != port->hdm_end) { in cxl_dpa_free()
567 dev_dbg(dev, "expected decoder%d.%d\n", port->id, in cxl_dpa_free()
568 port->hdm_end); in cxl_dpa_free()
569 return -EBUSY; in cxl_dpa_free()
580 struct cxl_dev_state *cxlds = cxlmd->cxlds; in cxl_dpa_set_part()
581 struct device *dev = &cxled->cxld.dev; in cxl_dpa_set_part()
585 if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) in cxl_dpa_set_part()
586 return -EBUSY; in cxl_dpa_set_part()
588 for (part = 0; part < cxlds->nr_partitions; part++) in cxl_dpa_set_part()
589 if (cxlds->part[part].mode == mode) in cxl_dpa_set_part()
592 if (part >= cxlds->nr_partitions) { in cxl_dpa_set_part()
594 return -EINVAL; in cxl_dpa_set_part()
597 if (!resource_size(&cxlds->part[part].res)) { in cxl_dpa_set_part()
599 return -ENXIO; in cxl_dpa_set_part()
602 cxled->part = part; in cxl_dpa_set_part()
609 struct cxl_dev_state *cxlds = cxlmd->cxlds; in __cxl_dpa_alloc()
610 struct device *dev = &cxled->cxld.dev; in __cxl_dpa_alloc()
617 if (cxled->cxld.region) { in __cxl_dpa_alloc()
619 dev_name(&cxled->cxld.region->dev)); in __cxl_dpa_alloc()
620 return -EBUSY; in __cxl_dpa_alloc()
623 if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { in __cxl_dpa_alloc()
625 return -EBUSY; in __cxl_dpa_alloc()
628 part = cxled->part; in __cxl_dpa_alloc()
631 return -EBUSY; in __cxl_dpa_alloc()
634 res = &cxlds->part[part].res; in __cxl_dpa_alloc()
635 for (p = res->child, last = NULL; p; p = p->sibling) in __cxl_dpa_alloc()
638 start = last->end + 1; in __cxl_dpa_alloc()
640 start = res->start; in __cxl_dpa_alloc()
651 for (int i = part; i; i--) { in __cxl_dpa_alloc()
652 prev = &cxlds->part[i - 1].res; in __cxl_dpa_alloc()
653 for (p = prev->child, last = NULL; p; p = p->sibling) in __cxl_dpa_alloc()
656 skip_start = last->end + 1; in __cxl_dpa_alloc()
659 skip_start = prev->start; in __cxl_dpa_alloc()
662 avail = res->end - start + 1; in __cxl_dpa_alloc()
666 skip = res->start - skip_start; in __cxl_dpa_alloc()
670 res->name, &avail); in __cxl_dpa_alloc()
671 return -ENOSPC; in __cxl_dpa_alloc()
679 struct cxl_port *port = cxled_to_port(cxled); in cxl_dpa_alloc() local
686 return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled); in cxl_dpa_alloc()
698 if (WARN_ONCE(ways_to_eiw(cxld->interleave_ways, &eiw), in cxld_set_interleave()
699 "invalid interleave_ways: %d\n", cxld->interleave_ways)) in cxld_set_interleave()
701 if (WARN_ONCE(granularity_to_eig(cxld->interleave_granularity, &eig), in cxld_set_interleave()
703 cxld->interleave_granularity)) in cxld_set_interleave()
714 !!(cxld->target_type == CXL_DECODER_HOSTONLYMEM), in cxld_set_type()
720 struct cxl_dport **t = &cxlsd->target[0]; in cxlsd_set_targets()
721 int ways = cxlsd->cxld.interleave_ways; in cxlsd_set_targets()
723 *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id); in cxlsd_set_targets()
725 *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id); in cxlsd_set_targets()
727 *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id); in cxlsd_set_targets()
729 *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id); in cxlsd_set_targets()
731 *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id); in cxlsd_set_targets()
733 *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id); in cxlsd_set_targets()
735 *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id); in cxlsd_set_targets()
737 *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id); in cxlsd_set_targets()
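cxlsd_set_targets() packs one 8-bit downstream port id per interleave way into the 64-bit target list value. A worked example with made-up ids, for a 2-way switch decoder whose targets have port_id 4 (way 0) and 7 (way 1):

	/* FIELD_PREP(GENMASK(7, 0), 4) | FIELD_PREP(GENMASK(15, 8), 7) */
	*tgt = 0x0704ULL;	/* bits [7:0] = 4, bits [15:8] = 7, remaining ways zero */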
746 static int cxld_await_commit(void __iomem *hdm, int id) in cxld_await_commit() argument
752 ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); in cxld_await_commit()
755 writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); in cxld_await_commit()
756 return -EIO; in cxld_await_commit()
763 return -ETIMEDOUT; in cxld_await_commit()
768 struct cxl_port *port = to_cxl_port(cxld->dev.parent); in cxl_decoder_commit() local
769 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); in cxl_decoder_commit()
770 void __iomem *hdm = cxlhdm->regs.hdm_decoder; in cxl_decoder_commit()
771 int id = cxld->id, rc; in cxl_decoder_commit() local
775 if (cxld->flags & CXL_DECODER_F_ENABLE) in cxl_decoder_commit()
778 if (cxl_num_decoders_committed(port) != id) { in cxl_decoder_commit()
779 dev_dbg(&port->dev, in cxl_decoder_commit()
781 dev_name(&cxld->dev), port->id, in cxl_decoder_commit()
782 cxl_num_decoders_committed(port)); in cxl_decoder_commit()
783 return -EBUSY; in cxl_decoder_commit()
788 * support the sanitize operation, make sure sanitize is not in-flight. in cxl_decoder_commit()
790 if (is_endpoint_decoder(&cxld->dev)) { in cxl_decoder_commit()
792 to_cxl_endpoint_decoder(&cxld->dev); in cxl_decoder_commit()
795 to_cxl_memdev_state(cxlmd->cxlds); in cxl_decoder_commit()
797 if (mds && mds->security.sanitize_active) { in cxl_decoder_commit()
798 dev_dbg(&cxlmd->dev, in cxl_decoder_commit()
800 dev_name(&cxld->dev)); in cxl_decoder_commit()
801 return -EBUSY; in cxl_decoder_commit()
807 ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id)); in cxl_decoder_commit()
810 base = cxld->hpa_range.start; in cxl_decoder_commit()
811 size = range_len(&cxld->hpa_range); in cxl_decoder_commit()
813 writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id)); in cxl_decoder_commit()
814 writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); in cxl_decoder_commit()
815 writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id)); in cxl_decoder_commit()
816 writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id)); in cxl_decoder_commit()
818 if (is_switch_decoder(&cxld->dev)) { in cxl_decoder_commit()
820 to_cxl_switch_decoder(&cxld->dev); in cxl_decoder_commit()
821 void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id); in cxl_decoder_commit()
822 void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id); in cxl_decoder_commit()
830 to_cxl_endpoint_decoder(&cxld->dev); in cxl_decoder_commit()
831 void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id); in cxl_decoder_commit()
832 void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id); in cxl_decoder_commit()
834 writel(upper_32_bits(cxled->skip), sk_hi); in cxl_decoder_commit()
835 writel(lower_32_bits(cxled->skip), sk_lo); in cxl_decoder_commit()
838 writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); in cxl_decoder_commit()
841 port->commit_end++; in cxl_decoder_commit()
842 rc = cxld_await_commit(hdm, cxld->id); in cxl_decoder_commit()
844 dev_dbg(&port->dev, "%s: error %d committing decoder\n", in cxl_decoder_commit()
845 dev_name(&cxld->dev), rc); in cxl_decoder_commit()
846 cxld->reset(cxld); in cxl_decoder_commit()
849 cxld->flags |= CXL_DECODER_F_ENABLE; in cxl_decoder_commit()
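cxl_decoder_commit() splits the 64-bit HPA base and size across the HIGH/LOW register pairs before setting the commit bit in the control register. A quick worked example with a made-up range:

	/* hpa_range = [0x1_8000_0000, 0x2_7fff_ffff] => size = 0x1_0000_0000 */
	base = 0x180000000ULL;
	size = 0x100000000ULL;
	/* upper_32_bits(base) == 0x1, lower_32_bits(base) == 0x80000000 */
	/* upper_32_bits(size) == 0x1, lower_32_bits(size) == 0x00000000 */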
856 struct cxl_port *port = to_cxl_port(dev->parent); in commit_reap() local
863 if (port->commit_end == cxld->id && in commit_reap()
864 ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) { in commit_reap()
865 port->commit_end--; in commit_reap()
866 dev_dbg(&port->dev, "reap: %s commit_end: %d\n", in commit_reap()
867 dev_name(&cxld->dev), port->commit_end); in commit_reap()
875 struct cxl_port *port = to_cxl_port(cxld->dev.parent); in cxl_port_commit_reap() local
881 * decoders that were left pinned (allocated) by out-of-order release. in cxl_port_commit_reap()
883 port->commit_end--; in cxl_port_commit_reap()
884 dev_dbg(&port->dev, "reap: %s commit_end: %d\n", dev_name(&cxld->dev), in cxl_port_commit_reap()
885 port->commit_end); in cxl_port_commit_reap()
886 device_for_each_child_reverse_from(&port->dev, &cxld->dev, NULL, in cxl_port_commit_reap()
893 struct cxl_port *port = to_cxl_port(cxld->dev.parent); in cxl_decoder_reset() local
894 struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); in cxl_decoder_reset()
895 void __iomem *hdm = cxlhdm->regs.hdm_decoder; in cxl_decoder_reset()
896 int id = cxld->id; in cxl_decoder_reset() local
899 if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) in cxl_decoder_reset()
902 if (port->commit_end == id) in cxl_decoder_reset()
905 dev_dbg(&port->dev, in cxl_decoder_reset()
907 dev_name(&cxld->dev), port->id, port->commit_end); in cxl_decoder_reset()
910 ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); in cxl_decoder_reset()
912 writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); in cxl_decoder_reset()
914 writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id)); in cxl_decoder_reset()
915 writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id)); in cxl_decoder_reset()
916 writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id)); in cxl_decoder_reset()
917 writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); in cxl_decoder_reset()
920 cxld->flags &= ~CXL_DECODER_F_ENABLE; in cxl_decoder_reset()
923 if (is_endpoint_decoder(&cxld->dev)) { in cxl_decoder_reset()
926 cxled = to_cxl_endpoint_decoder(&cxld->dev); in cxl_decoder_reset()
927 cxled->state = CXL_DECODER_STATE_MANUAL; in cxl_decoder_reset()
932 struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base, in cxl_setup_hdm_decoder_from_dvsec() argument
939 if (!is_cxl_endpoint(port)) in cxl_setup_hdm_decoder_from_dvsec()
940 return -EOPNOTSUPP; in cxl_setup_hdm_decoder_from_dvsec()
942 cxled = to_cxl_endpoint_decoder(&cxld->dev); in cxl_setup_hdm_decoder_from_dvsec()
943 len = range_len(&info->dvsec_range[which]); in cxl_setup_hdm_decoder_from_dvsec()
945 return -ENOENT; in cxl_setup_hdm_decoder_from_dvsec()
947 cxld->target_type = CXL_DECODER_HOSTONLYMEM; in cxl_setup_hdm_decoder_from_dvsec()
948 cxld->commit = NULL; in cxl_setup_hdm_decoder_from_dvsec()
949 cxld->reset = NULL; in cxl_setup_hdm_decoder_from_dvsec()
950 cxld->hpa_range = info->dvsec_range[which]; in cxl_setup_hdm_decoder_from_dvsec()
956 cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK; in cxl_setup_hdm_decoder_from_dvsec()
957 port->commit_end = cxld->id; in cxl_setup_hdm_decoder_from_dvsec()
961 dev_err(&port->dev, in cxl_setup_hdm_decoder_from_dvsec()
962 "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)", in cxl_setup_hdm_decoder_from_dvsec()
963 port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc); in cxl_setup_hdm_decoder_from_dvsec()
967 cxled->state = CXL_DECODER_STATE_AUTO; in cxl_setup_hdm_decoder_from_dvsec()
972 static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, in init_hdm_decoder() argument
988 return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base, in init_hdm_decoder()
999 cxld->commit = cxl_decoder_commit; in init_hdm_decoder()
1000 cxld->reset = cxl_decoder_reset; in init_hdm_decoder()
1005 dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n", in init_hdm_decoder()
1006 port->id, cxld->id); in init_hdm_decoder()
1007 return -ENXIO; in init_hdm_decoder()
1011 cxled = to_cxl_endpoint_decoder(&cxld->dev); in init_hdm_decoder()
1012 cxld->hpa_range = (struct range) { in init_hdm_decoder()
1014 .end = base + size - 1, in init_hdm_decoder()
1019 cxld->flags |= CXL_DECODER_F_ENABLE; in init_hdm_decoder()
1021 cxld->flags |= CXL_DECODER_F_LOCK; in init_hdm_decoder()
1023 cxld->target_type = CXL_DECODER_HOSTONLYMEM; in init_hdm_decoder()
1025 cxld->target_type = CXL_DECODER_DEVMEM; in init_hdm_decoder()
1028 if (cxld->id != cxl_num_decoders_committed(port)) { in init_hdm_decoder()
1029 dev_warn(&port->dev, in init_hdm_decoder()
1031 port->id, cxld->id); in init_hdm_decoder()
1032 return -ENXIO; in init_hdm_decoder()
1036 dev_warn(&port->dev, in init_hdm_decoder()
1038 port->id, cxld->id); in init_hdm_decoder()
1039 return -ENXIO; in init_hdm_decoder()
1041 port->commit_end = cxld->id; in init_hdm_decoder()
1045 struct cxl_dev_state *cxlds = cxlmd->cxlds; in init_hdm_decoder()
1051 if (cxlds->type == CXL_DEVTYPE_CLASSMEM) in init_hdm_decoder()
1052 cxld->target_type = CXL_DECODER_HOSTONLYMEM; in init_hdm_decoder()
1054 cxld->target_type = CXL_DECODER_DEVMEM; in init_hdm_decoder()
1057 cxld->target_type = CXL_DECODER_HOSTONLYMEM; in init_hdm_decoder()
1061 cxld->target_type == CXL_DECODER_HOSTONLYMEM) { in init_hdm_decoder()
1067 &cxld->interleave_ways); in init_hdm_decoder()
1069 dev_warn(&port->dev, in init_hdm_decoder()
1071 port->id, cxld->id, ctrl); in init_hdm_decoder()
1075 &cxld->interleave_granularity); in init_hdm_decoder()
1077 dev_warn(&port->dev, in init_hdm_decoder()
1079 port->id, cxld->id, ctrl); in init_hdm_decoder()
1083 dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n", in init_hdm_decoder()
1084 port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end, in init_hdm_decoder()
1085 cxld->interleave_ways, cxld->interleave_granularity); in init_hdm_decoder()
1091 for (i = 0; i < cxld->interleave_ways; i++) in init_hdm_decoder()
1100 dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder); in init_hdm_decoder()
1102 dev_err(&port->dev, in init_hdm_decoder()
1104 port->id, cxld->id, size, cxld->interleave_ways); in init_hdm_decoder()
1105 return -ENXIO; in init_hdm_decoder()
1112 dev_err(&port->dev, in init_hdm_decoder()
1113 "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)", in init_hdm_decoder()
1114 port->id, cxld->id, *dpa_base, in init_hdm_decoder()
1115 *dpa_base + dpa_size + skip - 1, rc); in init_hdm_decoder()
1120 cxled->state = CXL_DECODER_STATE_AUTO; in init_hdm_decoder()
1127 void __iomem *hdm = cxlhdm->regs.hdm_decoder; in cxl_settle_decoders()
1136 * be careful about trusting the "not-committed" status until the commit in cxl_settle_decoders()
1141 for (i = 0, committed = 0; i < cxlhdm->decoder_count; i++) { in cxl_settle_decoders()
1148 if (committed != cxlhdm->decoder_count) in cxl_settle_decoders()
1153 * devm_cxl_enumerate_decoders - add decoder objects per HDM register set
1160 void __iomem *hdm = cxlhdm->regs.hdm_decoder; in devm_cxl_enumerate_decoders()
1161 struct cxl_port *port = cxlhdm->port; in devm_cxl_enumerate_decoders() local
1167 for (i = 0; i < cxlhdm->decoder_count; i++) { in devm_cxl_enumerate_decoders()
1169 int rc, target_count = cxlhdm->target_count; in devm_cxl_enumerate_decoders()
1172 if (is_cxl_endpoint(port)) { in devm_cxl_enumerate_decoders()
1175 cxled = cxl_endpoint_decoder_alloc(port); in devm_cxl_enumerate_decoders()
1177 dev_warn(&port->dev, in devm_cxl_enumerate_decoders()
1179 port->id, i); in devm_cxl_enumerate_decoders()
1182 cxld = &cxled->cxld; in devm_cxl_enumerate_decoders()
1186 cxlsd = cxl_switch_decoder_alloc(port, target_count); in devm_cxl_enumerate_decoders()
1188 dev_warn(&port->dev, in devm_cxl_enumerate_decoders()
1190 port->id, i); in devm_cxl_enumerate_decoders()
1193 cxld = &cxlsd->cxld; in devm_cxl_enumerate_decoders()
1196 rc = init_hdm_decoder(port, cxld, target_map, hdm, i, in devm_cxl_enumerate_decoders()
1199 dev_warn(&port->dev, in devm_cxl_enumerate_decoders()
1201 port->id, i); in devm_cxl_enumerate_decoders()
1202 put_device(&cxld->dev); in devm_cxl_enumerate_decoders()
1205 rc = add_hdm_decoder(port, cxld, target_map); in devm_cxl_enumerate_decoders()
1207 dev_warn(&port->dev, in devm_cxl_enumerate_decoders()
1208 "Failed to add decoder%d.%d\n", port->id, i); in devm_cxl_enumerate_decoders()