Lines Matching +full:add +full:- +full:pmem
1 // SPDX-License-Identifier: GPL-2.0-only
27 * CXL Specification 3.0 Table 9-22
37 for (i = 0; i < cximsd->nr_maps; i++) in cxl_xor_calc_n()
38 n |= (hweight64(hpa & cximsd->xormaps[i]) & 1) << i; in cxl_xor_calc_n()
40 /* IW: 3,6,12 add a modulo calculation to 'n' */ in cxl_xor_calc_n()
43 return -1; in cxl_xor_calc_n()
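
The fragment above, together with cxl_hb_xor() below, sketches the XOR interleave math: bit i of the target index n is the parity (hweight64 & 1) of the HPA bits selected by XORMAP[i], and 3-, 6- and 12-way interleaves fold an extra modulo into n. cxl_hb_xor() then feeds res->start + pos * ig as the HPA and indexes cxlsd->target[n]. The following is a minimal userspace sketch of that parity step; the modulo handling is a simplified stand-in (the listing only hints at it), and xor_calc_n()/main() are made-up names, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* bit i of 'n' is the parity of the HPA bits selected by xormaps[i] */
static int xor_calc_n(uint64_t hpa, const uint64_t *xormaps, int nr_maps,
                      int ways)
{
        int n = 0;

        for (int i = 0; i < nr_maps; i++)
                n |= (__builtin_popcountll(hpa & xormaps[i]) & 1) << i;

        /*
         * IW 3, 6, 12: the factor of three comes from a modulo of the
         * HPA; a plain modulo-3 is used here purely for illustration.
         */
        if (ways % 3 == 0)
                n |= (int)(hpa % 3) << nr_maps;

        return n;
}

int main(void)
{
        uint64_t maps[] = { 0x100 };    /* hypothetical single map: HPA bit 8 */

        /* 2-way XOR interleave: 256-byte blocks alternate between targets */
        for (uint64_t hpa = 0; hpa < 0x800; hpa += 0x100)
                printf("hpa %#llx -> target %d\n",
                       (unsigned long long)hpa,
                       xor_calc_n(hpa, maps, 1, 2));
        return 0;
}

Built with gcc or clang, the output alternates 0/1 as HPA bit 8 toggles, which is the pattern cxl_hb_xor() relies on when it returns cxlsd->target[n].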
52 struct cxl_cxims_data *cximsd = cxlrd->platform_data; in cxl_hb_xor()
53 struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; in cxl_hb_xor()
54 struct cxl_decoder *cxld = &cxlsd->cxld; in cxl_hb_xor()
55 int ig = cxld->interleave_granularity; in cxl_hb_xor()
56 int iw = cxld->interleave_ways; in cxl_hb_xor()
60 if (dev_WARN_ONCE(&cxld->dev, in cxl_hb_xor()
61 cxld->interleave_ways != cxlsd->nr_targets, in cxl_hb_xor()
65 hpa = cxlrd->res->start + pos * ig; in cxl_hb_xor()
74 return cxlrd->cxlsd.target[n]; in cxl_hb_xor()
87 struct cxl_root_decoder *cxlrd = ctx->cxlrd; in cxl_parse_cxims()
88 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in cxl_parse_cxims()
89 struct device *dev = ctx->dev; in cxl_parse_cxims()
94 rc = eig_to_granularity(cxims->hbig, &hbig); in cxl_parse_cxims()
99 if (hbig != cxld->interleave_granularity) in cxl_parse_cxims()
103 if (is_power_of_2(cxld->interleave_ways)) in cxl_parse_cxims()
105 nr_maps = ilog2(cxld->interleave_ways); in cxl_parse_cxims()
108 nr_maps = ilog2(cxld->interleave_ways / 3); in cxl_parse_cxims()
110 if (cxims->nr_xormaps < nr_maps) { in cxl_parse_cxims()
112 cxims->nr_xormaps, nr_maps); in cxl_parse_cxims()
113 return -ENXIO; in cxl_parse_cxims()
119 return -ENOMEM; in cxl_parse_cxims()
120 cximsd->nr_maps = nr_maps; in cxl_parse_cxims()
121 memcpy(cximsd->xormaps, cxims->xormap_list, in cxl_parse_cxims()
122 nr_maps * sizeof(*cximsd->xormaps)); in cxl_parse_cxims()
123 cxlrd->platform_data = cximsd; in cxl_parse_cxims()
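
The cxl_parse_cxims() lines above derive how many XORMAP entries a CXIMS must supply: ilog2(interleave_ways) for power-of-2 ways, and ilog2(interleave_ways / 3) for 3-, 6- and 12-way configurations, failing with -ENXIO if the table provides fewer. A short standalone sketch of that rule (the helper name nr_xormaps_needed() is invented for illustration):

#include <stdio.h>

static int nr_xormaps_needed(int ways)
{
        int p2 = (ways % 3 == 0) ? ways / 3 : ways;     /* strip the factor of 3 */
        int n = 0;

        while (p2 > 1) {        /* open-coded ilog2() */
                p2 >>= 1;
                n++;
        }
        return n;
}

int main(void)
{
        int ways[] = { 2, 3, 4, 6, 8, 12, 16 };

        for (unsigned int i = 0; i < sizeof(ways) / sizeof(ways[0]); i++)
                printf("interleave ways %2d -> xormaps required %d\n",
                       ways[i], nr_xormaps_needed(ways[i]));
        return 0;
}

So a 12-way XOR interleave only needs two maps; the remaining factor of three is covered by the modulo step noted in cxl_xor_calc_n().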
152 if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO && in cxl_acpi_cfmws_verify()
153 cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_XOR) { in cxl_acpi_cfmws_verify()
155 cfmws->interleave_arithmetic); in cxl_acpi_cfmws_verify()
156 return -EINVAL; in cxl_acpi_cfmws_verify()
159 if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) { in cxl_acpi_cfmws_verify()
161 return -EINVAL; in cxl_acpi_cfmws_verify()
164 if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) { in cxl_acpi_cfmws_verify()
166 return -EINVAL; in cxl_acpi_cfmws_verify()
169 rc = eiw_to_ways(cfmws->interleave_ways, &ways); in cxl_acpi_cfmws_verify()
172 cfmws->interleave_ways); in cxl_acpi_cfmws_verify()
173 return -EINVAL; in cxl_acpi_cfmws_verify()
178 if (cfmws->header.length < expected_len) { in cxl_acpi_cfmws_verify()
180 cfmws->header.length, expected_len); in cxl_acpi_cfmws_verify()
181 return -EINVAL; in cxl_acpi_cfmws_verify()
184 if (cfmws->header.length > expected_len) in cxl_acpi_cfmws_verify()
186 cfmws->header.length, expected_len); in cxl_acpi_cfmws_verify()
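
cxl_acpi_cfmws_verify() above gates a CFMWS on a few checks: the interleave arithmetic must be modulo or XOR, the window base and size must be 256 MiB aligned, the encoded interleave ways must decode, and the header length must be at least the expected size (a longer-than-expected header is reported but not treated as an error). Below is a compressed standalone sketch of those checks; the struct and the eiw decode table are simplified assumptions rather than the ACPI/CXL definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_256M (256ULL << 20)

enum { ARITH_MODULO, ARITH_XOR };

struct cfmws_fields {                   /* hypothetical, trimmed-down CFMWS */
        uint64_t base_hpa;
        uint64_t window_size;
        uint8_t interleave_arithmetic;
        uint8_t eiw;                    /* encoded interleave ways */
};

/* assumed encoding: eiw 0..4 -> 1,2,4,8,16 ways; eiw 8..10 -> 3,6,12 ways */
static int eiw_to_ways(uint8_t eiw, unsigned int *ways)
{
        if (eiw <= 4)
                *ways = 1u << eiw;
        else if (eiw >= 8 && eiw <= 10)
                *ways = 3u << (eiw - 8);
        else
                return -1;
        return 0;
}

static bool cfmws_ok(const struct cfmws_fields *c)
{
        unsigned int ways;

        if (c->interleave_arithmetic != ARITH_MODULO &&
            c->interleave_arithmetic != ARITH_XOR)
                return false;
        if (c->base_hpa & (SZ_256M - 1))
                return false;
        if (c->window_size & (SZ_256M - 1))
                return false;
        return eiw_to_ways(c->eiw, &ways) == 0;
}

int main(void)
{
        struct cfmws_fields w = {
                .base_hpa = 4ULL << 30,         /* 4 GiB, 256 MiB aligned */
                .window_size = 1ULL << 30,
                .interleave_arithmetic = ARITH_XOR,
                .eiw = 1,                       /* 2-way */
        };

        printf("window %s\n", cfmws_ok(&w) ? "accepted" : "rejected");
        return 0;
}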
203 * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
209 * Return: number of QTG IDs returned, or -errno for errors
222 [0].integer = { ACPI_TYPE_INTEGER, coord->read_latency }, in cxl_acpi_evaluate_qtg_dsm()
223 [1].integer = { ACPI_TYPE_INTEGER, coord->write_latency }, in cxl_acpi_evaluate_qtg_dsm()
224 [2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth }, in cxl_acpi_evaluate_qtg_dsm()
225 [3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth }, in cxl_acpi_evaluate_qtg_dsm()
239 return -EINVAL; in cxl_acpi_evaluate_qtg_dsm()
243 return -ENXIO; in cxl_acpi_evaluate_qtg_dsm()
245 if (out_obj->type != ACPI_TYPE_PACKAGE) { in cxl_acpi_evaluate_qtg_dsm()
246 rc = -ENXIO; in cxl_acpi_evaluate_qtg_dsm()
251 obj = &out_obj->package.elements[0]; in cxl_acpi_evaluate_qtg_dsm()
252 if (obj->type != ACPI_TYPE_INTEGER) { in cxl_acpi_evaluate_qtg_dsm()
253 rc = -ENXIO; in cxl_acpi_evaluate_qtg_dsm()
257 max_qtg = obj->integer.value; in cxl_acpi_evaluate_qtg_dsm()
260 pkg_entries = out_obj->package.count; in cxl_acpi_evaluate_qtg_dsm()
267 obj = &out_obj->package.elements[1]; in cxl_acpi_evaluate_qtg_dsm()
268 if (obj->type != ACPI_TYPE_PACKAGE) { in cxl_acpi_evaluate_qtg_dsm()
269 rc = -ENXIO; in cxl_acpi_evaluate_qtg_dsm()
273 pkg_entries = obj->package.count; in cxl_acpi_evaluate_qtg_dsm()
278 out_buf = &obj->package.elements[i]; in cxl_acpi_evaluate_qtg_dsm()
279 if (out_buf->type != ACPI_TYPE_INTEGER) { in cxl_acpi_evaluate_qtg_dsm()
280 rc = -ENXIO; in cxl_acpi_evaluate_qtg_dsm()
284 qtg_id = out_buf->integer.value; in cxl_acpi_evaluate_qtg_dsm()
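
The _DSM evaluation above hands the platform four integers (read/write latency and bandwidth) and walks the returned package: element 0 is an integer giving the maximum supported QTG ID, and element 1 is a nested package whose integer elements are the QTG IDs themselves. A small model of that output shape follows; the struct and helper are invented, and the range check is only an illustration, not a claim about the kernel's policy.

#include <stdio.h>

struct qtg_dsm_out {                    /* modeled _DSM output, not acpi_object */
        unsigned long long max_qtg;     /* package element 0: integer */
        unsigned long long ids[4];      /* package element 1: package of integers */
        unsigned int nr_ids;
};

static int count_in_range(const struct qtg_dsm_out *out)
{
        int valid = 0;

        for (unsigned int i = 0; i < out->nr_ids; i++)
                if (out->ids[i] <= out->max_qtg)
                        valid++;
        return valid;
}

int main(void)
{
        struct qtg_dsm_out out = {
                .max_qtg = 3,
                .ids = { 0, 2, 5 },     /* 5 exceeds the advertised maximum */
                .nr_ids = 3,
        };

        printf("%d of %u QTG IDs fall within the advertised maximum\n",
               count_in_range(&out), out.nr_ids);
        return 0;
}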
302 struct device *dev = cxl_root->port.uport_dev; in cxl_acpi_qos_class()
306 return -ENODEV; in cxl_acpi_qos_class()
310 return -ENODEV; in cxl_acpi_qos_class()
323 struct cxl_port *root_port = ctx->root_port; in __cxl_parse_cfmws()
324 struct resource *cxl_res = ctx->cxl_res; in __cxl_parse_cfmws()
327 struct device *dev = ctx->dev; in __cxl_parse_cfmws()
336 dev_err(dev, "CFMWS range %#llx-%#llx not registered\n", in __cxl_parse_cfmws()
337 cfmws->base_hpa, in __cxl_parse_cfmws()
338 cfmws->base_hpa + cfmws->window_size - 1); in __cxl_parse_cfmws()
342 rc = eiw_to_ways(cfmws->interleave_ways, &ways); in __cxl_parse_cfmws()
345 rc = eig_to_granularity(cfmws->granularity, &ig); in __cxl_parse_cfmws()
349 target_map[i] = cfmws->interleave_targets[i]; in __cxl_parse_cfmws()
353 return -ENOMEM; in __cxl_parse_cfmws()
355 res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++); in __cxl_parse_cfmws()
356 if (!res->name) in __cxl_parse_cfmws()
359 res->start = cfmws->base_hpa; in __cxl_parse_cfmws()
360 res->end = cfmws->base_hpa + cfmws->window_size - 1; in __cxl_parse_cfmws()
361 res->flags = IORESOURCE_MEM; in __cxl_parse_cfmws()
363 /* add to the local resource tracking to establish a sort order */ in __cxl_parse_cfmws()
368 if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_MODULO) in __cxl_parse_cfmws()
377 cxld = &cxlrd->cxlsd.cxld; in __cxl_parse_cfmws()
378 cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions); in __cxl_parse_cfmws()
379 cxld->target_type = CXL_DECODER_HOSTONLYMEM; in __cxl_parse_cfmws()
380 cxld->hpa_range = (struct range) { in __cxl_parse_cfmws()
381 .start = res->start, in __cxl_parse_cfmws()
382 .end = res->end, in __cxl_parse_cfmws()
384 cxld->interleave_ways = ways; in __cxl_parse_cfmws()
391 cxld->interleave_granularity = ig; in __cxl_parse_cfmws()
393 if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) { in __cxl_parse_cfmws()
403 if (!cxlrd->platform_data) { in __cxl_parse_cfmws()
405 rc = -EINVAL; in __cxl_parse_cfmws()
411 cxlrd->qos_class = cfmws->qtg_id; in __cxl_parse_cfmws()
416 put_device(&cxld->dev); in __cxl_parse_cfmws()
422 kfree(res->name); in __cxl_parse_cfmws()
425 return -ENOMEM; in __cxl_parse_cfmws()
433 struct device *dev = ctx->dev; in cxl_parse_cfmws()
439 "Failed to add decode range: [%#llx - %#llx] (%d)\n", in cxl_parse_cfmws()
440 cfmws->base_hpa, in cxl_parse_cfmws()
441 cfmws->base_hpa + cfmws->window_size - 1, rc); in cxl_parse_cfmws()
443 dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n", in cxl_parse_cfmws()
444 phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa, in cxl_parse_cfmws()
445 cfmws->base_hpa + cfmws->window_size - 1); in cxl_parse_cfmws()
456 if (!acpi_pci_find_root(adev->handle)) in to_cxl_host_bridge()
478 if (ctx->base != CXL_RESOURCE_NONE) in cxl_get_chbs_iter()
483 if (ctx->uid != chbs->uid) in cxl_get_chbs_iter()
486 ctx->cxl_version = chbs->cxl_version; in cxl_get_chbs_iter()
487 if (!chbs->base) in cxl_get_chbs_iter()
490 if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 && in cxl_get_chbs_iter()
491 chbs->length != CXL_RCRB_SIZE) in cxl_get_chbs_iter()
494 ctx->base = chbs->base; in cxl_get_chbs_iter()
505 rc = acpi_evaluate_integer(hb->handle, METHOD_NAME__UID, NULL, &uid); in cxl_get_chbs()
508 return -ENOENT; in cxl_get_chbs()
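
cxl_get_chbs() evaluates the host bridge's _UID and cxl_get_chbs_iter() then picks the CHBS entry whose UID matches, records its CXL version, and only accepts the base of a CXL 1.1 entry when its length equals the RCRB size. A compact sketch of that selection rule; the struct, version values and 8 KiB RCRB size are assumptions for illustration, not the ACPI definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define RCRB_SIZE (8 * 1024)    /* assumed CXL_RCRB_SIZE */

struct chbs_entry {             /* hypothetical, trimmed-down CHBS */
        uint32_t uid;
        uint32_t cxl_version;   /* 0: CXL 1.1, 1: CXL 2.0 (stand-in values) */
        uint64_t base;
        uint64_t length;
};

/* return the register base for the bridge with @uid, or 0 if none usable */
static uint64_t find_chbs_base(const struct chbs_entry *tbl, size_t n,
                               uint32_t uid)
{
        for (size_t i = 0; i < n; i++) {
                const struct chbs_entry *c = &tbl[i];

                if (c->uid != uid || !c->base)
                        continue;
                /* a CXL 1.1 CHBS must describe exactly the RCRB */
                if (c->cxl_version == 0 && c->length != RCRB_SIZE)
                        continue;
                return c->base;
        }
        return 0;
}

int main(void)
{
        struct chbs_entry tbl[] = {
                { .uid = 4, .cxl_version = 0, .base = 0xfe000000, .length = 4096 },
                { .uid = 7, .cxl_version = 1, .base = 0xfd000000, .length = 65536 },
        };

        printf("uid 7 base: %#llx\n",
               (unsigned long long)find_chbs_base(tbl, 2, 7));
        printf("uid 4 base: %#llx (1.1 entry with unexpected length)\n",
               (unsigned long long)find_chbs_base(tbl, 2, 4));
        return 0;
}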
531 return -EINVAL; in get_genport_coordinates()
533 rc = acpi_get_genport_coordinates(uid, &dport->hb_coord); in get_genport_coordinates()
538 dport->hb_coord.read_latency *= 1000; in get_genport_coordinates()
539 dport->hb_coord.write_latency *= 1000; in get_genport_coordinates()
553 struct device *host = root_port->dev.parent; in add_host_bridge_dport()
575 pci_root = acpi_pci_find_root(hb->handle); in add_host_bridge_dport()
576 bridge = pci_root->bus->bridge; in add_host_bridge_dport()
610 struct device *host = root_port->dev.parent; in add_host_bridge_uport()
623 pci_root = acpi_pci_find_root(hb->handle); in add_host_bridge_uport()
624 bridge = pci_root->bus->bridge; in add_host_bridge_uport()
631 if (dport->rch) { in add_host_bridge_uport()
651 rc = devm_cxl_register_pci_bus(host, bridge, pci_root->bus); in add_host_bridge_uport()
669 struct device *host = root_port->dev.parent; in add_root_nvdimm_bridge()
675 if (!(cxld->flags & CXL_DECODER_F_PMEM)) in add_root_nvdimm_bridge()
680 dev_dbg(host, "failed to register pmem\n"); in add_root_nvdimm_bridge()
683 dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev), in add_root_nvdimm_bridge()
684 dev_name(&cxl_nvb->dev)); in add_root_nvdimm_bridge()
697 kfree(res->name); in del_cxl_resource()
703 priv->desc = (unsigned long) pub; in cxl_set_public_resource()
708 return (struct resource *) priv->desc; in cxl_get_public_resource()
715 for (res = cxl->child; res; res = next) { in remove_cxl_resources()
718 next = res->sibling; in remove_cxl_resources()
731 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
734 * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
740 * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
741 * |--------------- "System RAM" -------------|
753 for (res = cxl_res->child; res; res = next) { in add_cxl_resources()
756 return -ENOMEM; in add_cxl_resources()
757 new->name = res->name; in add_cxl_resources()
758 new->start = res->start; in add_cxl_resources()
759 new->end = res->end; in add_cxl_resources()
760 new->flags = IORESOURCE_MEM; in add_cxl_resources()
761 new->desc = IORES_DESC_CXL; in add_cxl_resources()
771 next = res->sibling; in add_cxl_resources()
774 struct resource *_next = next->sibling; in add_cxl_resources()
780 next->start = new->end + 1; in add_cxl_resources()
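
The add_cxl_resources() fragment (and the diagram in its kernel-doc) shows CXL windows being mirrored into iomem_resource: each window becomes an IORES_DESC_CXL resource, insertion may expand it to absorb an overlapping range such as System RAM, and a following window that now starts inside the expanded span has its start advanced past it (next->start = new->end + 1). The listing also hints that fully covered windows are dropped from the local list; the toy model below skips that and only illustrates the start adjustment, using simplified types rather than the resource-tree API.

#include <stdint.h>
#include <stdio.h>

struct span {
        const char *name;
        uint64_t start, end;    /* inclusive */
};

/*
 * After inserting windows[i] (possibly expanded to absorb an overlap),
 * push the start of any later overlapping window past its end so the
 * mirrored spans stay disjoint.
 */
static void trim_following(struct span *windows, int n, int i)
{
        for (int j = i + 1; j < n; j++)
                if (windows[j].start <= windows[i].end)
                        windows[j].start = windows[i].end + 1;
}

int main(void)
{
        struct span windows[] = {
                /* "CXL Window 0" expanded to absorb overlapping System RAM */
                { "CXL Window 0", 0x100000000ULL, 0x2ffffffffULL },
                { "CXL Window 1", 0x280000000ULL, 0x37fffffffULL },
        };

        trim_following(windows, 2, 0);

        for (int i = 0; i < 2; i++)
                printf("%s: [%#llx-%#llx]\n", windows[i].name,
                       (unsigned long long)windows[i].start,
                       (unsigned long long)windows[i].end);
        return 0;
}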
794 for (p = cxl_res->child; p; p = p->sibling) { in pair_cxl_resource()
796 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; in pair_cxl_resource()
798 .start = cxld->hpa_range.start, in pair_cxl_resource()
799 .end = cxld->hpa_range.end, in pair_cxl_resource()
804 cxlrd->res = cxl_get_public_resource(p); in pair_cxl_resource()
818 struct device *host = &pdev->dev; in cxl_acpi_probe()
822 device_lock_set_class(&pdev->dev, &cxl_root_key); in cxl_acpi_probe()
823 rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class, in cxl_acpi_probe()
824 &pdev->dev); in cxl_acpi_probe()
830 return -ENOMEM; in cxl_acpi_probe()
831 cxl_res->name = "CXL mem"; in cxl_acpi_probe()
832 cxl_res->start = 0; in cxl_acpi_probe()
833 cxl_res->end = -1; in cxl_acpi_probe()
834 cxl_res->flags = IORESOURCE_MEM; in cxl_acpi_probe()
839 root_port = &cxl_root->port; in cxl_acpi_probe()
841 rc = bus_for_each_dev(adev->dev.bus, NULL, root_port, in cxl_acpi_probe()
857 return -ENXIO; in cxl_acpi_probe()
867 device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource); in cxl_acpi_probe()
870 * Root level scanned with host-bridge as dports, now scan host-bridges in cxl_acpi_probe()
871 * for their role as CXL uports to their CXL-capable PCIe Root Ports. in cxl_acpi_probe()
873 rc = bus_for_each_dev(adev->dev.bus, NULL, root_port, in cxl_acpi_probe()
879 rc = device_for_each_child(&root_port->dev, root_port, in cxl_acpi_probe()
884 /* In case PCI is scanned before ACPI re-trigger memdev attach */ in cxl_acpi_probe()