1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  acpi_numa.c - ACPI NUMA support
4  *
5  *  Copyright (C) 2002 Takayoshi Kochi <t-kochi@bq.jp.nec.com>
6  */
7 
8 #define pr_fmt(fmt) "ACPI: " fmt
9 
10 #include <linux/module.h>
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/types.h>
14 #include <linux/errno.h>
15 #include <linux/acpi.h>
16 #include <linux/memblock.h>
17 #include <linux/numa.h>
18 #include <linux/nodemask.h>
19 #include <linux/topology.h>
20 #include <linux/numa_memblks.h>
21 #include <linux/string_choices.h>
22 
23 static nodemask_t nodes_found_map = NODE_MASK_NONE;
24 
25 /* maps to convert between proximity domain and logical node ID */
26 static int pxm_to_node_map[MAX_PXM_DOMAINS]
27 			= { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
28 static int node_to_pxm_map[MAX_NUMNODES]
29 			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
30 
31 unsigned char acpi_srat_revision __initdata;
32 static int acpi_numa __initdata;
33 
34 static int last_real_pxm;
35 
void __init disable_srat(void)
{
	/* A negative acpi_numa marks SRAT as unusable; see srat_disabled(). */
	acpi_numa = -1;
}
40 
pxm_to_node(int pxm)41 int pxm_to_node(int pxm)
42 {
43 	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
44 		return NUMA_NO_NODE;
45 	return pxm_to_node_map[pxm];
46 }
47 EXPORT_SYMBOL(pxm_to_node);
48 
node_to_pxm(int node)49 int node_to_pxm(int node)
50 {
51 	if (node < 0)
52 		return PXM_INVAL;
53 	return node_to_pxm_map[node];
54 }
55 EXPORT_SYMBOL_GPL(node_to_pxm);
56 
__acpi_map_pxm_to_node(int pxm,int node)57 static void __acpi_map_pxm_to_node(int pxm, int node)
58 {
59 	if (pxm_to_node_map[pxm] == NUMA_NO_NODE || node < pxm_to_node_map[pxm])
60 		pxm_to_node_map[pxm] = node;
61 	if (node_to_pxm_map[node] == PXM_INVAL || pxm < node_to_pxm_map[node])
62 		node_to_pxm_map[node] = pxm;
63 }
64 
acpi_map_pxm_to_node(int pxm)65 int acpi_map_pxm_to_node(int pxm)
66 {
67 	int node;
68 
69 	if (pxm < 0 || pxm >= MAX_PXM_DOMAINS || numa_off)
70 		return NUMA_NO_NODE;
71 
72 	node = pxm_to_node_map[pxm];
73 
74 	if (node == NUMA_NO_NODE) {
75 		node = first_unset_node(nodes_found_map);
76 		if (node >= MAX_NUMNODES)
77 			return NUMA_NO_NODE;
78 		__acpi_map_pxm_to_node(pxm, node);
79 		node_set(node, nodes_found_map);
80 	}
81 
82 	return node;
83 }
84 EXPORT_SYMBOL(acpi_map_pxm_to_node);
85 
86 #ifdef CONFIG_NUMA_EMU
/*
 * Take max_nid - 1 fake-numa nodes into account in both the
 * pxm_to_node_map[] and node_to_pxm_map[] tables.
 */
int __init fix_pxm_node_maps(int max_nid)
{
	/* Staging copies: the remapped tables are built here, then copied
	 * over the live tables in one pass at the end. */
	static int pxm_to_node_map_copy[MAX_PXM_DOMAINS] __initdata
			= { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE };
	static int node_to_pxm_map_copy[MAX_NUMNODES] __initdata
			= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
	/* index: highest fake nid bound so far; count: total mappings made */
	int i, j, index = -1, count = 0;
	nodemask_t nodes_to_enable;

	if (numa_off)
		return -1;

	/* no or incomplete node/PXM mapping set, nothing to do */
	if (srat_disabled())
		return 0;

	/* find fake nodes PXM mapping */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (node_to_pxm_map[i] != PXM_INVAL) {
			for (j = 0; j <= max_nid; j++) {
				/* A fake nid must not already carry a PXM
				 * when its physical node matches i. */
				if ((emu_nid_to_phys[j] == i) &&
				    WARN(node_to_pxm_map_copy[j] != PXM_INVAL,
					 "Node %d is already binded to PXM %d\n",
					 j, node_to_pxm_map_copy[j]))
					return -1;
				if (emu_nid_to_phys[j] == i) {
					node_to_pxm_map_copy[j] =
						node_to_pxm_map[i];
					if (j > index)
						index = j;
					count++;
				}
			}
		}
	}
	if (index == -1) {
		pr_debug("No node/PXM mapping has been set\n");
		/* nothing more to be done */
		return 0;
	}
	/* Every fake nid up to max_nid should have been bound above. */
	if (WARN(index != max_nid, "%d max nid  when expected %d\n",
		      index, max_nid))
		return -1;

	nodes_clear(nodes_to_enable);

	/* map phys nodes not used for fake nodes */
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (node_to_pxm_map[i] != PXM_INVAL) {
			for (j = 0; j <= max_nid; j++)
				if (emu_nid_to_phys[j] == i)
					break;
			/* fake nodes PXM mapping has been done */
			if (j <= max_nid)
				continue;
			/* find first hole */
			for (j = 0;
			     j < MAX_NUMNODES &&
				 node_to_pxm_map_copy[j] != PXM_INVAL;
			     j++)
			;
			if (WARN(j == MAX_NUMNODES,
			    "Number of nodes exceeds MAX_NUMNODES\n"))
				return -1;
			node_to_pxm_map_copy[j] = node_to_pxm_map[i];
			node_set(j, nodes_to_enable);
			count++;
		}
	}

	/* creating reverse mapping in pxm_to_node_map[] */
	for (i = 0; i < MAX_NUMNODES; i++)
		if (node_to_pxm_map_copy[i] != PXM_INVAL &&
		    pxm_to_node_map_copy[node_to_pxm_map_copy[i]] == NUMA_NO_NODE)
			pxm_to_node_map_copy[node_to_pxm_map_copy[i]] = i;

	/* overwrite with new mapping */
	for (i = 0; i < MAX_NUMNODES; i++) {
		node_to_pxm_map[i] = node_to_pxm_map_copy[i];
		pxm_to_node_map[i] = pxm_to_node_map_copy[i];
	}

	/* enable other nodes found in PXM for hotplug */
	nodes_or(numa_nodes_parsed, nodes_to_enable, numa_nodes_parsed);

	pr_debug("found %d total number of nodes\n", count);
	return 0;
}
179 #endif
180 
/*
 * Emit a pr_debug() line describing one SRAT subtable entry; unknown
 * entry types are reported with pr_warn().  Purely diagnostic — no
 * state is changed here.
 */
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
	switch (header->type) {
	case ACPI_SRAT_TYPE_CPU_AFFINITY:
		{
			struct acpi_srat_cpu_affinity *p =
			    (struct acpi_srat_cpu_affinity *)header;
			pr_debug("SRAT Processor (id[0x%02x] eid[0x%02x]) in proximity domain %d %s\n",
				 p->apic_id, p->local_sapic_eid,
				 p->proximity_domain_lo,
				 str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
		}
		break;

	case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
		{
			struct acpi_srat_mem_affinity *p =
			    (struct acpi_srat_mem_affinity *)header;
			pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
				 (unsigned long long)p->base_address,
				 (unsigned long long)p->length,
				 p->proximity_domain,
				 str_enabled_disabled(p->flags & ACPI_SRAT_MEM_ENABLED),
				 (p->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
				 " hot-pluggable" : "",
				 (p->flags & ACPI_SRAT_MEM_NON_VOLATILE) ?
				 " non-volatile" : "");
		}
		break;

	case ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY:
		{
			struct acpi_srat_x2apic_cpu_affinity *p =
			    (struct acpi_srat_x2apic_cpu_affinity *)header;
			pr_debug("SRAT Processor (x2apicid[0x%08x]) in proximity domain %d %s\n",
				 p->apic_id,
				 p->proximity_domain,
				 str_enabled_disabled(p->flags & ACPI_SRAT_CPU_ENABLED));
		}
		break;

	case ACPI_SRAT_TYPE_GICC_AFFINITY:
		{
			struct acpi_srat_gicc_affinity *p =
			    (struct acpi_srat_gicc_affinity *)header;
			pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
				 p->acpi_processor_uid,
				 p->proximity_domain,
				 str_enabled_disabled(p->flags & ACPI_SRAT_GICC_ENABLED));
		}
		break;

	case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
	{
		struct acpi_srat_generic_affinity *p =
			(struct acpi_srat_generic_affinity *)header;

		/* device_handle_type 0 is a PCI segment:BDF handle;
		 * otherwise the handle holds an ACPI HID/UID pair. */
		if (p->device_handle_type == 0) {
			/*
			 * For pci devices this may be the only place they
			 * are assigned a proximity domain
			 */
			pr_debug("SRAT Generic Initiator(Seg:%u BDF:%u) in proximity domain %d %s\n",
				 *(u16 *)(&p->device_handle[0]),
				 *(u16 *)(&p->device_handle[2]),
				 p->proximity_domain,
				 str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
		} else {
			/*
			 * In this case we can rely on the device having a
			 * proximity domain reference
			 */
			pr_debug("SRAT Generic Initiator(HID=%.8s UID=%.4s) in proximity domain %d %s\n",
				(char *)(&p->device_handle[0]),
				(char *)(&p->device_handle[8]),
				p->proximity_domain,
				str_enabled_disabled(p->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED));
		}
	}
	break;

	case ACPI_SRAT_TYPE_RINTC_AFFINITY:
		{
			struct acpi_srat_rintc_affinity *p =
			    (struct acpi_srat_rintc_affinity *)header;
			pr_debug("SRAT Processor (acpi id[0x%04x]) in proximity domain %d %s\n",
				 p->acpi_processor_uid,
				 p->proximity_domain,
				 str_enabled_disabled(p->flags & ACPI_SRAT_RINTC_ENABLED));
		}
		break;

	default:
		pr_warn("Found unsupported SRAT entry (type = 0x%x)\n",
			header->type);
		break;
	}
}
280 
281 /*
282  * A lot of BIOS fill in 10 (= no distance) everywhere. This messes
283  * up the NUMA heuristics which wants the local node to have a smaller
284  * distance than the others.
285  * Do some quick checks here and only use the SLIT if it passes.
286  */
slit_valid(struct acpi_table_slit * slit)287 static int __init slit_valid(struct acpi_table_slit *slit)
288 {
289 	int i, j;
290 	int d = slit->locality_count;
291 	for (i = 0; i < d; i++) {
292 		for (j = 0; j < d; j++) {
293 			u8 val = slit->entry[d*i + j];
294 			if (i == j) {
295 				if (val != LOCAL_DISTANCE)
296 					return 0;
297 			} else if (val <= LOCAL_DISTANCE)
298 				return 0;
299 		}
300 	}
301 	return 1;
302 }
303 
/* Called when an SRAT entry is malformed: log it and disable SRAT use. */
void __init bad_srat(void)
{
	pr_err("SRAT: SRAT not used.\n");
	disable_srat();
}
309 
/* Nonzero once disable_srat()/bad_srat() has flagged SRAT as unusable. */
int __init srat_disabled(void)
{
	return acpi_numa < 0;
}
314 
/*
 * Weak default for architectures without numa_memblks support: report
 * that no memblk covers [start, end) so CFMWS parsing creates a node.
 */
__weak int __init numa_fill_memblks(u64 start, u64 end)
{
	return NUMA_NO_MEMBLK;
}
319 
320 /*
321  * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
322  * I/O localities since SRAT does not list them.  I/O localities are
323  * not supported at this point.
324  */
acpi_parse_slit(struct acpi_table_header * table)325 static int __init acpi_parse_slit(struct acpi_table_header *table)
326 {
327 	struct acpi_table_slit *slit = (struct acpi_table_slit *)table;
328 	int i, j;
329 
330 	if (!slit_valid(slit)) {
331 		pr_info("SLIT table looks invalid. Not used.\n");
332 		return -EINVAL;
333 	}
334 
335 	for (i = 0; i < slit->locality_count; i++) {
336 		const int from_node = pxm_to_node(i);
337 
338 		if (from_node == NUMA_NO_NODE)
339 			continue;
340 
341 		for (j = 0; j < slit->locality_count; j++) {
342 			const int to_node = pxm_to_node(j);
343 
344 			if (to_node == NUMA_NO_NODE)
345 				continue;
346 
347 			numa_set_distance(from_node, to_node,
348 				slit->entry[slit->locality_count * i + j]);
349 		}
350 	}
351 
352 	return 0;
353 }
354 
355 static int parsed_numa_memblks __initdata;
356 
/*
 * SRAT memory-affinity entry handler: register the [base, base+length)
 * range with its node via numa_add_memblk().  Malformed entries disable
 * SRAT altogether (bad_srat()) but do not fail the table walk.
 */
static int __init
acpi_parse_memory_affinity(union acpi_subtable_headers *header,
			   const unsigned long table_end)
{
	struct acpi_srat_mem_affinity *ma;
	u64 start, end;
	u32 hotpluggable;
	int node, pxm;

	ma = (struct acpi_srat_mem_affinity *)header;

	acpi_table_print_srat_entry(&header->common);

	if (srat_disabled())
		return 0;
	if (ma->header.length < sizeof(struct acpi_srat_mem_affinity)) {
		pr_err("SRAT: Unexpected header length: %d\n",
		       ma->header.length);
		goto out_err_bad_srat;
	}
	/* Disabled ranges are silently skipped. */
	if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
		return 0;
	hotpluggable = IS_ENABLED(CONFIG_MEMORY_HOTPLUG) &&
		(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE);

	start = ma->base_address;
	end = start + ma->length;
	pxm = ma->proximity_domain;
	/* SRAT rev 1 and earlier only define the low 8 PXM bits. */
	if (acpi_srat_revision <= 1)
		pxm &= 0xff;

	node = acpi_map_pxm_to_node(pxm);
	if (node == NUMA_NO_NODE) {
		pr_err("SRAT: Too many proximity domains.\n");
		goto out_err_bad_srat;
	}

	if (numa_add_memblk(node, start, end) < 0) {
		pr_err("SRAT: Failed to add memblk to node %u [mem %#010Lx-%#010Lx]\n",
		       node, (unsigned long long) start,
		       (unsigned long long) end - 1);
		goto out_err_bad_srat;
	}

	node_set(node, numa_nodes_parsed);

	pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s%s\n",
		node, pxm,
		(unsigned long long) start, (unsigned long long) end - 1,
		hotpluggable ? " hotplug" : "",
		ma->flags & ACPI_SRAT_MEM_NON_VOLATILE ? " non-volatile" : "");

	/* Mark hotplug range in memblock. */
	if (hotpluggable && memblock_mark_hotplug(start, ma->length))
		pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
			(unsigned long long)start, (unsigned long long)end - 1);

	max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));

	parsed_numa_memblks++;

	return 0;

out_err_bad_srat:
	/* Just disable SRAT, but do not fail and ignore errors. */
	bad_srat();

	return 0;
}
426 
/*
 * CEDT CFMWS entry handler: give each CXL fixed memory window NUMA
 * coverage.  Windows already (partially) described by the SRAT get
 * their memblks extended; otherwise a fresh node is created from the
 * caller-supplied fake PXM counter (*fake_pxm, incremented per window).
 */
static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
				   void *arg, const unsigned long table_end)
{
	struct acpi_cedt_cfmws *cfmws;
	int *fake_pxm = arg;
	u64 start, end;
	int node;

	cfmws = (struct acpi_cedt_cfmws *)header;
	start = cfmws->base_hpa;
	end = cfmws->base_hpa + cfmws->window_size;

	/*
	 * The SRAT may have already described NUMA details for all,
	 * or a portion of, this CFMWS HPA range. Extend the memblks
	 * found for any portion of the window to cover the entire
	 * window.
	 */
	if (!numa_fill_memblks(start, end))
		return 0;

	/* No SRAT description. Create a new node. */
	node = acpi_map_pxm_to_node(*fake_pxm);

	if (node == NUMA_NO_NODE) {
		pr_err("ACPI NUMA: Too many proximity domains while processing CFMWS.\n");
		return -EINVAL;
	}

	if (numa_add_memblk(node, start, end) < 0) {
		/* CXL driver must handle the NUMA_NO_NODE case */
		pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
			node, start, end);
	}
	node_set(node, numa_nodes_parsed);

	/* Set the next available fake_pxm value */
	(*fake_pxm)++;
	return 0;
}
467 
/*
 * Weak default for architectures without x2APIC affinity support:
 * just warn that the entry cannot be used.
 */
void __init __weak
acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
{
	pr_warn("Found unsupported x2apic [0x%08x] SRAT entry\n", pa->apic_id);
}
473 
474 static int __init
acpi_parse_x2apic_affinity(union acpi_subtable_headers * header,const unsigned long end)475 acpi_parse_x2apic_affinity(union acpi_subtable_headers *header,
476 			   const unsigned long end)
477 {
478 	struct acpi_srat_x2apic_cpu_affinity *processor_affinity;
479 
480 	processor_affinity = (struct acpi_srat_x2apic_cpu_affinity *)header;
481 
482 	acpi_table_print_srat_entry(&header->common);
483 
484 	/* let architecture-dependent part to do it */
485 	acpi_numa_x2apic_affinity_init(processor_affinity);
486 
487 	return 0;
488 }
489 
490 static int __init
acpi_parse_processor_affinity(union acpi_subtable_headers * header,const unsigned long end)491 acpi_parse_processor_affinity(union acpi_subtable_headers *header,
492 			      const unsigned long end)
493 {
494 	struct acpi_srat_cpu_affinity *processor_affinity;
495 
496 	processor_affinity = (struct acpi_srat_cpu_affinity *)header;
497 
498 	acpi_table_print_srat_entry(&header->common);
499 
500 	/* let architecture-dependent part to do it */
501 	acpi_numa_processor_affinity_init(processor_affinity);
502 
503 	return 0;
504 }
505 
506 static int __init
acpi_parse_gicc_affinity(union acpi_subtable_headers * header,const unsigned long end)507 acpi_parse_gicc_affinity(union acpi_subtable_headers *header,
508 			 const unsigned long end)
509 {
510 	struct acpi_srat_gicc_affinity *processor_affinity;
511 
512 	processor_affinity = (struct acpi_srat_gicc_affinity *)header;
513 
514 	acpi_table_print_srat_entry(&header->common);
515 
516 	/* let architecture-dependent part to do it */
517 	acpi_numa_gicc_affinity_init(processor_affinity);
518 
519 	return 0;
520 }
521 
#if defined(CONFIG_X86) || defined(CONFIG_ARM64)
/*
 * SRAT Generic Initiator affinity entry handler: map the entry's PXM
 * to a node and tag that node as hosting a generic initiator.  Only
 * wired up on x86/arm64; elsewhere the stub below ignores the entry.
 */
static int __init
acpi_parse_gi_affinity(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	struct acpi_srat_generic_affinity *gi_affinity;
	int node;

	gi_affinity = (struct acpi_srat_generic_affinity *)header;
	if (!gi_affinity)
		return -EINVAL;
	acpi_table_print_srat_entry(&header->common);

	/* Disabled entries carry no usable affinity information. */
	if (!(gi_affinity->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
		return -EINVAL;

	node = acpi_map_pxm_to_node(gi_affinity->proximity_domain);
	if (node == NUMA_NO_NODE) {
		pr_err("SRAT: Too many proximity domains.\n");
		return -EINVAL;
	}
	node_set(node, numa_nodes_parsed);
	node_set_state(node, N_GENERIC_INITIATOR);

	return 0;
}
#else
/* Stub for architectures without Generic Initiator support. */
static int __init
acpi_parse_gi_affinity(union acpi_subtable_headers *header,
		       const unsigned long end)
{
	return 0;
}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
556 
557 static int __init
acpi_parse_rintc_affinity(union acpi_subtable_headers * header,const unsigned long end)558 acpi_parse_rintc_affinity(union acpi_subtable_headers *header,
559 			  const unsigned long end)
560 {
561 	struct acpi_srat_rintc_affinity *rintc_affinity;
562 
563 	rintc_affinity = (struct acpi_srat_rintc_affinity *)header;
564 	acpi_table_print_srat_entry(&header->common);
565 
566 	/* let architecture-dependent part to do it */
567 	acpi_numa_rintc_affinity_init(rintc_affinity);
568 
569 	return 0;
570 }
571 
/* SRAT table-level callback: just capture the table revision, which
 * acpi_parse_memory_affinity() needs to decode PXM fields correctly. */
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
	struct acpi_table_srat *srat = (struct acpi_table_srat *)table;

	acpi_srat_revision = srat->header.revision;

	/* Real work done in acpi_table_parse_srat below. */

	return 0;
}
582 
/* Convenience wrapper: iterate all SRAT subtables of the given type. */
static int __init
acpi_table_parse_srat(enum acpi_srat_type id,
		      acpi_tbl_entry_handler handler, unsigned int max_entries)
{
	return acpi_table_parse_entries(ACPI_SIG_SRAT,
					    sizeof(struct acpi_table_srat), id,
					    handler, max_entries);
}
591 
/*
 * Top-level ACPI NUMA initialization: parse SRAT (CPU and memory
 * affinity), then SLIT (distances), then CEDT CFMWS (CXL windows).
 * Returns 0 on success, a negative error if memory parsing failed or
 * produced no memblks, or -EINVAL when ACPI is disabled.
 */
int __init acpi_numa_init(void)
{
	int i, fake_pxm, cnt = 0;

	if (acpi_disabled)
		return -EINVAL;

	/*
	 * Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
	 * SRAT cpu entries could have different order with that in MADT.
	 * So go over all cpu entries in SRAT to get apicid to node mapping.
	 */

	/* SRAT: System Resource Affinity Table */
	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
		struct acpi_subtable_proc srat_proc[5];

		memset(srat_proc, 0, sizeof(srat_proc));
		srat_proc[0].id = ACPI_SRAT_TYPE_CPU_AFFINITY;
		srat_proc[0].handler = acpi_parse_processor_affinity;
		srat_proc[1].id = ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY;
		srat_proc[1].handler = acpi_parse_x2apic_affinity;
		srat_proc[2].id = ACPI_SRAT_TYPE_GICC_AFFINITY;
		srat_proc[2].handler = acpi_parse_gicc_affinity;
		srat_proc[3].id = ACPI_SRAT_TYPE_GENERIC_AFFINITY;
		srat_proc[3].handler = acpi_parse_gi_affinity;
		srat_proc[4].id = ACPI_SRAT_TYPE_RINTC_AFFINITY;
		srat_proc[4].handler = acpi_parse_rintc_affinity;

		/* One pass over the SRAT dispatches all CPU-ish entries. */
		acpi_table_parse_entries_array(ACPI_SIG_SRAT,
					sizeof(struct acpi_table_srat),
					srat_proc, ARRAY_SIZE(srat_proc), 0);

		/* Memory entries are walked separately; cnt < 0 on error. */
		cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
					    acpi_parse_memory_affinity, 0);
	}

	/* SLIT: System Locality Information Table */
	acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);

	/*
	 * CXL Fixed Memory Window Structures (CFMWS) must be parsed
	 * after the SRAT. Create NUMA Nodes for CXL memory ranges that
	 * are defined in the CFMWS and not already defined in the SRAT.
	 * Initialize a fake_pxm as the first available PXM to emulate.
	 */

	/* fake_pxm is the next unused PXM value after SRAT parsing */
	for (i = 0, fake_pxm = -1; i < MAX_NUMNODES; i++) {
		if (node_to_pxm_map[i] > fake_pxm)
			fake_pxm = node_to_pxm_map[i];
	}
	/* Remember the boundary so acpi_node_backed_by_real_pxm() works. */
	last_real_pxm = fake_pxm;
	fake_pxm++;
	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, acpi_parse_cfmws,
			      &fake_pxm);

	if (cnt < 0)
		return cnt;
	else if (!parsed_numa_memblks)
		return -ENOENT;
	return 0;
}
655 
acpi_node_backed_by_real_pxm(int nid)656 bool acpi_node_backed_by_real_pxm(int nid)
657 {
658 	int pxm = node_to_pxm(nid);
659 
660 	return pxm <= last_real_pxm;
661 }
662 EXPORT_SYMBOL_GPL(acpi_node_backed_by_real_pxm);
663 
/*
 * Evaluate _PXM on @h, walking up the ACPI namespace to each parent
 * until a _PXM method answers.  Returns the proximity domain, or -1
 * if no ancestor provides one.
 */
static int acpi_get_pxm(acpi_handle h)
{
	acpi_handle node = h;

	for (;;) {
		unsigned long long pxm;

		if (ACPI_SUCCESS(acpi_evaluate_integer(node, "_PXM",
						       NULL, &pxm)))
			return pxm;
		if (ACPI_FAILURE(acpi_get_parent(node, &node)))
			return -1;
	}
}
680 
/*
 * Resolve the NUMA node for an ACPI device handle via its (possibly
 * inherited) _PXM; NUMA_NO_NODE when no mapping exists.
 */
int acpi_get_node(acpi_handle handle)
{
	return pxm_to_node(acpi_get_pxm(handle));
}
EXPORT_SYMBOL(acpi_get_node);
690