1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
4 *
5 * Author: Jianmin Lv <lvjianmin@loongson.cn>
6 * Huacai Chen <chenhuacai@loongson.cn>
7 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
8 */
9
10 #include <linux/init.h>
11 #include <linux/acpi.h>
12 #include <linux/irq.h>
13 #include <linux/irqdomain.h>
14 #include <linux/memblock.h>
15 #include <linux/of_fdt.h>
16 #include <linux/serial_core.h>
17 #include <asm/io.h>
18 #include <asm/numa.h>
19 #include <asm/loongson.h>
20
int acpi_disabled;		/* Set when ACPI support is disabled (e.g. on the command line) */
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;			/* Skip ACPI IRQ initialization when set */
int acpi_pci_disabled;		/* Set when ACPI PCI support is disabled */
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors;		/* Count of enabled CPUs found in the MADT */
int disabled_cpus;		/* Count of MADT CPU entries marked disabled */

/* NOTE(review): presumably the saved stack pointer for S3 resume — confirm against suspend code */
u64 acpi_saved_sp;

#define MAX_CORE_PIC 256	/* Upper bound on CORE_PIC MADT entries to parse */

#define PREFIX "ACPI: "

/* Copy of each MADT CORE_PIC entry, indexed by physical core id */
struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
37
__acpi_map_table(unsigned long phys,unsigned long size)38 void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
39 {
40
41 if (!phys || !size)
42 return NULL;
43
44 return early_memremap(phys, size);
45 }
__acpi_unmap_table(void __iomem * map,unsigned long size)46 void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
47 {
48 if (!map || !size)
49 return;
50
51 early_memunmap(map, size);
52 }
53
acpi_os_ioremap(acpi_physical_address phys,acpi_size size)54 void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
55 {
56 if (!memblock_is_memory(phys))
57 return ioremap(phys, size);
58 else
59 return ioremap_cache(phys, size);
60 }
61
#ifdef CONFIG_SMP
/*
 * Allocate a logical CPU number for physical CPU @id and, when the MADT
 * entry is enabled, record the physical<->logical mapping and mark the
 * CPU possible/present.
 *
 * Returns the logical CPU number, or -ENODEV once the nr_cpus /
 * possible_cpus limit is reached.
 */
static int set_processor_mask(u32 id, u32 flags)
{

	int cpu, cpuid = id;

	/* Refuse CPUs beyond the configured limit. */
	if (num_processors >= nr_cpu_ids) {
		pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
			" processor 0x%x ignored.\n", nr_cpu_ids, cpuid);

		return -ENODEV;

	}
	/* The boot CPU is always logical CPU 0. */
	if (cpuid == loongson_sysconf.boot_cpu_id)
		cpu = 0;
	else
		/* Otherwise take the first logical id not yet present. */
		cpu = cpumask_next_zero(-1, cpu_present_mask);

	if (flags & ACPI_MADT_ENABLED) {
		num_processors++;
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;
	} else
		/*
		 * NOTE(review): a disabled entry still returns a logical id
		 * without recording any mapping — callers should only trust
		 * the return value for enabled entries.
		 */
		disabled_cpus++;

	return cpu;
}
#endif
92
93 static int __init
acpi_parse_processor(union acpi_subtable_headers * header,const unsigned long end)94 acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
95 {
96 struct acpi_madt_core_pic *processor = NULL;
97
98 processor = (struct acpi_madt_core_pic *)header;
99 if (BAD_MADT_ENTRY(processor, end))
100 return -EINVAL;
101
102 acpi_table_print_madt_entry(&header->common);
103 #ifdef CONFIG_SMP
104 acpi_core_pic[processor->core_id] = *processor;
105 set_processor_mask(processor->core_id, processor->flags);
106 #endif
107
108 return 0;
109 }
110
111 static int __init
acpi_parse_eio_master(union acpi_subtable_headers * header,const unsigned long end)112 acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
113 {
114 static int core = 0;
115 struct acpi_madt_eio_pic *eiointc = NULL;
116
117 eiointc = (struct acpi_madt_eio_pic *)header;
118 if (BAD_MADT_ENTRY(eiointc, end))
119 return -EINVAL;
120
121 core = eiointc->node * CORES_PER_EIO_NODE;
122 set_bit(core, &(loongson_sysconf.cores_io_master));
123
124 return 0;
125 }
126
acpi_process_madt(void)127 static void __init acpi_process_madt(void)
128 {
129 #ifdef CONFIG_SMP
130 int i;
131
132 for (i = 0; i < NR_CPUS; i++) {
133 __cpu_number_map[i] = -1;
134 __cpu_logical_map[i] = -1;
135 }
136 #endif
137 acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
138 acpi_parse_processor, MAX_CORE_PIC);
139
140 acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
141 acpi_parse_eio_master, MAX_IO_PICS);
142
143 loongson_sysconf.nr_cpus = num_processors;
144 }
145
146 int pptt_enabled;
147
parse_acpi_topology(void)148 int __init parse_acpi_topology(void)
149 {
150 int cpu, topology_id;
151
152 for_each_possible_cpu(cpu) {
153 topology_id = find_acpi_cpu_topology(cpu, 0);
154 if (topology_id < 0) {
155 pr_warn("Invalid BIOS PPTT\n");
156 return -ENOENT;
157 }
158
159 if (acpi_pptt_cpu_is_thread(cpu) <= 0)
160 cpu_data[cpu].core = topology_id;
161 else {
162 topology_id = find_acpi_cpu_topology(cpu, 1);
163 if (topology_id < 0)
164 return -ENOENT;
165
166 cpu_data[cpu].core = topology_id;
167 }
168 }
169
170 pptt_enabled = 1;
171
172 return 0;
173 }
174
/* Low-level suspend hook: only wired up when suspend support is built in. */
#ifndef CONFIG_SUSPEND
int (*acpi_suspend_lowlevel)(void);
#else
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
#endif
180
/*
 * Boot-time ACPI table setup: initialize the table parser, enumerate
 * CPUs from the MADT and honour the SPCR early console.  When ACPI is
 * disabled or table init fails, fall back to the FDT stdout path for
 * the early console.
 */
void __init acpi_boot_table_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		goto fdt_earlycon;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	if (acpi_table_init()) {
		disable_acpi();
		goto fdt_earlycon;
	}

	loongson_sysconf.boot_cpu_id = read_csr_cpuid();

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	/* Do not enable ACPI SPCR console by default */
	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);

	return;

fdt_earlycon:
	/* No usable ACPI: scan the FDT chosen node for an early console. */
	if (earlycon_acpi_spcr_enable)
		early_init_dt_scan_chosen_stdout();
}
213
214 #ifdef CONFIG_ACPI_NUMA
215
/* Map an SRAT proximity domain to a NUMA node id (allocating one if new). */
static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}
220
/*
 * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them. I/O localities are
 * not supported at this point.
 */
unsigned int numa_distance_cnt;

/* Number of localities in the SLIT's NxN distance matrix.
 * NOTE(review): appears unused in this file — possibly kept for a caller
 * elsewhere; confirm before removing. */
static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
	return slit->locality_count;
}
232
numa_set_distance(int from,int to,int distance)233 void __init numa_set_distance(int from, int to, int distance)
234 {
235 if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
236 pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
237 from, to, distance);
238 return;
239 }
240
241 node_distances[from][to] = distance;
242 }
243
/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	/* A malformed entry length invalidates the whole SRAT. */
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	/* Ignore CPUs the firmware marked disabled. */
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
	/* SRAT revision >= 2 extends the proximity domain to 32 bits. */
	if (acpi_srat_revision >= 2) {
		pxm |= (pa->proximity_domain_hi[0] << 8);
		pxm |= (pa->proximity_domain_hi[1] << 16);
		pxm |= (pa->proximity_domain_hi[2] << 24);
	}
	node = setup_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	/* apic_id indexes per-CPU arrays sized by CONFIG_NR_CPUS. */
	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}
283
284 #endif
285
/* Reserve firmware-claimed memory (e.g. ACPI tables/NVS) from the allocator. */
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_reserve(addr, size);
}
290
291 #ifdef CONFIG_ACPI_HOTPLUG_CPU
292
293 #include <acpi/processor.h>
294
/*
 * Bind a hot-added CPU to the NUMA node derived from its ACPI handle.
 * Without CONFIG_ACPI_NUMA, or when the handle carries no node info,
 * this is a no-op.  Always returns 0.
 */
static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
	int nid;

	nid = acpi_get_node(handle);
	if (nid != NUMA_NO_NODE) {
		set_cpuid_to_node(physid, nid);
		node_set(nid, numa_nodes_parsed);
		set_cpu_numa_node(cpu, nid);
		cpumask_set_cpu(cpu, cpumask_of_node(nid));
	}
#endif
	return 0;
}
310
/*
 * ACPI CPU-hotplug entry point: allocate a logical CPU for @physid,
 * attach it to its NUMA node and report the logical id through @pcpu.
 * Returns 0 on success or a negative errno.
 */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	int logical_cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);

	if (logical_cpu < 0) {
		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
		return logical_cpu;
	}

	acpi_map_cpu2node(handle, logical_cpu, physid);

	*pcpu = logical_cpu;

	return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);
328
/*
 * ACPI CPU-hotplug removal: drop the NUMA binding and mark the CPU not
 * present.  Always returns 0.
 */
int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
	set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
#endif
	set_cpu_present(cpu, false);
	num_processors--;

	pr_info("cpu%d hot remove!\n", cpu);

	return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);
342
343 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
344