1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 *
4 * Procedures for interfacing to the RTAS on CHRP machines.
5 *
6 * Peter Bergner, IBM March 2001.
7 * Copyright (C) 2001 IBM.
8 */
9
10 #define pr_fmt(fmt) "rtas: " fmt
11
12 #include <linux/bsearch.h>
13 #include <linux/capability.h>
14 #include <linux/delay.h>
15 #include <linux/export.h>
16 #include <linux/init.h>
17 #include <linux/kconfig.h>
18 #include <linux/kernel.h>
19 #include <linux/lockdep.h>
20 #include <linux/memblock.h>
21 #include <linux/mutex.h>
22 #include <linux/nospec.h>
23 #include <linux/of.h>
24 #include <linux/of_fdt.h>
25 #include <linux/reboot.h>
26 #include <linux/sched.h>
27 #include <linux/security.h>
28 #include <linux/slab.h>
29 #include <linux/spinlock.h>
30 #include <linux/stdarg.h>
31 #include <linux/syscalls.h>
32 #include <linux/types.h>
33 #include <linux/uaccess.h>
34 #include <linux/xarray.h>
35
36 #include <asm/delay.h>
37 #include <asm/firmware.h>
38 #include <asm/interrupt.h>
39 #include <asm/machdep.h>
40 #include <asm/mmu.h>
41 #include <asm/page.h>
42 #include <asm/rtas-work-area.h>
43 #include <asm/rtas.h>
44 #include <asm/time.h>
45 #include <asm/trace.h>
46 #include <asm/udbg.h>
47
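/*
 * Example (a descriptive note, not new behavior): the ibm,get-indices
 * entry below sets .buf_idx1 = 2 and .size_idx1 = 3, so block_rtas_call()
 * treats args[2] as a buffer address whose length is given by args[3]
 * and requires the whole range to fall within the user-accessible RMO
 * buffer.
 */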
48 struct rtas_filter {
49 /* Indexes into the args buffer, -1 if not used */
50 const int buf_idx1;
51 const int size_idx1;
52 const int buf_idx2;
53 const int size_idx2;
54 /*
55 * Assumed buffer size per the spec if the function does not
56 * have a size parameter, e.g. ibm,errinjct. 0 if unused.
57 */
58 const int fixed_size;
59 };
60
61 /**
62 * struct rtas_function - Descriptor for RTAS functions.
63 *
64 * @token: Value of @name if it exists under the /rtas node.
65 * @name: Function name.
66 * @filter: If non-NULL, invoking this function via the rtas syscall is
67 * generally allowed, and @filter describes constraints on the
68 * arguments. See also @banned_for_syscall_on_le.
69 * @banned_for_syscall_on_le: Set when call via sys_rtas is generally allowed
70 * but specifically restricted on ppc64le. Such
71 * functions are believed to have no users on
72 * ppc64le, and we want to keep it that way. It does
73 * not make sense for this to be set when @filter
74 * is NULL.
75 * @lock: Pointer to an optional dedicated per-function mutex. This
76 * should be set for functions that require multiple calls in
77 * sequence to complete a single operation, and such sequences
78 * will disrupt each other if allowed to interleave. Users of
79 * this function are required to hold the associated lock for
80 * the duration of the call sequence. Add an explanatory
81 * comment to the function table entry if setting this member.
82 */
83 struct rtas_function {
84 s32 token;
85 const bool banned_for_syscall_on_le:1;
86 const char * const name;
87 const struct rtas_filter *filter;
88 struct mutex *lock;
89 };
90
91 /*
92 * Per-function locks for sequence-based RTAS functions.
93 */
94 static DEFINE_MUTEX(rtas_ibm_activate_firmware_lock);
95 static DEFINE_MUTEX(rtas_ibm_get_dynamic_sensor_state_lock);
96 static DEFINE_MUTEX(rtas_ibm_get_indices_lock);
97 static DEFINE_MUTEX(rtas_ibm_lpar_perftools_lock);
98 static DEFINE_MUTEX(rtas_ibm_physical_attestation_lock);
99 static DEFINE_MUTEX(rtas_ibm_set_dynamic_indicator_lock);
100 DEFINE_MUTEX(rtas_ibm_get_vpd_lock);
101
102 static struct rtas_function rtas_function_table[] __ro_after_init = {
103 [RTAS_FNIDX__CHECK_EXCEPTION] = {
104 .name = "check-exception",
105 },
106 [RTAS_FNIDX__DISPLAY_CHARACTER] = {
107 .name = "display-character",
108 .filter = &(const struct rtas_filter) {
109 .buf_idx1 = -1, .size_idx1 = -1,
110 .buf_idx2 = -1, .size_idx2 = -1,
111 },
112 },
113 [RTAS_FNIDX__EVENT_SCAN] = {
114 .name = "event-scan",
115 },
116 [RTAS_FNIDX__FREEZE_TIME_BASE] = {
117 .name = "freeze-time-base",
118 },
119 [RTAS_FNIDX__GET_POWER_LEVEL] = {
120 .name = "get-power-level",
121 .filter = &(const struct rtas_filter) {
122 .buf_idx1 = -1, .size_idx1 = -1,
123 .buf_idx2 = -1, .size_idx2 = -1,
124 },
125 },
126 [RTAS_FNIDX__GET_SENSOR_STATE] = {
127 .name = "get-sensor-state",
128 .filter = &(const struct rtas_filter) {
129 .buf_idx1 = -1, .size_idx1 = -1,
130 .buf_idx2 = -1, .size_idx2 = -1,
131 },
132 },
133 [RTAS_FNIDX__GET_TERM_CHAR] = {
134 .name = "get-term-char",
135 },
136 [RTAS_FNIDX__GET_TIME_OF_DAY] = {
137 .name = "get-time-of-day",
138 .filter = &(const struct rtas_filter) {
139 .buf_idx1 = -1, .size_idx1 = -1,
140 .buf_idx2 = -1, .size_idx2 = -1,
141 },
142 },
143 [RTAS_FNIDX__IBM_ACTIVATE_FIRMWARE] = {
144 .name = "ibm,activate-firmware",
145 .filter = &(const struct rtas_filter) {
146 .buf_idx1 = -1, .size_idx1 = -1,
147 .buf_idx2 = -1, .size_idx2 = -1,
148 },
149 /*
150 * PAPR+ as of v2.13 doesn't explicitly impose any
151 * restriction, but this typically requires multiple
152 * calls before success, and there's no reason to
153 * allow sequences to interleave.
154 */
155 .lock = &rtas_ibm_activate_firmware_lock,
156 },
157 [RTAS_FNIDX__IBM_CBE_START_PTCAL] = {
158 .name = "ibm,cbe-start-ptcal",
159 },
160 [RTAS_FNIDX__IBM_CBE_STOP_PTCAL] = {
161 .name = "ibm,cbe-stop-ptcal",
162 },
163 [RTAS_FNIDX__IBM_CHANGE_MSI] = {
164 .name = "ibm,change-msi",
165 },
166 [RTAS_FNIDX__IBM_CLOSE_ERRINJCT] = {
167 .name = "ibm,close-errinjct",
168 .filter = &(const struct rtas_filter) {
169 .buf_idx1 = -1, .size_idx1 = -1,
170 .buf_idx2 = -1, .size_idx2 = -1,
171 },
172 },
173 [RTAS_FNIDX__IBM_CONFIGURE_BRIDGE] = {
174 .name = "ibm,configure-bridge",
175 },
176 [RTAS_FNIDX__IBM_CONFIGURE_CONNECTOR] = {
177 .name = "ibm,configure-connector",
178 .filter = &(const struct rtas_filter) {
179 .buf_idx1 = 0, .size_idx1 = -1,
180 .buf_idx2 = 1, .size_idx2 = -1,
181 .fixed_size = 4096,
182 },
183 },
184 [RTAS_FNIDX__IBM_CONFIGURE_KERNEL_DUMP] = {
185 .name = "ibm,configure-kernel-dump",
186 },
187 [RTAS_FNIDX__IBM_CONFIGURE_PE] = {
188 .name = "ibm,configure-pe",
189 },
190 [RTAS_FNIDX__IBM_CREATE_PE_DMA_WINDOW] = {
191 .name = "ibm,create-pe-dma-window",
192 },
193 [RTAS_FNIDX__IBM_DISPLAY_MESSAGE] = {
194 .name = "ibm,display-message",
195 .filter = &(const struct rtas_filter) {
196 .buf_idx1 = 0, .size_idx1 = -1,
197 .buf_idx2 = -1, .size_idx2 = -1,
198 },
199 },
200 [RTAS_FNIDX__IBM_ERRINJCT] = {
201 .name = "ibm,errinjct",
202 .filter = &(const struct rtas_filter) {
203 .buf_idx1 = 2, .size_idx1 = -1,
204 .buf_idx2 = -1, .size_idx2 = -1,
205 .fixed_size = 1024,
206 },
207 },
208 [RTAS_FNIDX__IBM_EXTI2C] = {
209 .name = "ibm,exti2c",
210 },
211 [RTAS_FNIDX__IBM_GET_CONFIG_ADDR_INFO] = {
212 .name = "ibm,get-config-addr-info",
213 },
214 [RTAS_FNIDX__IBM_GET_CONFIG_ADDR_INFO2] = {
215 .name = "ibm,get-config-addr-info2",
216 .filter = &(const struct rtas_filter) {
217 .buf_idx1 = -1, .size_idx1 = -1,
218 .buf_idx2 = -1, .size_idx2 = -1,
219 },
220 },
221 [RTAS_FNIDX__IBM_GET_DYNAMIC_SENSOR_STATE] = {
222 .name = "ibm,get-dynamic-sensor-state",
223 .filter = &(const struct rtas_filter) {
224 .buf_idx1 = 1, .size_idx1 = -1,
225 .buf_idx2 = -1, .size_idx2 = -1,
226 },
227 /*
228 * PAPR+ v2.13 R1–7.3.19–3 is explicit that the OS
229 * must not call ibm,get-dynamic-sensor-state with
230 * different inputs until a non-retry status has been
231 * returned.
232 */
233 .lock = &rtas_ibm_get_dynamic_sensor_state_lock,
234 },
235 [RTAS_FNIDX__IBM_GET_INDICES] = {
236 .name = "ibm,get-indices",
237 .filter = &(const struct rtas_filter) {
238 .buf_idx1 = 2, .size_idx1 = 3,
239 .buf_idx2 = -1, .size_idx2 = -1,
240 },
241 /*
242 * PAPR+ v2.13 R1–7.3.17–2 says that the OS must not
243 * interleave ibm,get-indices call sequences with
244 * different inputs.
245 */
246 .lock = &rtas_ibm_get_indices_lock,
247 },
248 [RTAS_FNIDX__IBM_GET_RIO_TOPOLOGY] = {
249 .name = "ibm,get-rio-topology",
250 },
251 [RTAS_FNIDX__IBM_GET_SYSTEM_PARAMETER] = {
252 .name = "ibm,get-system-parameter",
253 .filter = &(const struct rtas_filter) {
254 .buf_idx1 = 1, .size_idx1 = 2,
255 .buf_idx2 = -1, .size_idx2 = -1,
256 },
257 },
258 [RTAS_FNIDX__IBM_GET_VPD] = {
259 .name = "ibm,get-vpd",
260 .filter = &(const struct rtas_filter) {
261 .buf_idx1 = 0, .size_idx1 = -1,
262 .buf_idx2 = 1, .size_idx2 = 2,
263 },
264 /*
265 * PAPR+ v2.13 R1–7.3.20–4 indicates that sequences
266 * should not be allowed to interleave.
267 */
268 .lock = &rtas_ibm_get_vpd_lock,
269 },
270 [RTAS_FNIDX__IBM_GET_XIVE] = {
271 .name = "ibm,get-xive",
272 },
273 [RTAS_FNIDX__IBM_INT_OFF] = {
274 .name = "ibm,int-off",
275 },
276 [RTAS_FNIDX__IBM_INT_ON] = {
277 .name = "ibm,int-on",
278 },
279 [RTAS_FNIDX__IBM_IO_QUIESCE_ACK] = {
280 .name = "ibm,io-quiesce-ack",
281 },
282 [RTAS_FNIDX__IBM_LPAR_PERFTOOLS] = {
283 .name = "ibm,lpar-perftools",
284 .filter = &(const struct rtas_filter) {
285 .buf_idx1 = 2, .size_idx1 = 3,
286 .buf_idx2 = -1, .size_idx2 = -1,
287 },
288 /*
289 * PAPR+ v2.13 R1–7.3.26–6 says the OS should allow
290 * only one call sequence in progress at a time.
291 */
292 .lock = &rtas_ibm_lpar_perftools_lock,
293 },
294 [RTAS_FNIDX__IBM_MANAGE_FLASH_IMAGE] = {
295 .name = "ibm,manage-flash-image",
296 },
297 [RTAS_FNIDX__IBM_MANAGE_STORAGE_PRESERVATION] = {
298 .name = "ibm,manage-storage-preservation",
299 },
300 [RTAS_FNIDX__IBM_NMI_INTERLOCK] = {
301 .name = "ibm,nmi-interlock",
302 },
303 [RTAS_FNIDX__IBM_NMI_REGISTER] = {
304 .name = "ibm,nmi-register",
305 },
306 [RTAS_FNIDX__IBM_OPEN_ERRINJCT] = {
307 .name = "ibm,open-errinjct",
308 .filter = &(const struct rtas_filter) {
309 .buf_idx1 = -1, .size_idx1 = -1,
310 .buf_idx2 = -1, .size_idx2 = -1,
311 },
312 },
313 [RTAS_FNIDX__IBM_OPEN_SRIOV_ALLOW_UNFREEZE] = {
314 .name = "ibm,open-sriov-allow-unfreeze",
315 },
316 [RTAS_FNIDX__IBM_OPEN_SRIOV_MAP_PE_NUMBER] = {
317 .name = "ibm,open-sriov-map-pe-number",
318 },
319 [RTAS_FNIDX__IBM_OS_TERM] = {
320 .name = "ibm,os-term",
321 },
322 [RTAS_FNIDX__IBM_PARTNER_CONTROL] = {
323 .name = "ibm,partner-control",
324 },
325 [RTAS_FNIDX__IBM_PHYSICAL_ATTESTATION] = {
326 .name = "ibm,physical-attestation",
327 .filter = &(const struct rtas_filter) {
328 .buf_idx1 = 0, .size_idx1 = 1,
329 .buf_idx2 = -1, .size_idx2 = -1,
330 },
331 /*
332 * This follows a sequence-based pattern similar to
333 * ibm,get-vpd et al. Since PAPR+ restricts
334 * interleaving call sequences for other functions of
335 * this style, assume the restriction applies here,
336 * even though it's not explicit in the spec.
337 */
338 .lock = &rtas_ibm_physical_attestation_lock,
339 },
340 [RTAS_FNIDX__IBM_PLATFORM_DUMP] = {
341 .name = "ibm,platform-dump",
342 .filter = &(const struct rtas_filter) {
343 .buf_idx1 = 4, .size_idx1 = 5,
344 .buf_idx2 = -1, .size_idx2 = -1,
345 },
346 /*
347 * PAPR+ v2.13 7.3.3.4.1 indicates that concurrent
348 * sequences of ibm,platform-dump are allowed if they
349 * are operating on different dump tags. So leave the
350 * lock pointer unset for now. This may need
351 * reconsideration if kernel-internal users appear.
352 */
353 },
354 [RTAS_FNIDX__IBM_POWER_OFF_UPS] = {
355 .name = "ibm,power-off-ups",
356 },
357 [RTAS_FNIDX__IBM_QUERY_INTERRUPT_SOURCE_NUMBER] = {
358 .name = "ibm,query-interrupt-source-number",
359 },
360 [RTAS_FNIDX__IBM_QUERY_PE_DMA_WINDOW] = {
361 .name = "ibm,query-pe-dma-window",
362 },
363 [RTAS_FNIDX__IBM_READ_PCI_CONFIG] = {
364 .name = "ibm,read-pci-config",
365 },
366 [RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE] = {
367 .name = "ibm,read-slot-reset-state",
368 .filter = &(const struct rtas_filter) {
369 .buf_idx1 = -1, .size_idx1 = -1,
370 .buf_idx2 = -1, .size_idx2 = -1,
371 },
372 },
373 [RTAS_FNIDX__IBM_READ_SLOT_RESET_STATE2] = {
374 .name = "ibm,read-slot-reset-state2",
375 },
376 [RTAS_FNIDX__IBM_REMOVE_PE_DMA_WINDOW] = {
377 .name = "ibm,remove-pe-dma-window",
378 },
379 [RTAS_FNIDX__IBM_RESET_PE_DMA_WINDOW] = {
380 /*
381 * Note: PAPR+ v2.13 7.3.31.4.1 spells this as
382 * "ibm,reset-pe-dma-windows" (plural), but RTAS
383 * implementations use the singular form in practice.
384 */
385 .name = "ibm,reset-pe-dma-window",
386 },
387 [RTAS_FNIDX__IBM_SCAN_LOG_DUMP] = {
388 .name = "ibm,scan-log-dump",
389 .filter = &(const struct rtas_filter) {
390 .buf_idx1 = 0, .size_idx1 = 1,
391 .buf_idx2 = -1, .size_idx2 = -1,
392 },
393 },
394 [RTAS_FNIDX__IBM_SET_DYNAMIC_INDICATOR] = {
395 .name = "ibm,set-dynamic-indicator",
396 .filter = &(const struct rtas_filter) {
397 .buf_idx1 = 2, .size_idx1 = -1,
398 .buf_idx2 = -1, .size_idx2 = -1,
399 },
400 /*
401 * PAPR+ v2.13 R1–7.3.18–3 says the OS must not call
402 * this function with different inputs until a
403 * non-retry status has been returned.
404 */
405 .lock = &rtas_ibm_set_dynamic_indicator_lock,
406 },
407 [RTAS_FNIDX__IBM_SET_EEH_OPTION] = {
408 .name = "ibm,set-eeh-option",
409 .filter = &(const struct rtas_filter) {
410 .buf_idx1 = -1, .size_idx1 = -1,
411 .buf_idx2 = -1, .size_idx2 = -1,
412 },
413 },
414 [RTAS_FNIDX__IBM_SET_SLOT_RESET] = {
415 .name = "ibm,set-slot-reset",
416 },
417 [RTAS_FNIDX__IBM_SET_SYSTEM_PARAMETER] = {
418 .name = "ibm,set-system-parameter",
419 .filter = &(const struct rtas_filter) {
420 .buf_idx1 = 1, .size_idx1 = -1,
421 .buf_idx2 = -1, .size_idx2 = -1,
422 },
423 },
424 [RTAS_FNIDX__IBM_SET_XIVE] = {
425 .name = "ibm,set-xive",
426 },
427 [RTAS_FNIDX__IBM_SLOT_ERROR_DETAIL] = {
428 .name = "ibm,slot-error-detail",
429 },
430 [RTAS_FNIDX__IBM_SUSPEND_ME] = {
431 .name = "ibm,suspend-me",
432 .banned_for_syscall_on_le = true,
433 .filter = &(const struct rtas_filter) {
434 .buf_idx1 = -1, .size_idx1 = -1,
435 .buf_idx2 = -1, .size_idx2 = -1,
436 },
437 },
438 [RTAS_FNIDX__IBM_TUNE_DMA_PARMS] = {
439 .name = "ibm,tune-dma-parms",
440 },
441 [RTAS_FNIDX__IBM_UPDATE_FLASH_64_AND_REBOOT] = {
442 .name = "ibm,update-flash-64-and-reboot",
443 },
444 [RTAS_FNIDX__IBM_UPDATE_NODES] = {
445 .name = "ibm,update-nodes",
446 .banned_for_syscall_on_le = true,
447 .filter = &(const struct rtas_filter) {
448 .buf_idx1 = 0, .size_idx1 = -1,
449 .buf_idx2 = -1, .size_idx2 = -1,
450 .fixed_size = 4096,
451 },
452 },
453 [RTAS_FNIDX__IBM_UPDATE_PROPERTIES] = {
454 .name = "ibm,update-properties",
455 .banned_for_syscall_on_le = true,
456 .filter = &(const struct rtas_filter) {
457 .buf_idx1 = 0, .size_idx1 = -1,
458 .buf_idx2 = -1, .size_idx2 = -1,
459 .fixed_size = 4096,
460 },
461 },
462 [RTAS_FNIDX__IBM_VALIDATE_FLASH_IMAGE] = {
463 .name = "ibm,validate-flash-image",
464 },
465 [RTAS_FNIDX__IBM_WRITE_PCI_CONFIG] = {
466 .name = "ibm,write-pci-config",
467 },
468 [RTAS_FNIDX__NVRAM_FETCH] = {
469 .name = "nvram-fetch",
470 },
471 [RTAS_FNIDX__NVRAM_STORE] = {
472 .name = "nvram-store",
473 },
474 [RTAS_FNIDX__POWER_OFF] = {
475 .name = "power-off",
476 },
477 [RTAS_FNIDX__PUT_TERM_CHAR] = {
478 .name = "put-term-char",
479 },
480 [RTAS_FNIDX__QUERY_CPU_STOPPED_STATE] = {
481 .name = "query-cpu-stopped-state",
482 },
483 [RTAS_FNIDX__READ_PCI_CONFIG] = {
484 .name = "read-pci-config",
485 },
486 [RTAS_FNIDX__RTAS_LAST_ERROR] = {
487 .name = "rtas-last-error",
488 },
489 [RTAS_FNIDX__SET_INDICATOR] = {
490 .name = "set-indicator",
491 .filter = &(const struct rtas_filter) {
492 .buf_idx1 = -1, .size_idx1 = -1,
493 .buf_idx2 = -1, .size_idx2 = -1,
494 },
495 },
496 [RTAS_FNIDX__SET_POWER_LEVEL] = {
497 .name = "set-power-level",
498 .filter = &(const struct rtas_filter) {
499 .buf_idx1 = -1, .size_idx1 = -1,
500 .buf_idx2 = -1, .size_idx2 = -1,
501 },
502 },
503 [RTAS_FNIDX__SET_TIME_FOR_POWER_ON] = {
504 .name = "set-time-for-power-on",
505 .filter = &(const struct rtas_filter) {
506 .buf_idx1 = -1, .size_idx1 = -1,
507 .buf_idx2 = -1, .size_idx2 = -1,
508 },
509 },
510 [RTAS_FNIDX__SET_TIME_OF_DAY] = {
511 .name = "set-time-of-day",
512 .filter = &(const struct rtas_filter) {
513 .buf_idx1 = -1, .size_idx1 = -1,
514 .buf_idx2 = -1, .size_idx2 = -1,
515 },
516 },
517 [RTAS_FNIDX__START_CPU] = {
518 .name = "start-cpu",
519 },
520 [RTAS_FNIDX__STOP_SELF] = {
521 .name = "stop-self",
522 },
523 [RTAS_FNIDX__SYSTEM_REBOOT] = {
524 .name = "system-reboot",
525 },
526 [RTAS_FNIDX__THAW_TIME_BASE] = {
527 .name = "thaw-time-base",
528 },
529 [RTAS_FNIDX__WRITE_PCI_CONFIG] = {
530 .name = "write-pci-config",
531 },
532 };
533
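/* Iterate over every entry in rtas_function_table. */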
534 #define for_each_rtas_function(funcp) \
535 for (funcp = &rtas_function_table[0]; \
536 funcp < &rtas_function_table[ARRAY_SIZE(rtas_function_table)]; \
537 ++funcp)
538
539 /*
540 * Nearly all RTAS calls need to be serialized. All uses of the
541 * default rtas_args block must hold rtas_lock.
542 *
543 * Exceptions to the RTAS serialization requirement (e.g. stop-self)
544 * must use a separate rtas_args structure.
545 */
546 static DEFINE_RAW_SPINLOCK(rtas_lock);
547 static struct rtas_args rtas_args;
548
549 /**
550 * rtas_function_token() - RTAS function token lookup.
551 * @handle: Function handle, e.g. RTAS_FN_EVENT_SCAN.
552 *
553 * Context: Any context.
554 * Return: the token value for the function if implemented by this platform,
555 * otherwise RTAS_UNKNOWN_SERVICE.
556 */
557 s32 rtas_function_token(const rtas_fn_handle_t handle)
558 {
559 const size_t index = handle.index;
560 const bool out_of_bounds = index >= ARRAY_SIZE(rtas_function_table);
561
562 if (WARN_ONCE(out_of_bounds, "invalid function index %zu", index))
563 return RTAS_UNKNOWN_SERVICE;
564 /*
565 * Various drivers attempt token lookups on non-RTAS
566 * platforms.
567 */
568 if (!rtas.dev)
569 return RTAS_UNKNOWN_SERVICE;
570
571 return rtas_function_table[index].token;
572 }
573 EXPORT_SYMBOL_GPL(rtas_function_token);
574
575 static int rtas_function_cmp(const void *a, const void *b)
576 {
577 const struct rtas_function *f1 = a;
578 const struct rtas_function *f2 = b;
579
580 return strcmp(f1->name, f2->name);
581 }
582
583 /*
584 * Boot-time initialization of the function table needs the lookup to
585 * return a non-const-qualified object. Use rtas_name_to_function()
586 * in all other contexts.
587 */
588 static struct rtas_function *__rtas_name_to_function(const char *name)
589 {
590 const struct rtas_function key = {
591 .name = name,
592 };
593 struct rtas_function *found;
594
595 found = bsearch(&key, rtas_function_table, ARRAY_SIZE(rtas_function_table),
596 sizeof(rtas_function_table[0]), rtas_function_cmp);
597
598 return found;
599 }
600
601 static const struct rtas_function *rtas_name_to_function(const char *name)
602 {
603 return __rtas_name_to_function(name);
604 }
605
606 static DEFINE_XARRAY(rtas_token_to_function_xarray);
607
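/*
 * Populate the token -> function descriptor reverse mapping once token
 * values have been obtained from the device tree. It is consulted by
 * rtas_token_to_function_untrusted() and the tracepoint path.
 */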
608 static int __init rtas_token_to_function_xarray_init(void)
609 {
610 const struct rtas_function *func;
611 int err = 0;
612
613 for_each_rtas_function(func) {
614 const s32 token = func->token;
615
616 if (token == RTAS_UNKNOWN_SERVICE)
617 continue;
618
619 err = xa_err(xa_store(&rtas_token_to_function_xarray,
620 token, (void *)func, GFP_KERNEL));
621 if (err)
622 break;
623 }
624
625 return err;
626 }
627 arch_initcall(rtas_token_to_function_xarray_init);
628
629 /*
630 * For use by sys_rtas(), where the token value is provided by user
631 * space and we don't want to warn on failed lookups.
632 */
633 static const struct rtas_function *rtas_token_to_function_untrusted(s32 token)
634 {
635 return xa_load(&rtas_token_to_function_xarray, token);
636 }
637
638 /*
639 * Reverse lookup for deriving the function descriptor from a
640 * known-good token value in contexts where the former is not already
641 * available. @token must be valid, e.g. derived from the result of a
642 * prior lookup against the function table.
643 */
644 static const struct rtas_function *rtas_token_to_function(s32 token)
645 {
646 const struct rtas_function *func;
647
648 if (WARN_ONCE(token < 0, "invalid token %d", token))
649 return NULL;
650
651 func = rtas_token_to_function_untrusted(token);
652 if (func)
653 return func;
654 /*
655 * Fall back to linear scan in case the reverse mapping hasn't
656 * been initialized yet.
657 */
658 if (xa_empty(&rtas_token_to_function_xarray)) {
659 for_each_rtas_function(func) {
660 if (func->token == token)
661 return func;
662 }
663 }
664
665 WARN_ONCE(true, "unexpected failed lookup for token %d", token);
666 return NULL;
667 }
668
669 /* This is here deliberately so it's only used in this file */
670 void enter_rtas(unsigned long);
671
672 static void __do_enter_rtas(struct rtas_args *args)
673 {
674 enter_rtas(__pa(args));
675 srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
676 }
677
678 static void __do_enter_rtas_trace(struct rtas_args *args)
679 {
680 const struct rtas_function *func = rtas_token_to_function(be32_to_cpu(args->token));
681
682 /*
683 * If there is a per-function lock, it must be held by the
684 * caller.
685 */
686 if (func->lock)
687 lockdep_assert_held(func->lock);
688
689 if (args == &rtas_args)
690 lockdep_assert_held(&rtas_lock);
691
692 trace_rtas_input(args, func->name);
693 trace_rtas_ll_entry(args);
694
695 __do_enter_rtas(args);
696
697 trace_rtas_ll_exit(args);
698 trace_rtas_output(args, func->name);
699 }
700
701 static void do_enter_rtas(struct rtas_args *args)
702 {
703 const unsigned long msr = mfmsr();
704 /*
705 * Situations where we want to skip any active tracepoints for
706 * safety reasons:
707 *
708 * 1. The last code executed on an offline CPU as it stops,
709 * i.e. we're about to call stop-self. The tracepoints'
710 * function name lookup uses xarray, which uses RCU, which
711 * isn't valid to call on an offline CPU. Any events
712 * emitted on an offline CPU will be discarded anyway.
713 *
714 * 2. In real mode, as when invoking ibm,nmi-interlock from
715 * the pseries MCE handler. We cannot count on trace
716 * buffers or the entries in rtas_token_to_function_xarray
717 * to be contained in the RMO.
718 */
719 const unsigned long mask = MSR_IR | MSR_DR;
720 const bool can_trace = likely(cpu_online(raw_smp_processor_id()) &&
721 (msr & mask) == mask);
722 /*
723 * Make sure MSR[RI] is currently enabled as it will be forced later
724 * in enter_rtas.
725 */
726 BUG_ON(!(msr & MSR_RI));
727
728 BUG_ON(!irqs_disabled());
729
730 hard_irq_disable(); /* Ensure MSR[EE] is disabled on PPC64 */
731
732 if (can_trace)
733 __do_enter_rtas_trace(args);
734 else
735 __do_enter_rtas(args);
736 }
737
738 struct rtas_t rtas;
739
740 DEFINE_SPINLOCK(rtas_data_buf_lock);
741 EXPORT_SYMBOL_GPL(rtas_data_buf_lock);
742
743 char rtas_data_buf[RTAS_DATA_BUF_SIZE] __aligned(SZ_4K);
744 EXPORT_SYMBOL_GPL(rtas_data_buf);
745
746 unsigned long rtas_rmo_buf;
747
748 /*
749 * If non-NULL, this hook is called when the kernel terminates. It is
750 * a function pointer so that rtas_flash can be built as a module.
751 */
752 void (*rtas_flash_term_hook)(int);
753 EXPORT_SYMBOL_GPL(rtas_flash_term_hook);
754
755 /*
756 * call_rtas_display_status and call_rtas_display_status_delay
757 * are designed only for very early low-level debugging, which
758 * is why the token is hard-coded to 10.
759 */
760 static void call_rtas_display_status(unsigned char c)
761 {
762 unsigned long flags;
763
764 if (!rtas.base)
765 return;
766
767 raw_spin_lock_irqsave(&rtas_lock, flags);
768 rtas_call_unlocked(&rtas_args, 10, 1, 1, NULL, c);
769 raw_spin_unlock_irqrestore(&rtas_lock, flags);
770 }
771
772 static void call_rtas_display_status_delay(char c)
773 {
774 static int pending_newline = 0; /* did last write end with unprinted newline? */
775 static int width = 16;
776
777 if (c == '\n') {
778 while (width-- > 0)
779 call_rtas_display_status(' ');
780 width = 16;
781 mdelay(500);
782 pending_newline = 1;
783 } else {
784 if (pending_newline) {
785 call_rtas_display_status('\r');
786 call_rtas_display_status('\n');
787 }
788 pending_newline = 0;
789 if (width--) {
790 call_rtas_display_status(c);
791 udelay(10000);
792 }
793 }
794 }
795
796 void __init udbg_init_rtas_panel(void)
797 {
798 udbg_putc = call_rtas_display_status_delay;
799 }
800
801 void rtas_progress(char *s, unsigned short hex)
802 {
803 struct device_node *root;
804 int width;
805 const __be32 *p;
806 char *os;
807 static int display_character, set_indicator;
808 static int display_width, display_lines, form_feed;
809 static const int *row_width;
810 static DEFINE_SPINLOCK(progress_lock);
811 static int current_line;
812 static int pending_newline = 0; /* did last write end with unprinted newline? */
813
814 if (!rtas.base)
815 return;
816
817 if (display_width == 0) {
818 display_width = 0x10;
819 if ((root = of_find_node_by_path("/rtas"))) {
820 if ((p = of_get_property(root,
821 "ibm,display-line-length", NULL)))
822 display_width = be32_to_cpu(*p);
823 if ((p = of_get_property(root,
824 "ibm,form-feed", NULL)))
825 form_feed = be32_to_cpu(*p);
826 if ((p = of_get_property(root,
827 "ibm,display-number-of-lines", NULL)))
828 display_lines = be32_to_cpu(*p);
829 row_width = of_get_property(root,
830 "ibm,display-truncation-length", NULL);
831 of_node_put(root);
832 }
833 display_character = rtas_function_token(RTAS_FN_DISPLAY_CHARACTER);
834 set_indicator = rtas_function_token(RTAS_FN_SET_INDICATOR);
835 }
836
837 if (display_character == RTAS_UNKNOWN_SERVICE) {
838 /* use hex display if available */
839 if (set_indicator != RTAS_UNKNOWN_SERVICE)
840 rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
841 return;
842 }
843
844 spin_lock(&progress_lock);
845
846 /*
847 * Last write ended with newline, but we didn't print it since
848 * it would just clear the bottom line of output. Print it now
849 * instead.
850 *
851 * If no newline is pending and form feed is supported, clear the
852 * display with a form feed; otherwise, print a CR to start output
853 * at the beginning of the line.
854 */
855 if (pending_newline) {
856 rtas_call(display_character, 1, 1, NULL, '\r');
857 rtas_call(display_character, 1, 1, NULL, '\n');
858 pending_newline = 0;
859 } else {
860 current_line = 0;
861 if (form_feed)
862 rtas_call(display_character, 1, 1, NULL,
863 (char)form_feed);
864 else
865 rtas_call(display_character, 1, 1, NULL, '\r');
866 }
867
868 if (row_width)
869 width = row_width[current_line];
870 else
871 width = display_width;
872 os = s;
873 while (*os) {
874 if (*os == '\n' || *os == '\r') {
875 /* If newline is the last character, save it
876 * until next call to avoid bumping up the
877 * display output.
878 */
879 if (*os == '\n' && !os[1]) {
880 pending_newline = 1;
881 current_line++;
882 if (current_line > display_lines-1)
883 current_line = display_lines-1;
884 spin_unlock(&progress_lock);
885 return;
886 }
887
888 /* RTAS wants CR-LF, not just LF */
889
890 if (*os == '\n') {
891 rtas_call(display_character, 1, 1, NULL, '\r');
892 rtas_call(display_character, 1, 1, NULL, '\n');
893 } else {
894 /* CR might be used to re-draw a line, so we'll
895 * leave it alone and not add LF.
896 */
897 rtas_call(display_character, 1, 1, NULL, *os);
898 }
899
900 if (row_width)
901 width = row_width[current_line];
902 else
903 width = display_width;
904 } else {
905 width--;
906 rtas_call(display_character, 1, 1, NULL, *os);
907 }
908
909 os++;
910
911 /* If we've run past the display width, skip ahead to the next line break. */
912 if (width <= 0)
913 while ((*os != 0) && (*os != '\n') && (*os != '\r'))
914 os++;
915 }
916
917 spin_unlock(&progress_lock);
918 }
919 EXPORT_SYMBOL_GPL(rtas_progress); /* needed by rtas_flash module */
920
921 int rtas_token(const char *service)
922 {
923 const struct rtas_function *func;
924 const __be32 *tokp;
925
926 if (rtas.dev == NULL)
927 return RTAS_UNKNOWN_SERVICE;
928
929 func = rtas_name_to_function(service);
930 if (func)
931 return func->token;
932 /*
933 * The caller is looking up a name that is not known to be an
934 * RTAS function. Either it's a function that needs to be
935 * added to the table, or they're misusing rtas_token() to
936 * access non-function properties of the /rtas node. Warn and
937 * fall back to the legacy behavior.
938 */
939 WARN_ONCE(1, "unknown function `%s`, should it be added to rtas_function_table?\n",
940 service);
941
942 tokp = of_get_property(rtas.dev, service, NULL);
943 return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
944 }
945 EXPORT_SYMBOL_GPL(rtas_token);
946
947 #ifdef CONFIG_RTAS_ERROR_LOGGING
948
949 static u32 rtas_error_log_max __ro_after_init = RTAS_ERROR_LOG_MAX;
950
951 /*
952 * Return the firmware-specified size of the error log buffer
953 * for all rtas calls that require an error buffer argument.
954 * This includes 'check-exception' and 'rtas-last-error'.
955 */
956 int rtas_get_error_log_max(void)
957 {
958 return rtas_error_log_max;
959 }
960
961 static void __init init_error_log_max(void)
962 {
963 static const char propname[] __initconst = "rtas-error-log-max";
964 u32 max;
965
966 if (of_property_read_u32(rtas.dev, propname, &max)) {
967 pr_warn("%s not found, using default of %u\n",
968 propname, RTAS_ERROR_LOG_MAX);
969 max = RTAS_ERROR_LOG_MAX;
970 }
971
972 if (max > RTAS_ERROR_LOG_MAX) {
973 pr_warn("%s = %u, clamping max error log size to %u\n",
974 propname, max, RTAS_ERROR_LOG_MAX);
975 max = RTAS_ERROR_LOG_MAX;
976 }
977
978 rtas_error_log_max = max;
979 }
980
981
982 static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
983
984 /** Return a copy of the detailed error text associated with the
985 * most recent failed call to rtas. Because the error text
986 * might go stale if there are any other intervening rtas calls,
987 * this routine must be called atomically with whatever produced
988 * the error (i.e. with rtas_lock still held from the previous call).
989 */
990 static char *__fetch_rtas_last_error(char *altbuf)
991 {
992 const s32 token = rtas_function_token(RTAS_FN_RTAS_LAST_ERROR);
993 struct rtas_args err_args, save_args;
994 u32 bufsz;
995 char *buf = NULL;
996
997 lockdep_assert_held(&rtas_lock);
998
999 if (token == -1)
1000 return NULL;
1001
1002 bufsz = rtas_get_error_log_max();
1003
1004 err_args.token = cpu_to_be32(token);
1005 err_args.nargs = cpu_to_be32(2);
1006 err_args.nret = cpu_to_be32(1);
1007 err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
1008 err_args.args[1] = cpu_to_be32(bufsz);
1009 err_args.args[2] = 0;
1010
1011 save_args = rtas_args;
1012 rtas_args = err_args;
1013
1014 do_enter_rtas(&rtas_args);
1015
1016 err_args = rtas_args;
1017 rtas_args = save_args;
1018
1019 /* Log the error in the unlikely case that there was one. */
1020 if (unlikely(err_args.args[2] == 0)) {
1021 if (altbuf) {
1022 buf = altbuf;
1023 } else {
1024 buf = rtas_err_buf;
1025 if (slab_is_available())
1026 buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
1027 }
1028 if (buf)
1029 memmove(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
1030 }
1031
1032 return buf;
1033 }
1034
1035 #define get_errorlog_buffer() kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)
1036
1037 #else /* CONFIG_RTAS_ERROR_LOGGING */
1038 #define __fetch_rtas_last_error(x) NULL
1039 #define get_errorlog_buffer() NULL
1040 static void __init init_error_log_max(void) {}
1041 #endif
1042
1043
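/*
 * Common helper for rtas_call() and rtas_call_unlocked(): marshal the
 * token, argument counts and inputs into @args in big-endian form,
 * zero the return slots, and enter RTAS.
 */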
1044 static void
1045 va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
1046 va_list list)
1047 {
1048 int i;
1049
1050 args->token = cpu_to_be32(token);
1051 args->nargs = cpu_to_be32(nargs);
1052 args->nret = cpu_to_be32(nret);
1053 args->rets = &(args->args[nargs]);
1054
1055 for (i = 0; i < nargs; ++i)
1056 args->args[i] = cpu_to_be32(va_arg(list, __u32));
1057
1058 for (i = 0; i < nret; ++i)
1059 args->rets[i] = 0;
1060
1061 do_enter_rtas(args);
1062 }
1063
1064 /**
1065 * rtas_call_unlocked() - Invoke an RTAS firmware function without synchronization.
1066 * @args: RTAS parameter block to be used for the call, must obey RTAS addressing
1067 * constraints.
1068 * @token: Identifies the function being invoked.
1069 * @nargs: Number of input parameters. Does not include token.
1070 * @nret: Number of output parameters, including the call status.
1071 * @....: List of @nargs input parameters.
1072 *
1073 * Invokes the RTAS function indicated by @token, which the caller
1074 * should obtain via rtas_function_token().
1075 *
1076 * This function is similar to rtas_call(), but must be used with a
1077 * limited set of RTAS calls specifically exempted from the general
1078 * requirement that only one RTAS call may be in progress at any
1079 * time. Examples include stop-self and ibm,nmi-interlock.
1080 */
1081 void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
1082 {
1083 va_list list;
1084
1085 va_start(list, nret);
1086 va_rtas_call_unlocked(args, token, nargs, nret, list);
1087 va_end(list);
1088 }
1089
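/*
 * Error injection functions are gated by the kernel lockdown LSM; see
 * the LOCKDOWN_RTAS_ERROR_INJECTION checks in rtas_call() and sys_rtas().
 */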
1090 static bool token_is_restricted_errinjct(s32 token)
1091 {
1092 return token == rtas_function_token(RTAS_FN_IBM_OPEN_ERRINJCT) ||
1093 token == rtas_function_token(RTAS_FN_IBM_ERRINJCT);
1094 }
1095
1096 /**
1097 * rtas_call() - Invoke an RTAS firmware function.
1098 * @token: Identifies the function being invoked.
1099 * @nargs: Number of input parameters. Does not include token.
1100 * @nret: Number of output parameters, including the call status.
1101 * @outputs: Array of @nret output words.
1102 * @....: List of @nargs input parameters.
1103 *
1104 * Invokes the RTAS function indicated by @token, which the caller
1105 * should obtain via rtas_function_token().
1106 *
1107 * The @nargs and @nret arguments must match the number of input and
1108 * output parameters specified for the RTAS function.
1109 *
1110 * rtas_call() returns RTAS status codes, not conventional Linux errno
1111 * values. Callers must translate any failure to an appropriate errno
1112 * in syscall context. Most callers of RTAS functions that can return
1113 * -2 or 990x should use rtas_busy_delay() to correctly handle those
1114 * statuses before calling again.
1115 *
1116 * The return value descriptions are adapted from 7.2.8 [RTAS] Return
1117 * Codes of the PAPR and CHRP specifications.
1118 *
1119 * Context: Process context preferably, interrupt context if
1120 * necessary. Acquires an internal spinlock and may perform
1121 * GFP_ATOMIC slab allocation in error path. Unsafe for NMI
1122 * context.
1123 * Return:
1124 * * 0 - RTAS function call succeeded.
1125 * * -1 - RTAS function encountered a hardware or
1126 * platform error, or the token is invalid,
1127 * or the function is restricted by kernel policy.
1128 * * -2 - Specs say "A necessary hardware device was busy,
1129 * and the requested function could not be
1130 * performed. The operation should be retried at
1131 * a later time." This is misleading, at least with
1132 * respect to current RTAS implementations. What it
1133 * usually means in practice is that the function
1134 * could not be completed while meeting RTAS's
1135 * deadline for returning control to the OS (250us
1136 * for PAPR/PowerVM, typically), but the call may be
1137 * immediately reattempted to resume work on it.
1138 * * -3 - Parameter error.
1139 * * -7 - Unexpected state change.
1140 * * 9000...9899 - Vendor-specific success codes.
1141 * * 9900...9905 - Advisory extended delay. Caller should try
1142 * again after ~10^x ms has elapsed, where x is
1143 * the last digit of the status [0-5]. Again going
1144 * beyond the PAPR text, 990x on PowerVM indicates
1145 * contention for RTAS-internal resources. Other
1146 * RTAS call sequences in progress should be
1147 * allowed to complete before reattempting the
1148 * call.
1149 * * -9000 - Multi-level isolation error.
1150 * * -9999...-9004 - Vendor-specific error codes.
1151 * * Additional negative values - Function-specific error.
1152 * * Additional positive values - Function-specific success.
1153 */
1154 int rtas_call(int token, int nargs, int nret, int *outputs, ...)
1155 {
1156 struct pin_cookie cookie;
1157 va_list list;
1158 int i;
1159 unsigned long flags;
1160 struct rtas_args *args;
1161 char *buff_copy = NULL;
1162 int ret;
1163
1164 if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
1165 return -1;
1166
1167 if (token_is_restricted_errinjct(token)) {
1168 /*
1169 * It would be nicer to not discard the error value
1170 * from security_locked_down(), but callers expect an
1171 * RTAS status, not an errno.
1172 */
1173 if (security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION))
1174 return -1;
1175 }
1176
1177 if ((mfmsr() & (MSR_IR|MSR_DR)) != (MSR_IR|MSR_DR)) {
1178 WARN_ON_ONCE(1);
1179 return -1;
1180 }
1181
1182 raw_spin_lock_irqsave(&rtas_lock, flags);
1183 cookie = lockdep_pin_lock(&rtas_lock);
1184
1185 /* We use the global rtas args buffer */
1186 args = &rtas_args;
1187
1188 va_start(list, outputs);
1189 va_rtas_call_unlocked(args, token, nargs, nret, list);
1190 va_end(list);
1191
1192 /* A -1 return code indicates that the last command couldn't
1193 be completed due to a hardware error. */
1194 if (be32_to_cpu(args->rets[0]) == -1)
1195 buff_copy = __fetch_rtas_last_error(NULL);
1196
1197 if (nret > 1 && outputs != NULL)
1198 for (i = 0; i < nret-1; ++i)
1199 outputs[i] = be32_to_cpu(args->rets[i + 1]);
1200 ret = (nret > 0) ? be32_to_cpu(args->rets[0]) : 0;
1201
1202 lockdep_unpin_lock(&rtas_lock, cookie);
1203 raw_spin_unlock_irqrestore(&rtas_lock, flags);
1204
1205 if (buff_copy) {
1206 log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
1207 if (slab_is_available())
1208 kfree(buff_copy);
1209 }
1210 return ret;
1211 }
1212 EXPORT_SYMBOL_GPL(rtas_call);
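/*
 * Typical calling pattern (a sketch; see e.g. rtas_set_power_level()
 * below for a real user): retry while rtas_busy_delay() reports a busy
 * or extended-delay status, then convert the final status with
 * rtas_error_rc() where the caller needs an errno:
 *
 *	do {
 *		rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
 *	} while (rtas_busy_delay(rc));
 *
 *	if (rc < 0)
 *		return rtas_error_rc(rc);
 */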
1213
1214 /**
1215 * rtas_busy_delay_time() - From an RTAS status value, calculate the
1216 * suggested delay time in milliseconds.
1217 *
1218 * @status: a value returned from rtas_call() or similar APIs which return
1219 * the status of an RTAS function call.
1220 *
1221 * Context: Any context.
1222 *
1223 * Return:
1224 * * 100000 - If @status is 9905.
1225 * * 10000 - If @status is 9904.
1226 * * 1000 - If @status is 9903.
1227 * * 100 - If @status is 9902.
1228 * * 10 - If @status is 9901.
1229 * * 1 - If @status is either 9900 or -2. This is "wrong" for -2, but
1230 * some callers depend on this behavior, and the worst outcome
1231 * is that they will delay for longer than necessary.
1232 * * 0 - If @status is not a busy or extended delay value.
1233 */
1234 unsigned int rtas_busy_delay_time(int status)
1235 {
1236 int order;
1237 unsigned int ms = 0;
1238
1239 if (status == RTAS_BUSY) {
1240 ms = 1;
1241 } else if (status >= RTAS_EXTENDED_DELAY_MIN &&
1242 status <= RTAS_EXTENDED_DELAY_MAX) {
1243 order = status - RTAS_EXTENDED_DELAY_MIN;
1244 for (ms = 1; order > 0; order--)
1245 ms *= 10;
1246 }
1247
1248 return ms;
1249 }
1250
1251 /*
1252 * Early boot fallback for rtas_busy_delay().
1253 */
1254 static bool __init rtas_busy_delay_early(int status)
1255 {
1256 static size_t successive_ext_delays __initdata;
1257 bool retry;
1258
1259 switch (status) {
1260 case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX:
1261 /*
1262 * In the unlikely case that we receive an extended
1263 * delay status in early boot, the OS is probably not
1264 * the cause, and there's nothing we can do to clear
1265 * the condition. Best we can do is delay for a bit
1266 * and hope it's transient. Lie to the caller if it
1267 * seems like we're stuck in a retry loop.
1268 */
1269 mdelay(1);
1270 retry = true;
1271 successive_ext_delays += 1;
1272 if (successive_ext_delays > 1000) {
1273 pr_err("too many extended delays, giving up\n");
1274 dump_stack();
1275 retry = false;
1276 successive_ext_delays = 0;
1277 }
1278 break;
1279 case RTAS_BUSY:
1280 retry = true;
1281 successive_ext_delays = 0;
1282 break;
1283 default:
1284 retry = false;
1285 successive_ext_delays = 0;
1286 break;
1287 }
1288
1289 return retry;
1290 }
1291
1292 /**
1293 * rtas_busy_delay() - helper for RTAS busy and extended delay statuses
1294 *
1295 * @status: a value returned from rtas_call() or similar APIs which return
1296 * the status of an RTAS function call.
1297 *
1298 * Context: Process context. May sleep or schedule.
1299 *
1300 * Return:
1301 * * true - @status is RTAS_BUSY or an extended delay hint. The
1302 * caller may assume that the CPU has been yielded if necessary,
1303 * and that an appropriate delay for @status has elapsed.
1304 * Generally the caller should reattempt the RTAS call which
1305 * yielded @status.
1306 *
1307 * * false - @status is not @RTAS_BUSY nor an extended delay hint. The
1308 * caller is responsible for handling @status.
1309 */
1310 bool __ref rtas_busy_delay(int status)
1311 {
1312 unsigned int ms;
1313 bool ret;
1314
1315 /*
1316 * Can't do timed sleeps before timekeeping is up.
1317 */
1318 if (system_state < SYSTEM_SCHEDULING)
1319 return rtas_busy_delay_early(status);
1320
1321 switch (status) {
1322 case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX:
1323 ret = true;
1324 ms = rtas_busy_delay_time(status);
1325 /*
1326 * The extended delay hint can be as high as 100 seconds.
1327 * Surely any function returning such a status is either
1328 * buggy or isn't going to be significantly slowed by us
1329 * polling at 1HZ. Clamp the sleep time to one second.
1330 */
1331 ms = clamp(ms, 1U, 1000U);
1332 /*
1333 * The delay hint is an order-of-magnitude suggestion, not a
1334 * minimum. It is fine, possibly even advantageous, for us to
1335 * pause for less time than hinted. To make sure pause time will
1336 * not be way longer than requested independent of HZ
1337 * configuration, use fsleep(). See fsleep() for details of
1338 * used sleeping functions.
1339 */
1340 fsleep(ms * 1000);
1341 break;
1342 case RTAS_BUSY:
1343 ret = true;
1344 /*
1345 * We should call again immediately if there's no other
1346 * work to do.
1347 */
1348 cond_resched();
1349 break;
1350 default:
1351 ret = false;
1352 /*
1353 * Not a busy or extended delay status; the caller should
1354 * handle @status itself. Ensure we warn on misuses in
1355 * atomic context regardless.
1356 */
1357 might_sleep();
1358 break;
1359 }
1360
1361 return ret;
1362 }
1363 EXPORT_SYMBOL_GPL(rtas_busy_delay);
1364
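/*
 * Translate an RTAS status code into a Linux errno for callers that
 * must return errnos rather than raw RTAS statuses.
 */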
1365 int rtas_error_rc(int rtas_rc)
1366 {
1367 int rc;
1368
1369 switch (rtas_rc) {
1370 case RTAS_HARDWARE_ERROR: /* Hardware Error */
1371 rc = -EIO;
1372 break;
1373 case RTAS_INVALID_PARAMETER: /* Bad indicator/domain/etc */
1374 rc = -EINVAL;
1375 break;
1376 case -9000: /* Isolation error */
1377 rc = -EFAULT;
1378 break;
1379 case -9001: /* Outstanding TCE/PTE */
1380 rc = -EEXIST;
1381 break;
1382 case -9002: /* No usable slot */
1383 rc = -ENODEV;
1384 break;
1385 default:
1386 pr_err("%s: unexpected error %d\n", __func__, rtas_rc);
1387 rc = -ERANGE;
1388 break;
1389 }
1390 return rc;
1391 }
1392 EXPORT_SYMBOL_GPL(rtas_error_rc);
1393
1394 int rtas_get_power_level(int powerdomain, int *level)
1395 {
1396 int token = rtas_function_token(RTAS_FN_GET_POWER_LEVEL);
1397 int rc;
1398
1399 if (token == RTAS_UNKNOWN_SERVICE)
1400 return -ENOENT;
1401
1402 while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
1403 udelay(1);
1404
1405 if (rc < 0)
1406 return rtas_error_rc(rc);
1407 return rc;
1408 }
1409 EXPORT_SYMBOL_GPL(rtas_get_power_level);
1410
1411 int rtas_set_power_level(int powerdomain, int level, int *setlevel)
1412 {
1413 int token = rtas_function_token(RTAS_FN_SET_POWER_LEVEL);
1414 int rc;
1415
1416 if (token == RTAS_UNKNOWN_SERVICE)
1417 return -ENOENT;
1418
1419 do {
1420 rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
1421 } while (rtas_busy_delay(rc));
1422
1423 if (rc < 0)
1424 return rtas_error_rc(rc);
1425 return rc;
1426 }
1427 EXPORT_SYMBOL_GPL(rtas_set_power_level);
1428
1429 int rtas_get_sensor(int sensor, int index, int *state)
1430 {
1431 int token = rtas_function_token(RTAS_FN_GET_SENSOR_STATE);
1432 int rc;
1433
1434 if (token == RTAS_UNKNOWN_SERVICE)
1435 return -ENOENT;
1436
1437 do {
1438 rc = rtas_call(token, 2, 2, state, sensor, index);
1439 } while (rtas_busy_delay(rc));
1440
1441 if (rc < 0)
1442 return rtas_error_rc(rc);
1443 return rc;
1444 }
1445 EXPORT_SYMBOL_GPL(rtas_get_sensor);
1446
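/*
 * Like rtas_get_sensor(), but busy and extended-delay statuses are
 * treated as unexpected (see the WARN_ON) rather than retried, so this
 * variant never sleeps.
 */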
1447 int rtas_get_sensor_fast(int sensor, int index, int *state)
1448 {
1449 int token = rtas_function_token(RTAS_FN_GET_SENSOR_STATE);
1450 int rc;
1451
1452 if (token == RTAS_UNKNOWN_SERVICE)
1453 return -ENOENT;
1454
1455 rc = rtas_call(token, 2, 2, state, sensor, index);
1456 WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
1457 rc <= RTAS_EXTENDED_DELAY_MAX));
1458
1459 if (rc < 0)
1460 return rtas_error_rc(rc);
1461 return rc;
1462 }
1463
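/*
 * Scan the "rtas-indicators" property of the /rtas node for @token.
 * Returns true if found, and optionally reports the maximum indicator
 * index via @maxindex.
 */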
1464 bool rtas_indicator_present(int token, int *maxindex)
1465 {
1466 int proplen, count, i;
1467 const struct indicator_elem {
1468 __be32 token;
1469 __be32 maxindex;
1470 } *indicators;
1471
1472 indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
1473 if (!indicators)
1474 return false;
1475
1476 count = proplen / sizeof(struct indicator_elem);
1477
1478 for (i = 0; i < count; i++) {
1479 if (__be32_to_cpu(indicators[i].token) != token)
1480 continue;
1481 if (maxindex)
1482 *maxindex = __be32_to_cpu(indicators[i].maxindex);
1483 return true;
1484 }
1485
1486 return false;
1487 }
1488
1489 int rtas_set_indicator(int indicator, int index, int new_value)
1490 {
1491 int token = rtas_function_token(RTAS_FN_SET_INDICATOR);
1492 int rc;
1493
1494 if (token == RTAS_UNKNOWN_SERVICE)
1495 return -ENOENT;
1496
1497 do {
1498 rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
1499 } while (rtas_busy_delay(rc));
1500
1501 if (rc < 0)
1502 return rtas_error_rc(rc);
1503 return rc;
1504 }
1505 EXPORT_SYMBOL_GPL(rtas_set_indicator);
1506
1507 /*
1508 * Like rtas_set_indicator(), but does not retry busy/extended delay statuses
1509 */
1510 int rtas_set_indicator_fast(int indicator, int index, int new_value)
1511 {
1512 int token = rtas_function_token(RTAS_FN_SET_INDICATOR);
1513 int rc;
1514
1515 if (token == RTAS_UNKNOWN_SERVICE)
1516 return -ENOENT;
1517
1518 rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
1519
1520 WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
1521 rc <= RTAS_EXTENDED_DELAY_MAX));
1522
1523 if (rc < 0)
1524 return rtas_error_rc(rc);
1525
1526 return rc;
1527 }
1528
1529 /**
1530 * rtas_ibm_suspend_me() - Call ibm,suspend-me to suspend the LPAR.
1531 *
1532 * @fw_status: RTAS call status will be placed here if not NULL.
1533 *
1534 * rtas_ibm_suspend_me() should be called only on a CPU which has
1535 * received H_CONTINUE from the H_JOIN hcall. All other active CPUs
1536 * should be waiting to return from H_JOIN.
1537 *
1538 * rtas_ibm_suspend_me() may suspend execution of the OS
1539 * indefinitely. Callers should take appropriate measures upon return, such as
1540 * resetting watchdog facilities.
1541 *
1542 * Callers may choose to retry this call if @fw_status is
1543 * %RTAS_THREADS_ACTIVE.
1544 *
1545 * Return:
1546 * 0 - The partition has resumed from suspend, possibly after
1547 * migration to a different host.
1548 * -ECANCELED - The operation was aborted.
1549 * -EAGAIN - There were other CPUs not in H_JOIN at the time of the call.
1550 * -EBUSY - Some other condition prevented the suspend from succeeding.
1551 * -EIO - Hardware/platform error.
1552 */
1553 int rtas_ibm_suspend_me(int *fw_status)
1554 {
1555 int token = rtas_function_token(RTAS_FN_IBM_SUSPEND_ME);
1556 int fwrc;
1557 int ret;
1558
1559 fwrc = rtas_call(token, 0, 1, NULL);
1560
1561 switch (fwrc) {
1562 case 0:
1563 ret = 0;
1564 break;
1565 case RTAS_SUSPEND_ABORTED:
1566 ret = -ECANCELED;
1567 break;
1568 case RTAS_THREADS_ACTIVE:
1569 ret = -EAGAIN;
1570 break;
1571 case RTAS_NOT_SUSPENDABLE:
1572 case RTAS_OUTSTANDING_COPROC:
1573 ret = -EBUSY;
1574 break;
1575 case -1:
1576 default:
1577 ret = -EIO;
1578 break;
1579 }
1580
1581 if (fw_status)
1582 *fw_status = fwrc;
1583
1584 return ret;
1585 }
1586
1587 void __noreturn rtas_restart(char *cmd)
1588 {
1589 if (rtas_flash_term_hook)
1590 rtas_flash_term_hook(SYS_RESTART);
1591 pr_emerg("system-reboot returned %d\n",
1592 rtas_call(rtas_function_token(RTAS_FN_SYSTEM_REBOOT), 0, 1, NULL));
1593 for (;;);
1594 }
1595
1596 void rtas_power_off(void)
1597 {
1598 if (rtas_flash_term_hook)
1599 rtas_flash_term_hook(SYS_POWER_OFF);
1600 /* allow power on only with power button press */
1601 pr_emerg("power-off returned %d\n",
1602 rtas_call(rtas_function_token(RTAS_FN_POWER_OFF), 2, 1, NULL, -1, -1));
1603 for (;;);
1604 }
1605
1606 void __noreturn rtas_halt(void)
1607 {
1608 if (rtas_flash_term_hook)
1609 rtas_flash_term_hook(SYS_HALT);
1610 /* allow power on only with power button press */
1611 pr_emerg("power-off returned %d\n",
1612 rtas_call(rtas_function_token(RTAS_FN_POWER_OFF), 2, 1, NULL, -1, -1));
1613 for (;;);
1614 }
1615
1616 /* Must be in the RMO region, so we place it here */
1617 static char rtas_os_term_buf[2048];
1618 static bool ibm_extended_os_term;
1619
1620 void rtas_os_term(char *str)
1621 {
1622 s32 token = rtas_function_token(RTAS_FN_IBM_OS_TERM);
1623 static struct rtas_args args;
1624 int status;
1625
1626 /*
1627 * Firmware with the ibm,extended-os-term property is guaranteed
1628 * to always return from an ibm,os-term call. Earlier versions without
1629 * this property may terminate the partition, which we want to avoid
1630 * since it interferes with panic_timeout.
1631 */
1632
1633 if (token == RTAS_UNKNOWN_SERVICE || !ibm_extended_os_term)
1634 return;
1635
1636 snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
1637
1638 /*
1639 * Keep calling as long as RTAS returns a "try again" status,
1640 * but don't use rtas_busy_delay(), which potentially
1641 * schedules.
1642 */
1643 do {
1644 rtas_call_unlocked(&args, token, 1, 1, NULL, __pa(rtas_os_term_buf));
1645 status = be32_to_cpu(args.rets[0]);
1646 } while (rtas_busy_delay_time(status));
1647
1648 if (status != 0)
1649 pr_emerg("ibm,os-term call failed %d\n", status);
1650 }
1651
1652 /**
1653 * rtas_activate_firmware() - Activate a new version of firmware.
1654 *
1655 * Context: This function may sleep.
1656 *
1657 * Activate a new version of partition firmware. The OS must call this
1658 * after resuming from a partition hibernation or migration in order
1659 * to maintain the ability to perform live firmware updates. It's not
1660 * catastrophic for this method to be absent or to fail; just log the
1661 * condition in that case.
1662 */
1663 void rtas_activate_firmware(void)
1664 {
1665 int token = rtas_function_token(RTAS_FN_IBM_ACTIVATE_FIRMWARE);
1666 int fwrc;
1667
1668 if (token == RTAS_UNKNOWN_SERVICE) {
1669 pr_notice("ibm,activate-firmware method unavailable\n");
1670 return;
1671 }
1672
1673 mutex_lock(&rtas_ibm_activate_firmware_lock);
1674
1675 do {
1676 fwrc = rtas_call(token, 0, 1, NULL);
1677 } while (rtas_busy_delay(fwrc));
1678
1679 mutex_unlock(&rtas_ibm_activate_firmware_lock);
1680
1681 if (fwrc)
1682 pr_err("ibm,activate-firmware failed (%i)\n", fwrc);
1683 }
1684
1685 /**
1686 * get_pseries_errorlog() - Find a specific pseries error log in an RTAS
1687 * extended event log.
1688 * @log: RTAS error/event log
1689 * @section_id: two character section identifier
1690 *
1691 * Return: A pointer to the specified errorlog or NULL if not found.
1692 */
1693 noinstr struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
1694 uint16_t section_id)
1695 {
1696 struct rtas_ext_event_log_v6 *ext_log =
1697 (struct rtas_ext_event_log_v6 *)log->buffer;
1698 struct pseries_errorlog *sect;
1699 unsigned char *p, *log_end;
1700 uint32_t ext_log_length = rtas_error_extended_log_length(log);
1701 uint8_t log_format = rtas_ext_event_log_format(ext_log);
1702 uint32_t company_id = rtas_ext_event_company_id(ext_log);
1703
1704 /* Check that we understand the format */
1705 if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
1706 log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
1707 company_id != RTAS_V6EXT_COMPANY_ID_IBM)
1708 return NULL;
1709
1710 log_end = log->buffer + ext_log_length;
1711 p = ext_log->vendor_log;
1712
1713 while (p < log_end) {
1714 sect = (struct pseries_errorlog *)p;
1715 if (pseries_errorlog_id(sect) == section_id)
1716 return sect;
1717 p += pseries_errorlog_length(sect);
1718 }
1719
1720 return NULL;
1721 }
1722
1723 /*
1724 * The sys_rtas syscall, as originally designed, allows root to pass
1725 * arbitrary physical addresses to RTAS calls. A number of RTAS calls
1726 * can be abused to write to arbitrary memory and do other things that
1727 * are potentially harmful to system integrity, and thus should only
1728 * be used inside the kernel and not exposed to userspace.
1729 *
1730 * All known legitimate users of the sys_rtas syscall will only ever
1731 * pass addresses that fall within the RMO buffer, and use a known
1732 * subset of RTAS calls.
1733 *
1734 * Accordingly, we filter RTAS requests to check that the call is
1735 * permitted, and that provided pointers fall within the RMO buffer.
1736 * If a function is allowed to be invoked via the syscall, then its
1737 * entry in the rtas_functions table points to a rtas_filter that
1738 * describes its constraints, with the indexes of the parameters which
1739 * are expected to contain addresses and sizes of buffers allocated
1740 * inside the RMO buffer.
1741 */
1742
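/* Check that [base, end] lies entirely within the user region of the RMO buffer. */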
1743 static bool in_rmo_buf(u32 base, u32 end)
1744 {
1745 return base >= rtas_rmo_buf &&
1746 base < (rtas_rmo_buf + RTAS_USER_REGION_SIZE) &&
1747 base <= end &&
1748 end >= rtas_rmo_buf &&
1749 end < (rtas_rmo_buf + RTAS_USER_REGION_SIZE);
1750 }
1751
1752 static bool block_rtas_call(const struct rtas_function *func, int nargs,
1753 struct rtas_args *args)
1754 {
1755 const struct rtas_filter *f;
1756 const bool is_platform_dump =
1757 func == &rtas_function_table[RTAS_FNIDX__IBM_PLATFORM_DUMP];
1758 const bool is_config_conn =
1759 func == &rtas_function_table[RTAS_FNIDX__IBM_CONFIGURE_CONNECTOR];
1760 u32 base, size, end;
1761
1762 /*
1763 * Only functions with filters attached are allowed.
1764 */
1765 f = func->filter;
1766 if (!f)
1767 goto err;
1768 /*
1769 * And some functions aren't allowed on LE.
1770 */
1771 if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) && func->banned_for_syscall_on_le)
1772 goto err;
1773
1774 if (f->buf_idx1 != -1) {
1775 base = be32_to_cpu(args->args[f->buf_idx1]);
1776 if (f->size_idx1 != -1)
1777 size = be32_to_cpu(args->args[f->size_idx1]);
1778 else if (f->fixed_size)
1779 size = f->fixed_size;
1780 else
1781 size = 1;
1782
1783 end = base + size - 1;
1784
1785 /*
1786 * Special case for ibm,platform-dump - NULL buffer
1787 * address is used to indicate end of dump processing
1788 */
1789 if (is_platform_dump && base == 0)
1790 return false;
1791
1792 if (!in_rmo_buf(base, end))
1793 goto err;
1794 }
1795
1796 if (f->buf_idx2 != -1) {
1797 base = be32_to_cpu(args->args[f->buf_idx2]);
1798 if (f->size_idx2 != -1)
1799 size = be32_to_cpu(args->args[f->size_idx2]);
1800 else if (f->fixed_size)
1801 size = f->fixed_size;
1802 else
1803 size = 1;
1804 end = base + size - 1;
1805
1806 /*
1807 * Special case for ibm,configure-connector where the
1808 * address can be 0
1809 */
1810 if (is_config_conn && base == 0)
1811 return false;
1812
1813 if (!in_rmo_buf(base, end))
1814 goto err;
1815 }
1816
1817 return false;
1818 err:
1819 pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n");
1820 pr_err_ratelimited("sys_rtas: %s nargs=%d (called by %s)\n",
1821 func->name, nargs, current->comm);
1822 return true;
1823 }
1824
1825 /* We assume to be passed big endian arguments */
1826 SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
1827 {
1828 const struct rtas_function *func;
1829 struct pin_cookie cookie;
1830 struct rtas_args args;
1831 unsigned long flags;
1832 char *buff_copy, *errbuf = NULL;
1833 int nargs, nret, token;
1834
1835 if (!capable(CAP_SYS_ADMIN))
1836 return -EPERM;
1837
1838 if (!rtas.entry)
1839 return -EINVAL;
1840
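	/* The header is three __be32 words: token, nargs, nret. */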
	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
		return -EFAULT;

	nargs = be32_to_cpu(args.nargs);
	nret = be32_to_cpu(args.nret);
	token = be32_to_cpu(args.token);

	if (nargs >= ARRAY_SIZE(args.args)
	    || nret > ARRAY_SIZE(args.args)
	    || nargs + nret > ARRAY_SIZE(args.args))
		return -EINVAL;

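	/* Clamp the indices so they cannot be used speculatively out of bounds. */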
	nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args));
	nret = array_index_nospec(nret, ARRAY_SIZE(args.args) - nargs);

	/* Copy in args. */
	if (copy_from_user(args.args, uargs->args,
			   nargs * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	/*
	 * If this token doesn't correspond to a function the kernel
	 * understands, you're not allowed to call it.
	 */
	func = rtas_token_to_function_untrusted(token);
	if (!func)
		return -EINVAL;

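	/* Return values follow the inputs in the same fixed-size buffer. */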
	args.rets = &args.args[nargs];
	memset(args.rets, 0, nret * sizeof(rtas_arg_t));

	if (block_rtas_call(func, nargs, &args))
		return -EINVAL;

	if (token_is_restricted_errinjct(token)) {
		int err;

		err = security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION);
		if (err)
			return err;
	}

	/* Need to handle the ibm,suspend-me call specially */
	if (token == rtas_function_token(RTAS_FN_IBM_SUSPEND_ME)) {

		/*
		 * rtas_ibm_suspend_me assumes the streamid handle is in cpu
		 * endian, or at least the hcall within it requires it.
		 */
		int rc = 0;
		u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
			      | be32_to_cpu(args.args[1]);
		rc = rtas_syscall_dispatch_ibm_suspend_me(handle);
		if (rc == -EAGAIN)
			args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
		else if (rc == -EIO)
			args.rets[0] = cpu_to_be32(-1);
		else if (rc)
			return rc;
		goto copy_return;
	}

	buff_copy = get_errorlog_buffer();

	/*
	 * If this function has a mutex assigned to it, we must
	 * acquire it to avoid interleaving with any kernel-based uses
	 * of the same function. Kernel-based sequences acquire the
	 * appropriate mutex explicitly.
	 */
	if (func->lock)
		mutex_lock(func->lock);

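	/*
	 * All entries into RTAS go through the single global rtas_args
	 * buffer, serialized by rtas_lock: copy the arguments in, make the
	 * call, then copy the results back out.
	 */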
	raw_spin_lock_irqsave(&rtas_lock, flags);
	cookie = lockdep_pin_lock(&rtas_lock);

	rtas_args = args;
	do_enter_rtas(&rtas_args);
	args = rtas_args;

	/*
	 * A -1 return code indicates that the last command couldn't
	 * be completed due to a hardware error.
	 */
	if (be32_to_cpu(args.rets[0]) == -1)
		errbuf = __fetch_rtas_last_error(buff_copy);

	lockdep_unpin_lock(&rtas_lock, cookie);
	raw_spin_unlock_irqrestore(&rtas_lock, flags);

	if (func->lock)
		mutex_unlock(func->lock);

	if (buff_copy) {
		if (errbuf)
			log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
		kfree(buff_copy);
	}

 copy_return:
	/* Copy out only the return values. */
	if (copy_to_user(uargs->args + nargs,
			 args.args + nargs,
			 nret * sizeof(rtas_arg_t)) != 0)
		return -EFAULT;

	return 0;
}

static void __init rtas_function_table_init(void)
{
	struct property *prop;

	for (size_t i = 0; i < ARRAY_SIZE(rtas_function_table); ++i) {
		struct rtas_function *curr = &rtas_function_table[i];
		struct rtas_function *prior;
		int cmp;

		curr->token = RTAS_UNKNOWN_SERVICE;

		if (i == 0)
			continue;
		/*
		 * Ensure table is sorted correctly for binary search
		 * on function names.
		 */
		prior = &rtas_function_table[i - 1];

		cmp = strcmp(prior->name, curr->name);
		if (cmp < 0)
			continue;

		if (cmp == 0) {
			pr_err("'%s' has duplicate function table entries\n",
			       curr->name);
		} else {
			pr_err("function table unsorted: '%s' wrongly precedes '%s'\n",
			       prior->name, curr->name);
		}
	}

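	/*
	 * The /rtas node exposes each implemented RTAS function as a
	 * property whose name is the function name and whose 32-bit value
	 * is the token to use when calling it.
	 */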
	for_each_property_of_node(rtas.dev, prop) {
		struct rtas_function *func;

		if (prop->length != sizeof(u32))
			continue;

		func = __rtas_name_to_function(prop->name);
		if (!func)
			continue;

		func->token = be32_to_cpup((__be32 *)prop->value);

		pr_debug("function %s has token %u\n", func->name, func->token);
	}
}

/*
 * Call early during boot, before mem init, to retrieve the RTAS
 * information from the device-tree and allocate the RMO buffer for userland
 * accesses.
 */
void __init rtas_initialize(void)
{
	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
	u32 base, size, entry;
	int no_base, no_size, no_entry;

	/*
	 * Get the RTAS device node and fill in our "rtas" structure with
	 * information about it.
	 */
	rtas.dev = of_find_node_by_name(NULL, "rtas");
	if (!rtas.dev)
		return;

	no_base = of_property_read_u32(rtas.dev, "linux,rtas-base", &base);
	no_size = of_property_read_u32(rtas.dev, "rtas-size", &size);
	if (no_base || no_size) {
		of_node_put(rtas.dev);
		rtas.dev = NULL;
		return;
	}

	rtas.base = base;
	rtas.size = size;
	no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
	rtas.entry = no_entry ? rtas.base : entry;

	init_error_log_max();

	/* Must be called before any function token lookups */
	rtas_function_table_init();

	/*
	 * Discover this now to avoid a device tree lookup in the
	 * panic path.
	 */
	ibm_extended_os_term = of_property_read_bool(rtas.dev, "ibm,extended-os-term");

	/* If RTAS was found, allocate the RMO buffer for userspace accesses. */
#ifdef CONFIG_PPC64
	if (firmware_has_feature(FW_FEATURE_LPAR))
		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
#endif
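	/*
	 * The user region must be addressable by RTAS, which runs in real
	 * mode, so keep it below RTAS_INSTANTIATE_MAX and, on LPAR, within
	 * the RMA.
	 */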
	rtas_rmo_buf = memblock_phys_alloc_range(RTAS_USER_REGION_SIZE, PAGE_SIZE,
						 0, rtas_region);
	if (!rtas_rmo_buf)
		panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
		      (unsigned long)RTAS_USER_REGION_SIZE, &rtas_region);

	rtas_work_area_reserve_arena(rtas_region);
}

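/*
 * Flattened device tree scan hook: record the RTAS base, entry and size
 * properties before the tree is unflattened.
 */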
int __init early_init_dt_scan_rtas(unsigned long node,
				   const char *uname, int depth, void *data)
{
	const u32 *basep, *entryp, *sizep;

	if (depth != 1 || strcmp(uname, "rtas") != 0)
		return 0;

	basep = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
	entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
	sizep = of_get_flat_dt_prop(node, "rtas-size", NULL);

#ifdef CONFIG_PPC64
	/* need this feature to decide the crashkernel offset */
	if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL))
		powerpc_firmware_features |= FW_FEATURE_LPAR;
#endif

	if (basep && entryp && sizep) {
		rtas.base = *basep;
		rtas.entry = *entryp;
		rtas.size = *sizep;
	}

	/* break now */
	return 1;
}

static DEFINE_RAW_SPINLOCK(timebase_lock);
static u64 timebase = 0;

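/*
 * Timebase handoff between two CPUs: the giver freezes the timebase,
 * publishes its value in @timebase, and spins until the taker has copied
 * it into its own timebase register and cleared @timebase, at which point
 * the giver thaws the timebase again.
 */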
void rtas_give_timebase(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&timebase_lock, flags);
	hard_irq_disable();
	rtas_call(rtas_function_token(RTAS_FN_FREEZE_TIME_BASE), 0, 1, NULL);
	timebase = get_tb();
	raw_spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_function_token(RTAS_FN_THAW_TIME_BASE), 0, 1, NULL);
	local_irq_restore(flags);
}

void rtas_take_timebase(void)
{
	while (!timebase)
		barrier();
	raw_spin_lock(&timebase_lock);
	set_tb(timebase >> 32, timebase & 0xffffffff);
	timebase = 0;
	raw_spin_unlock(&timebase_lock);
}