// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI K3 Remote Processor(s) driver common code
 *
 * Refactored out of ti_k3_r5_remoteproc.c, ti_k3_dsp_remoteproc.c and
 * ti_k3_m4_remoteproc.c.
 *
 * ti_k3_r5_remoteproc.c:
 * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
 * Suman Anna <s-anna@ti.com>
 *
 * ti_k3_dsp_remoteproc.c:
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 * Suman Anna <s-anna@ti.com>
 *
 * ti_k3_m4_remoteproc.c:
 * Copyright (C) 2021-2024 Texas Instruments Incorporated - https://www.ti.com/
 * Hari Nagalla <hnagalla@ti.com>
 */

#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
#include "ti_sci_proc.h"
#include "ti_k3_common.h"

/**
 * k3_rproc_mbox_callback() - inbound mailbox message handler
 * @client: mailbox client pointer used for requesting the mailbox channel
 * @data: mailbox payload
 *
 * This handler is invoked by the K3 mailbox driver whenever a mailbox
 * message is received. Usually, the mailbox payload simply contains
 * the index of the virtqueue that is kicked by the remote processor,
 * and we let the remoteproc core handle it.
 *
 * In addition to virtqueue indices, we also have some out-of-band values
 * that indicate different events. Those values are deliberately very
 * large so they don't coincide with virtqueue indices.
 */
void k3_rproc_mbox_callback(struct mbox_client *client, void *data)
{
	struct k3_rproc *kproc = container_of(client, struct k3_rproc, client);
	struct device *dev = kproc->rproc->dev.parent;
	struct rproc *rproc = kproc->rproc;
	u32 msg = (u32)(uintptr_t)(data);

	dev_dbg(dev, "mbox msg: 0x%x\n", msg);

	switch (msg) {
	case RP_MBOX_CRASH:
		/*
		 * remoteproc detected an exception, but error recovery is not
		 * supported. So, just log this for now
		 */
		dev_err(dev, "K3 rproc %s crashed\n", rproc->name);
		break;
	case RP_MBOX_ECHO_REPLY:
		dev_info(dev, "received echo reply from %s\n", rproc->name);
		break;
	default:
		/* silently handle all other valid messages */
		if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
			return;
		if (msg > rproc->max_notifyid) {
			dev_dbg(dev, "dropping unknown message 0x%x", msg);
			return;
		}
		/* msg contains the index of the triggered vring */
		if (rproc_vq_interrupt(rproc, msg) == IRQ_NONE)
			dev_dbg(dev, "no message was found in vqid %d\n", msg);
	}
}
EXPORT_SYMBOL_GPL(k3_rproc_mbox_callback);

/*
 * Kick the remote processor to notify about pending unprocessed messages.
 * The vqid is sent as the mailbox payload but is otherwise inconsequential:
 * the kick is performed through a simulated GPIO (a bit in an IPC
 * interrupt-triggering register), and the remote processor is expected to
 * process both its Tx and Rx virtqueues.
 */
void k3_rproc_kick(struct rproc *rproc, int vqid)
{
	struct k3_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	u32 msg = (u32)vqid;
	int ret;

	/*
	 * Send the index of the triggered virtqueue in the mailbox payload.
	 * NOTE: msg is cast to uintptr_t to prevent compiler warnings when
	 * void* is 64bit. It is safely cast back to u32 in the mailbox driver.
	 */
	ret = mbox_send_message(kproc->mbox, (void *)(uintptr_t)msg);
	if (ret < 0)
		dev_err(dev, "failed to send mailbox message, status = %d\n",
			ret);
}
EXPORT_SYMBOL_GPL(k3_rproc_kick);

/* Put the remote processor into reset */
int k3_rproc_reset(struct k3_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	if (kproc->data->uses_lreset) {
		ret = reset_control_assert(kproc->reset);
		if (ret)
			dev_err(dev, "local-reset assert failed (%pe)\n", ERR_PTR(ret));
	} else {
		ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
							    kproc->ti_sci_id);
		if (ret)
			dev_err(dev, "module-reset assert failed (%pe)\n", ERR_PTR(ret));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_rproc_reset);

/* Release the remote processor from reset */
int k3_rproc_release(struct k3_rproc *kproc)
{
	struct device *dev = kproc->dev;
	int ret;

	if (kproc->data->uses_lreset) {
		ret = reset_control_deassert(kproc->reset);
		if (ret) {
			dev_err(dev, "local-reset deassert failed (%pe)\n", ERR_PTR(ret));
			if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
								  kproc->ti_sci_id))
				dev_warn(dev, "module-reset assert back failed\n");
		}
	} else {
		ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
							    kproc->ti_sci_id);
		if (ret)
			dev_err(dev, "module-reset deassert failed (%pe)\n", ERR_PTR(ret));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_rproc_release);

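/*
 * Request the mailbox channel described by the client properties and send an
 * initial echo request to the remote processor. The channel is freed again if
 * the initial ping cannot be queued.
 */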
int k3_rproc_request_mbox(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;
	struct mbox_client *client = &kproc->client;
	struct device *dev = kproc->dev;
	int ret;

	client->dev = dev;
	client->tx_done = NULL;
	client->rx_callback = k3_rproc_mbox_callback;
	client->tx_block = false;
	client->knows_txdone = false;

	kproc->mbox = mbox_request_channel(client, 0);
	if (IS_ERR(kproc->mbox))
		return dev_err_probe(dev, PTR_ERR(kproc->mbox),
				     "mbox_request_channel failed\n");

	/*
	 * Ping the remote processor; this is only for sanity's sake for now.
	 * There is no functional effect whatsoever.
	 *
	 * Note that the reply will _not_ arrive immediately: this message
	 * will wait in the mailbox fifo until the remote processor is booted.
	 */
	ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
	if (ret < 0) {
		dev_err(dev, "mbox_send_message failed (%pe)\n", ERR_PTR(ret));
		mbox_free_channel(kproc->mbox);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(k3_rproc_request_mbox);

/*
 * The K3 DSP and M4 cores have a local reset that affects only the CPU, and a
 * generic module reset that powers on the device and allows the internal
 * memories to be accessed while the local reset is asserted. This function is
 * used to release the global reset on remote cores to allow loading into the
 * internal RAMs. The .prepare() ops is invoked by the remoteproc core before
 * any firmware loading, and is followed by the .start() ops after loading to
 * actually let the remote cores run.
 */
int k3_rproc_prepare(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	/* If the core is already running, no need to deassert the module reset */
	if (rproc->state == RPROC_DETACHED)
		return 0;

	/*
	 * Ensure the local reset is asserted so the core doesn't
	 * execute bogus code when the module reset is released.
	 */
	if (kproc->data->uses_lreset) {
		ret = k3_rproc_reset(kproc);
		if (ret)
			return ret;

		ret = reset_control_status(kproc->reset);
		if (ret <= 0) {
			dev_err(dev, "local reset still not asserted\n");
			return ret;
		}
	}

	ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "could not deassert module-reset for internal RAM loading\n");
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(k3_rproc_prepare);

/*
 * This function implements the .unprepare() ops and performs the complementary
 * operations to that of the .prepare() ops. The function is used to assert the
 * global reset on applicable K3 DSP and M4 cores. This completes the second
 * portion of powering down the remote core. The cores themselves are only
 * halted in the .stop() callback through the local reset, and the .unprepare()
 * ops is invoked by the remoteproc core after the remoteproc is stopped to
 * balance the global reset.
 */
int k3_rproc_unprepare(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;
	int ret;

	/* If the core is going to be detached do not assert the module reset */
	if (rproc->state == RPROC_DETACHED)
		return 0;

	ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
						    kproc->ti_sci_id);
	if (ret) {
		dev_err(dev, "module-reset assert failed\n");
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(k3_rproc_unprepare);

/*
 * Power up the remote processor.
 *
 * This function will be invoked only after the firmware for this rproc
 * was loaded, parsed successfully, and all of its resource requirements
 * were met. This callback is invoked only in remoteproc mode.
 */
int k3_rproc_start(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;

	return k3_rproc_release(kproc);
}
EXPORT_SYMBOL_GPL(k3_rproc_start);

/*
 * Stop the remote processor.
 *
 * This function puts the remote processor into reset, and finishes processing
 * of any pending messages. This callback is invoked only in remoteproc mode.
 */
int k3_rproc_stop(struct rproc *rproc)
{
	struct k3_rproc *kproc = rproc->priv;

	return k3_rproc_reset(kproc);
}
EXPORT_SYMBOL_GPL(k3_rproc_stop);

/*
 * Attach to a running remote processor (IPC-only mode)
 *
 * The rproc attach callback is a NOP. The remote processor is already booted,
 * and all required resources have been acquired during the probe routine, so
 * there is no need to issue any TI-SCI commands to boot the remote cores in
 * IPC-only mode. This callback is invoked only in IPC-only mode and exists
 * because rproc_validate() checks for its existence.
 */
int k3_rproc_attach(struct rproc *rproc) { return 0; }
EXPORT_SYMBOL_GPL(k3_rproc_attach);

/*
 * Detach from a running remote processor (IPC-only mode)
 *
 * The rproc detach callback is a NOP. The remote processor is not stopped and
 * will be left in booted state in IPC-only mode. This callback is invoked only
 * in IPC-only mode and exists for sanity's sake.
 */
int k3_rproc_detach(struct rproc *rproc) { return 0; }
EXPORT_SYMBOL_GPL(k3_rproc_detach);

/*
 * This function implements the .get_loaded_rsc_table() callback and is used
 * to provide the resource table for a booted remote processor in IPC-only
 * mode. The remote processor firmwares follow a design-by-contract approach
 * and are expected to have the resource table at the base of the DDR region
 * reserved for firmware usage. This provides flexibility for the remote
 * processor to be booted by different bootloaders that may or may not have the
 * ability to publish the resource table address and size through a DT
 * property.
 */
struct resource_table *k3_get_loaded_rsc_table(struct rproc *rproc,
					       size_t *rsc_table_sz)
{
	struct k3_rproc *kproc = rproc->priv;
	struct device *dev = kproc->dev;

	if (!kproc->rmem[0].cpu_addr) {
		dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * NOTE: The resource table size is currently hard-coded to a maximum
	 * of 256 bytes. The most common resource table usage for K3 firmwares
	 * is to only have the vdev resource entry and an optional trace entry.
	 * The exact size could be computed based on resource table address, but
	 * the hard-coded value suffices to support the IPC-only mode.
	 */
	*rsc_table_sz = 256;
	return (__force struct resource_table *)kproc->rmem[0].cpu_addr;
}
EXPORT_SYMBOL_GPL(k3_get_loaded_rsc_table);

/*
 * Custom function to translate a remote processor device address (internal
 * RAMs only) to a kernel virtual address. The remote processors can access
 * their RAMs at either an internal address visible only from a remote
 * processor, or at the SoC-level bus address. Both these addresses need to be
 * looked through for translation. The translated addresses can be used either
 * by the remoteproc core for loading (when using kernel remoteproc loader), or
 * by any rpmsg bus drivers.
 */
void *k3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
	struct k3_rproc *kproc = rproc->priv;
	void __iomem *va = NULL;
	phys_addr_t bus_addr;
	u32 dev_addr, offset;
	size_t size;
	int i;

	if (len == 0)
		return NULL;

	for (i = 0; i < kproc->num_mems; i++) {
		bus_addr = kproc->mem[i].bus_addr;
		dev_addr = kproc->mem[i].dev_addr;
		size = kproc->mem[i].size;

		/* handle rproc-view addresses */
		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}

		/* handle SoC-view addresses */
		if (da >= bus_addr && (da + len) <= (bus_addr + size)) {
			offset = da - bus_addr;
			va = kproc->mem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	/* handle static DDR reserved memory regions */
	for (i = 0; i < kproc->num_rmems; i++) {
		dev_addr = kproc->rmem[i].dev_addr;
		size = kproc->rmem[i].size;

		if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
			offset = da - dev_addr;
			va = kproc->rmem[i].cpu_addr + offset;
			return (__force void *)va;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(k3_rproc_da_to_va);

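/*
 * Map the device-specific internal memory regions named in the per-SoC device
 * data from the platform resources, so that they can be used for firmware
 * loading and address translation.
 */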
int k3_rproc_of_get_memories(struct platform_device *pdev,
			     struct k3_rproc *kproc)
{
	const struct k3_rproc_dev_data *data = kproc->data;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int num_mems = 0;
	int i;

	num_mems = data->num_mems;
	kproc->mem = devm_kcalloc(kproc->dev, num_mems,
				  sizeof(*kproc->mem), GFP_KERNEL);
	if (!kproc->mem)
		return -ENOMEM;

	for (i = 0; i < num_mems; i++) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   data->mems[i].name);
		if (!res) {
			dev_err(dev, "found no memory resource for %s\n",
				data->mems[i].name);
			return -EINVAL;
		}
		if (!devm_request_mem_region(dev, res->start,
					     resource_size(res),
					     dev_name(dev))) {
			dev_err(dev, "could not request %s region for resource\n",
				data->mems[i].name);
			return -EBUSY;
		}

		kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
							 resource_size(res));
		if (!kproc->mem[i].cpu_addr) {
			dev_err(dev, "failed to map %s memory\n",
				data->mems[i].name);
			return -ENOMEM;
		}
		kproc->mem[i].bus_addr = res->start;
		kproc->mem[i].dev_addr = data->mems[i].dev_addr;
		kproc->mem[i].size = resource_size(res);

		dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %p da 0x%x\n",
			data->mems[i].name, &kproc->mem[i].bus_addr,
			kproc->mem[i].size, kproc->mem[i].cpu_addr,
			kproc->mem[i].dev_addr);
	}
	kproc->num_mems = num_mems;

	return 0;
}
EXPORT_SYMBOL_GPL(k3_rproc_of_get_memories);

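/* devm action callback to release the reserved memory DMA pool */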
void k3_mem_release(void *data)
{
	struct device *dev = data;

	of_reserved_mem_device_release(dev);
}
EXPORT_SYMBOL_GPL(k3_mem_release);

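/*
 * Parse the "memory-region" phandles: the first region backs the vring DMA
 * pool, and the remaining regions are mapped as static carveouts.
 */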
int k3_reserved_mem_init(struct k3_rproc *kproc)
{
	struct device *dev = kproc->dev;
	struct device_node *np = dev->of_node;
	struct device_node *rmem_np;
	struct reserved_mem *rmem;
	int num_rmems;
	int ret, i;

	num_rmems = of_property_count_elems_of_size(np, "memory-region",
						    sizeof(phandle));
	if (num_rmems < 0) {
		dev_err(dev, "device does not have reserved memory regions (%d)\n",
			num_rmems);
		return -EINVAL;
	}
	if (num_rmems < 2) {
		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
			num_rmems);
		return -EINVAL;
	}

	/* use reserved memory region 0 for vring DMA allocations */
	ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
	if (ret) {
		dev_err(dev, "device cannot initialize DMA pool (%d)\n", ret);
		return ret;
	}
	ret = devm_add_action_or_reset(dev, k3_mem_release, dev);
	if (ret)
		return ret;

	num_rmems--;
	kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
	if (!kproc->rmem)
		return -ENOMEM;

	/* use remaining reserved memory regions for static carveouts */
	for (i = 0; i < num_rmems; i++) {
		rmem_np = of_parse_phandle(np, "memory-region", i + 1);
		if (!rmem_np)
			return -EINVAL;

		rmem = of_reserved_mem_lookup(rmem_np);
		of_node_put(rmem_np);
		if (!rmem)
			return -EINVAL;

		kproc->rmem[i].bus_addr = rmem->base;
		/* 64-bit address regions currently not supported */
		kproc->rmem[i].dev_addr = (u32)rmem->base;
		kproc->rmem[i].size = rmem->size;
		kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
		if (!kproc->rmem[i].cpu_addr) {
			dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
				i + 1, &rmem->base, &rmem->size);
			return -ENOMEM;
		}

		dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %p da 0x%x\n",
			i + 1, &kproc->rmem[i].bus_addr,
			kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
			kproc->rmem[i].dev_addr);
	}
	kproc->num_rmems = num_rmems;

	return 0;
}
EXPORT_SYMBOL_GPL(k3_reserved_mem_init);

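/* devm action callback to release the TI-SCI processor control handle */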
void k3_release_tsp(void *data)
{
	struct ti_sci_proc *tsp = data;

	ti_sci_proc_release(tsp);
}
EXPORT_SYMBOL_GPL(k3_release_tsp);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI K3 common Remoteproc code");