xref: /linux/drivers/remoteproc/xlnx_r5_remoteproc.c (revision 6b291e8020a8bd90e94ee13d61f251040425c90d)
1*6b291e80STanmay Shah // SPDX-License-Identifier: GPL-2.0
2*6b291e80STanmay Shah /*
3*6b291e80STanmay Shah  * ZynqMP R5 Remote Processor driver
4*6b291e80STanmay Shah  *
5*6b291e80STanmay Shah  */
6*6b291e80STanmay Shah 
7*6b291e80STanmay Shah #include <dt-bindings/power/xlnx-zynqmp-power.h>
8*6b291e80STanmay Shah #include <linux/dma-mapping.h>
9*6b291e80STanmay Shah #include <linux/firmware/xlnx-zynqmp.h>
10*6b291e80STanmay Shah #include <linux/kernel.h>
11*6b291e80STanmay Shah #include <linux/module.h>
12*6b291e80STanmay Shah #include <linux/of_address.h>
13*6b291e80STanmay Shah #include <linux/of_platform.h>
14*6b291e80STanmay Shah #include <linux/of_reserved_mem.h>
15*6b291e80STanmay Shah #include <linux/platform_device.h>
16*6b291e80STanmay Shah #include <linux/remoteproc.h>
17*6b291e80STanmay Shah #include <linux/slab.h>
18*6b291e80STanmay Shah 
19*6b291e80STanmay Shah #include "remoteproc_internal.h"
20*6b291e80STanmay Shah 
21*6b291e80STanmay Shah /*
22*6b291e80STanmay Shah  * Settings for the RPU cluster mode, which
23*6b291e80STanmay Shah  * reflect the possible values of the xlnx,cluster-mode dt-property
24*6b291e80STanmay Shah  */
25*6b291e80STanmay Shah enum zynqmp_r5_cluster_mode {
26*6b291e80STanmay Shah 	SPLIT_MODE = 0, /* When cores run as separate processor */
27*6b291e80STanmay Shah 	LOCKSTEP_MODE = 1, /* cores execute same code in lockstep, clk-for-clk */
28*6b291e80STanmay Shah 	SINGLE_CPU_MODE = 2, /* core0 is held in reset and only core1 runs */
29*6b291e80STanmay Shah };
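/*
 * Illustrative dts fragment (node name is only an example) selecting the
 * cluster mode on the r5f subsystem node; <0> = split, <1> = lockstep,
 * <2> = single-cpu:
 *
 *	r5fss {
 *		compatible = "xlnx,zynqmp-r5fss";
 *		xlnx,cluster-mode = <1>;
 *		...
 *	};
 */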
30*6b291e80STanmay Shah 
31*6b291e80STanmay Shah /**
32*6b291e80STanmay Shah  * struct mem_bank_data - Memory Bank description
33*6b291e80STanmay Shah  *
34*6b291e80STanmay Shah  * @addr: Start address of memory bank
35*6b291e80STanmay Shah  * @size: Size of Memory bank
36*6b291e80STanmay Shah  * @pm_domain_id: Power-domains id of memory bank for firmware to turn on/off
37*6b291e80STanmay Shah  * @bank_name: name of the bank for remoteproc framework
38*6b291e80STanmay Shah  */
39*6b291e80STanmay Shah struct mem_bank_data {
40*6b291e80STanmay Shah 	phys_addr_t addr;
41*6b291e80STanmay Shah 	size_t size;
42*6b291e80STanmay Shah 	u32 pm_domain_id;
43*6b291e80STanmay Shah 	char *bank_name;
44*6b291e80STanmay Shah };
45*6b291e80STanmay Shah 
46*6b291e80STanmay Shah /*
47*6b291e80STanmay Shah  * Hardcoded TCM bank values. These will be removed once TCM bindings are
48*6b291e80STanmay Shah  * accepted for the system-dt specification and upstreamed to the Linux kernel.
49*6b291e80STanmay Shah  */
50*6b291e80STanmay Shah static const struct mem_bank_data zynqmp_tcm_banks[] = {
51*6b291e80STanmay Shah 	{0xffe00000UL, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
52*6b291e80STanmay Shah 	{0xffe20000UL, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
53*6b291e80STanmay Shah 	{0xffe90000UL, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
54*6b291e80STanmay Shah 	{0xffeb0000UL, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
55*6b291e80STanmay Shah };
56*6b291e80STanmay Shah 
57*6b291e80STanmay Shah /**
58*6b291e80STanmay Shah  * struct zynqmp_r5_core - remoteproc data for a single R5 core
59*6b291e80STanmay Shah  *
60*6b291e80STanmay Shah  * @dev: device of RPU instance
61*6b291e80STanmay Shah  * @np: device node of RPU instance
62*6b291e80STanmay Shah  * @tcm_bank_count: number of TCM banks accessible to this RPU
63*6b291e80STanmay Shah  * @tcm_banks: array of each TCM bank data
64*6b291e80STanmay Shah  * @rmem_count: Number of reserved mem regions
65*6b291e80STanmay Shah  * @rmem: reserved memory region nodes from device tree
66*6b291e80STanmay Shah  * @rproc: rproc handle
67*6b291e80STanmay Shah  * @pm_domain_id: RPU CPU power domain id
68*6b291e80STanmay Shah  */
69*6b291e80STanmay Shah struct zynqmp_r5_core {
70*6b291e80STanmay Shah 	struct device *dev;
71*6b291e80STanmay Shah 	struct device_node *np;
72*6b291e80STanmay Shah 	int tcm_bank_count;
73*6b291e80STanmay Shah 	struct mem_bank_data **tcm_banks;
74*6b291e80STanmay Shah 	int rmem_count;
75*6b291e80STanmay Shah 	struct reserved_mem **rmem;
76*6b291e80STanmay Shah 	struct rproc *rproc;
77*6b291e80STanmay Shah 	u32 pm_domain_id;
78*6b291e80STanmay Shah };
79*6b291e80STanmay Shah 
80*6b291e80STanmay Shah /**
81*6b291e80STanmay Shah  * struct zynqmp_r5_cluster - remoteproc data for the R5 cluster
82*6b291e80STanmay Shah  *
83*6b291e80STanmay Shah  * @dev: device of the r5f subsystem cluster
84*6b291e80STanmay Shah  * @mode: cluster mode of type zynqmp_r5_cluster_mode
85*6b291e80STanmay Shah  * @core_count: number of r5 cores used for this cluster mode
86*6b291e80STanmay Shah  * @r5_cores: Array of pointers pointing to r5 core
87*6b291e80STanmay Shah  */
88*6b291e80STanmay Shah struct zynqmp_r5_cluster {
89*6b291e80STanmay Shah 	struct device *dev;
90*6b291e80STanmay Shah 	enum zynqmp_r5_cluster_mode mode;
91*6b291e80STanmay Shah 	int core_count;
92*6b291e80STanmay Shah 	struct zynqmp_r5_core **r5_cores;
93*6b291e80STanmay Shah };
94*6b291e80STanmay Shah 
95*6b291e80STanmay Shah /*
96*6b291e80STanmay Shah  * zynqmp_r5_set_mode()
97*6b291e80STanmay Shah  *
98*6b291e80STanmay Shah  * set RPU cluster and TCM operation mode
99*6b291e80STanmay Shah  *
100*6b291e80STanmay Shah  * @r5_core: pointer to zynqmp_r5_core type object
101*6b291e80STanmay Shah  * @fw_reg_val: value expected by firmware to configure RPU cluster mode
102*6b291e80STanmay Shah  * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
103*6b291e80STanmay Shah  *
104*6b291e80STanmay Shah  * Return: 0 for success and < 0 for failure
105*6b291e80STanmay Shah  */
106*6b291e80STanmay Shah static int zynqmp_r5_set_mode(struct zynqmp_r5_core *r5_core,
107*6b291e80STanmay Shah 			      enum rpu_oper_mode fw_reg_val,
108*6b291e80STanmay Shah 			      enum rpu_tcm_comb tcm_mode)
109*6b291e80STanmay Shah {
110*6b291e80STanmay Shah 	int ret;
111*6b291e80STanmay Shah 
112*6b291e80STanmay Shah 	ret = zynqmp_pm_set_rpu_mode(r5_core->pm_domain_id, fw_reg_val);
113*6b291e80STanmay Shah 	if (ret < 0) {
114*6b291e80STanmay Shah 		dev_err(r5_core->dev, "failed to set RPU mode\n");
115*6b291e80STanmay Shah 		return ret;
116*6b291e80STanmay Shah 	}
117*6b291e80STanmay Shah 
118*6b291e80STanmay Shah 	ret = zynqmp_pm_set_tcm_config(r5_core->pm_domain_id, tcm_mode);
119*6b291e80STanmay Shah 	if (ret < 0)
120*6b291e80STanmay Shah 		dev_err(r5_core->dev, "failed to configure TCM\n");
121*6b291e80STanmay Shah 
122*6b291e80STanmay Shah 	return ret;
123*6b291e80STanmay Shah }
124*6b291e80STanmay Shah 
125*6b291e80STanmay Shah /*
126*6b291e80STanmay Shah  * zynqmp_r5_rproc_start()
127*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
128*6b291e80STanmay Shah  *
129*6b291e80STanmay Shah  * Start R5 Core from designated boot address.
130*6b291e80STanmay Shah  *
131*6b291e80STanmay Shah  * return 0 on success, otherwise non-zero value on failure
132*6b291e80STanmay Shah  */
133*6b291e80STanmay Shah static int zynqmp_r5_rproc_start(struct rproc *rproc)
134*6b291e80STanmay Shah {
135*6b291e80STanmay Shah 	struct zynqmp_r5_core *r5_core = rproc->priv;
136*6b291e80STanmay Shah 	enum rpu_boot_mem bootmem;
137*6b291e80STanmay Shah 	int ret;
138*6b291e80STanmay Shah 
139*6b291e80STanmay Shah 	/*
140*6b291e80STanmay Shah 	 * The exception vector pointers (EVP) refer to the base-address of
141*6b291e80STanmay Shah 	 * exception vectors (for reset, IRQ, FIQ, etc). The reset-vector
142*6b291e80STanmay Shah 	 * starts at the base-address and subsequent vectors are on 4-byte
143*6b291e80STanmay Shah 	 * boundaries.
144*6b291e80STanmay Shah 	 *
145*6b291e80STanmay Shah 	 * Exception vectors can start either from 0x0000_0000 (LOVEC) or
146*6b291e80STanmay Shah 	 * from 0xFFFF_0000 (HIVEC) which is mapped in the OCM (On-Chip Memory)
147*6b291e80STanmay Shah 	 *
148*6b291e80STanmay Shah 	 * Usually firmware will put Exception vectors at LOVEC.
149*6b291e80STanmay Shah 	 *
150*6b291e80STanmay Shah 	 * It is not recommended to change the exception vector.
151*6b291e80STanmay Shah 	 * Changing the EVP to HIVEC will result in increased interrupt latency
152*6b291e80STanmay Shah 	 * and jitter. Also, if the OCM is secured and the Cortex-R5F processor
153*6b291e80STanmay Shah 	 * is non-secured, then the Cortex-R5F processor cannot access the
154*6b291e80STanmay Shah 	 * HIVEC exception vectors in the OCM.
155*6b291e80STanmay Shah 	 */
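	/*
	 * The 256KB OCM occupies 0xFFFC_0000 - 0xFFFF_FFFF, so any boot
	 * address in that range is treated as a HIVEC boot.
	 */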
156*6b291e80STanmay Shah 	bootmem = (rproc->bootaddr >= 0xFFFC0000) ?
157*6b291e80STanmay Shah 		   PM_RPU_BOOTMEM_HIVEC : PM_RPU_BOOTMEM_LOVEC;
158*6b291e80STanmay Shah 
159*6b291e80STanmay Shah 	dev_dbg(r5_core->dev, "RPU boot addr 0x%llx from %s.", rproc->bootaddr,
160*6b291e80STanmay Shah 		bootmem == PM_RPU_BOOTMEM_HIVEC ? "OCM" : "TCM");
161*6b291e80STanmay Shah 
162*6b291e80STanmay Shah 	ret = zynqmp_pm_request_wake(r5_core->pm_domain_id, 1,
163*6b291e80STanmay Shah 				     bootmem, ZYNQMP_PM_REQUEST_ACK_NO);
164*6b291e80STanmay Shah 	if (ret)
165*6b291e80STanmay Shah 		dev_err(r5_core->dev,
166*6b291e80STanmay Shah 			"failed to start RPU = 0x%x\n", r5_core->pm_domain_id);
167*6b291e80STanmay Shah 	return ret;
168*6b291e80STanmay Shah }
169*6b291e80STanmay Shah 
170*6b291e80STanmay Shah /*
171*6b291e80STanmay Shah  * zynqmp_r5_rproc_stop()
172*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
173*6b291e80STanmay Shah  *
174*6b291e80STanmay Shah  * Power down the R5 core.
175*6b291e80STanmay Shah  *
176*6b291e80STanmay Shah  * return 0 on success, otherwise non-zero value on failure
177*6b291e80STanmay Shah  */
178*6b291e80STanmay Shah static int zynqmp_r5_rproc_stop(struct rproc *rproc)
179*6b291e80STanmay Shah {
180*6b291e80STanmay Shah 	struct zynqmp_r5_core *r5_core = rproc->priv;
181*6b291e80STanmay Shah 	int ret;
182*6b291e80STanmay Shah 
183*6b291e80STanmay Shah 	ret = zynqmp_pm_force_pwrdwn(r5_core->pm_domain_id,
184*6b291e80STanmay Shah 				     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
185*6b291e80STanmay Shah 	if (ret)
186*6b291e80STanmay Shah 		dev_err(r5_core->dev, "failed to stop remoteproc RPU %d\n", ret);
187*6b291e80STanmay Shah 
188*6b291e80STanmay Shah 	return ret;
189*6b291e80STanmay Shah }
190*6b291e80STanmay Shah 
191*6b291e80STanmay Shah /*
192*6b291e80STanmay Shah  * zynqmp_r5_mem_region_map()
193*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
194*6b291e80STanmay Shah  * @mem: mem descriptor to map reserved memory-regions
195*6b291e80STanmay Shah  *
196*6b291e80STanmay Shah  * Callback to map va for memory-region's carveout.
197*6b291e80STanmay Shah  *
198*6b291e80STanmay Shah  * return 0 on success, otherwise non-zero value on failure
199*6b291e80STanmay Shah  */
200*6b291e80STanmay Shah static int zynqmp_r5_mem_region_map(struct rproc *rproc,
201*6b291e80STanmay Shah 				    struct rproc_mem_entry *mem)
202*6b291e80STanmay Shah {
203*6b291e80STanmay Shah 	void __iomem *va;
204*6b291e80STanmay Shah 
205*6b291e80STanmay Shah 	va = ioremap_wc(mem->dma, mem->len);
206*6b291e80STanmay Shah 	if (IS_ERR_OR_NULL(va))
207*6b291e80STanmay Shah 		return -ENOMEM;
208*6b291e80STanmay Shah 
209*6b291e80STanmay Shah 	mem->va = (void *)va;
210*6b291e80STanmay Shah 
211*6b291e80STanmay Shah 	return 0;
212*6b291e80STanmay Shah }
213*6b291e80STanmay Shah 
214*6b291e80STanmay Shah /*
215*6b291e80STanmay Shah  * zynqmp_r5_mem_region_unmap()
216*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
217*6b291e80STanmay Shah  * @mem: mem entry to unmap
218*6b291e80STanmay Shah  *
219*6b291e80STanmay Shah  * Unmap memory-region carveout
220*6b291e80STanmay Shah  *
221*6b291e80STanmay Shah  * return: always returns 0
222*6b291e80STanmay Shah  */
223*6b291e80STanmay Shah static int zynqmp_r5_mem_region_unmap(struct rproc *rproc,
224*6b291e80STanmay Shah 				      struct rproc_mem_entry *mem)
225*6b291e80STanmay Shah {
226*6b291e80STanmay Shah 	iounmap((void __iomem *)mem->va);
227*6b291e80STanmay Shah 	return 0;
228*6b291e80STanmay Shah }
229*6b291e80STanmay Shah 
230*6b291e80STanmay Shah /*
231*6b291e80STanmay Shah  * add_mem_regions_carveout()
232*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
233*6b291e80STanmay Shah  *
234*6b291e80STanmay Shah  * Construct rproc mem carveouts from memory-region property nodes
235*6b291e80STanmay Shah  *
236*6b291e80STanmay Shah  * return 0 on success, otherwise non-zero value on failure
237*6b291e80STanmay Shah  */
238*6b291e80STanmay Shah static int add_mem_regions_carveout(struct rproc *rproc)
239*6b291e80STanmay Shah {
240*6b291e80STanmay Shah 	struct rproc_mem_entry *rproc_mem;
241*6b291e80STanmay Shah 	struct zynqmp_r5_core *r5_core;
242*6b291e80STanmay Shah 	struct reserved_mem *rmem;
243*6b291e80STanmay Shah 	int i, num_mem_regions;
244*6b291e80STanmay Shah 
245*6b291e80STanmay Shah 	r5_core = (struct zynqmp_r5_core *)rproc->priv;
246*6b291e80STanmay Shah 	num_mem_regions = r5_core->rmem_count;
247*6b291e80STanmay Shah 
248*6b291e80STanmay Shah 	for (i = 0; i < num_mem_regions; i++) {
249*6b291e80STanmay Shah 		rmem = r5_core->rmem[i];
250*6b291e80STanmay Shah 
251*6b291e80STanmay Shah 		if (!strncmp(rmem->name, "vdev0buffer", strlen("vdev0buffer"))) {
252*6b291e80STanmay Shah 			/* Init reserved memory for vdev buffer */
253*6b291e80STanmay Shah 			rproc_mem = rproc_of_resm_mem_entry_init(&rproc->dev, i,
254*6b291e80STanmay Shah 								 rmem->size,
255*6b291e80STanmay Shah 								 rmem->base,
256*6b291e80STanmay Shah 								 rmem->name);
257*6b291e80STanmay Shah 		} else {
258*6b291e80STanmay Shah 			/* Register associated reserved memory regions */
259*6b291e80STanmay Shah 			rproc_mem = rproc_mem_entry_init(&rproc->dev, NULL,
260*6b291e80STanmay Shah 							 (dma_addr_t)rmem->base,
261*6b291e80STanmay Shah 							 rmem->size, rmem->base,
262*6b291e80STanmay Shah 							 zynqmp_r5_mem_region_map,
263*6b291e80STanmay Shah 							 zynqmp_r5_mem_region_unmap,
264*6b291e80STanmay Shah 							 rmem->name);
265*6b291e80STanmay Shah 		}
266*6b291e80STanmay Shah 
267*6b291e80STanmay Shah 		if (!rproc_mem)
268*6b291e80STanmay Shah 			return -ENOMEM;
269*6b291e80STanmay Shah 
270*6b291e80STanmay Shah 		rproc_add_carveout(rproc, rproc_mem);
271*6b291e80STanmay Shah 
272*6b291e80STanmay Shah 		dev_dbg(&rproc->dev, "reserved mem carveout %s addr=%llx, size=0x%llx",
273*6b291e80STanmay Shah 			rmem->name, rmem->base, rmem->size);
274*6b291e80STanmay Shah 	}
275*6b291e80STanmay Shah 
276*6b291e80STanmay Shah 	return 0;
277*6b291e80STanmay Shah }
278*6b291e80STanmay Shah 
279*6b291e80STanmay Shah /*
280*6b291e80STanmay Shah  * tcm_mem_unmap()
281*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
282*6b291e80STanmay Shah  * @mem: tcm mem entry to unmap
283*6b291e80STanmay Shah  *
284*6b291e80STanmay Shah  * Unmap TCM banks when powering down R5 core.
285*6b291e80STanmay Shah  *
286*6b291e80STanmay Shah  * return always 0
287*6b291e80STanmay Shah  */
288*6b291e80STanmay Shah static int tcm_mem_unmap(struct rproc *rproc, struct rproc_mem_entry *mem)
289*6b291e80STanmay Shah {
290*6b291e80STanmay Shah 	iounmap((void __iomem *)mem->va);
291*6b291e80STanmay Shah 
292*6b291e80STanmay Shah 	return 0;
293*6b291e80STanmay Shah }
294*6b291e80STanmay Shah 
295*6b291e80STanmay Shah /*
296*6b291e80STanmay Shah  * tcm_mem_map()
297*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
298*6b291e80STanmay Shah  * @mem: tcm memory entry descriptor
299*6b291e80STanmay Shah  *
300*6b291e80STanmay Shah  * Given a TCM bank entry, set up the virtual address for the TCM bank's
301*6b291e80STanmay Shah  * remoteproc carveout. It also takes care of va to da address translation.
302*6b291e80STanmay Shah  *
303*6b291e80STanmay Shah  * return 0 on success, otherwise non-zero value on failure
304*6b291e80STanmay Shah  */
305*6b291e80STanmay Shah static int tcm_mem_map(struct rproc *rproc,
306*6b291e80STanmay Shah 		       struct rproc_mem_entry *mem)
307*6b291e80STanmay Shah {
308*6b291e80STanmay Shah 	void __iomem *va;
309*6b291e80STanmay Shah 
310*6b291e80STanmay Shah 	va = ioremap_wc(mem->dma, mem->len);
311*6b291e80STanmay Shah 	if (IS_ERR_OR_NULL(va))
312*6b291e80STanmay Shah 		return -ENOMEM;
313*6b291e80STanmay Shah 
314*6b291e80STanmay Shah 	/* Update memory entry va */
315*6b291e80STanmay Shah 	mem->va = (void *)va;
316*6b291e80STanmay Shah 
317*6b291e80STanmay Shah 	/* clear TCMs */
318*6b291e80STanmay Shah 	memset_io(va, 0, mem->len);
319*6b291e80STanmay Shah 
320*6b291e80STanmay Shah 	/*
321*6b291e80STanmay Shah 	 * The R5s expect their TCM banks to be at address 0x0 and 0x20000,
322*6b291e80STanmay Shah 	 * while on the Linux side they are at 0xffexxxxx.
323*6b291e80STanmay Shah 	 *
324*6b291e80STanmay Shah 	 * Zero out the high 12 bits of the address. This will give
325*6b291e80STanmay Shah 	 * expected values for TCM Banks 0A and 0B (0x0 and 0x20000).
326*6b291e80STanmay Shah 	 */
327*6b291e80STanmay Shah 	mem->da &= 0x000fffff;
328*6b291e80STanmay Shah 
329*6b291e80STanmay Shah 	/*
330*6b291e80STanmay Shah 	 * TCM Banks 1A and 1B still have to be translated.
331*6b291e80STanmay Shah 	 *
332*6b291e80STanmay Shah 	 * Below handle these two banks' absolute addresses (0xffe90000 and
333*6b291e80STanmay Shah 	 * 0xffeb0000) and convert to the expected relative addresses
334*6b291e80STanmay Shah 	 * (0x0 and 0x20000).
335*6b291e80STanmay Shah 	 */
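	/* e.g. 0xffe90000 -> 0x90000 -> 0x0 and 0xffeb0000 -> 0xb0000 -> 0x20000 */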
336*6b291e80STanmay Shah 	if (mem->da == 0x90000 || mem->da == 0xB0000)
337*6b291e80STanmay Shah 		mem->da -= 0x90000;
338*6b291e80STanmay Shah 
339*6b291e80STanmay Shah 	/* if translated TCM bank address is not valid report error */
340*6b291e80STanmay Shah 	if (mem->da != 0x0 && mem->da != 0x20000) {
341*6b291e80STanmay Shah 		dev_err(&rproc->dev, "invalid TCM address: %x\n", mem->da);
342*6b291e80STanmay Shah 		return -EINVAL;
343*6b291e80STanmay Shah 	}
344*6b291e80STanmay Shah 	return 0;
345*6b291e80STanmay Shah }
346*6b291e80STanmay Shah 
347*6b291e80STanmay Shah /*
348*6b291e80STanmay Shah  * add_tcm_carveout_split_mode()
349*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
350*6b291e80STanmay Shah  *
351*6b291e80STanmay Shah  * allocate and add remoteproc carveout for TCM memory in split mode
352*6b291e80STanmay Shah  *
353*6b291e80STanmay Shah  * return 0 on success, otherwise non-zero value on failure
354*6b291e80STanmay Shah  */
355*6b291e80STanmay Shah static int add_tcm_carveout_split_mode(struct rproc *rproc)
356*6b291e80STanmay Shah {
357*6b291e80STanmay Shah 	struct rproc_mem_entry *rproc_mem;
358*6b291e80STanmay Shah 	struct zynqmp_r5_core *r5_core;
359*6b291e80STanmay Shah 	int i, num_banks, ret;
360*6b291e80STanmay Shah 	phys_addr_t bank_addr;
361*6b291e80STanmay Shah 	struct device *dev;
362*6b291e80STanmay Shah 	u32 pm_domain_id;
363*6b291e80STanmay Shah 	size_t bank_size;
364*6b291e80STanmay Shah 	char *bank_name;
365*6b291e80STanmay Shah 
366*6b291e80STanmay Shah 	r5_core = (struct zynqmp_r5_core *)rproc->priv;
367*6b291e80STanmay Shah 	dev = r5_core->dev;
368*6b291e80STanmay Shah 	num_banks = r5_core->tcm_bank_count;
369*6b291e80STanmay Shah 
370*6b291e80STanmay Shah 	/*
371*6b291e80STanmay Shah 	 * Power on each 64KB TCM bank, register its address space
372*6b291e80STanmay Shah 	 * along with its map and unmap functions,
373*6b291e80STanmay Shah 	 * and add the carveouts accordingly.
374*6b291e80STanmay Shah 	 */
375*6b291e80STanmay Shah 	for (i = 0; i < num_banks; i++) {
376*6b291e80STanmay Shah 		bank_addr = r5_core->tcm_banks[i]->addr;
377*6b291e80STanmay Shah 		bank_name = r5_core->tcm_banks[i]->bank_name;
378*6b291e80STanmay Shah 		bank_size = r5_core->tcm_banks[i]->size;
379*6b291e80STanmay Shah 		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
380*6b291e80STanmay Shah 
381*6b291e80STanmay Shah 		ret = zynqmp_pm_request_node(pm_domain_id,
382*6b291e80STanmay Shah 					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
383*6b291e80STanmay Shah 					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
384*6b291e80STanmay Shah 		if (ret < 0) {
385*6b291e80STanmay Shah 			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
386*6b291e80STanmay Shah 			goto release_tcm_split;
387*6b291e80STanmay Shah 		}
388*6b291e80STanmay Shah 
389*6b291e80STanmay Shah 		dev_dbg(dev, "TCM carveout split mode %s addr=%llx, size=0x%lx",
390*6b291e80STanmay Shah 			bank_name, bank_addr, bank_size);
391*6b291e80STanmay Shah 
392*6b291e80STanmay Shah 		rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
393*6b291e80STanmay Shah 						 bank_size, bank_addr,
394*6b291e80STanmay Shah 						 tcm_mem_map, tcm_mem_unmap,
395*6b291e80STanmay Shah 						 bank_name);
396*6b291e80STanmay Shah 		if (!rproc_mem) {
397*6b291e80STanmay Shah 			ret = -ENOMEM;
398*6b291e80STanmay Shah 			zynqmp_pm_release_node(pm_domain_id);
399*6b291e80STanmay Shah 			goto release_tcm_split;
400*6b291e80STanmay Shah 		}
401*6b291e80STanmay Shah 
402*6b291e80STanmay Shah 		rproc_add_carveout(rproc, rproc_mem);
403*6b291e80STanmay Shah 	}
404*6b291e80STanmay Shah 
405*6b291e80STanmay Shah 	return 0;
406*6b291e80STanmay Shah 
407*6b291e80STanmay Shah release_tcm_split:
408*6b291e80STanmay Shah 	/* If failed, turn off all TCM banks turned on before */
409*6b291e80STanmay Shah 	for (i--; i >= 0; i--) {
410*6b291e80STanmay Shah 		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
411*6b291e80STanmay Shah 		zynqmp_pm_release_node(pm_domain_id);
412*6b291e80STanmay Shah 	}
413*6b291e80STanmay Shah 	return ret;
414*6b291e80STanmay Shah }
415*6b291e80STanmay Shah 
416*6b291e80STanmay Shah /*
417*6b291e80STanmay Shah  * add_tcm_carveout_lockstep_mode()
418*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
419*6b291e80STanmay Shah  *
420*6b291e80STanmay Shah  * allocate and add remoteproc carveout for TCM memory in lockstep mode
421*6b291e80STanmay Shah  *
422*6b291e80STanmay Shah  * return 0 on success, otherwise non-zero value on failure
423*6b291e80STanmay Shah  */
424*6b291e80STanmay Shah static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
425*6b291e80STanmay Shah {
426*6b291e80STanmay Shah 	struct rproc_mem_entry *rproc_mem;
427*6b291e80STanmay Shah 	struct zynqmp_r5_core *r5_core;
428*6b291e80STanmay Shah 	int i, num_banks, ret;
429*6b291e80STanmay Shah 	phys_addr_t bank_addr;
430*6b291e80STanmay Shah 	size_t bank_size = 0;
431*6b291e80STanmay Shah 	struct device *dev;
432*6b291e80STanmay Shah 	u32 pm_domain_id;
433*6b291e80STanmay Shah 	char *bank_name;
434*6b291e80STanmay Shah 
435*6b291e80STanmay Shah 	r5_core = (struct zynqmp_r5_core *)rproc->priv;
436*6b291e80STanmay Shah 	dev = r5_core->dev;
437*6b291e80STanmay Shah 
438*6b291e80STanmay Shah 	/* Go through zynqmp banks for r5 node */
439*6b291e80STanmay Shah 	num_banks = r5_core->tcm_bank_count;
440*6b291e80STanmay Shah 
441*6b291e80STanmay Shah 	/*
442*6b291e80STanmay Shah 	 * In lockstep mode, the TCM is one contiguous memory block.
443*6b291e80STanmay Shah 	 * However, each TCM bank still needs to be enabled individually.
444*6b291e80STanmay Shah 	 * So, enable each TCM bank individually, but add up their sizes
445*6b291e80STanmay Shah 	 * to create one contiguous memory region.
446*6b291e80STanmay Shah 	 */
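	/* Use bank 0's base address and name for the single combined carveout */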
447*6b291e80STanmay Shah 	bank_addr = r5_core->tcm_banks[0]->addr;
448*6b291e80STanmay Shah 	bank_name = r5_core->tcm_banks[0]->bank_name;
449*6b291e80STanmay Shah 
450*6b291e80STanmay Shah 	for (i = 0; i < num_banks; i++) {
451*6b291e80STanmay Shah 		bank_size += r5_core->tcm_banks[i]->size;
452*6b291e80STanmay Shah 		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
453*6b291e80STanmay Shah 
454*6b291e80STanmay Shah 		/* Turn on each TCM bank individually */
455*6b291e80STanmay Shah 		ret = zynqmp_pm_request_node(pm_domain_id,
456*6b291e80STanmay Shah 					     ZYNQMP_PM_CAPABILITY_ACCESS, 0,
457*6b291e80STanmay Shah 					     ZYNQMP_PM_REQUEST_ACK_BLOCKING);
458*6b291e80STanmay Shah 		if (ret < 0) {
459*6b291e80STanmay Shah 			dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
460*6b291e80STanmay Shah 			goto release_tcm_lockstep;
461*6b291e80STanmay Shah 		}
462*6b291e80STanmay Shah 	}
463*6b291e80STanmay Shah 
464*6b291e80STanmay Shah 	dev_dbg(dev, "TCM add carveout lockstep mode %s addr=0x%llx, size=0x%lx",
465*6b291e80STanmay Shah 		bank_name, bank_addr, bank_size);
466*6b291e80STanmay Shah 
467*6b291e80STanmay Shah 	/* Register TCM address range, TCM map and unmap functions */
468*6b291e80STanmay Shah 	rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
469*6b291e80STanmay Shah 					 bank_size, bank_addr,
470*6b291e80STanmay Shah 					 tcm_mem_map, tcm_mem_unmap,
471*6b291e80STanmay Shah 					 bank_name);
472*6b291e80STanmay Shah 	if (!rproc_mem) {
473*6b291e80STanmay Shah 		ret = -ENOMEM;
474*6b291e80STanmay Shah 		goto release_tcm_lockstep;
475*6b291e80STanmay Shah 	}
476*6b291e80STanmay Shah 
477*6b291e80STanmay Shah 	/* If registration succeeded, add the carveout */
478*6b291e80STanmay Shah 	rproc_add_carveout(rproc, rproc_mem);
479*6b291e80STanmay Shah 
480*6b291e80STanmay Shah 	return 0;
481*6b291e80STanmay Shah 
482*6b291e80STanmay Shah release_tcm_lockstep:
483*6b291e80STanmay Shah 	/* If failed, turn off all TCM banks turned on before */
484*6b291e80STanmay Shah 	for (i--; i >= 0; i--) {
485*6b291e80STanmay Shah 		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
486*6b291e80STanmay Shah 		zynqmp_pm_release_node(pm_domain_id);
487*6b291e80STanmay Shah 	}
488*6b291e80STanmay Shah 	return ret;
489*6b291e80STanmay Shah }
490*6b291e80STanmay Shah 
491*6b291e80STanmay Shah /*
492*6b291e80STanmay Shah  * add_tcm_banks()
493*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
494*6b291e80STanmay Shah  *
495*6b291e80STanmay Shah  * allocate and add remoteproc carveouts for TCM memory based on cluster mode
496*6b291e80STanmay Shah  *
497*6b291e80STanmay Shah  * return 0 on success, otherwise non-zero value on failure
498*6b291e80STanmay Shah  */
499*6b291e80STanmay Shah static int add_tcm_banks(struct rproc *rproc)
500*6b291e80STanmay Shah {
501*6b291e80STanmay Shah 	struct zynqmp_r5_cluster *cluster;
502*6b291e80STanmay Shah 	struct zynqmp_r5_core *r5_core;
503*6b291e80STanmay Shah 	struct device *dev;
504*6b291e80STanmay Shah 
505*6b291e80STanmay Shah 	r5_core = (struct zynqmp_r5_core *)rproc->priv;
506*6b291e80STanmay Shah 	if (!r5_core)
507*6b291e80STanmay Shah 		return -EINVAL;
508*6b291e80STanmay Shah 
509*6b291e80STanmay Shah 	dev = r5_core->dev;
510*6b291e80STanmay Shah 
511*6b291e80STanmay Shah 	cluster = dev_get_drvdata(dev->parent);
512*6b291e80STanmay Shah 	if (!cluster) {
513*6b291e80STanmay Shah 		dev_err(dev->parent, "Invalid driver data\n");
514*6b291e80STanmay Shah 		return -EINVAL;
515*6b291e80STanmay Shah 	}
516*6b291e80STanmay Shah 
517*6b291e80STanmay Shah 	/*
518*6b291e80STanmay Shah 	 * In lockstep mode, the TCM banks form one contiguous 256KB memory region.
519*6b291e80STanmay Shah 	 * In split mode, each TCM bank is 64KB and not contiguous.
520*6b291e80STanmay Shah 	 * We add memory carveouts accordingly.
521*6b291e80STanmay Shah 	 */
522*6b291e80STanmay Shah 	if (cluster->mode == SPLIT_MODE)
523*6b291e80STanmay Shah 		return add_tcm_carveout_split_mode(rproc);
524*6b291e80STanmay Shah 	else if (cluster->mode == LOCKSTEP_MODE)
525*6b291e80STanmay Shah 		return add_tcm_carveout_lockstep_mode(rproc);
526*6b291e80STanmay Shah 
527*6b291e80STanmay Shah 	return -EINVAL;
528*6b291e80STanmay Shah }
529*6b291e80STanmay Shah 
530*6b291e80STanmay Shah /*
531*6b291e80STanmay Shah  * zynqmp_r5_parse_fw()
532*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
533*6b291e80STanmay Shah  * @fw: ptr to firmware to be loaded onto r5 core
534*6b291e80STanmay Shah  *
535*6b291e80STanmay Shah  * get resource table if available
536*6b291e80STanmay Shah  *
537*6b291e80STanmay Shah  * return 0 on success, otherwise non-zero value on failure
538*6b291e80STanmay Shah  */
539*6b291e80STanmay Shah static int zynqmp_r5_parse_fw(struct rproc *rproc, const struct firmware *fw)
540*6b291e80STanmay Shah {
541*6b291e80STanmay Shah 	int ret;
542*6b291e80STanmay Shah 
543*6b291e80STanmay Shah 	ret = rproc_elf_load_rsc_table(rproc, fw);
544*6b291e80STanmay Shah 	if (ret == -EINVAL) {
545*6b291e80STanmay Shah 		/*
546*6b291e80STanmay Shah 		 * A resource table is only required for IPC.
547*6b291e80STanmay Shah 		 * If it is not present, this is not necessarily an error;
548*6b291e80STanmay Shah 		 * for example, a simple r5 "hello world" application has none.
549*6b291e80STanmay Shah 		 * So simply inform the user and keep going.
550*6b291e80STanmay Shah 		 */
551*6b291e80STanmay Shah 		dev_info(&rproc->dev, "no resource table found.\n");
552*6b291e80STanmay Shah 		ret = 0;
553*6b291e80STanmay Shah 	}
554*6b291e80STanmay Shah 	return ret;
555*6b291e80STanmay Shah }
556*6b291e80STanmay Shah 
557*6b291e80STanmay Shah /**
558*6b291e80STanmay Shah  * zynqmp_r5_rproc_prepare()
559*6b291e80STanmay Shah  * Adds carveouts for the TCM banks and reserved memory regions
560*6b291e80STanmay Shah  *
561*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
562*6b291e80STanmay Shah  *
563*6b291e80STanmay Shah  * Return: 0 for success else < 0 error code
564*6b291e80STanmay Shah  */
565*6b291e80STanmay Shah static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
566*6b291e80STanmay Shah {
567*6b291e80STanmay Shah 	int ret;
568*6b291e80STanmay Shah 
569*6b291e80STanmay Shah 	ret = add_tcm_banks(rproc);
570*6b291e80STanmay Shah 	if (ret) {
571*6b291e80STanmay Shah 		dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
572*6b291e80STanmay Shah 		return ret;
573*6b291e80STanmay Shah 	}
574*6b291e80STanmay Shah 
575*6b291e80STanmay Shah 	ret = add_mem_regions_carveout(rproc);
576*6b291e80STanmay Shah 	if (ret) {
577*6b291e80STanmay Shah 		dev_err(&rproc->dev, "failed to get reserve mem regions %d\n", ret);
578*6b291e80STanmay Shah 		return ret;
579*6b291e80STanmay Shah 	}
580*6b291e80STanmay Shah 
581*6b291e80STanmay Shah 	return 0;
582*6b291e80STanmay Shah }
583*6b291e80STanmay Shah 
584*6b291e80STanmay Shah /**
585*6b291e80STanmay Shah  * zynqmp_r5_rproc_unprepare()
586*6b291e80STanmay Shah  * Turns off TCM banks using power-domain id
587*6b291e80STanmay Shah  *
588*6b291e80STanmay Shah  * @rproc: single R5 core's corresponding rproc instance
589*6b291e80STanmay Shah  *
590*6b291e80STanmay Shah  * Return: always 0
591*6b291e80STanmay Shah  */
592*6b291e80STanmay Shah static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
593*6b291e80STanmay Shah {
594*6b291e80STanmay Shah 	struct zynqmp_r5_core *r5_core;
595*6b291e80STanmay Shah 	u32 pm_domain_id;
596*6b291e80STanmay Shah 	int i;
597*6b291e80STanmay Shah 
598*6b291e80STanmay Shah 	r5_core = (struct zynqmp_r5_core *)rproc->priv;
599*6b291e80STanmay Shah 
600*6b291e80STanmay Shah 	for (i = 0; i < r5_core->tcm_bank_count; i++) {
601*6b291e80STanmay Shah 		pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
602*6b291e80STanmay Shah 		if (zynqmp_pm_release_node(pm_domain_id))
603*6b291e80STanmay Shah 			dev_warn(r5_core->dev,
604*6b291e80STanmay Shah 				 "can't turn off TCM bank 0x%x", pm_domain_id);
605*6b291e80STanmay Shah 	}
606*6b291e80STanmay Shah 
607*6b291e80STanmay Shah 	return 0;
608*6b291e80STanmay Shah }
609*6b291e80STanmay Shah 
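/*
 * Image loading, ELF sanity checking and resource-table lookup reuse the
 * generic remoteproc ELF helpers; only prepare/unprepare, start/stop and
 * parse_fw are ZynqMP specific.
 */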
610*6b291e80STanmay Shah static const struct rproc_ops zynqmp_r5_rproc_ops = {
611*6b291e80STanmay Shah 	.prepare	= zynqmp_r5_rproc_prepare,
612*6b291e80STanmay Shah 	.unprepare	= zynqmp_r5_rproc_unprepare,
613*6b291e80STanmay Shah 	.start		= zynqmp_r5_rproc_start,
614*6b291e80STanmay Shah 	.stop		= zynqmp_r5_rproc_stop,
615*6b291e80STanmay Shah 	.load		= rproc_elf_load_segments,
616*6b291e80STanmay Shah 	.parse_fw	= zynqmp_r5_parse_fw,
617*6b291e80STanmay Shah 	.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
618*6b291e80STanmay Shah 	.sanity_check	= rproc_elf_sanity_check,
619*6b291e80STanmay Shah 	.get_boot_addr	= rproc_elf_get_boot_addr,
620*6b291e80STanmay Shah };
621*6b291e80STanmay Shah 
622*6b291e80STanmay Shah /**
623*6b291e80STanmay Shah  * zynqmp_r5_add_rproc_core()
624*6b291e80STanmay Shah  * Allocate and add struct rproc object for each r5f core
625*6b291e80STanmay Shah  * This is called for each individual r5f core
626*6b291e80STanmay Shah  *
627*6b291e80STanmay Shah  * @cdev: Device of the r5 core
628*6b291e80STanmay Shah  *
629*6b291e80STanmay Shah  * Return: zynqmp_r5_core object for success else error code pointer
630*6b291e80STanmay Shah  */
631*6b291e80STanmay Shah static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
632*6b291e80STanmay Shah {
633*6b291e80STanmay Shah 	struct zynqmp_r5_core *r5_core;
634*6b291e80STanmay Shah 	struct rproc *r5_rproc;
635*6b291e80STanmay Shah 	int ret;
636*6b291e80STanmay Shah 
637*6b291e80STanmay Shah 	/* Set up DMA mask */
638*6b291e80STanmay Shah 	ret = dma_set_coherent_mask(cdev, DMA_BIT_MASK(32));
639*6b291e80STanmay Shah 	if (ret)
640*6b291e80STanmay Shah 		return ERR_PTR(ret);
641*6b291e80STanmay Shah 
642*6b291e80STanmay Shah 	/* Allocate remoteproc instance */
643*6b291e80STanmay Shah 	r5_rproc = rproc_alloc(cdev, dev_name(cdev),
644*6b291e80STanmay Shah 			       &zynqmp_r5_rproc_ops,
645*6b291e80STanmay Shah 			       NULL, sizeof(struct zynqmp_r5_core));
646*6b291e80STanmay Shah 	if (!r5_rproc) {
647*6b291e80STanmay Shah 		dev_err(cdev, "failed to allocate memory for rproc instance\n");
648*6b291e80STanmay Shah 		return ERR_PTR(-ENOMEM);
649*6b291e80STanmay Shah 	}
650*6b291e80STanmay Shah 
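	/*
	 * Do not boot the core as soon as firmware is loaded; user space
	 * starts it explicitly, e.g.
	 * "echo start > /sys/class/remoteproc/remoteprocX/state".
	 */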
651*6b291e80STanmay Shah 	r5_rproc->auto_boot = false;
652*6b291e80STanmay Shah 	r5_core = (struct zynqmp_r5_core *)r5_rproc->priv;
653*6b291e80STanmay Shah 	r5_core->dev = cdev;
654*6b291e80STanmay Shah 	r5_core->np = dev_of_node(cdev);
655*6b291e80STanmay Shah 	if (!r5_core->np) {
656*6b291e80STanmay Shah 		dev_err(cdev, "can't get device node for r5 core\n");
657*6b291e80STanmay Shah 		ret = -EINVAL;
658*6b291e80STanmay Shah 		goto free_rproc;
659*6b291e80STanmay Shah 	}
660*6b291e80STanmay Shah 
661*6b291e80STanmay Shah 	/* Add R5 remoteproc core */
662*6b291e80STanmay Shah 	ret = rproc_add(r5_rproc);
663*6b291e80STanmay Shah 	if (ret) {
664*6b291e80STanmay Shah 		dev_err(cdev, "failed to add r5 remoteproc\n");
665*6b291e80STanmay Shah 		goto free_rproc;
666*6b291e80STanmay Shah 	}
667*6b291e80STanmay Shah 
668*6b291e80STanmay Shah 	r5_core->rproc = r5_rproc;
669*6b291e80STanmay Shah 	return r5_core;
670*6b291e80STanmay Shah 
671*6b291e80STanmay Shah free_rproc:
672*6b291e80STanmay Shah 	rproc_free(r5_rproc);
673*6b291e80STanmay Shah 	return ERR_PTR(ret);
674*6b291e80STanmay Shah }
675*6b291e80STanmay Shah 
676*6b291e80STanmay Shah /**
677*6b291e80STanmay Shah  * zynqmp_r5_get_tcm_node()
678*6b291e80STanmay Shah  * Ideally this function should parse the tcm node and store the information
679*6b291e80STanmay Shah  * in the r5_core instance. For now, hardcoded TCM information is used.
680*6b291e80STanmay Shah  * This approach is used while TCM bindings for system-dt are being developed.
681*6b291e80STanmay Shah  *
682*6b291e80STanmay Shah  * @cluster: pointer to zynqmp_r5_cluster type object
683*6b291e80STanmay Shah  *
684*6b291e80STanmay Shah  * Return: 0 for success and < 0 error code for failure.
685*6b291e80STanmay Shah  */
686*6b291e80STanmay Shah static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster)
687*6b291e80STanmay Shah {
688*6b291e80STanmay Shah 	struct device *dev = cluster->dev;
689*6b291e80STanmay Shah 	struct zynqmp_r5_core *r5_core;
690*6b291e80STanmay Shah 	int tcm_bank_count, tcm_node;
691*6b291e80STanmay Shah 	int i, j;
692*6b291e80STanmay Shah 
693*6b291e80STanmay Shah 	tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks);
694*6b291e80STanmay Shah 
695*6b291e80STanmay Shah 	/* count per core tcm banks */
696*6b291e80STanmay Shah 	tcm_bank_count = tcm_bank_count / cluster->core_count;
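	/*
	 * e.g. 4 banks / 2 cores = 2 banks per core in split mode; in lockstep
	 * mode core_count is 1, so core0 gets all 4 banks.
	 */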
697*6b291e80STanmay Shah 
698*6b291e80STanmay Shah 	/*
699*6b291e80STanmay Shah 	 * r5 core0 will use all of the TCM banks in lockstep mode.
700*6b291e80STanmay Shah 	 * In split mode, r5 core0 will use 128KB and r5 core1 will use the
701*6b291e80STanmay Shah 	 * other 128KB. Assign TCM banks to each core accordingly.
702*6b291e80STanmay Shah 	 */
703*6b291e80STanmay Shah 	tcm_node = 0;
704*6b291e80STanmay Shah 	for (i = 0; i < cluster->core_count; i++) {
705*6b291e80STanmay Shah 		r5_core = cluster->r5_cores[i];
706*6b291e80STanmay Shah 		r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
707*6b291e80STanmay Shah 						  sizeof(struct mem_bank_data *),
708*6b291e80STanmay Shah 						  GFP_KERNEL);
709*6b291e80STanmay Shah 		if (!r5_core->tcm_banks)
710*6b291e80STanmay Shah 			return -ENOMEM;
711*6b291e80STanmay Shah 
712*6b291e80STanmay Shah 		for (j = 0; j < tcm_bank_count; j++) {
713*6b291e80STanmay Shah 			/*
714*6b291e80STanmay Shah 			 * Use pre-defined TCM reg values.
715*6b291e80STanmay Shah 			 * Eventually this should be replaced by values
716*6b291e80STanmay Shah 			 * parsed from dts.
717*6b291e80STanmay Shah 			 */
718*6b291e80STanmay Shah 			r5_core->tcm_banks[j] =
719*6b291e80STanmay Shah 				(struct mem_bank_data *)&zynqmp_tcm_banks[tcm_node];
720*6b291e80STanmay Shah 			tcm_node++;
721*6b291e80STanmay Shah 		}
722*6b291e80STanmay Shah 
723*6b291e80STanmay Shah 		r5_core->tcm_bank_count = tcm_bank_count;
724*6b291e80STanmay Shah 	}
725*6b291e80STanmay Shah 
726*6b291e80STanmay Shah 	return 0;
727*6b291e80STanmay Shah }
728*6b291e80STanmay Shah 
729*6b291e80STanmay Shah /**
730*6b291e80STanmay Shah  * zynqmp_r5_get_mem_region_node()
731*6b291e80STanmay Shah  * Parse the memory-region property and get the reserved mem regions
732*6b291e80STanmay Shah  *
733*6b291e80STanmay Shah  * @r5_core: pointer to zynqmp_r5_core type object
734*6b291e80STanmay Shah  *
735*6b291e80STanmay Shah  * Return: 0 for success and error code for failure.
736*6b291e80STanmay Shah  */
737*6b291e80STanmay Shah static int zynqmp_r5_get_mem_region_node(struct zynqmp_r5_core *r5_core)
738*6b291e80STanmay Shah {
739*6b291e80STanmay Shah 	struct device_node *np, *rmem_np;
740*6b291e80STanmay Shah 	struct reserved_mem **rmem;
741*6b291e80STanmay Shah 	int res_mem_count, i;
742*6b291e80STanmay Shah 	struct device *dev;
743*6b291e80STanmay Shah 
744*6b291e80STanmay Shah 	dev = r5_core->dev;
745*6b291e80STanmay Shah 	np = r5_core->np;
746*6b291e80STanmay Shah 
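	/* memory-region is a list of phandles; count the phandle-sized elements */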
747*6b291e80STanmay Shah 	res_mem_count = of_property_count_elems_of_size(np, "memory-region",
748*6b291e80STanmay Shah 							sizeof(phandle));
749*6b291e80STanmay Shah 	if (res_mem_count <= 0) {
750*6b291e80STanmay Shah 		dev_warn(dev, "failed to get memory-region property %d\n",
751*6b291e80STanmay Shah 			 res_mem_count);
752*6b291e80STanmay Shah 		return 0;
753*6b291e80STanmay Shah 	}
754*6b291e80STanmay Shah 
755*6b291e80STanmay Shah 	rmem = devm_kcalloc(dev, res_mem_count,
756*6b291e80STanmay Shah 			    sizeof(struct reserved_mem *), GFP_KERNEL);
757*6b291e80STanmay Shah 	if (!rmem)
758*6b291e80STanmay Shah 		return -ENOMEM;
759*6b291e80STanmay Shah 
760*6b291e80STanmay Shah 	for (i = 0; i < res_mem_count; i++) {
761*6b291e80STanmay Shah 		rmem_np = of_parse_phandle(np, "memory-region", i);
762*6b291e80STanmay Shah 		if (!rmem_np)
763*6b291e80STanmay Shah 			goto release_rmem;
764*6b291e80STanmay Shah 
765*6b291e80STanmay Shah 		rmem[i] = of_reserved_mem_lookup(rmem_np);
766*6b291e80STanmay Shah 		if (!rmem[i]) {
767*6b291e80STanmay Shah 			of_node_put(rmem_np);
768*6b291e80STanmay Shah 			goto release_rmem;
769*6b291e80STanmay Shah 		}
770*6b291e80STanmay Shah 
771*6b291e80STanmay Shah 		of_node_put(rmem_np);
772*6b291e80STanmay Shah 	}
773*6b291e80STanmay Shah 
774*6b291e80STanmay Shah 	r5_core->rmem_count = res_mem_count;
775*6b291e80STanmay Shah 	r5_core->rmem = rmem;
776*6b291e80STanmay Shah 	return 0;
777*6b291e80STanmay Shah 
778*6b291e80STanmay Shah release_rmem:
779*6b291e80STanmay Shah 	return -EINVAL;
780*6b291e80STanmay Shah }
781*6b291e80STanmay Shah 
782*6b291e80STanmay Shah /*
783*6b291e80STanmay Shah  * zynqmp_r5_core_init()
784*6b291e80STanmay Shah  * Create and initialize zynqmp_r5_core type object
785*6b291e80STanmay Shah  *
786*6b291e80STanmay Shah  * @cluster: pointer to zynqmp_r5_cluster type object
787*6b291e80STanmay Shah  * @fw_reg_val: value expected by firmware to configure RPU cluster mode
788*6b291e80STanmay Shah  * @tcm_mode: value expected by fw to configure TCM mode (lockstep or split)
789*6b291e80STanmay Shah  *
790*6b291e80STanmay Shah  * Return: 0 for success and error code for failure.
791*6b291e80STanmay Shah  */
792*6b291e80STanmay Shah static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
793*6b291e80STanmay Shah 			       enum rpu_oper_mode fw_reg_val,
794*6b291e80STanmay Shah 			       enum rpu_tcm_comb tcm_mode)
795*6b291e80STanmay Shah {
796*6b291e80STanmay Shah 	struct device *dev = cluster->dev;
797*6b291e80STanmay Shah 	struct zynqmp_r5_core *r5_core;
798*6b291e80STanmay Shah 	int ret, i;
799*6b291e80STanmay Shah 
800*6b291e80STanmay Shah 	ret = zynqmp_r5_get_tcm_node(cluster);
801*6b291e80STanmay Shah 	if (ret < 0) {
802*6b291e80STanmay Shah 		dev_err(dev, "can't get tcm node, err %d\n", ret);
803*6b291e80STanmay Shah 		return ret;
804*6b291e80STanmay Shah 	}
805*6b291e80STanmay Shah 
806*6b291e80STanmay Shah 	for (i = 0; i < cluster->core_count; i++) {
807*6b291e80STanmay Shah 		r5_core = cluster->r5_cores[i];
808*6b291e80STanmay Shah 
809*6b291e80STanmay Shah 		ret = zynqmp_r5_get_mem_region_node(r5_core);
810*6b291e80STanmay Shah 		if (ret)
811*6b291e80STanmay Shah 			dev_warn(dev, "memory-region prop failed %d\n", ret);
812*6b291e80STanmay Shah 
813*6b291e80STanmay Shah 		/* Initialize r5 cores with power-domains parsed from dts */
814*6b291e80STanmay Shah 		ret = of_property_read_u32_index(r5_core->np, "power-domains",
815*6b291e80STanmay Shah 						 1, &r5_core->pm_domain_id);
816*6b291e80STanmay Shah 		if (ret) {
817*6b291e80STanmay Shah 			dev_err(dev, "failed to get power-domains property\n");
818*6b291e80STanmay Shah 			return ret;
819*6b291e80STanmay Shah 		}
820*6b291e80STanmay Shah 
821*6b291e80STanmay Shah 		ret = zynqmp_r5_set_mode(r5_core, fw_reg_val, tcm_mode);
822*6b291e80STanmay Shah 		if (ret) {
823*6b291e80STanmay Shah 			dev_err(dev, "failed to set r5 cluster mode %d, err %d\n",
824*6b291e80STanmay Shah 				cluster->mode, ret);
825*6b291e80STanmay Shah 			return ret;
826*6b291e80STanmay Shah 		}
827*6b291e80STanmay Shah 	}
828*6b291e80STanmay Shah 
829*6b291e80STanmay Shah 	return 0;
830*6b291e80STanmay Shah }
831*6b291e80STanmay Shah 
832*6b291e80STanmay Shah /*
833*6b291e80STanmay Shah  * zynqmp_r5_cluster_init()
834*6b291e80STanmay Shah  * Create and initialize zynqmp_r5_cluster type object
835*6b291e80STanmay Shah  *
836*6b291e80STanmay Shah  * @cluster: pointer to zynqmp_r5_cluster type object
837*6b291e80STanmay Shah  *
838*6b291e80STanmay Shah  * Return: 0 for success and error code for failure.
839*6b291e80STanmay Shah  */
840*6b291e80STanmay Shah static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
841*6b291e80STanmay Shah {
842*6b291e80STanmay Shah 	enum zynqmp_r5_cluster_mode cluster_mode = LOCKSTEP_MODE;
843*6b291e80STanmay Shah 	struct device *dev = cluster->dev;
844*6b291e80STanmay Shah 	struct device_node *dev_node = dev_of_node(dev);
845*6b291e80STanmay Shah 	struct platform_device *child_pdev;
846*6b291e80STanmay Shah 	struct zynqmp_r5_core **r5_cores;
847*6b291e80STanmay Shah 	enum rpu_oper_mode fw_reg_val;
848*6b291e80STanmay Shah 	struct device **child_devs;
849*6b291e80STanmay Shah 	struct device_node *child;
850*6b291e80STanmay Shah 	enum rpu_tcm_comb tcm_mode;
851*6b291e80STanmay Shah 	int core_count, ret, i;
852*6b291e80STanmay Shah 
853*6b291e80STanmay Shah 	ret = of_property_read_u32(dev_node, "xlnx,cluster-mode", &cluster_mode);
854*6b291e80STanmay Shah 
855*6b291e80STanmay Shah 	/*
856*6b291e80STanmay Shah 	 * On success this returns 0; if the property is not defined it returns
857*6b291e80STanmay Shah 	 * -EINVAL, in which case the default is LOCKSTEP mode. Any other
858*6b291e80STanmay Shah 	 * failure returns a relevant error code < 0.
859*6b291e80STanmay Shah 	 */
860*6b291e80STanmay Shah 	if (ret != -EINVAL && ret != 0) {
861*6b291e80STanmay Shah 		dev_err(dev, "Invalid xlnx,cluster-mode property\n");
862*6b291e80STanmay Shah 		return ret;
863*6b291e80STanmay Shah 	}
864*6b291e80STanmay Shah 
865*6b291e80STanmay Shah 	/*
866*6b291e80STanmay Shah 	 * For now the driver only supports split mode and lockstep mode.
867*6b291e80STanmay Shah 	 * Fail the driver probe if any other mode is set in the dts.
868*6b291e80STanmay Shah 	 */
869*6b291e80STanmay Shah 	if (cluster_mode == LOCKSTEP_MODE) {
870*6b291e80STanmay Shah 		tcm_mode = PM_RPU_TCM_COMB;
871*6b291e80STanmay Shah 		fw_reg_val = PM_RPU_MODE_LOCKSTEP;
872*6b291e80STanmay Shah 	} else if (cluster_mode == SPLIT_MODE) {
873*6b291e80STanmay Shah 		tcm_mode = PM_RPU_TCM_SPLIT;
874*6b291e80STanmay Shah 		fw_reg_val = PM_RPU_MODE_SPLIT;
875*6b291e80STanmay Shah 	} else {
876*6b291e80STanmay Shah 		dev_err(dev, "driver does not support cluster mode %d\n", cluster_mode);
877*6b291e80STanmay Shah 		return -EINVAL;
878*6b291e80STanmay Shah 	}
879*6b291e80STanmay Shah 
880*6b291e80STanmay Shah 	/*
881*6b291e80STanmay Shah 	 * The number of cores is decided by the number of child nodes of the
882*6b291e80STanmay Shah 	 * r5f subsystem node in the dts. If split mode is used in the dts,
883*6b291e80STanmay Shah 	 * 2 child nodes are expected.
884*6b291e80STanmay Shah 	 * In lockstep mode, if two child nodes are available,
885*6b291e80STanmay Shah 	 * only use the first child node and consider it as core0,
886*6b291e80STanmay Shah 	 * ignoring the core1 dt node.
887*6b291e80STanmay Shah 	 */
888*6b291e80STanmay Shah 	core_count = of_get_available_child_count(dev_node);
889*6b291e80STanmay Shah 	if (core_count == 0) {
890*6b291e80STanmay Shah 		dev_err(dev, "Invalid number of r5 cores %d", core_count);
891*6b291e80STanmay Shah 		return -EINVAL;
892*6b291e80STanmay Shah 	} else if (cluster_mode == SPLIT_MODE && core_count != 2) {
893*6b291e80STanmay Shah 		dev_err(dev, "Invalid number of r5 cores for split mode\n");
894*6b291e80STanmay Shah 		return -EINVAL;
895*6b291e80STanmay Shah 	} else if (cluster_mode == LOCKSTEP_MODE && core_count == 2) {
896*6b291e80STanmay Shah 		dev_warn(dev, "Only r5 core0 will be used\n");
897*6b291e80STanmay Shah 		core_count = 1;
898*6b291e80STanmay Shah 	}
899*6b291e80STanmay Shah 
900*6b291e80STanmay Shah 	child_devs = kcalloc(core_count, sizeof(struct device *), GFP_KERNEL);
901*6b291e80STanmay Shah 	if (!child_devs)
902*6b291e80STanmay Shah 		return -ENOMEM;
903*6b291e80STanmay Shah 
904*6b291e80STanmay Shah 	r5_cores = kcalloc(core_count,
905*6b291e80STanmay Shah 			   sizeof(struct zynqmp_r5_core *), GFP_KERNEL);
906*6b291e80STanmay Shah 	if (!r5_cores) {
907*6b291e80STanmay Shah 		kfree(child_devs);
908*6b291e80STanmay Shah 		return -ENOMEM;
909*6b291e80STanmay Shah 	}
910*6b291e80STanmay Shah 
911*6b291e80STanmay Shah 	i = 0;
912*6b291e80STanmay Shah 	for_each_available_child_of_node(dev_node, child) {
913*6b291e80STanmay Shah 		child_pdev = of_find_device_by_node(child);
914*6b291e80STanmay Shah 		if (!child_pdev) {
915*6b291e80STanmay Shah 			of_node_put(child);
916*6b291e80STanmay Shah 			ret = -ENODEV;
917*6b291e80STanmay Shah 			goto release_r5_cores;
918*6b291e80STanmay Shah 		}
919*6b291e80STanmay Shah 
920*6b291e80STanmay Shah 		child_devs[i] = &child_pdev->dev;
921*6b291e80STanmay Shah 
922*6b291e80STanmay Shah 		/* create and add remoteproc instance of type struct rproc */
923*6b291e80STanmay Shah 		r5_cores[i] = zynqmp_r5_add_rproc_core(&child_pdev->dev);
924*6b291e80STanmay Shah 		if (IS_ERR(r5_cores[i])) {
925*6b291e80STanmay Shah 			of_node_put(child);
926*6b291e80STanmay Shah 			ret = PTR_ERR(r5_cores[i]);
927*6b291e80STanmay Shah 			r5_cores[i] = NULL;
928*6b291e80STanmay Shah 			goto release_r5_cores;
929*6b291e80STanmay Shah 		}
930*6b291e80STanmay Shah 
931*6b291e80STanmay Shah 		/*
932*6b291e80STanmay Shah 		 * If two child nodes are available in the dts in lockstep mode,
933*6b291e80STanmay Shah 		 * then ignore the second child node.
934*6b291e80STanmay Shah 		 */
935*6b291e80STanmay Shah 		if (cluster_mode == LOCKSTEP_MODE) {
936*6b291e80STanmay Shah 			of_node_put(child);
937*6b291e80STanmay Shah 			break;
938*6b291e80STanmay Shah 		}
939*6b291e80STanmay Shah 
940*6b291e80STanmay Shah 		i++;
941*6b291e80STanmay Shah 	}
942*6b291e80STanmay Shah 
943*6b291e80STanmay Shah 	cluster->mode = cluster_mode;
944*6b291e80STanmay Shah 	cluster->core_count = core_count;
945*6b291e80STanmay Shah 	cluster->r5_cores = r5_cores;
946*6b291e80STanmay Shah 
947*6b291e80STanmay Shah 	ret = zynqmp_r5_core_init(cluster, fw_reg_val, tcm_mode);
948*6b291e80STanmay Shah 	if (ret < 0) {
949*6b291e80STanmay Shah 		dev_err(dev, "failed to init r5 core err %d\n", ret);
950*6b291e80STanmay Shah 		cluster->core_count = 0;
951*6b291e80STanmay Shah 		cluster->r5_cores = NULL;
952*6b291e80STanmay Shah 
953*6b291e80STanmay Shah 		/*
954*6b291e80STanmay Shah 		 * At this point rproc resources for each core are allocated;
955*6b291e80STanmay Shah 		 * adjust the index to free the resources in reverse order.
956*6b291e80STanmay Shah 		 */
957*6b291e80STanmay Shah 		i = core_count - 1;
958*6b291e80STanmay Shah 		goto release_r5_cores;
959*6b291e80STanmay Shah 	}
960*6b291e80STanmay Shah 
961*6b291e80STanmay Shah 	kfree(child_devs);
962*6b291e80STanmay Shah 	return 0;
963*6b291e80STanmay Shah 
964*6b291e80STanmay Shah release_r5_cores:
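	/*
	 * Walk back over everything set up so far: drop the child device
	 * references and tear down any rproc instances that were created.
	 */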
965*6b291e80STanmay Shah 	while (i >= 0) {
966*6b291e80STanmay Shah 		put_device(child_devs[i]);
967*6b291e80STanmay Shah 		if (r5_cores[i]) {
968*6b291e80STanmay Shah 			of_reserved_mem_device_release(r5_cores[i]->dev);
969*6b291e80STanmay Shah 			rproc_del(r5_cores[i]->rproc);
970*6b291e80STanmay Shah 			rproc_free(r5_cores[i]->rproc);
971*6b291e80STanmay Shah 		}
972*6b291e80STanmay Shah 		i--;
973*6b291e80STanmay Shah 	}
974*6b291e80STanmay Shah 	kfree(r5_cores);
975*6b291e80STanmay Shah 	kfree(child_devs);
976*6b291e80STanmay Shah 	return ret;
977*6b291e80STanmay Shah }
978*6b291e80STanmay Shah 
979*6b291e80STanmay Shah static void zynqmp_r5_cluster_exit(void *data)
980*6b291e80STanmay Shah {
981*6b291e80STanmay Shah 	struct platform_device *pdev = (struct platform_device *)data;
982*6b291e80STanmay Shah 	struct zynqmp_r5_cluster *cluster;
983*6b291e80STanmay Shah 	struct zynqmp_r5_core *r5_core;
984*6b291e80STanmay Shah 	int i;
985*6b291e80STanmay Shah 
986*6b291e80STanmay Shah 	cluster = (struct zynqmp_r5_cluster *)platform_get_drvdata(pdev);
987*6b291e80STanmay Shah 	if (!cluster)
988*6b291e80STanmay Shah 		return;
989*6b291e80STanmay Shah 
990*6b291e80STanmay Shah 	for (i = 0; i < cluster->core_count; i++) {
991*6b291e80STanmay Shah 		r5_core = cluster->r5_cores[i];
992*6b291e80STanmay Shah 		of_reserved_mem_device_release(r5_core->dev);
993*6b291e80STanmay Shah 		put_device(r5_core->dev);
994*6b291e80STanmay Shah 		rproc_del(r5_core->rproc);
995*6b291e80STanmay Shah 		rproc_free(r5_core->rproc);
996*6b291e80STanmay Shah 	}
997*6b291e80STanmay Shah 
998*6b291e80STanmay Shah 	kfree(cluster->r5_cores);
999*6b291e80STanmay Shah 	kfree(cluster);
1000*6b291e80STanmay Shah 	platform_set_drvdata(pdev, NULL);
1001*6b291e80STanmay Shah }
1002*6b291e80STanmay Shah 
1003*6b291e80STanmay Shah /*
1004*6b291e80STanmay Shah  * zynqmp_r5_remoteproc_probe()
1005*6b291e80STanmay Shah  * parse device-tree, initialize hardware and allocate required resources
1006*6b291e80STanmay Shah  * and remoteproc ops
1007*6b291e80STanmay Shah  *
1008*6b291e80STanmay Shah  * @pdev: domain platform device for R5 cluster
1009*6b291e80STanmay Shah  *
1010*6b291e80STanmay Shah  * Return: 0 for success and < 0 for failure.
1011*6b291e80STanmay Shah  */
1012*6b291e80STanmay Shah static int zynqmp_r5_remoteproc_probe(struct platform_device *pdev)
1013*6b291e80STanmay Shah {
1014*6b291e80STanmay Shah 	struct zynqmp_r5_cluster *cluster;
1015*6b291e80STanmay Shah 	struct device *dev = &pdev->dev;
1016*6b291e80STanmay Shah 	int ret;
1017*6b291e80STanmay Shah 
1018*6b291e80STanmay Shah 	cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);
1019*6b291e80STanmay Shah 	if (!cluster)
1020*6b291e80STanmay Shah 		return -ENOMEM;
1021*6b291e80STanmay Shah 
1022*6b291e80STanmay Shah 	cluster->dev = dev;
1023*6b291e80STanmay Shah 
1024*6b291e80STanmay Shah 	ret = devm_of_platform_populate(dev);
1025*6b291e80STanmay Shah 	if (ret) {
1026*6b291e80STanmay Shah 		dev_err_probe(dev, ret, "failed to populate platform dev\n");
1027*6b291e80STanmay Shah 		kfree(cluster);
1028*6b291e80STanmay Shah 		return ret;
1029*6b291e80STanmay Shah 	}
1030*6b291e80STanmay Shah 
1031*6b291e80STanmay Shah 	/* wire in so each core can be cleaned up at driver remove */
1032*6b291e80STanmay Shah 	platform_set_drvdata(pdev, cluster);
1033*6b291e80STanmay Shah 
1034*6b291e80STanmay Shah 	ret = zynqmp_r5_cluster_init(cluster);
1035*6b291e80STanmay Shah 	if (ret) {
1036*6b291e80STanmay Shah 		kfree(cluster);
1037*6b291e80STanmay Shah 		platform_set_drvdata(pdev, NULL);
1038*6b291e80STanmay Shah 		dev_err_probe(dev, ret, "Invalid r5f subsystem device tree\n");
1039*6b291e80STanmay Shah 		return ret;
1040*6b291e80STanmay Shah 	}
1041*6b291e80STanmay Shah 
1042*6b291e80STanmay Shah 	ret = devm_add_action_or_reset(dev, zynqmp_r5_cluster_exit, pdev);
1043*6b291e80STanmay Shah 	if (ret)
1044*6b291e80STanmay Shah 		return ret;
1045*6b291e80STanmay Shah 
1046*6b291e80STanmay Shah 	return 0;
1047*6b291e80STanmay Shah }
1048*6b291e80STanmay Shah 
1049*6b291e80STanmay Shah /* Match table for OF platform binding */
1050*6b291e80STanmay Shah static const struct of_device_id zynqmp_r5_remoteproc_match[] = {
1051*6b291e80STanmay Shah 	{ .compatible = "xlnx,zynqmp-r5fss", },
1052*6b291e80STanmay Shah 	{ /* end of list */ },
1053*6b291e80STanmay Shah };
1054*6b291e80STanmay Shah MODULE_DEVICE_TABLE(of, zynqmp_r5_remoteproc_match);
1055*6b291e80STanmay Shah 
1056*6b291e80STanmay Shah static struct platform_driver zynqmp_r5_remoteproc_driver = {
1057*6b291e80STanmay Shah 	.probe = zynqmp_r5_remoteproc_probe,
1058*6b291e80STanmay Shah 	.driver = {
1059*6b291e80STanmay Shah 		.name = "zynqmp_r5_remoteproc",
1060*6b291e80STanmay Shah 		.of_match_table = zynqmp_r5_remoteproc_match,
1061*6b291e80STanmay Shah 	},
1062*6b291e80STanmay Shah };
1063*6b291e80STanmay Shah module_platform_driver(zynqmp_r5_remoteproc_driver);
1064*6b291e80STanmay Shah 
1065*6b291e80STanmay Shah MODULE_DESCRIPTION("Xilinx R5F remote processor driver");
1066*6b291e80STanmay Shah MODULE_AUTHOR("Xilinx Inc.");
1067*6b291e80STanmay Shah MODULE_LICENSE("GPL");
1068