// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/iopoll.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>

#include "iris_core.h"
#include "iris_vpu_common.h"
#include "iris_vpu_register_defines.h"

#define WRAPPER_TZ_BASE_OFFS			0x000C0000
#define AON_BASE_OFFS				0x000E0000

#define CPU_IC_BASE_OFFS			(CPU_BASE_OFFS)

#define CPU_CS_A2HSOFTINTCLR			(CPU_CS_BASE_OFFS + 0x1C)
#define CLEAR_XTENSA2HOST_INTR			BIT(0)

#define CTRL_INIT				(CPU_CS_BASE_OFFS + 0x48)
#define CTRL_STATUS				(CPU_CS_BASE_OFFS + 0x4C)

#define CTRL_INIT_IDLE_MSG_BMSK			0x40000000
#define CTRL_ERROR_STATUS__M			0xfe
#define CTRL_STATUS_PC_READY			0x100

#define QTBL_INFO				(CPU_CS_BASE_OFFS + 0x50)
#define QTBL_ENABLE				BIT(0)

#define QTBL_ADDR				(CPU_CS_BASE_OFFS + 0x54)
#define CPU_CS_SCIACMDARG3			(CPU_CS_BASE_OFFS + 0x58)
#define SFR_ADDR				(CPU_CS_BASE_OFFS + 0x5C)
#define UC_REGION_ADDR				(CPU_CS_BASE_OFFS + 0x64)
#define UC_REGION_SIZE				(CPU_CS_BASE_OFFS + 0x68)

#define CPU_CS_H2XSOFTINTEN			(CPU_CS_BASE_OFFS + 0x148)
#define HOST2XTENSA_INTR_ENABLE			BIT(0)

#define CPU_CS_X2RPMH				(CPU_CS_BASE_OFFS + 0x168)
#define MSK_SIGNAL_FROM_TENSILICA		BIT(0)
#define MSK_CORE_POWER_ON			BIT(1)

#define CPU_IC_SOFTINT				(CPU_IC_BASE_OFFS + 0x150)
#define CPU_IC_SOFTINT_H2A_SHFT			0x0

#define WRAPPER_INTR_STATUS			(WRAPPER_BASE_OFFS + 0x0C)
#define WRAPPER_INTR_STATUS_A2HWD_BMSK		BIT(3)
#define WRAPPER_INTR_STATUS_A2H_BMSK		BIT(2)

#define WRAPPER_INTR_MASK			(WRAPPER_BASE_OFFS + 0x10)
#define WRAPPER_INTR_MASK_A2HWD_BMSK		BIT(3)
#define WRAPPER_INTR_MASK_A2HCPU_BMSK		BIT(2)

#define WRAPPER_DEBUG_BRIDGE_LPI_CONTROL	(WRAPPER_BASE_OFFS + 0x54)
#define WRAPPER_DEBUG_BRIDGE_LPI_STATUS		(WRAPPER_BASE_OFFS + 0x58)
#define WRAPPER_IRIS_CPU_NOC_LPI_CONTROL	(WRAPPER_BASE_OFFS + 0x5C)
#define WRAPPER_IRIS_CPU_NOC_LPI_STATUS		(WRAPPER_BASE_OFFS + 0x60)

#define WRAPPER_TZ_CPU_STATUS			(WRAPPER_TZ_BASE_OFFS + 0x10)
#define WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG		(WRAPPER_TZ_BASE_OFFS + 0x14)
#define CTL_AXI_CLK_HALT			BIT(0)
#define CTL_CLK_HALT				BIT(1)

#define WRAPPER_TZ_QNS4PDXFIFO_RESET		(WRAPPER_TZ_BASE_OFFS + 0x18)
#define RESET_HIGH				BIT(0)

#define AON_WRAPPER_MVP_NOC_LPI_CONTROL		(AON_BASE_OFFS)
#define REQ_POWER_DOWN_PREP			BIT(0)

#define AON_WRAPPER_MVP_NOC_LPI_STATUS		(AON_BASE_OFFS + 0x4)
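
/*
 * Unmask the firmware-to-host (A2H) and watchdog interrupt bits in the
 * wrapper so that the host can receive them.
 */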
static void iris_vpu_interrupt_init(struct iris_core *core)
{
	u32 mask_val;

	mask_val = readl(core->reg_base + WRAPPER_INTR_MASK);
	mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BMSK |
		      WRAPPER_INTR_MASK_A2HCPU_BMSK);
	writel(mask_val, core->reg_base + WRAPPER_INTR_MASK);
}
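
/*
 * Program the shared-memory region used for host/firmware communication:
 * the uncached region base address and size, the interface queue table
 * address, and the SFR buffer address if one is allocated.
 */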
static void iris_vpu_setup_ucregion_memory_map(struct iris_core *core)
{
	u32 queue_size, value;

	/* Iris hardware requires 4K queue alignment */
	queue_size = ALIGN(sizeof(struct iris_hfi_queue_table_header) +
			   (IFACEQ_QUEUE_SIZE * IFACEQ_NUMQ), SZ_4K);

	value = (u32)core->iface_q_table_daddr;
	writel(value, core->reg_base + UC_REGION_ADDR);

	/* Iris hardware requires 1M queue alignment */
	value = ALIGN(SFR_SIZE + queue_size, SZ_1M);
	writel(value, core->reg_base + UC_REGION_SIZE);

	value = (u32)core->iface_q_table_daddr;
	writel(value, core->reg_base + QTBL_ADDR);

	writel(QTBL_ENABLE, core->reg_base + QTBL_INFO);

	if (core->sfr_daddr) {
		value = (u32)core->sfr_daddr + core->iris_platform_data->core_arch;
		writel(value, core->reg_base + SFR_ADDR);
	}
}
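
/*
 * Start the firmware by writing CTRL_INIT, poll CTRL_STATUS until the
 * firmware responds (or reports an invalid uc_region setting), then
 * enable the host-to-firmware soft interrupt.
 */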
int iris_vpu_boot_firmware(struct iris_core *core)
{
	u32 ctrl_init = BIT(0), ctrl_status = 0, count = 0, max_tries = 1000;

	iris_vpu_setup_ucregion_memory_map(core);

	writel(ctrl_init, core->reg_base + CTRL_INIT);
	writel(0x1, core->reg_base + CPU_CS_SCIACMDARG3);

	while (!ctrl_status && count < max_tries) {
		ctrl_status = readl(core->reg_base + CTRL_STATUS);
		if ((ctrl_status & CTRL_ERROR_STATUS__M) == 0x4) {
			dev_err(core->dev, "invalid setting for uc_region\n");
			break;
		}

		usleep_range(50, 100);
		count++;
	}

	if (count >= max_tries) {
		dev_err(core->dev, "error booting up iris firmware\n");
		return -ETIME;
	}

	writel(HOST2XTENSA_INTR_ENABLE, core->reg_base + CPU_CS_H2XSOFTINTEN);
	writel(0x0, core->reg_base + CPU_CS_X2RPMH);

	return 0;
}

void iris_vpu_raise_interrupt(struct iris_core *core)
{
	writel(1 << CPU_IC_SOFTINT_H2A_SHFT, core->reg_base + CPU_IC_SOFTINT);
}
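
/*
 * Latch the wrapper interrupt status for later processing and ack the
 * firmware-to-host soft interrupt.
 */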
void iris_vpu_clear_interrupt(struct iris_core *core)
{
	u32 intr_status, mask;

	intr_status = readl(core->reg_base + WRAPPER_INTR_STATUS);
	mask = (WRAPPER_INTR_STATUS_A2H_BMSK |
		WRAPPER_INTR_STATUS_A2HWD_BMSK |
		CTRL_INIT_IDLE_MSG_BMSK);

	if (intr_status & mask)
		core->intr_status |= intr_status;

	writel(CLEAR_XTENSA2HOST_INTR, core->reg_base + CPU_CS_A2HSOFTINTCLR);
}
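
/* Return -ETIME if the interrupt status reports a firmware watchdog interrupt. */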
int iris_vpu_watchdog(struct iris_core *core, u32 intr_status)
{
	if (intr_status & WRAPPER_INTR_STATUS_A2HWD_BMSK) {
		dev_err(core->dev, "received watchdog interrupt\n");
		return -ETIME;
	}

	return 0;
}
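
/*
 * Check that the firmware is idle and in WFI, then ask it to prepare for
 * power collapse and wait for PC_READY.  Returns -EAGAIN if power collapse
 * has to be skipped.
 */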
int iris_vpu_prepare_pc(struct iris_core *core)
{
	u32 wfi_status, idle_status, pc_ready;
	u32 ctrl_status, val = 0;
	int ret;

	ctrl_status = readl(core->reg_base + CTRL_STATUS);
	pc_ready = ctrl_status & CTRL_STATUS_PC_READY;
	idle_status = ctrl_status & BIT(30);
	if (pc_ready)
		return 0;

	wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
	wfi_status &= BIT(0);
	if (!wfi_status || !idle_status)
		goto skip_power_off;

	ret = core->hfi_ops->sys_pc_prep(core);
	if (ret)
		goto skip_power_off;

	ret = readl_poll_timeout(core->reg_base + CTRL_STATUS, val,
				 val & CTRL_STATUS_PC_READY, 250, 2500);
	if (ret)
		goto skip_power_off;

	ret = readl_poll_timeout(core->reg_base + WRAPPER_TZ_CPU_STATUS,
				 val, val & BIT(0), 250, 2500);
	if (ret)
		goto skip_power_off;

	return 0;

skip_power_off:
	ctrl_status = readl(core->reg_base + CTRL_STATUS);
	wfi_status = readl(core->reg_base + WRAPPER_TZ_CPU_STATUS);
	wfi_status &= BIT(0);
	dev_err(core->dev, "skip power collapse, wfi=%#x, idle=%#x, pcr=%#x, ctrl=%#x\n",
		wfi_status, idle_status, pc_ready, ctrl_status);

	return -EAGAIN;
}
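
/*
 * Power off the controller: mask firmware signalling towards RPMh, run the
 * NOC and debug-bridge low-power handshakes, halt the TZ AXI clocks and
 * pulse the QNS4PDXFIFO reset, then disable the controller clocks and power
 * domain.  Clocks and power domain are torn down even if a handshake times
 * out.
 */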
static int iris_vpu_power_off_controller(struct iris_core *core)
{
	u32 val = 0;
	int ret;

	writel(MSK_SIGNAL_FROM_TENSILICA | MSK_CORE_POWER_ON, core->reg_base + CPU_CS_X2RPMH);

	writel(REQ_POWER_DOWN_PREP, core->reg_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);

	ret = readl_poll_timeout(core->reg_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
				 val, val & BIT(0), 200, 2000);
	if (ret)
		goto disable_power;

	writel(REQ_POWER_DOWN_PREP, core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_CONTROL);

	ret = readl_poll_timeout(core->reg_base + WRAPPER_IRIS_CPU_NOC_LPI_STATUS,
				 val, val & BIT(0), 200, 2000);
	if (ret)
		goto disable_power;

	writel(0x0, core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL);

	ret = readl_poll_timeout(core->reg_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS,
				 val, val == 0, 200, 2000);
	if (ret)
		goto disable_power;

	writel(CTL_AXI_CLK_HALT | CTL_CLK_HALT,
	       core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);
	writel(RESET_HIGH, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
	writel(0x0, core->reg_base + WRAPPER_TZ_QNS4PDXFIFO_RESET);
	writel(0x0, core->reg_base + WRAPPER_TZ_CTL_AXI_CLOCK_CONFIG);

disable_power:
	iris_disable_unprepare_clock(core, IRIS_CTRL_CLK);
	iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);

	return 0;
}
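
/* Return the hardware power domain to software control, then power it down. */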
void iris_vpu_power_off_hw(struct iris_core *core)
{
	dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], false);
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
	iris_disable_unprepare_clock(core, IRIS_HW_CLK);
}
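
/*
 * Full power-off: drop the OPP vote, power off the hardware core and the
 * controller, release the interconnect vote and, unless a watchdog fired,
 * disable the IRQ line.
 */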
void iris_vpu_power_off(struct iris_core *core)
{
	dev_pm_opp_set_rate(core->dev, 0);
	core->iris_platform_data->vpu_ops->power_off_hw(core);
	iris_vpu_power_off_controller(core);
	iris_unset_icc_bw(core);

	if (!iris_vpu_watchdog(core, core->intr_status))
		disable_irq_nosync(core->irq);
}
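
/*
 * Power on the controller: enable its power domain, trigger the platform
 * resets, then enable the AXI and controller clocks.
 */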
static int iris_vpu_power_on_controller(struct iris_core *core)
{
	u32 rst_tbl_size = core->iris_platform_data->clk_rst_tbl_size;
	int ret;

	ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);
	if (ret)
		return ret;

	ret = reset_control_bulk_reset(rst_tbl_size, core->resets);
	if (ret)
		goto err_disable_power;

	ret = iris_prepare_enable_clock(core, IRIS_AXI_CLK);
	if (ret)
		goto err_disable_power;

	ret = iris_prepare_enable_clock(core, IRIS_CTRL_CLK);
	if (ret)
		goto err_disable_clock;

	return 0;

err_disable_clock:
	iris_disable_unprepare_clock(core, IRIS_AXI_CLK);
err_disable_power:
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_CTRL_POWER_DOMAIN]);

	return ret;
}
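
/*
 * Power on the hardware core: enable its power domain and clock, then hand
 * the domain over to hardware control (HW mode).
 */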
static int iris_vpu_power_on_hw(struct iris_core *core)
{
	int ret;

	ret = iris_enable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);
	if (ret)
		return ret;

	ret = iris_prepare_enable_clock(core, IRIS_HW_CLK);
	if (ret)
		goto err_disable_power;

	ret = dev_pm_genpd_set_hwmode(core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN], true);
	if (ret)
		goto err_disable_clock;

	return 0;

err_disable_clock:
	iris_disable_unprepare_clock(core, IRIS_HW_CLK);
err_disable_power:
	iris_disable_power_domains(core, core->pmdomain_tbl->pd_devs[IRIS_HW_POWER_DOMAIN]);

	return ret;
}
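
/*
 * Full power-on: vote interconnect bandwidth, power on the controller and
 * hardware core, restore the last requested clock rate (defaulting to the
 * maximum), apply the platform preset registers and re-enable interrupts.
 */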
int iris_vpu_power_on(struct iris_core *core)
{
	u32 freq;
	int ret;

	ret = iris_set_icc_bw(core, INT_MAX);
	if (ret)
		goto err;

	ret = iris_vpu_power_on_controller(core);
	if (ret)
		goto err_unvote_icc;

	ret = iris_vpu_power_on_hw(core);
	if (ret)
		goto err_power_off_ctrl;

	freq = core->power.clk_freq ? core->power.clk_freq :
				      (u32)ULONG_MAX;

	dev_pm_opp_set_rate(core->dev, freq);

	core->iris_platform_data->set_preset_registers(core);

	iris_vpu_interrupt_init(core);
	core->intr_status = 0;
	enable_irq(core->irq);

	return 0;

err_power_off_ctrl:
	iris_vpu_power_off_controller(core);
err_unvote_icc:
	iris_unset_icc_bw(core);
err:
	dev_err(core->dev, "power on failed\n");

	return ret;
}