// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "vpu_jsm_api.h"

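/* Map a JSM message type to its enum name, for use in log messages. */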
const char *ivpu_jsm_msg_type_to_str(enum vpu_ipc_msg_type type)
{
#define IVPU_CASE_TO_STR(x) case x: return #x
	switch (type) {
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNKNOWN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_REGISTER_DB);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_ENGINE_RESUME);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_STATE_DUMP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_JOB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_RESET_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_ENGINE_PREEMPT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_REGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_UNREGISTER_DB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_QUERY_ENGINE_HB_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_GET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_POWER_LEVEL_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_SET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CONFIG_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_TRACE_GET_NAME_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SSID_RELEASE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_START_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_BLOB_DEINIT_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DYNDBG_CONTROL_RSP);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_PWR_D0I3_ENTER_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_ENABLE_DONE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE);
	IVPU_CASE_TO_STR(VPU_JSM_MSG_DCT_DISABLE_DONE);
	}
#undef IVPU_CASE_TO_STR

	return "Unknown JSM message type";
}

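/* Register doorbell @db_id with its job queue for context @ctx_id and wait for the DONE reply. */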
int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
			 u64 jobq_base, u32 jobq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.register_db.db_idx = db_id;
	req.payload.register_db.jobq_base = jobq_base;
	req.payload.register_db.jobq_size = jobq_size;
	req.payload.register_db.host_ssid = ctx_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

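/* Unregister doorbell @db_id and wait for the DONE reply. */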
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.unregister_db.db_idx = db_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);

	return ret;
}

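/* Read the heartbeat counter of @engine; only the compute engine is supported. */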
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.query_engine_hb.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_err_ratelimited(vdev, "Failed to get heartbeat from engine %d: %d\n",
				     engine, ret);
		return ret;
	}

	*heartbeat = resp.payload.query_engine_hb_done.heartbeat;
	return ret;
}

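/* Reset the compute engine and wait for the firmware to acknowledge. */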
int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.engine_reset.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to reset engine %d: %d\n", engine, ret);

	return ret;
}

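/* Preempt work on the compute engine, tagging the request with @preempt_id. */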
int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.engine_preempt.engine_idx = engine;
	req.payload.engine_preempt.preempt_id = preempt_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to preempt engine %d: %d\n", engine, ret);

	return ret;
}

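/* Send a dynamic debug command string to the firmware over the general command channel. */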
int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
	struct vpu_jsm_msg resp;
	int ret;

	strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
				    VPU_IPC_CHAN_GEN_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to send command \"%s\": ret %d\n",
				      command, ret);

	return ret;
}

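/* Query which trace destinations and HW components the firmware supports. */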
int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
				  u64 *trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
	struct vpu_jsm_msg resp;
	int ret;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get trace capability: %d\n", ret);
		return ret;
	}

	*trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
	*trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;

	return ret;
}

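/* Set the firmware trace level, destination mask and HW component mask. */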
int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
			      u64 trace_hw_component_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.trace_config.trace_level = trace_level;
	req.payload.trace_config.trace_destination_mask = trace_destination_mask;
	req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set config: %d\n", ret);

	return ret;
}

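/* Notify the firmware that host context (SSID) @host_ssid is being released. */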
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.ssid_release.host_ssid = host_ssid;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);

	return ret;
}

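/* Request D0i3 entry and wait for the HW to become idle. No-op when the D0i3 message is disabled by a workaround. */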
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_PWR_D0I3_ENTER };
	struct vpu_jsm_msg resp;
	int ret;

	if (IVPU_WA(disable_d0i3_msg))
		return 0;

	req.payload.pwr_d0i3_enter.send_response = 1;

	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_PWR_D0I3_ENTER_DONE, &resp,
					     VPU_IPC_CHAN_GEN_CMD, vdev->timeout.d0i3_entry_msg);
	if (ret)
		return ret;

	return ivpu_hw_wait_for_idle(vdev);
}

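/* HWS (hardware scheduler): create a command queue for the given context, group and engine. */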
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
			     u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_create_cmdq.host_ssid = ctx_id;
	req.payload.hws_create_cmdq.process_id = pid;
	req.payload.hws_create_cmdq.engine_idx = engine;
	req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
	req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
	req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
	req.payload.hws_create_cmdq.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);

	return ret;
}

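/* HWS: destroy command queue @cmdq_id owned by context @ctx_id. */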
int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
	req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);

	return ret;
}

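/* HWS: register doorbell @db_id and bind it to command queue @cmdq_id. */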
int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
			     u64 cmdq_base, u32 cmdq_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
	struct vpu_jsm_msg resp;
	int ret = 0;

	req.payload.hws_register_db.db_id = db_id;
	req.payload.hws_register_db.host_ssid = ctx_id;
	req.payload.hws_register_db.cmdq_id = cmdq_id;
	req.payload.hws_register_db.cmdq_base = cmdq_base;
	req.payload.hws_register_db.cmdq_size = cmdq_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);

	return ret;
}

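/* HWS: resume the compute engine. */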
int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
	struct vpu_jsm_msg resp;
	int ret;

	if (engine != VPU_ENGINE_COMPUTE)
		return -EINVAL;

	req.payload.hws_resume_engine.engine_idx = engine;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);

	return ret;
}

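/* HWS: set scheduling properties for a command queue. Quantum and grace period values are fixed defaults. */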
int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
					      u32 priority)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
	req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
	req.payload.hws_set_context_sched_properties.priority_band = priority;
	req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
	req.payload.hws_set_context_sched_properties.in_process_priority = 0;
	req.payload.hws_set_context_sched_properties.context_quantum = 20000;
	req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
	req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);

	return ret;
}

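/* HWS: assign a scheduling log buffer for the given engine and host context. */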
int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
				    u64 vpu_log_buffer_va)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
	req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
	req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
	req.payload.hws_set_scheduling_log.notify_index = 0;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);

	return ret;
}

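/* HWS: program per-priority-band grace periods and process quanta from the HW defaults. */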
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
	struct vpu_jsm_msg resp;
	struct ivpu_hw_info *hw = vdev->hw;
	struct vpu_ipc_msg_payload_hws_priority_band_setup *setup =
		&req.payload.hws_priority_band_setup;
	int ret;

	for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
	     band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
		setup->grace_period[band] = hw->hws.grace_period[band];
		setup->process_grace_period[band] = hw->hws.process_grace_period[band];
		setup->process_quantum[band] = hw->hws.process_quantum[band];
	}
	setup->normal_band_percentage = 10;

	ret = ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
					     &resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);

	return ret;
}

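/* Start the metric streamer for the groups in @metric_group_mask, sampling into the supplied buffer. */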
int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
				   u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.sampling_rate = sampling_rate;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);
		return ret;
	}

	return ret;
}

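/* Stop the metric streamer for the groups in @metric_group_mask. */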
int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret)
		ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);

	return ret;
}

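/* Fetch accumulated metric data into the supplied buffer and report the number of bytes written; an overflow reported by the firmware yields -EOVERFLOW. */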
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
				    u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_update.buffer_addr = buffer_addr;
	req.payload.metric_streamer_update.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
		return ret;
	}

	if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
		ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
				      resp.payload.metric_streamer_done.bytes_written, buffer_size);
		return -EOVERFLOW;
	}

	*bytes_written = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

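/* Query metric streamer metadata: the per-sample size and the size of the info written to the supplied buffer. */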
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
				  u64 buffer_size, u32 *sample_size, u64 *info_size)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
	struct vpu_jsm_msg resp;
	int ret;

	req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
	req.payload.metric_streamer_start.buffer_addr = buffer_addr;
	req.payload.metric_streamer_start.buffer_size = buffer_size;

	ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
				    VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
		return ret;
	}

	if (!resp.payload.metric_streamer_done.sample_size) {
		ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
		return -EBADMSG;
	}

	if (sample_size)
		*sample_size = resp.payload.metric_streamer_done.sample_size;
	if (info_size)
		*info_size = resp.payload.metric_streamer_done.bytes_written;

	return ret;
}

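/* Enable duty cycle throttling (DCT) with the given active/inactive times in microseconds. */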
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
	struct vpu_jsm_msg resp;

	req.payload.pwr_dct_control.dct_active_us = active_us;
	req.payload.pwr_dct_control.dct_inactive_us = inactive_us;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

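/* Disable duty cycle throttling (DCT). */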
int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
	struct vpu_jsm_msg resp;

	return ivpu_ipc_send_receive_internal(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE, &resp,
					      VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
}

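/* Trigger a firmware state dump; no response payload is consumed here. */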
int ivpu_jsm_state_dump(struct ivpu_device *vdev)
{
	struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_STATE_DUMP };

	return ivpu_ipc_send_and_wait(vdev, &req, VPU_IPC_CHAN_ASYNC_CMD,
				      vdev->timeout.state_dump_msg);
}