// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/xarray.h>
#include <trace/events/amdxdna.h>

#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
#include "amdxdna_pm.h"

#define MAX_HWCTX_ID		255
#define MAX_ARG_COUNT		4095

struct amdxdna_fence {
	struct dma_fence	base;
	spinlock_t		lock; /* for base */
	struct amdxdna_hwctx	*hwctx;
};

static const char *amdxdna_fence_get_driver_name(struct dma_fence *fence)
{
	return KBUILD_MODNAME;
}

static const char *amdxdna_fence_get_timeline_name(struct dma_fence *fence)
{
	struct amdxdna_fence *xdna_fence;

	xdna_fence = container_of(fence, struct amdxdna_fence, base);

	return xdna_fence->hwctx->name;
}

static const struct dma_fence_ops fence_ops = {
	.get_driver_name = amdxdna_fence_get_driver_name,
	.get_timeline_name = amdxdna_fence_get_timeline_name,
};

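/* Create a fence on the hwctx's timeline; returns NULL on allocation failure. */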
static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
{
	struct amdxdna_fence *fence;

	fence = kzalloc_obj(*fence);
	if (!fence)
		return NULL;

	fence->hwctx = hwctx;
	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &fence_ops, &fence->lock, hwctx->id, 0);
	return &fence->base;
}

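/*
 * Wait for all SRCU read-side critical sections (i.e. in-flight command
 * submissions) to finish, then tear down the hardware context and free it.
 */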
static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
				      struct srcu_struct *ss)
{
	struct amdxdna_dev *xdna = hwctx->client->xdna;

	synchronize_srcu(ss);

	/* At this point, the user can no longer submit new commands */
	xdna->dev_info->ops->hwctx_fini(hwctx);

	kfree(hwctx->name);
	kfree(hwctx);
}

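/*
 * Invoke @walk on every hardware context of @client under the SRCU read
 * lock. Iteration stops at the first non-zero return value, which is then
 * propagated to the caller.
 */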
int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
		       int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
{
	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;
	int ret = 0, idx;

	idx = srcu_read_lock(&client->hwctx_srcu);
	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
		ret = walk(hwctx, arg);
		if (ret)
			break;
	}
	srcu_read_unlock(&client->hwctx_srcu, idx);

	return ret;
}

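/*
 * Return a pointer to the payload that follows the CU masks in a command
 * BO, or NULL if the BO cannot be mapped. If @size is non-NULL, it is set
 * to the payload size in bytes, or to 0 when the count field in the header
 * is inconsistent with the BO size.
 */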
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
{
	struct amdxdna_cmd *cmd = amdxdna_gem_vmap(abo);
	u32 num_masks, count;

	if (!cmd)
		return NULL;

	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
		num_masks = 0;
	else
		num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);

	if (size) {
		count = FIELD_GET(AMDXDNA_CMD_COUNT, cmd->header);
		if (unlikely(count <= num_masks ||
			     count * sizeof(u32) +
			     offsetof(struct amdxdna_cmd, data[0]) >
			     abo->mem.size)) {
			*size = 0;
			return NULL;
		}
		*size = (count - num_masks) * sizeof(u32);
	}
	return &cmd->data[num_masks];
}

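/*
 * Return the index of the first CU selected in the command's CU masks,
 * or INVALID_CU_IDX for command chains and commands with no CU selected.
 */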
u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
{
	struct amdxdna_cmd *cmd = amdxdna_gem_vmap(abo);
	u32 num_masks, i;
	u32 *cu_mask;

	if (!cmd)
		return INVALID_CU_IDX;

	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
		return INVALID_CU_IDX;

	num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
	cu_mask = cmd->data;
	for (i = 0; i < num_masks; i++) {
		if (cu_mask[i])
			return ffs(cu_mask[i]) - 1;
	}

	return INVALID_CU_IDX;
}

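/*
 * Mark a command as failed by writing @error_state into the command
 * header. For a command chain, record which command in the chain failed
 * and write the error data into the first chained command BO instead.
 */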
int amdxdna_cmd_set_error(struct amdxdna_gem_obj *abo,
			  struct amdxdna_sched_job *job, u32 cmd_idx,
			  enum ert_cmd_state error_state,
			  void *err_data, size_t size)
{
	struct amdxdna_client *client = job->hwctx->client;
	struct amdxdna_cmd *cmd = amdxdna_gem_vmap(abo);
	struct amdxdna_cmd_chain *cc = NULL;

	if (!cmd)
		return -ENOMEM;

	cmd->header &= ~AMDXDNA_CMD_STATE;
	cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, error_state);

	if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN) {
		cc = amdxdna_cmd_get_payload(abo, NULL);
		cc->error_index = (cmd_idx < cc->command_count) ? cmd_idx : 0;
		abo = amdxdna_gem_get_obj(client, cc->data[0], AMDXDNA_BO_SHARE);
		if (!abo)
			return -EINVAL;
		cmd = amdxdna_gem_vmap(abo);
		if (!cmd) {
			/* Drop the reference taken by amdxdna_gem_get_obj() */
			amdxdna_gem_put_obj(abo);
			return -ENOMEM;
		}
	}

	memset(cmd->data, 0xff, abo->mem.size - sizeof(*cmd));
	if (err_data)
		memcpy(cmd->data, err_data, min(size, abo->mem.size - sizeof(*cmd)));

	if (cc)
		amdxdna_gem_put_obj(abo);

	return 0;
}

/*
 * This should be called in close() and remove(). DO NOT call in other syscalls.
 * This guarantees that the hwctx and its resources are released even if the
 * user never calls amdxdna_drm_destroy_hwctx_ioctl.
 */
void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
{
	struct amdxdna_hwctx *hwctx;
	unsigned long hwctx_id;

	amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
		XDNA_DBG(client->xdna, "PID %d close HW context %d",
			 client->pid, hwctx->id);
		xa_erase(&client->hwctx_xa, hwctx->id);
		amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
	}
}

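/*
 * Create a hardware context from the QoS and sizing parameters passed in
 * by user space, assign it a per-client ID, and return that ID plus the
 * context's sync object handle to the caller.
 */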
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_create_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	int ret, idx;

	if (args->ext || args->ext_flags)
		return -EINVAL;

	hwctx = kzalloc_obj(*hwctx);
	if (!hwctx)
		return -ENOMEM;

	if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
		XDNA_ERR(xdna, "Access QoS info failed");
		kfree(hwctx);
		return -EFAULT;
	}

	hwctx->client = client;
	hwctx->fw_ctx_id = -1;
	hwctx->num_tiles = args->num_tiles;
	hwctx->mem_size = args->mem_size;
	hwctx->max_opc = args->max_opc;

	guard(mutex)(&xdna->dev_lock);

	if (!drm_dev_enter(dev, &idx)) {
		ret = -ENODEV;
		goto free_hwctx;
	}

	ret = xdna->dev_info->ops->hwctx_init(hwctx);
	if (ret) {
		XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
		goto dev_exit;
	}

	hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id);
	if (!hwctx->name) {
		ret = -ENOMEM;
		goto fini_hwctx;
	}

	ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
			      XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
			      &client->next_hwctxid, GFP_KERNEL);
	if (ret < 0) {
		XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
		goto free_name;
	}

	args->handle = hwctx->id;
	args->syncobj_handle = hwctx->syncobj_hdl;

	atomic64_set(&hwctx->job_submit_cnt, 0);
	atomic64_set(&hwctx->job_free_cnt, 0);
	XDNA_DBG(xdna, "PID %d create HW context %d, ret %d", client->pid, args->handle, ret);
	drm_dev_exit(idx);
	return 0;

free_name:
	kfree(hwctx->name);
fini_hwctx:
	xdna->dev_info->ops->hwctx_fini(hwctx);
dev_exit:
	drm_dev_exit(idx);
free_hwctx:
	kfree(hwctx);
	return ret;
}

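/*
 * Remove the hardware context identified by args->handle from the client's
 * context table and destroy it, synchronizing against in-flight command
 * submissions via SRCU.
 */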
int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_destroy_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	int ret = 0, idx;

	if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
		return -EINVAL;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	mutex_lock(&xdna->dev_lock);
	hwctx = xa_erase(&client->hwctx_xa, args->handle);
	if (!hwctx) {
		ret = -EINVAL;
		XDNA_DBG(xdna, "PID %d HW context %d does not exist",
			 client->pid, args->handle);
		goto out;
	}

	/*
	 * Jobs that were already pushed are handled by the DRM scheduler
	 * during destroy. SRCU synchronizes with the exec command ioctls.
	 */
	amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);

	XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
out:
	mutex_unlock(&xdna->dev_lock);
	drm_dev_exit(idx);
	return ret;
}

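/*
 * Configure a hardware context. Depending on args->param_type, param_val
 * is either a pointer to a user buffer (copied in here) or a plain value
 * that is forwarded to the device layer's hwctx_config callback.
 */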
int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_config_hwctx *args = data;
	struct amdxdna_dev *xdna = to_xdna_dev(dev);
	struct amdxdna_hwctx *hwctx;
	u32 buf_size;
	void *buf;
	int ret;
	u64 val;

	if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
		return -EINVAL;

	if (!xdna->dev_info->ops->hwctx_config)
		return -EOPNOTSUPP;

	val = args->param_val;
	buf_size = args->param_val_size;

	switch (args->param_type) {
	case DRM_AMDXDNA_HWCTX_CONFIG_CU:
		/* For this type, param_val is a user pointer */
		if (buf_size > PAGE_SIZE) {
			XDNA_ERR(xdna, "Config CU param buffer too large");
			return -E2BIG;
		}

		/* The hwctx needs to keep the buffer */
		buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		if (copy_from_user(buf, u64_to_user_ptr(val), buf_size)) {
			kfree(buf);
			return -EFAULT;
		}

		break;
	case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
	case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
		/* For these types, param_val is a plain value */
		buf = NULL;
		buf_size = 0;
		break;
	default:
		XDNA_DBG(xdna, "Unknown HW context config type %d", args->param_type);
		return -EINVAL;
	}

	guard(mutex)(&xdna->dev_lock);
	hwctx = xa_load(&client->hwctx_xa, args->handle);
	if (!hwctx) {
		XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
		ret = -EINVAL;
		goto free_buf;
	}

	ret = xdna->dev_info->ops->hwctx_config(hwctx, args->param_type, val, buf, buf_size);

free_buf:
	kfree(buf);
	return ret;
}

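/*
 * Sync a debug BO with its assigned hardware context through the device
 * layer. The handle is resolved via the client's GEM namespace, and the BO
 * must already be assigned to a live context.
 */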
int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_hwctx *hwctx;
	struct amdxdna_gem_obj *abo;
	struct drm_gem_object *gobj;
	int ret;

	if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
		return -EOPNOTSUPP;

	gobj = drm_gem_object_lookup(client->filp, debug_bo_hdl);
	if (!gobj)
		return -EINVAL;

	abo = to_xdna_obj(gobj);
	guard(mutex)(&xdna->dev_lock);
	hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
	if (!hwctx) {
		ret = -EINVAL;
		goto put_obj;
	}

	ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);

put_obj:
	drm_gem_object_put(gobj);
	return ret;
}

static void
amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
{
	int i;

	for (i = 0; i < job->bo_cnt; i++) {
		if (!job->bos[i])
			break;
		drm_gem_object_put(job->bos[i]);
	}
}

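/*
 * Look up every argument BO handle, pin its backing memory if not already
 * pinned, and store the GEM objects in job->bos. On failure, the references
 * taken so far are dropped.
 */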
static int
amdxdna_arg_bos_lookup(struct amdxdna_client *client,
		       struct amdxdna_sched_job *job,
		       u32 *bo_hdls, u32 bo_cnt)
{
	struct drm_gem_object *gobj;
	int i, ret;

	job->bo_cnt = bo_cnt;
	for (i = 0; i < job->bo_cnt; i++) {
		struct amdxdna_gem_obj *abo;

		gobj = drm_gem_object_lookup(client->filp, bo_hdls[i]);
		if (!gobj) {
			ret = -ENOENT;
			goto put_shmem_bo;
		}
		abo = to_xdna_obj(gobj);

		mutex_lock(&abo->lock);
		if (abo->pinned) {
			mutex_unlock(&abo->lock);
			job->bos[i] = gobj;
			continue;
		}

		ret = amdxdna_gem_pin_nolock(abo);
		if (ret) {
			mutex_unlock(&abo->lock);
			drm_gem_object_put(gobj);
			goto put_shmem_bo;
		}
		abo->pinned = true;
		mutex_unlock(&abo->lock);

		job->bos[i] = gobj;
	}

	return 0;

put_shmem_bo:
	amdxdna_arg_bos_put(job);
	return ret;
}

void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
{
	trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
	amdxdna_pm_suspend_put(job->hwctx->client->xdna);
	amdxdna_arg_bos_put(job);
	amdxdna_gem_put_obj(job->cmd_bo);
	dma_fence_put(job->fence);
}

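/*
 * Build a scheduler job from the command BO and argument BOs, take a PM
 * reference, and hand the job to the device layer for submission under the
 * SRCU read lock. On success the job's sequence number is returned via @seq.
 */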
int amdxdna_cmd_submit(struct amdxdna_client *client,
		       struct amdxdna_drv_cmd *drv_cmd,
		       u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
		       u32 hwctx_hdl, u64 *seq)
{
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_sched_job *job;
	struct amdxdna_hwctx *hwctx;
	int ret, idx;

	XDNA_DBG(xdna, "Command BO hdl %d, Arg BO count %d", cmd_bo_hdl, arg_bo_cnt);
	job = kzalloc_flex(*job, bos, arg_bo_cnt);
	if (!job)
		return -ENOMEM;

	job->drv_cmd = drv_cmd;

	if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
		job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_SHARE);
		if (!job->cmd_bo) {
			XDNA_ERR(xdna, "Failed to get cmd bo from %d", cmd_bo_hdl);
			ret = -EINVAL;
			goto free_job;
		}
	}

	ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
	if (ret) {
		XDNA_ERR(xdna, "Argument BOs lookup failed, ret %d", ret);
		goto cmd_put;
	}

	ret = amdxdna_pm_resume_get(xdna);
	if (ret) {
		XDNA_ERR(xdna, "Resume failed, ret %d", ret);
		goto put_bos;
	}

	idx = srcu_read_lock(&client->hwctx_srcu);
	hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
	if (!hwctx) {
		XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
			 client->pid, hwctx_hdl);
		ret = -EINVAL;
		goto unlock_srcu;
	}

	job->hwctx = hwctx;
	job->mm = current->mm;

	job->fence = amdxdna_fence_create(hwctx);
	if (!job->fence) {
		XDNA_ERR(xdna, "Failed to create fence");
		ret = -ENOMEM;
		goto unlock_srcu;
	}
	kref_init(&job->refcnt);

	ret = xdna->dev_info->ops->cmd_submit(hwctx, job, seq);
	if (ret)
		goto put_fence;

	/*
	 * amdxdna_hwctx_destroy_rcu() releases the hwctx and its associated
	 * resources only after synchronize_srcu(). The submitted job is
	 * handled by the queue (e.g. the DRM scheduler) in the device layer,
	 * so it is safe to unlock SRCU here.
	 */
	srcu_read_unlock(&client->hwctx_srcu, idx);
	trace_amdxdna_debug_point(hwctx->name, *seq, "job pushed");

	return 0;

put_fence:
	dma_fence_put(job->fence);
unlock_srcu:
	srcu_read_unlock(&client->hwctx_srcu, idx);
	amdxdna_pm_suspend_put(xdna);
put_bos:
	amdxdna_arg_bos_put(job);
cmd_put:
	amdxdna_gem_put_obj(job->cmd_bo);
free_job:
	kfree(job);
	return ret;
}

/*
 * The submit command ioctl submits a command to firmware. One firmware
 * command may contain multiple command BOs that are processed as a whole.
 * The returned command sequence number can be used with the wait command
 * ioctl.
 */
static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
				      struct amdxdna_drm_exec_cmd *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	u32 *arg_bo_hdls = NULL;
	u32 cmd_bo_hdl;
	int ret;

	if (args->arg_count > MAX_ARG_COUNT) {
		XDNA_ERR(xdna, "Invalid arg bo count %d", args->arg_count);
		return -EINVAL;
	}

	/* Only support single command for now. */
	if (args->cmd_count != 1) {
		XDNA_ERR(xdna, "Invalid cmd bo count %d", args->cmd_count);
		return -EINVAL;
	}

	cmd_bo_hdl = (u32)args->cmd_handles;
	if (args->arg_count) {
		arg_bo_hdls = kcalloc(args->arg_count, sizeof(u32), GFP_KERNEL);
		if (!arg_bo_hdls)
			return -ENOMEM;
		ret = copy_from_user(arg_bo_hdls, u64_to_user_ptr(args->args),
				     args->arg_count * sizeof(u32));
		if (ret) {
			ret = -EFAULT;
			goto free_cmd_bo_hdls;
		}
	}

	ret = amdxdna_cmd_submit(client, NULL, cmd_bo_hdl, arg_bo_hdls,
				 args->arg_count, args->hwctx, &args->seq);
	if (ret)
		XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);

free_cmd_bo_hdls:
	kfree(arg_bo_hdls);
	if (!ret)
		XDNA_DBG(xdna, "Pushed cmd %lld to scheduler", args->seq);
	return ret;
}

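/*
 * Ioctl entry point for command submission. Dispatches on args->type;
 * only AMDXDNA_CMD_SUBMIT_EXEC_BUF is currently supported.
 */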
int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdxdna_client *client = filp->driver_priv;
	struct amdxdna_drm_exec_cmd *args = data;

	if (args->ext || args->ext_flags)
		return -EINVAL;

	switch (args->type) {
	case AMDXDNA_CMD_SUBMIT_EXEC_BUF:
		return amdxdna_drm_submit_execbuf(client, args);
	}

	XDNA_ERR(client->xdna, "Invalid command type %d", args->type);
	return -EINVAL;
}