1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
4 */
5
6 #include <drm/amdxdna_accel.h>
7 #include <drm/drm_device.h>
8 #include <drm/drm_drv.h>
9 #include <drm/drm_gem_shmem_helper.h>
10 #include <drm/drm_managed.h>
11 #include <drm/drm_print.h>
12 #include <drm/gpu_scheduler.h>
13 #include <linux/amd-pmf-io.h>
14 #include <linux/cleanup.h>
15 #include <linux/errno.h>
16 #include <linux/firmware.h>
17 #include <linux/iommu.h>
18 #include <linux/iopoll.h>
19 #include <linux/pci.h>
20 #include <linux/xarray.h>
21 #include <asm/hypervisor.h>
22
23 #include "aie2_msg_priv.h"
24 #include "aie2_pci.h"
25 #include "aie2_solver.h"
26 #include "amdxdna_ctx.h"
27 #include "amdxdna_gem.h"
28 #include "amdxdna_mailbox.h"
29 #include "amdxdna_pci_drv.h"
30 #include "amdxdna_pm.h"
31
/*
 * Clamp on how many AIE columns the driver will use; applied against the
 * firmware-reported column count in aie2_mgmt_fw_query().
 * NOTE(review): variable is declared int but exposed as a uint module
 * parameter — confirm the intended type.
 */
static int aie2_max_col = XRS_MAX_COL;
module_param(aie2_max_col, uint, 0600);
MODULE_PARM_DESC(aie2_max_col, "Maximum column could be used");

/*
 * Candidate firmware image names, tried in order during aie2_init();
 * the first image that loads successfully is used.
 */
static char *npu_fw[] = {
	"npu_7.sbin",
	"npu.sbin"
};
40
/*
 * The management mailbox channel is allocated by firmware.
 * The related register and ring buffer information is on SRAM BAR.
 * This struct is the register layout.
 */
#define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
struct mgmt_mbox_chann_info {
	__u32	x2i_tail;	/* host-to-fw tail pointer register offset */
	__u32	x2i_head;	/* host-to-fw head pointer register offset */
	__u32	x2i_buf;	/* host-to-fw ring buffer offset in SRAM */
	__u32	x2i_buf_sz;	/* host-to-fw ring buffer size */
	__u32	i2x_tail;	/* fw-to-host tail pointer register offset */
	__u32	i2x_head;	/* fw-to-host head pointer register offset */
	__u32	i2x_buf;	/* fw-to-host ring buffer offset in SRAM */
	__u32	i2x_buf_sz;	/* fw-to-host ring buffer size */
	__u32	magic;		/* must read back as MGMT_MBOX_MAGIC */
	__u32	msi_id;		/* MSI-X vector index of the mgmt channel */
	__u32	prot_major;	/* mailbox protocol major version */
	__u32	prot_minor;	/* mailbox protocol minor version */
	__u32	rsvd[4];
};
62
/*
 * Match the firmware protocol version against the driver's support table
 * and accumulate the feature bits of every matching entry.  Returns 0 when
 * at least one table entry covers (fw_major, fw_minor), -EOPNOTSUPP otherwise.
 */
static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
{
	const struct aie2_fw_feature_tbl *entry;
	bool matched = false;

	for (entry = ndev->priv->fw_feature_tbl; entry->major; entry++) {
		if (entry->major != fw_major)
			continue;
		if (fw_minor < entry->min_minor)
			continue;
		/* max_minor of 0 means "no upper bound" */
		if (entry->max_minor > 0 && fw_minor > entry->max_minor)
			continue;

		/*
		 * Keep scanning: several entries may match and each one
		 * contributes its feature bits.
		 */
		ndev->feature_mask |= entry->features;
		matched = true;
	}

	if (!matched)
		return -EOPNOTSUPP;

	return 0;
}
84
/* Dump the decoded management mailbox channel layout at debug level. */
static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
{
	struct amdxdna_dev *xdna = ndev->xdna;

	XDNA_DBG(xdna, "i2x tail 0x%x", ndev->mgmt_i2x.mb_tail_ptr_reg);
	XDNA_DBG(xdna, "i2x head 0x%x", ndev->mgmt_i2x.mb_head_ptr_reg);
	XDNA_DBG(xdna, "i2x ringbuf 0x%x", ndev->mgmt_i2x.rb_start_addr);
	XDNA_DBG(xdna, "i2x rsize 0x%x", ndev->mgmt_i2x.rb_size);
	XDNA_DBG(xdna, "x2i tail 0x%x", ndev->mgmt_x2i.mb_tail_ptr_reg);
	XDNA_DBG(xdna, "x2i head 0x%x", ndev->mgmt_x2i.mb_head_ptr_reg);
	XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
	XDNA_DBG(xdna, "x2i rsize 0x%x", ndev->mgmt_x2i.rb_size);
	XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
	XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
	XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
}
101
/*
 * Wait for firmware to come alive, then read the management mailbox
 * channel layout it published in SRAM and cache it in @ndev.  Also
 * validates the magic word and the advertised protocol version.
 * Returns 0 on success, -ETIME if firmware never signals alive, or a
 * negative errno on a bad magic / unsupported protocol.
 */
static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
{
	struct mgmt_mbox_chann_info info_regs;
	struct xdna_mailbox_chann_res *i2x;
	struct xdna_mailbox_chann_res *x2i;
	u32 addr, off;
	u32 *reg;
	int ret;
	int i;

	/*
	 * Once firmware is alive, it will write management channel
	 * information in SRAM BAR and write the address of that information
	 * at FW_ALIVE_OFF offset in SRMA BAR.
	 *
	 * Read a non-zero value from FW_ALIVE_OFF implies that firmware
	 * is alive.
	 */
	ret = readx_poll_timeout(readl, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF),
				 addr, addr, AIE2_INTERVAL, AIE2_TIMEOUT);
	if (ret || !addr)
		return -ETIME;

	/* Copy the channel-info block out of SRAM one 32-bit word at a time. */
	off = AIE2_SRAM_OFF(ndev, addr);
	reg = (u32 *)&info_regs;
	for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
		reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));

	if (info_regs.magic != MGMT_MBOX_MAGIC) {
		XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
		ret = -EINVAL;
		goto done;
	}

	/* Translate fw-relative offsets into driver-usable register addresses. */
	i2x = &ndev->mgmt_i2x;
	x2i = &ndev->mgmt_x2i;

	i2x->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_head);
	i2x->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.i2x_tail);
	i2x->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.i2x_buf);
	i2x->rb_size = info_regs.i2x_buf_sz;

	x2i->mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_head);
	x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
	x2i->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
	x2i->rb_size = info_regs.x2i_buf_sz;

	ndev->mgmt_chan_idx = info_regs.msi_id;
	ndev->mgmt_prot_major = info_regs.prot_major;
	ndev->mgmt_prot_minor = info_regs.prot_minor;

	ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);

done:
	aie2_dump_chann_info_debug(ndev);

	/* Must clear address at FW_ALIVE_OFF */
	writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));

	return ret;
}
163
/*
 * Apply every runtime-config table entry of the given @category to
 * firmware.  When @val is non-NULL it overrides each entry's table value.
 * Returns 0 on success or the first aie2_set_runtime_cfg() error.
 */
int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
		     enum rt_config_category category, u32 *val)
{
	const struct rt_config *cfg;
	u32 value;
	int ret;

	for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
		if (cfg->category != category)
			continue;

		/*
		 * NOTE(review): this skips the entry when ALL of its
		 * feature_mask bits are already present in ndev->feature_mask,
		 * i.e. the entry appears to be a fallback used only when the
		 * feature set is absent — confirm this polarity is intended.
		 */
		if (cfg->feature_mask &&
		    bitmap_subset(&cfg->feature_mask, &ndev->feature_mask, AIE2_FEATURE_MAX))
			continue;

		value = val ? *val : cfg->value;
		ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
		if (ret) {
			XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
				 cfg->type, value);
			return ret;
		}
	}

	return 0;
}
190
aie2_xdna_reset(struct amdxdna_dev_hdl * ndev)191 static int aie2_xdna_reset(struct amdxdna_dev_hdl *ndev)
192 {
193 int ret;
194
195 ret = aie2_suspend_fw(ndev);
196 if (ret) {
197 XDNA_ERR(ndev->xdna, "Suspend firmware failed");
198 return ret;
199 }
200
201 ret = aie2_resume_fw(ndev);
202 if (ret) {
203 XDNA_ERR(ndev->xdna, "Resume firmware failed");
204 return ret;
205 }
206
207 return 0;
208 }
209
aie2_mgmt_fw_init(struct amdxdna_dev_hdl * ndev)210 static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
211 {
212 int ret;
213
214 ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
215 if (ret) {
216 XDNA_ERR(ndev->xdna, "Runtime config failed");
217 return ret;
218 }
219
220 ret = aie2_assign_mgmt_pasid(ndev, 0);
221 if (ret) {
222 XDNA_ERR(ndev->xdna, "Can not assign PASID");
223 return ret;
224 }
225
226 ret = aie2_xdna_reset(ndev);
227 if (ret) {
228 XDNA_ERR(ndev->xdna, "Reset firmware failed");
229 return ret;
230 }
231
232 return 0;
233 }
234
aie2_mgmt_fw_query(struct amdxdna_dev_hdl * ndev)235 static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
236 {
237 int ret;
238
239 ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
240 if (ret) {
241 XDNA_ERR(ndev->xdna, "query firmware version failed");
242 return ret;
243 }
244
245 ret = aie2_query_aie_version(ndev, &ndev->version);
246 if (ret) {
247 XDNA_ERR(ndev->xdna, "Query AIE version failed");
248 return ret;
249 }
250
251 ret = aie2_query_aie_metadata(ndev, &ndev->metadata);
252 if (ret) {
253 XDNA_ERR(ndev->xdna, "Query AIE metadata failed");
254 return ret;
255 }
256
257 ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
258
259 return 0;
260 }
261
aie2_mgmt_fw_fini(struct amdxdna_dev_hdl * ndev)262 static void aie2_mgmt_fw_fini(struct amdxdna_dev_hdl *ndev)
263 {
264 if (aie2_suspend_fw(ndev))
265 XDNA_ERR(ndev->xdna, "Suspend_fw failed");
266 XDNA_DBG(ndev->xdna, "Firmware suspended");
267 }
268
aie2_xrs_load(void * cb_arg,struct xrs_action_load * action)269 static int aie2_xrs_load(void *cb_arg, struct xrs_action_load *action)
270 {
271 struct amdxdna_hwctx *hwctx = cb_arg;
272 struct amdxdna_dev *xdna;
273 int ret;
274
275 xdna = hwctx->client->xdna;
276
277 hwctx->start_col = action->part.start_col;
278 hwctx->num_col = action->part.ncols;
279 ret = aie2_create_context(xdna->dev_handle, hwctx);
280 if (ret)
281 XDNA_ERR(xdna, "create context failed, ret %d", ret);
282
283 return ret;
284 }
285
aie2_xrs_unload(void * cb_arg)286 static int aie2_xrs_unload(void *cb_arg)
287 {
288 struct amdxdna_hwctx *hwctx = cb_arg;
289 struct amdxdna_dev *xdna;
290 int ret;
291
292 xdna = hwctx->client->xdna;
293
294 ret = aie2_destroy_context(xdna->dev_handle, hwctx);
295 if (ret)
296 XDNA_ERR(xdna, "destroy context failed, ret %d", ret);
297
298 return ret;
299 }
300
/*
 * Resolver callback to set the default DPM level.  The level is always
 * recorded, but only pushed to hardware when the device runs in default
 * power mode and the level actually changes.  Caller holds dev_lock.
 */
static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
{
	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	ndev->dft_dpm_level = dpm_level;
	if (ndev->pw_mode != POWER_MODE_DEFAULT || ndev->dpm_level == dpm_level)
		return 0;

	return aie2_pm_set_dpm(ndev, dpm_level);
}
315
/* Callbacks handed to the resource solver (xrsm_init() in aie2_init()). */
static struct xrs_action_ops aie2_xrs_actions = {
	.load = aie2_xrs_load,
	.unload = aie2_xrs_unload,
	.set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
};
321
/*
 * Stop the device: quiesce firmware, tear down the management mailbox,
 * stop PSP and SMU, free async event resources and disable the PCI
 * device.  Idempotent — a second call while already stopped only logs.
 * Teardown order mirrors the reverse of aie2_hw_start().
 */
static void aie2_hw_stop(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;

	if (ndev->dev_status <= AIE2_DEV_INIT) {
		XDNA_ERR(xdna, "device is already stopped");
		return;
	}

	aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, NULL);
	aie2_mgmt_fw_fini(ndev);
	aie2_destroy_mgmt_chann(ndev);
	/* Release the drm-managed mailbox now instead of at drm teardown. */
	drmm_kfree(&xdna->ddev, ndev->mbox);
	ndev->mbox = NULL;
	aie2_psp_stop(ndev->psp_hdl);
	aie2_smu_fini(ndev);
	aie2_error_async_events_free(ndev);
	pci_disable_device(pdev);

	ndev->dev_status = AIE2_DEV_INIT;
}
344
/*
 * Start the device: enable PCI, create the mailbox device and management
 * channel, bring up SMU and PSP, wait for firmware, start the management
 * mailbox channel, then initialize and query firmware.  Idempotent — a
 * second call while already started returns 0.  On failure, resources are
 * unwound in reverse order via the goto chain.
 */
static int aie2_hw_start(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
	struct xdna_mailbox_res mbox_res;
	u32 xdna_mailbox_intr_reg;
	int mgmt_mb_irq, ret;

	if (ndev->dev_status >= AIE2_DEV_START) {
		XDNA_INFO(xdna, "device is already started");
		return 0;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
		return ret;
	}
	pci_set_master(pdev);

	/* The mailbox ring buffers live in the SRAM BAR. */
	mbox_res.ringbuf_base = ndev->sram_base;
	mbox_res.ringbuf_size = pci_resource_len(pdev, xdna->dev_info->sram_bar);
	mbox_res.mbox_base = ndev->mbox_base;
	mbox_res.mbox_size = MBOX_SIZE(ndev);
	mbox_res.name = "xdna_mailbox";
	ndev->mbox = xdnam_mailbox_create(&xdna->ddev, &mbox_res);
	if (!ndev->mbox) {
		XDNA_ERR(xdna, "failed to create mailbox device");
		ret = -ENODEV;
		goto disable_dev;
	}

	ndev->mgmt_chann = xdna_mailbox_alloc_channel(ndev->mbox);
	if (!ndev->mgmt_chann) {
		XDNA_ERR(xdna, "failed to alloc channel");
		ret = -ENODEV;
		goto disable_dev;
	}

	ret = aie2_smu_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to init smu, ret %d", ret);
		goto free_channel;
	}

	ret = aie2_psp_start(ndev->psp_hdl);
	if (ret) {
		XDNA_ERR(xdna, "failed to start psp, ret %d", ret);
		goto fini_smu;
	}

	/* Waits for firmware alive and decodes the channel layout it wrote. */
	ret = aie2_get_mgmt_chann_info(ndev);
	if (ret) {
		XDNA_ERR(xdna, "firmware is not alive");
		goto stop_psp;
	}

	/* Firmware told us which MSI-X vector the mgmt channel uses. */
	mgmt_mb_irq = pci_irq_vector(pdev, ndev->mgmt_chan_idx);
	if (mgmt_mb_irq < 0) {
		ret = mgmt_mb_irq;
		XDNA_ERR(xdna, "failed to alloc irq vector, ret %d", ret);
		goto stop_psp;
	}

	/* Interrupt register sits right after the i2x head pointer register. */
	xdna_mailbox_intr_reg = ndev->mgmt_i2x.mb_head_ptr_reg + 4;
	ret = xdna_mailbox_start_channel(ndev->mgmt_chann,
					 &ndev->mgmt_x2i,
					 &ndev->mgmt_i2x,
					 xdna_mailbox_intr_reg,
					 mgmt_mb_irq);
	if (ret) {
		XDNA_ERR(xdna, "failed to start management mailbox channel");
		ret = -EINVAL;
		goto stop_psp;
	}

	ret = aie2_mgmt_fw_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret);
		goto stop_fw;
	}

	ret = aie2_pm_init(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
		goto stop_fw;
	}

	ret = aie2_mgmt_fw_query(ndev);
	if (ret) {
		XDNA_ERR(xdna, "failed to query fw, ret %d", ret);
		goto stop_fw;
	}

	ret = aie2_error_async_events_alloc(ndev);
	if (ret) {
		XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
		goto stop_fw;
	}

	ndev->dev_status = AIE2_DEV_START;

	return 0;

stop_fw:
	aie2_suspend_fw(ndev);
	xdna_mailbox_stop_channel(ndev->mgmt_chann);
stop_psp:
	aie2_psp_stop(ndev->psp_hdl);
fini_smu:
	aie2_smu_fini(ndev);
free_channel:
	xdna_mailbox_free_channel(ndev->mgmt_chann);
	ndev->mgmt_chann = NULL;
disable_dev:
	pci_disable_device(pdev);

	return ret;
}
464
/*
 * Suspend path: quiesce every client's hardware contexts first, then
 * stop the device.  Always succeeds.
 */
static int aie2_hw_suspend(struct amdxdna_dev *xdna)
{
	struct amdxdna_client *client;

	list_for_each_entry(client, &xdna->client_list, node)
		aie2_hwctx_suspend(client);

	aie2_hw_stop(xdna);

	return 0;
}
476
aie2_hw_resume(struct amdxdna_dev * xdna)477 static int aie2_hw_resume(struct amdxdna_dev *xdna)
478 {
479 struct amdxdna_client *client;
480 int ret;
481
482 ret = aie2_hw_start(xdna);
483 if (ret) {
484 XDNA_ERR(xdna, "Start hardware failed, %d", ret);
485 return ret;
486 }
487
488 list_for_each_entry(client, &xdna->client_list, node) {
489 ret = aie2_hwctx_resume(client);
490 if (ret)
491 break;
492 }
493
494 return ret;
495 }
496
/*
 * Device probe: load firmware, map BARs, set up DMA and MSI-X, create
 * the PSP, start the hardware and initialize the resource solver, message
 * handling and power management.  Returns 0 on success or a negative
 * errno; all PCI resources are device-managed (pcim_*/drmm_*).
 */
static int aie2_init(struct amdxdna_dev *xdna)
{
	struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
	void __iomem *tbl[PCI_NUM_RESOURCES] = {0};
	struct init_config xrs_cfg = { 0 };
	struct amdxdna_dev_hdl *ndev;
	struct psp_config psp_conf;
	const struct firmware *fw;
	unsigned long bars = 0;
	char *fw_full_path;
	int i, nvec, ret;

	if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
		XDNA_ERR(xdna, "Running under hypervisor not supported");
		return -EINVAL;
	}

	ndev = drmm_kzalloc(&xdna->ddev, sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		return -ENOMEM;

	ndev->priv = xdna->dev_info->dev_priv;
	ndev->xdna = xdna;

	/* Try each candidate firmware image in order; first hit wins. */
	for (i = 0; i < ARRAY_SIZE(npu_fw); i++) {
		fw_full_path = kasprintf(GFP_KERNEL, "%s%s", ndev->priv->fw_path, npu_fw[i]);
		if (!fw_full_path)
			return -ENOMEM;

		ret = firmware_request_nowarn(&fw, fw_full_path, &pdev->dev);
		kfree(fw_full_path);
		if (!ret) {
			XDNA_INFO(xdna, "Load firmware %s%s", ndev->priv->fw_path, npu_fw[i]);
			break;
		}
	}

	if (ret) {
		XDNA_ERR(xdna, "failed to request_firmware %s, ret %d",
			 ndev->priv->fw_path, ret);
		return ret;
	}

	ret = pcim_enable_device(pdev);
	if (ret) {
		XDNA_ERR(xdna, "pcim enable device failed, ret %d", ret);
		goto release_fw;
	}

	/* Collect every BAR the driver needs (PSP regs + SRAM/SMU/mailbox). */
	for (i = 0; i < PSP_MAX_REGS; i++)
		set_bit(PSP_REG_BAR(ndev, i), &bars);

	set_bit(xdna->dev_info->sram_bar, &bars);
	set_bit(xdna->dev_info->smu_bar, &bars);
	set_bit(xdna->dev_info->mbox_bar, &bars);

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (!test_bit(i, &bars))
			continue;
		tbl[i] = pcim_iomap(pdev, i, 0);
		if (!tbl[i]) {
			XDNA_ERR(xdna, "map bar %d failed", i);
			ret = -ENOMEM;
			goto release_fw;
		}
	}

	ndev->sram_base = tbl[xdna->dev_info->sram_bar];
	ndev->smu_base = tbl[xdna->dev_info->smu_bar];
	ndev->mbox_base = tbl[xdna->dev_info->mbox_bar];

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		XDNA_ERR(xdna, "Failed to set DMA mask: %d", ret);
		goto release_fw;
	}

	nvec = pci_msix_vec_count(pdev);
	if (nvec <= 0) {
		XDNA_ERR(xdna, "does not get number of interrupt vector");
		ret = -EINVAL;
		goto release_fw;
	}

	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (ret < 0) {
		XDNA_ERR(xdna, "failed to alloc irq vectors, ret %d", ret);
		goto release_fw;
	}

	/* Hand the firmware image and PSP register map to the PSP driver. */
	psp_conf.fw_size = fw->size;
	psp_conf.fw_buf = fw->data;
	for (i = 0; i < PSP_MAX_REGS; i++)
		psp_conf.psp_regs[i] = tbl[PSP_REG_BAR(ndev, i)] + PSP_REG_OFF(ndev, i);
	ndev->psp_hdl = aie2m_psp_create(&xdna->ddev, &psp_conf);
	if (!ndev->psp_hdl) {
		XDNA_ERR(xdna, "failed to create psp");
		ret = -ENOMEM;
		goto release_fw;
	}
	xdna->dev_handle = ndev;

	ret = aie2_hw_start(xdna);
	if (ret) {
		XDNA_ERR(xdna, "start npu failed, ret %d", ret);
		goto release_fw;
	}

	/* Feed DPM clock levels and column count to the resource solver. */
	xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
	for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
		xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
	xrs_cfg.sys_eff_factor = 1;
	xrs_cfg.ddev = &xdna->ddev;
	xrs_cfg.actions = &aie2_xrs_actions;
	xrs_cfg.total_col = ndev->total_col;

	xdna->xrs_hdl = xrsm_init(&xrs_cfg);
	if (!xdna->xrs_hdl) {
		XDNA_ERR(xdna, "Initialize resolver failed");
		ret = -EINVAL;
		goto stop_hw;
	}

	release_firmware(fw);
	aie2_msg_init(ndev);
	amdxdna_pm_init(xdna);
	return 0;

stop_hw:
	aie2_hw_stop(xdna);
release_fw:
	release_firmware(fw);

	return ret;
}
632
/* Device teardown: disable power management before stopping the hardware. */
static void aie2_fini(struct amdxdna_dev *xdna)
{
	amdxdna_pm_fini(xdna);
	aie2_hw_stop(xdna);
}
638
/*
 * DRM_AMDXDNA_QUERY_AIE_STATUS: copy the AIE column status into the
 * user-supplied buffer described by the request struct at args->buffer,
 * then write the request struct (with cols_filled updated) back to user.
 */
static int aie2_get_aie_status(struct amdxdna_client *client,
			       struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_aie_status status;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_dev_hdl *ndev;
	int ret;

	ndev = xdna->dev_handle;
	if (copy_from_user(&status, u64_to_user_ptr(args->buffer), sizeof(status))) {
		XDNA_ERR(xdna, "Failed to copy AIE request into kernel");
		return -EFAULT;
	}

	/* Reject user buffers larger than a full cols * col_size snapshot. */
	if (ndev->metadata.cols * ndev->metadata.size < status.buffer_size) {
		XDNA_ERR(xdna, "Invalid buffer size. Given Size: %u. Need Size: %u.",
			 status.buffer_size, ndev->metadata.cols * ndev->metadata.size);
		return -EINVAL;
	}

	ret = aie2_query_status(ndev, u64_to_user_ptr(status.buffer),
				status.buffer_size, &status.cols_filled);
	if (ret) {
		XDNA_ERR(xdna, "Failed to get AIE status info. Ret: %d", ret);
		return ret;
	}

	if (copy_to_user(u64_to_user_ptr(args->buffer), &status, sizeof(status))) {
		XDNA_ERR(xdna, "Failed to copy AIE request info to user space");
		return -EFAULT;
	}

	return 0;
}
673
aie2_get_aie_metadata(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)674 static int aie2_get_aie_metadata(struct amdxdna_client *client,
675 struct amdxdna_drm_get_info *args)
676 {
677 struct amdxdna_drm_query_aie_metadata *meta;
678 struct amdxdna_dev *xdna = client->xdna;
679 struct amdxdna_dev_hdl *ndev;
680 int ret = 0;
681
682 ndev = xdna->dev_handle;
683 meta = kzalloc_obj(*meta);
684 if (!meta)
685 return -ENOMEM;
686
687 meta->col_size = ndev->metadata.size;
688 meta->cols = ndev->metadata.cols;
689 meta->rows = ndev->metadata.rows;
690
691 meta->version.major = ndev->metadata.version.major;
692 meta->version.minor = ndev->metadata.version.minor;
693
694 meta->core.row_count = ndev->metadata.core.row_count;
695 meta->core.row_start = ndev->metadata.core.row_start;
696 meta->core.dma_channel_count = ndev->metadata.core.dma_channel_count;
697 meta->core.lock_count = ndev->metadata.core.lock_count;
698 meta->core.event_reg_count = ndev->metadata.core.event_reg_count;
699
700 meta->mem.row_count = ndev->metadata.mem.row_count;
701 meta->mem.row_start = ndev->metadata.mem.row_start;
702 meta->mem.dma_channel_count = ndev->metadata.mem.dma_channel_count;
703 meta->mem.lock_count = ndev->metadata.mem.lock_count;
704 meta->mem.event_reg_count = ndev->metadata.mem.event_reg_count;
705
706 meta->shim.row_count = ndev->metadata.shim.row_count;
707 meta->shim.row_start = ndev->metadata.shim.row_start;
708 meta->shim.dma_channel_count = ndev->metadata.shim.dma_channel_count;
709 meta->shim.lock_count = ndev->metadata.shim.lock_count;
710 meta->shim.event_reg_count = ndev->metadata.shim.event_reg_count;
711
712 if (copy_to_user(u64_to_user_ptr(args->buffer), meta, sizeof(*meta)))
713 ret = -EFAULT;
714
715 kfree(meta);
716 return ret;
717 }
718
aie2_get_aie_version(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)719 static int aie2_get_aie_version(struct amdxdna_client *client,
720 struct amdxdna_drm_get_info *args)
721 {
722 struct amdxdna_drm_query_aie_version version;
723 struct amdxdna_dev *xdna = client->xdna;
724 struct amdxdna_dev_hdl *ndev;
725
726 ndev = xdna->dev_handle;
727 version.major = ndev->version.major;
728 version.minor = ndev->version.minor;
729
730 if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
731 return -EFAULT;
732
733 return 0;
734 }
735
aie2_get_firmware_version(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)736 static int aie2_get_firmware_version(struct amdxdna_client *client,
737 struct amdxdna_drm_get_info *args)
738 {
739 struct amdxdna_drm_query_firmware_version version;
740 struct amdxdna_dev *xdna = client->xdna;
741
742 version.major = xdna->fw_ver.major;
743 version.minor = xdna->fw_ver.minor;
744 version.patch = xdna->fw_ver.sub;
745 version.build = xdna->fw_ver.build;
746
747 if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
748 return -EFAULT;
749
750 return 0;
751 }
752
aie2_get_power_mode(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)753 static int aie2_get_power_mode(struct amdxdna_client *client,
754 struct amdxdna_drm_get_info *args)
755 {
756 struct amdxdna_drm_get_power_mode mode = {};
757 struct amdxdna_dev *xdna = client->xdna;
758 struct amdxdna_dev_hdl *ndev;
759
760 ndev = xdna->dev_handle;
761 mode.power_mode = ndev->pw_mode;
762
763 if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
764 return -EFAULT;
765
766 return 0;
767 }
768
aie2_get_clock_metadata(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)769 static int aie2_get_clock_metadata(struct amdxdna_client *client,
770 struct amdxdna_drm_get_info *args)
771 {
772 struct amdxdna_drm_query_clock_metadata *clock;
773 struct amdxdna_dev *xdna = client->xdna;
774 struct amdxdna_dev_hdl *ndev;
775 int ret = 0;
776
777 ndev = xdna->dev_handle;
778 clock = kzalloc_obj(*clock);
779 if (!clock)
780 return -ENOMEM;
781
782 snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
783 "MP-NPU Clock");
784 clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
785 snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
786 clock->h_clock.freq_mhz = ndev->hclk_freq;
787
788 if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
789 ret = -EFAULT;
790
791 kfree(clock);
792 return ret;
793 }
794
/*
 * DRM_AMDXDNA_QUERY_SENSORS: emit a power sensor followed by one
 * utilization sensor per column, stopping when the user buffer fills.
 * On return args->buffer_size holds the number of bytes written.
 */
static int aie2_get_sensors(struct amdxdna_client *client,
			    struct amdxdna_drm_get_info *args)
{
	struct amdxdna_dev_hdl *ndev = client->xdna->dev_handle;
	struct amdxdna_drm_query_sensor sensor = {};
	struct amd_pmf_npu_metrics npu_metrics;
	u32 sensors_count = 0, i;
	int ret;

	ret = AIE2_GET_PMF_NPU_METRICS(&npu_metrics);
	if (ret)
		return ret;

	/* First entry: total NPU power in milliwatts (unitm = -3). */
	sensor.type = AMDXDNA_SENSOR_TYPE_POWER;
	sensor.input = npu_metrics.npu_power;
	sensor.unitm = -3;
	scnprintf(sensor.label, sizeof(sensor.label), "Total Power");
	scnprintf(sensor.units, sizeof(sensor.units), "mW");

	if (copy_to_user(u64_to_user_ptr(args->buffer), &sensor, sizeof(sensor)))
		return -EFAULT;

	sensors_count++;
	if (args->buffer_size <= sensors_count * sizeof(sensor))
		goto out;

	/*
	 * Per-column utilization; the cap of 8 presumably matches the size of
	 * npu_metrics.npu_busy[] — TODO confirm against amd-pmf-io.h.
	 */
	for (i = 0; i < min_t(u32, ndev->total_col, 8); i++) {
		memset(&sensor, 0, sizeof(sensor));
		sensor.input = npu_metrics.npu_busy[i];
		sensor.type = AMDXDNA_SENSOR_TYPE_COLUMN_UTILIZATION;
		sensor.unitm = 0;
		scnprintf(sensor.label, sizeof(sensor.label), "Column %d Utilization", i);
		scnprintf(sensor.units, sizeof(sensor.units), "%%");

		if (copy_to_user(u64_to_user_ptr(args->buffer) + sensors_count * sizeof(sensor),
				 &sensor, sizeof(sensor)))
			return -EFAULT;

		sensors_count++;
		if (args->buffer_size <= sensors_count * sizeof(sensor))
			goto out;
	}

out:
	/* Report back how many bytes were actually produced. */
	args->buffer_size = sensors_count * sizeof(sensor);

	return 0;
}
843
/*
 * Per-context callback for amdxdna_hwctx_walk(): serialize one hardware
 * context entry into the user array described by @arg, advancing the
 * buffer cursor and decrementing the remaining element count.  Returns
 * -EINVAL once the array is full (stops the walk).
 */
static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
	struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
	struct amdxdna_drm_get_array *array_args = arg;
	struct amdxdna_drm_hwctx_entry __user *buf;
	struct app_health_report report;
	struct amdxdna_dev_hdl *ndev;
	u32 size;
	int ret;

	if (!array_args->num_element)
		return -EINVAL;

	tmp = kzalloc_obj(*tmp);
	if (!tmp)
		return -ENOMEM;

	tmp->pid = hwctx->client->pid;
	tmp->context_id = hwctx->id;
	tmp->start_col = hwctx->start_col;
	tmp->num_col = hwctx->num_col;
	tmp->command_submissions = hwctx->priv->seq;
	tmp->command_completions = hwctx->priv->completed;
	tmp->pasid = hwctx->client->pasid;
	tmp->heap_usage = hwctx->client->heap_usage;
	tmp->priority = hwctx->qos.priority;
	tmp->gops = hwctx->qos.gops;
	tmp->fps = hwctx->qos.fps;
	tmp->dma_bandwidth = hwctx->qos.dma_bandwidth;
	tmp->latency = hwctx->qos.latency;
	tmp->frame_exec_time = hwctx->qos.frame_exec_time;
	tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE;
	ndev = hwctx->client->xdna->dev_handle;
	/* Health data is best-effort; a query failure leaves those fields zero. */
	ret = aie2_query_app_health(ndev, hwctx->fw_ctx_id, &report);
	if (!ret) {
		/* Fill in app health report fields */
		tmp->txn_op_idx = report.txn_op_id;
		tmp->ctx_pc = report.ctx_pc;
		tmp->fatal_error_type = report.fatal_info.fatal_type;
		tmp->fatal_error_exception_type = report.fatal_info.exception_type;
		tmp->fatal_error_exception_pc = report.fatal_info.exception_pc;
		tmp->fatal_error_app_module = report.fatal_info.app_module;
	}

	buf = u64_to_user_ptr(array_args->buffer);
	/* Copy no more than the element size the caller asked for. */
	size = min(sizeof(*tmp), array_args->element_size);

	if (copy_to_user(buf, tmp, size))
		return -EFAULT;

	array_args->buffer += size;
	array_args->num_element--;

	return 0;
}
899
/*
 * DRM_AMDXDNA_QUERY_HW_CONTEXTS: walk every client's hardware contexts
 * and fill the user buffer with one entry each; on return
 * args->buffer_size is the number of bytes written.  Caller holds
 * dev_lock.  A full buffer ends the walk but is not an error.
 */
static int aie2_get_hwctx_status(struct amdxdna_client *client,
				 struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_get_array array_args;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_client *tmp_client;
	int ret;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	/*
	 * NOTE(review): element_size is sizeof(amdxdna_drm_query_hwctx) while
	 * the callback fills amdxdna_drm_hwctx_entry (capped via min()) —
	 * presumably kept for legacy layout compatibility; confirm.
	 */
	array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx);
	array_args.buffer = args->buffer;
	array_args.num_element = args->buffer_size / array_args.element_size;
	list_for_each_entry(tmp_client, &xdna->client_list, node) {
		ret = amdxdna_hwctx_walk(tmp_client, &array_args,
					 aie2_hwctx_status_cb);
		if (ret)
			break;
	}

	args->buffer_size -= (u32)(array_args.buffer - args->buffer);
	return 0;
}
923
aie2_query_resource_info(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)924 static int aie2_query_resource_info(struct amdxdna_client *client,
925 struct amdxdna_drm_get_info *args)
926 {
927 struct amdxdna_drm_get_resource_info res_info;
928 const struct amdxdna_dev_priv *priv;
929 struct amdxdna_dev_hdl *ndev;
930 struct amdxdna_dev *xdna;
931
932 xdna = client->xdna;
933 ndev = xdna->dev_handle;
934 priv = ndev->priv;
935
936 res_info.npu_clk_max = priv->dpm_clk_tbl[ndev->max_dpm_level].hclk;
937 res_info.npu_tops_max = ndev->max_tops;
938 res_info.npu_task_max = priv->hwctx_limit;
939 res_info.npu_tops_curr = ndev->curr_tops;
940 res_info.npu_task_curr = ndev->hwctx_num;
941
942 if (copy_to_user(u64_to_user_ptr(args->buffer), &res_info, sizeof(res_info)))
943 return -EFAULT;
944
945 return 0;
946 }
947
aie2_fill_hwctx_map(struct amdxdna_hwctx * hwctx,void * arg)948 static int aie2_fill_hwctx_map(struct amdxdna_hwctx *hwctx, void *arg)
949 {
950 struct amdxdna_dev *xdna = hwctx->client->xdna;
951 u32 *map = arg;
952
953 if (hwctx->fw_ctx_id >= xdna->dev_handle->priv->hwctx_limit) {
954 XDNA_ERR(xdna, "Invalid fw ctx id %d/%d ", hwctx->fw_ctx_id,
955 xdna->dev_handle->priv->hwctx_limit);
956 return -EINVAL;
957 }
958
959 map[hwctx->fw_ctx_id] = hwctx->id;
960 return 0;
961 }
962
/*
 * DRM_AMDXDNA_QUERY_TELEMETRY: the user buffer holds a header (with a
 * fw-ctx-id -> ctx-id map sized to hwctx_limit) followed by raw telemetry
 * data.  Builds the map, asks firmware to fill the data region, then
 * writes the completed header back.
 */
static int aie2_get_telemetry(struct amdxdna_client *client,
			      struct amdxdna_drm_get_info *args)
{
	struct amdxdna_drm_query_telemetry_header *header __free(kfree) = NULL;
	u32 telemetry_data_sz, header_sz, elem_num;
	struct amdxdna_dev *xdna = client->xdna;
	struct amdxdna_client *tmp_client;
	int ret;

	/* The buffer must hold the header plus at least one data byte. */
	elem_num = xdna->dev_handle->priv->hwctx_limit;
	header_sz = struct_size(header, map, elem_num);
	if (args->buffer_size <= header_sz) {
		XDNA_ERR(xdna, "Invalid buffer size");
		return -EINVAL;
	}

	telemetry_data_sz = args->buffer_size - header_sz;
	if (telemetry_data_sz > SZ_4M) {
		XDNA_ERR(xdna, "Buffer size is too big, %d", telemetry_data_sz);
		return -EINVAL;
	}

	header = kzalloc(header_sz, GFP_KERNEL);
	if (!header)
		return -ENOMEM;

	/* Only the fixed part comes from user; the map is built below. */
	if (copy_from_user(header, u64_to_user_ptr(args->buffer), sizeof(*header))) {
		XDNA_ERR(xdna, "Failed to copy telemetry header from user");
		return -EFAULT;
	}

	header->map_num_elements = elem_num;
	list_for_each_entry(tmp_client, &xdna->client_list, node) {
		ret = amdxdna_hwctx_walk(tmp_client, &header->map,
					 aie2_fill_hwctx_map);
		if (ret)
			return ret;
	}

	ret = aie2_query_telemetry(xdna->dev_handle,
				   u64_to_user_ptr(args->buffer + header_sz),
				   telemetry_data_sz, header);
	if (ret) {
		XDNA_ERR(xdna, "Query telemetry failed ret %d", ret);
		return ret;
	}

	if (copy_to_user(u64_to_user_ptr(args->buffer), header, header_sz)) {
		XDNA_ERR(xdna, "Copy header failed");
		return -EFAULT;
	}

	return 0;
}
1017
aie2_get_preempt_state(struct amdxdna_client * client,struct amdxdna_drm_get_info * args)1018 static int aie2_get_preempt_state(struct amdxdna_client *client,
1019 struct amdxdna_drm_get_info *args)
1020 {
1021 struct amdxdna_drm_attribute_state state = {};
1022 struct amdxdna_dev *xdna = client->xdna;
1023 struct amdxdna_dev_hdl *ndev;
1024
1025 ndev = xdna->dev_handle;
1026 if (args->param == DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE)
1027 state.state = ndev->force_preempt_enabled;
1028 else if (args->param == DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE)
1029 state.state = ndev->frame_boundary_preempt;
1030
1031 if (copy_to_user(u64_to_user_ptr(args->buffer), &state, sizeof(state)))
1032 return -EFAULT;
1033
1034 return 0;
1035 }
1036
/*
 * GET_INFO ioctl dispatcher: guard against device unplug, take a runtime
 * PM reference, route to the per-parameter handler, then drop the PM
 * reference.  Returns the handler's result or -EOPNOTSUPP for unknown
 * parameters.
 */
static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	int ret, idx;

	if (!drm_dev_enter(&xdna->ddev, &idx))
		return -ENODEV;

	/* Keep the device resumed for the duration of the query. */
	ret = amdxdna_pm_resume_get_locked(xdna);
	if (ret)
		goto dev_exit;

	switch (args->param) {
	case DRM_AMDXDNA_QUERY_AIE_STATUS:
		ret = aie2_get_aie_status(client, args);
		break;
	case DRM_AMDXDNA_QUERY_AIE_METADATA:
		ret = aie2_get_aie_metadata(client, args);
		break;
	case DRM_AMDXDNA_QUERY_AIE_VERSION:
		ret = aie2_get_aie_version(client, args);
		break;
	case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
		ret = aie2_get_clock_metadata(client, args);
		break;
	case DRM_AMDXDNA_QUERY_SENSORS:
		ret = aie2_get_sensors(client, args);
		break;
	case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
		ret = aie2_get_hwctx_status(client, args);
		break;
	case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
		ret = aie2_get_firmware_version(client, args);
		break;
	case DRM_AMDXDNA_GET_POWER_MODE:
		ret = aie2_get_power_mode(client, args);
		break;
	case DRM_AMDXDNA_QUERY_TELEMETRY:
		ret = aie2_get_telemetry(client, args);
		break;
	case DRM_AMDXDNA_QUERY_RESOURCE_INFO:
		ret = aie2_query_resource_info(client, args);
		break;
	case DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE:
	case DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE:
		ret = aie2_get_preempt_state(client, args);
		break;
	default:
		XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
		ret = -EOPNOTSUPP;
	}

	amdxdna_pm_suspend_put(xdna);
	XDNA_DBG(xdna, "Got param %d", args->param);

dev_exit:
	drm_dev_exit(idx);
	return ret;
}
1096
aie2_query_ctx_status_array(struct amdxdna_client * client,struct amdxdna_drm_get_array * args)1097 static int aie2_query_ctx_status_array(struct amdxdna_client *client,
1098 struct amdxdna_drm_get_array *args)
1099 {
1100 struct amdxdna_drm_get_array array_args;
1101 struct amdxdna_dev *xdna = client->xdna;
1102 struct amdxdna_client *tmp_client;
1103 int ret;
1104
1105 drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
1106
1107 if (args->element_size > SZ_4K || args->num_element > SZ_1K) {
1108 XDNA_DBG(xdna, "Invalid element size %d or number of element %d",
1109 args->element_size, args->num_element);
1110 return -EINVAL;
1111 }
1112
1113 array_args.element_size = min(args->element_size,
1114 sizeof(struct amdxdna_drm_hwctx_entry));
1115 array_args.buffer = args->buffer;
1116 array_args.num_element = args->num_element * args->element_size /
1117 array_args.element_size;
1118 list_for_each_entry(tmp_client, &xdna->client_list, node) {
1119 ret = amdxdna_hwctx_walk(tmp_client, &array_args,
1120 aie2_hwctx_status_cb);
1121 if (ret)
1122 break;
1123 }
1124
1125 args->element_size = array_args.element_size;
1126 args->num_element = (u32)((array_args.buffer - args->buffer) /
1127 args->element_size);
1128
1129 return 0;
1130 }
1131
aie2_get_array(struct amdxdna_client * client,struct amdxdna_drm_get_array * args)1132 static int aie2_get_array(struct amdxdna_client *client,
1133 struct amdxdna_drm_get_array *args)
1134 {
1135 struct amdxdna_dev *xdna = client->xdna;
1136 int ret, idx;
1137
1138 if (!drm_dev_enter(&xdna->ddev, &idx))
1139 return -ENODEV;
1140
1141 ret = amdxdna_pm_resume_get_locked(xdna);
1142 if (ret)
1143 goto dev_exit;
1144
1145 switch (args->param) {
1146 case DRM_AMDXDNA_HW_CONTEXT_ALL:
1147 ret = aie2_query_ctx_status_array(client, args);
1148 break;
1149 case DRM_AMDXDNA_HW_LAST_ASYNC_ERR:
1150 ret = aie2_get_array_async_error(xdna->dev_handle, args);
1151 break;
1152 case DRM_AMDXDNA_BO_USAGE:
1153 ret = amdxdna_drm_get_bo_usage(&xdna->ddev, args);
1154 break;
1155 default:
1156 XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
1157 ret = -EOPNOTSUPP;
1158 }
1159
1160 amdxdna_pm_suspend_put(xdna);
1161 XDNA_DBG(xdna, "Got param %d", args->param);
1162
1163 dev_exit:
1164 drm_dev_exit(idx);
1165 return ret;
1166 }
1167
aie2_set_power_mode(struct amdxdna_client * client,struct amdxdna_drm_set_state * args)1168 static int aie2_set_power_mode(struct amdxdna_client *client,
1169 struct amdxdna_drm_set_state *args)
1170 {
1171 struct amdxdna_drm_set_power_mode power_state;
1172 enum amdxdna_power_mode_type power_mode;
1173 struct amdxdna_dev *xdna = client->xdna;
1174
1175 if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
1176 sizeof(power_state))) {
1177 XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
1178 return -EFAULT;
1179 }
1180
1181 if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
1182 return -EINVAL;
1183
1184 power_mode = power_state.power_mode;
1185 if (power_mode > POWER_MODE_TURBO) {
1186 XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
1187 return -EINVAL;
1188 }
1189
1190 return aie2_pm_set_mode(xdna->dev_handle, power_mode);
1191 }
1192
aie2_set_preempt_state(struct amdxdna_client * client,struct amdxdna_drm_set_state * args)1193 static int aie2_set_preempt_state(struct amdxdna_client *client,
1194 struct amdxdna_drm_set_state *args)
1195 {
1196 struct amdxdna_dev_hdl *ndev = client->xdna->dev_handle;
1197 struct amdxdna_drm_attribute_state state;
1198 u32 val;
1199 int ret;
1200
1201 if (copy_from_user(&state, u64_to_user_ptr(args->buffer), sizeof(state)))
1202 return -EFAULT;
1203
1204 if (state.state > 1)
1205 return -EINVAL;
1206
1207 if (XDNA_MBZ_DBG(client->xdna, state.pad, sizeof(state.pad)))
1208 return -EINVAL;
1209
1210 if (args->param == DRM_AMDXDNA_SET_FORCE_PREEMPT) {
1211 ndev->force_preempt_enabled = state.state;
1212 } else if (args->param == DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT) {
1213 val = state.state;
1214 ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT,
1215 &val);
1216 if (ret)
1217 return ret;
1218
1219 ndev->frame_boundary_preempt = state.state;
1220 }
1221
1222 return 0;
1223 }
1224
aie2_set_state(struct amdxdna_client * client,struct amdxdna_drm_set_state * args)1225 static int aie2_set_state(struct amdxdna_client *client,
1226 struct amdxdna_drm_set_state *args)
1227 {
1228 struct amdxdna_dev *xdna = client->xdna;
1229 int ret, idx;
1230
1231 if (!drm_dev_enter(&xdna->ddev, &idx))
1232 return -ENODEV;
1233
1234 ret = amdxdna_pm_resume_get_locked(xdna);
1235 if (ret)
1236 goto dev_exit;
1237
1238 switch (args->param) {
1239 case DRM_AMDXDNA_SET_POWER_MODE:
1240 ret = aie2_set_power_mode(client, args);
1241 break;
1242 case DRM_AMDXDNA_SET_FORCE_PREEMPT:
1243 case DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT:
1244 ret = aie2_set_preempt_state(client, args);
1245 break;
1246 default:
1247 XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
1248 ret = -EOPNOTSUPP;
1249 break;
1250 }
1251
1252 amdxdna_pm_suspend_put(xdna);
1253 dev_exit:
1254 drm_dev_exit(idx);
1255 return ret;
1256 }
1257
/*
 * AIE2 implementation of the amdxdna device-operations table.  The
 * generic amdxdna PCI driver dispatches through these callbacks; see
 * struct amdxdna_dev_ops in amdxdna_pci_drv.h for the full contract.
 */
const struct amdxdna_dev_ops aie2_ops = {
	.init = aie2_init,			/* probe-time device init */
	.fini = aie2_fini,			/* remove-time teardown */
	.resume = aie2_hw_resume,		/* hardware resume */
	.suspend = aie2_hw_suspend,		/* hardware suspend */
	.get_aie_info = aie2_get_info,		/* GET_INFO ioctl backend */
	.set_aie_state = aie2_set_state,	/* SET_STATE ioctl backend */
	.hwctx_init = aie2_hwctx_init,		/* hardware context create */
	.hwctx_fini = aie2_hwctx_fini,		/* hardware context destroy */
	.hwctx_config = aie2_hwctx_config,	/* hardware context configure */
	.hwctx_sync_debug_bo = aie2_hwctx_sync_debug_bo,
	.cmd_submit = aie2_cmd_submit,		/* command submission */
	/* presumably the HMM/userptr invalidation notifier hook — not visible here */
	.hmm_invalidate = aie2_hmm_invalidate,
	.get_array = aie2_get_array,		/* GET_ARRAY ioctl backend */
};
1273