1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
4 */
5
6 #ifndef _AMDXDNA_CTX_H_
7 #define _AMDXDNA_CTX_H_
8
9 #include <linux/bitfield.h>
10
11 #include "amdxdna_gem.h"
12
13 struct amdxdna_hwctx_priv;
14
/*
 * Opcodes carried in the AMDXDNA_CMD_OPCODE field of the command header.
 * Values are part of the ERT command ABI and must not be renumbered.
 */
enum ert_cmd_opcode {
	ERT_START_CU = 0,	/* start a compute unit kernel */
	ERT_CMD_CHAIN = 19,	/* payload is struct amdxdna_cmd_chain */
	ERT_START_NPU = 20,	/* payload starts with struct amdxdna_cmd_start_npu */
};
20
/*
 * Command lifecycle states, stored in the AMDXDNA_CMD_STATE header field
 * (see amdxdna_cmd_set_state()/amdxdna_cmd_get_state()). Implicit values
 * start at 0 and are part of the ERT ABI — do not reorder.
 */
enum ert_cmd_state {
	ERT_CMD_STATE_INVALID,
	ERT_CMD_STATE_NEW,
	ERT_CMD_STATE_QUEUED,
	ERT_CMD_STATE_RUNNING,
	ERT_CMD_STATE_COMPLETED,
	ERT_CMD_STATE_ERROR,
	ERT_CMD_STATE_ABORT,
	ERT_CMD_STATE_SUBMITTED,
	ERT_CMD_STATE_TIMEOUT,
	ERT_CMD_STATE_NORESPONSE,
};
33
34 /*
35 * Interpretation of the beginning of data payload for ERT_START_NPU in
36 * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
37 */
/*
 * Interpretation of the beginning of data payload for ERT_START_NPU in
 * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
 * Member order is the on-the-wire command layout — do not reorder.
 */
struct amdxdna_cmd_start_npu {
	u64 buffer;       /* instruction buffer address */
	u32 buffer_size;  /* size of buffer in bytes */
	u32 prop_count;   /* properties count */
	u32 prop_args[];  /* properties and regular kernel arguments */
};
44
45 /*
46 * Interpretation of the beginning of data payload for ERT_CMD_CHAIN in
47 * amdxdna_cmd. The rest of the payload in amdxdna_cmd is cmd BO handles.
48 */
/*
 * Interpretation of the beginning of data payload for ERT_CMD_CHAIN in
 * amdxdna_cmd. The rest of the payload in amdxdna_cmd is cmd BO handles.
 */
struct amdxdna_cmd_chain {
	u32 command_count;	/* number of entries in data[] */
	u32 submit_index;	/* index of the command currently being submitted */
	u32 error_index;	/* index of the command that failed, if any */
	u32 reserved[3];
	u64 data[] __counted_by(command_count);	/* command BO handles */
};
56
/*
 * Exec buffer command header format. The 32-bit header packs the command
 * state (enum ert_cmd_state), extra CU mask count, payload dword count and
 * opcode (enum ert_cmd_opcode); bits 4-9 and 28-31 are unused here.
 */
#define AMDXDNA_CMD_STATE		GENMASK(3, 0)
#define AMDXDNA_CMD_EXTRA_CU_MASK	GENMASK(11, 10)
#define AMDXDNA_CMD_COUNT		GENMASK(22, 12)
#define AMDXDNA_CMD_OPCODE		GENMASK(27, 23)
struct amdxdna_cmd {
	u32 header;	/* packed fields above */
	u32 data[];	/* opcode-specific payload, see structs above */
};
66
/* Per-client hardware context: a set of NPU columns plus submission state. */
struct amdxdna_hwctx {
	struct amdxdna_client *client;		/* owning client */
	struct amdxdna_hwctx_priv *priv;	/* backend-private state */
	char *name;

	u32 id;			/* context handle returned to userspace */
	u32 max_opc;
	u32 num_tiles;
	u32 mem_size;
	u32 fw_ctx_id;		/* firmware-side context id */
	u32 col_list_len;	/* number of entries in col_list */
	u32 *col_list;
	u32 start_col;		/* first column assigned to this context */
	u32 num_col;		/* number of columns assigned */
#define HWCTX_STAT_INIT  0
#define HWCTX_STAT_READY 1
#define HWCTX_STAT_STOP  2
	u32 status;		/* one of HWCTX_STAT_* */
	u32 old_status;		/* status saved across suspend/resume */

	struct amdxdna_qos_info	     qos;
	struct amdxdna_hwctx_param_config_cu *cus;	/* configured CUs */
	u32 syncobj_hdl;	/* DRM syncobj handle for job completion */

	atomic64_t job_submit_cnt;
	/* separate cacheline: freed-job counter is updated on a different path */
	atomic64_t job_free_cnt ____cacheline_aligned_in_smp;
};
94
/* Convert an embedded drm_sched_job back to its containing amdxdna_sched_job */
#define drm_job_to_xdna_job(j) \
	container_of(j, struct amdxdna_sched_job, base)
97
/* One submitted command: a DRM scheduler job plus the BOs it references. */
struct amdxdna_sched_job {
	struct drm_sched_job base;	/* must embed for drm_job_to_xdna_job() */
	struct kref refcnt;		/* job lifetime refcount */
	struct amdxdna_hwctx *hwctx;	/* context the job was submitted on */
	struct mm_struct *mm;
	/* The fence to notice DRM scheduler that job is done by hardware */
	struct dma_fence *fence;
	/* user can wait on this fence */
	struct dma_fence *out_fence;
	bool job_done;
	u64 seq;			/* submission sequence number */
	struct amdxdna_gem_obj *cmd_bo;	/* BO holding struct amdxdna_cmd */
	size_t bo_cnt;			/* number of entries in bos[] */
	struct drm_gem_object *bos[] __counted_by(bo_cnt);
};
113
114 static inline u32
amdxdna_cmd_get_op(struct amdxdna_gem_obj * abo)115 amdxdna_cmd_get_op(struct amdxdna_gem_obj *abo)
116 {
117 struct amdxdna_cmd *cmd = abo->mem.kva;
118
119 return FIELD_GET(AMDXDNA_CMD_OPCODE, cmd->header);
120 }
121
122 static inline void
amdxdna_cmd_set_state(struct amdxdna_gem_obj * abo,enum ert_cmd_state s)123 amdxdna_cmd_set_state(struct amdxdna_gem_obj *abo, enum ert_cmd_state s)
124 {
125 struct amdxdna_cmd *cmd = abo->mem.kva;
126
127 cmd->header &= ~AMDXDNA_CMD_STATE;
128 cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, s);
129 }
130
131 static inline enum ert_cmd_state
amdxdna_cmd_get_state(struct amdxdna_gem_obj * abo)132 amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
133 {
134 struct amdxdna_cmd *cmd = abo->mem.kva;
135
136 return FIELD_GET(AMDXDNA_CMD_STATE, cmd->header);
137 }
138
/* Return a pointer to the opcode-specific payload in @abo; *size gets its length */
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
/* Return the CU index targeted by the command in @abo, or a negative errno */
int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
141
amdxdna_hwctx_col_map(struct amdxdna_hwctx * hwctx)142 static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx)
143 {
144 return GENMASK(hwctx->start_col + hwctx->num_col - 1,
145 hwctx->start_col);
146 }
147
/* Release resources held by @job (BO references etc.) */
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
/* Destroy all hardware contexts owned by @client */
void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
/* Suspend/resume all hardware contexts of @client (e.g. across device reset) */
void amdxdna_hwctx_suspend(struct amdxdna_client *client);
void amdxdna_hwctx_resume(struct amdxdna_client *client);

/*
 * Submit command BO @cmd_bo_hdls with @arg_bo_cnt argument BOs on context
 * @hwctx_hdl; on success *seq receives the job's sequence number.
 */
int amdxdna_cmd_submit(struct amdxdna_client *client,
		       u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
		       u32 hwctx_hdl, u64 *seq);

/* Wait up to @timeout for job @seq on context @hwctx_hdl to complete */
int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
		     u64 seq, u32 timeout);

/* DRM ioctl entry points implemented in amdxdna_ctx.c */
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdxdna_drm_submit_cmd_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
164
165 #endif /* _AMDXDNA_CTX_H_ */
166