1 /*
2  * Copyright 2022 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include <linux/firmware.h>
24 
25 #include "amdgpu.h"
26 #include "amdgpu_gfx.h"
27 #include "soc15.h"
28 #include "soc15d.h"
29 #include "soc15_common.h"
30 #include "vega10_enum.h"
31 
32 #include "v9_structs.h"
33 
34 #include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
35 
36 #include "gc/gc_9_4_3_offset.h"
37 #include "gc/gc_9_4_3_sh_mask.h"
38 
39 #include "gfx_v9_4_3.h"
40 #include "gfx_v9_4_3_cleaner_shader.h"
41 #include "amdgpu_xcp.h"
42 #include "amdgpu_aca.h"
43 
44 MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
45 MODULE_FIRMWARE("amdgpu/gc_9_4_4_mec.bin");
46 MODULE_FIRMWARE("amdgpu/gc_9_5_0_mec.bin");
47 MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
48 MODULE_FIRMWARE("amdgpu/gc_9_4_4_rlc.bin");
49 MODULE_FIRMWARE("amdgpu/gc_9_5_0_rlc.bin");
50 MODULE_FIRMWARE("amdgpu/gc_9_4_3_sjt_mec.bin");
51 MODULE_FIRMWARE("amdgpu/gc_9_4_4_sjt_mec.bin");
52 
53 #define GFX9_MEC_HPD_SIZE 4096
54 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
55 
56 #define GOLDEN_GB_ADDR_CONFIG 0x2a114042
57 #define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
58 
59 #define XCC_REG_RANGE_0_LOW  0x2000     /* XCC gfxdec0 lower Bound */
60 #define XCC_REG_RANGE_0_HIGH 0x3400     /* XCC gfxdec0 upper Bound */
61 #define XCC_REG_RANGE_1_LOW  0xA000     /* XCC gfxdec1 lower Bound */
62 #define XCC_REG_RANGE_1_HIGH 0x10000    /* XCC gfxdec1 upper Bound */
63 
64 #define NORMALIZE_XCC_REG_OFFSET(offset) \
65 	(offset & 0xFFFF)
66 
67 static const struct amdgpu_hwip_reg_entry gc_reg_list_9_4_3[] = {
68 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS),
69 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS2),
70 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT1),
71 	SOC15_REG_ENTRY_STR(GC, 0, regCP_STALLED_STAT2),
72 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STALLED_STAT1),
73 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STALLED_STAT1),
74 	SOC15_REG_ENTRY_STR(GC, 0, regCP_BUSY_STAT),
75 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_BUSY_STAT),
76 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_BUSY_STAT),
77 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPF_STATUS),
78 	SOC15_REG_ENTRY_STR(GC, 0, regCP_GFX_ERROR),
79 	SOC15_REG_ENTRY_STR(GC, 0, regCPF_UTCL1_STATUS),
80 	SOC15_REG_ENTRY_STR(GC, 0, regCPC_UTCL1_STATUS),
81 	SOC15_REG_ENTRY_STR(GC, 0, regCPG_UTCL1_STATUS),
82 	SOC15_REG_ENTRY_STR(GC, 0, regGDS_PROTECTION_FAULT),
83 	SOC15_REG_ENTRY_STR(GC, 0, regGDS_VM_PROTECTION_FAULT),
84 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_UTCL1_STATUS),
85 	SOC15_REG_ENTRY_STR(GC, 0, regRMI_UTCL1_STATUS),
86 	SOC15_REG_ENTRY_STR(GC, 0, regSQC_DCACHE_UTCL1_STATUS),
87 	SOC15_REG_ENTRY_STR(GC, 0, regSQC_ICACHE_UTCL1_STATUS),
88 	SOC15_REG_ENTRY_STR(GC, 0, regSQ_UTCL1_STATUS),
89 	SOC15_REG_ENTRY_STR(GC, 0, regTCP_UTCL1_STATUS),
90 	SOC15_REG_ENTRY_STR(GC, 0, regWD_UTCL1_STATUS),
91 	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_CNTL),
92 	SOC15_REG_ENTRY_STR(GC, 0, regVM_L2_PROTECTION_FAULT_STATUS),
93 	SOC15_REG_ENTRY_STR(GC, 0, regCP_DEBUG),
94 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_CNTL),
95 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC1_INSTR_PNTR),
96 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC2_INSTR_PNTR),
97 	SOC15_REG_ENTRY_STR(GC, 0, regCP_CPC_STATUS),
98 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_STAT),
99 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_COMMAND),
100 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_MESSAGE),
101 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_1),
102 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_ARGUMENT_2),
103 	SOC15_REG_ENTRY_STR(GC, 0, regSMU_RLC_RESPONSE),
104 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SAFE_MODE),
105 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_SMU_SAFE_MODE),
106 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_INT_STAT),
107 	SOC15_REG_ENTRY_STR(GC, 0, regRLC_GPM_GENERAL_6),
108 	/* cp header registers */
109 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME1_HEADER_DUMP),
110 	SOC15_REG_ENTRY_STR(GC, 0, regCP_MEC_ME2_HEADER_DUMP),
111 	/* SE status registers */
112 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE0),
113 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE1),
114 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE2),
115 	SOC15_REG_ENTRY_STR(GC, 0, regGRBM_STATUS_SE3)
116 };
117 
118 static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_9_4_3[] = {
119 	/* compute queue registers */
120 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_VMID),
121 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ACTIVE),
122 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PERSISTENT_STATE),
123 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PIPE_PRIORITY),
124 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUEUE_PRIORITY),
125 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_QUANTUM),
126 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE),
127 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_BASE_HI),
128 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_RPTR),
129 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
130 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
131 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL),
132 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_CONTROL),
133 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR),
134 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_BASE_ADDR_HI),
135 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_RPTR),
136 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_IB_CONTROL),
137 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_DEQUEUE_REQUEST),
138 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR),
139 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_BASE_ADDR_HI),
140 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_CONTROL),
141 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_RPTR),
142 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR),
143 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_EVENTS),
144 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_LO),
145 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_BASE_ADDR_HI),
146 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_CONTROL),
147 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_OFFSET),
148 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CNTL_STACK_SIZE),
149 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_WG_STATE_OFFSET),
150 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_CTX_SAVE_SIZE),
151 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GDS_RESOURCE_STATE),
152 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_ERROR),
153 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_EOP_WPTR_MEM),
154 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_LO),
155 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_PQ_WPTR_HI),
156 	SOC15_REG_ENTRY_STR(GC, 0, regCP_HQD_GFX_STATUS),
157 };
158 
159 struct amdgpu_gfx_ras gfx_v9_4_3_ras;
160 
161 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
162 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
163 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
164 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
165 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
166 				struct amdgpu_cu_info *cu_info);
167 static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id);
168 static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev, int xcc_id);
169 
gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring * kiq_ring,uint64_t queue_mask)170 static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
171 				uint64_t queue_mask)
172 {
173 	struct amdgpu_device *adev = kiq_ring->adev;
174 	u64 shader_mc_addr;
175 
176 	/* Cleaner shader MC address */
177 	shader_mc_addr = adev->gfx.cleaner_shader_gpu_addr >> 8;
178 
179 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
180 	amdgpu_ring_write(kiq_ring,
181 		PACKET3_SET_RESOURCES_VMID_MASK(0) |
182 		/* vmid_mask:0* queue_type:0 (KIQ) */
183 		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
184 	amdgpu_ring_write(kiq_ring,
185 			lower_32_bits(queue_mask));	/* queue mask lo */
186 	amdgpu_ring_write(kiq_ring,
187 			upper_32_bits(queue_mask));	/* queue mask hi */
188 	amdgpu_ring_write(kiq_ring, lower_32_bits(shader_mc_addr)); /* cleaner shader addr lo */
189 	amdgpu_ring_write(kiq_ring, upper_32_bits(shader_mc_addr)); /* cleaner shader addr hi */
190 	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
191 	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
192 }
193 
gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring * kiq_ring,struct amdgpu_ring * ring)194 static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
195 				 struct amdgpu_ring *ring)
196 {
197 	struct amdgpu_device *adev = kiq_ring->adev;
198 	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
199 	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
200 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
201 
202 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
203 	/* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/
204 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
205 			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
206 			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
207 			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
208 			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
209 			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
210 			 /*queue_type: normal compute queue */
211 			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
212 			 /* alloc format: all_on_one_pipe */
213 			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
214 			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
215 			 /* num_queues: must be 1 */
216 			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
217 	amdgpu_ring_write(kiq_ring,
218 			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
219 	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
220 	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
221 	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
222 	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
223 }
224 
gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring * kiq_ring,struct amdgpu_ring * ring,enum amdgpu_unmap_queues_action action,u64 gpu_addr,u64 seq)225 static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
226 				   struct amdgpu_ring *ring,
227 				   enum amdgpu_unmap_queues_action action,
228 				   u64 gpu_addr, u64 seq)
229 {
230 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
231 
232 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
233 	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
234 			  PACKET3_UNMAP_QUEUES_ACTION(action) |
235 			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
236 			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
237 			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
238 	amdgpu_ring_write(kiq_ring,
239 			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
240 
241 	if (action == PREEMPT_QUEUES_NO_UNMAP) {
242 		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
243 		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
244 		amdgpu_ring_write(kiq_ring, seq);
245 	} else {
246 		amdgpu_ring_write(kiq_ring, 0);
247 		amdgpu_ring_write(kiq_ring, 0);
248 		amdgpu_ring_write(kiq_ring, 0);
249 	}
250 }
251 
gfx_v9_4_3_kiq_query_status(struct amdgpu_ring * kiq_ring,struct amdgpu_ring * ring,u64 addr,u64 seq)252 static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
253 				   struct amdgpu_ring *ring,
254 				   u64 addr,
255 				   u64 seq)
256 {
257 	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
258 
259 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
260 	amdgpu_ring_write(kiq_ring,
261 			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
262 			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
263 			  PACKET3_QUERY_STATUS_COMMAND(2));
264 	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
265 	amdgpu_ring_write(kiq_ring,
266 			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
267 			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
268 	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
269 	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
270 	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
271 	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
272 }
273 
gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring * kiq_ring,uint16_t pasid,uint32_t flush_type,bool all_hub)274 static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
275 				uint16_t pasid, uint32_t flush_type,
276 				bool all_hub)
277 {
278 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
279 	amdgpu_ring_write(kiq_ring,
280 			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
281 			PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
282 			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
283 			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
284 }
285 
gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring * kiq_ring,uint32_t queue_type,uint32_t me_id,uint32_t pipe_id,uint32_t queue_id,uint32_t xcc_id,uint32_t vmid)286 static void gfx_v9_4_3_kiq_reset_hw_queue(struct amdgpu_ring *kiq_ring, uint32_t queue_type,
287 					  uint32_t me_id, uint32_t pipe_id, uint32_t queue_id,
288 					  uint32_t xcc_id, uint32_t vmid)
289 {
290 	struct amdgpu_device *adev = kiq_ring->adev;
291 	unsigned i;
292 
293 	/* enter save mode */
294 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
295 	mutex_lock(&adev->srbm_mutex);
296 	soc15_grbm_select(adev, me_id, pipe_id, queue_id, 0, xcc_id);
297 
298 	if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
299 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 0x2);
300 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_COMPUTE_QUEUE_RESET, 0x1);
301 		/* wait till dequeue take effects */
302 		for (i = 0; i < adev->usec_timeout; i++) {
303 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
304 				break;
305 			udelay(1);
306 		}
307 		if (i >= adev->usec_timeout)
308 			dev_err(adev->dev, "fail to wait on hqd deactive\n");
309 	} else {
310 		dev_err(adev->dev, "reset queue_type(%d) not supported\n\n", queue_type);
311 	}
312 
313 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
314 	mutex_unlock(&adev->srbm_mutex);
315 	/* exit safe mode */
316 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
317 }
318 
319 static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
320 	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
321 	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
322 	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
323 	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
324 	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
325 	.kiq_reset_hw_queue = gfx_v9_4_3_kiq_reset_hw_queue,
326 	.set_resources_size = 8,
327 	.map_queues_size = 7,
328 	.unmap_queues_size = 6,
329 	.query_status_size = 7,
330 	.invalidate_tlbs_size = 2,
331 };
332 
gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device * adev)333 static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
334 {
335 	int i, num_xcc;
336 
337 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
338 	for (i = 0; i < num_xcc; i++)
339 		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
340 }
341 
gfx_v9_4_3_init_golden_registers(struct amdgpu_device * adev)342 static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
343 {
344 	int i, num_xcc, dev_inst;
345 
346 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
347 	for (i = 0; i < num_xcc; i++) {
348 		dev_inst = GET_INST(GC, i);
349 
350 		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
351 			     GOLDEN_GB_ADDR_CONFIG);
352 		WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2, SPARE, 0x1);
353 	}
354 }
355 
gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)356 static uint32_t gfx_v9_4_3_normalize_xcc_reg_offset(uint32_t reg)
357 {
358 	uint32_t normalized_reg = NORMALIZE_XCC_REG_OFFSET(reg);
359 
360 	/* If it is an XCC reg, normalize the reg to keep
361 	   lower 16 bits in local xcc */
362 
363 	if (((normalized_reg >= XCC_REG_RANGE_0_LOW) && (normalized_reg < XCC_REG_RANGE_0_HIGH)) ||
364 		((normalized_reg >= XCC_REG_RANGE_1_LOW) && (normalized_reg < XCC_REG_RANGE_1_HIGH)))
365 		return normalized_reg;
366 	else
367 		return reg;
368 }
369 
gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring * ring,int eng_sel,bool wc,uint32_t reg,uint32_t val)370 static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
371 				       bool wc, uint32_t reg, uint32_t val)
372 {
373 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
374 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
375 	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
376 				WRITE_DATA_DST_SEL(0) |
377 				(wc ? WR_CONFIRM : 0));
378 	amdgpu_ring_write(ring, reg);
379 	amdgpu_ring_write(ring, 0);
380 	amdgpu_ring_write(ring, val);
381 }
382 
gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring * ring,int eng_sel,int mem_space,int opt,uint32_t addr0,uint32_t addr1,uint32_t ref,uint32_t mask,uint32_t inv)383 static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
384 				  int mem_space, int opt, uint32_t addr0,
385 				  uint32_t addr1, uint32_t ref, uint32_t mask,
386 				  uint32_t inv)
387 {
388 	/* Only do the normalization on regspace */
389 	if (mem_space == 0) {
390 		addr0 = gfx_v9_4_3_normalize_xcc_reg_offset(addr0);
391 		addr1 = gfx_v9_4_3_normalize_xcc_reg_offset(addr1);
392 	}
393 
394 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
395 	amdgpu_ring_write(ring,
396 				 /* memory (1) or register (0) */
397 				 (WAIT_REG_MEM_MEM_SPACE(mem_space) |
398 				 WAIT_REG_MEM_OPERATION(opt) | /* wait */
399 				 WAIT_REG_MEM_FUNCTION(3) |  /* equal */
400 				 WAIT_REG_MEM_ENGINE(eng_sel)));
401 
402 	if (mem_space)
403 		BUG_ON(addr0 & 0x3); /* Dword align */
404 	amdgpu_ring_write(ring, addr0);
405 	amdgpu_ring_write(ring, addr1);
406 	amdgpu_ring_write(ring, ref);
407 	amdgpu_ring_write(ring, mask);
408 	amdgpu_ring_write(ring, inv); /* poll interval */
409 }
410 
gfx_v9_4_3_ring_test_ring(struct amdgpu_ring * ring)411 static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
412 {
413 	uint32_t scratch_reg0_offset, xcc_offset;
414 	struct amdgpu_device *adev = ring->adev;
415 	uint32_t tmp = 0;
416 	unsigned i;
417 	int r;
418 
419 	/* Use register offset which is local to XCC in the packet */
420 	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
421 	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
422 	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
423 	tmp = RREG32(scratch_reg0_offset);
424 
425 	r = amdgpu_ring_alloc(ring, 3);
426 	if (r)
427 		return r;
428 
429 	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
430 	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
431 	amdgpu_ring_write(ring, 0xDEADBEEF);
432 	amdgpu_ring_commit(ring);
433 
434 	for (i = 0; i < adev->usec_timeout; i++) {
435 		tmp = RREG32(scratch_reg0_offset);
436 		if (tmp == 0xDEADBEEF)
437 			break;
438 		udelay(1);
439 	}
440 
441 	if (i >= adev->usec_timeout)
442 		r = -ETIMEDOUT;
443 	return r;
444 }
445 
gfx_v9_4_3_ring_test_ib(struct amdgpu_ring * ring,long timeout)446 static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
447 {
448 	struct amdgpu_device *adev = ring->adev;
449 	struct amdgpu_ib ib;
450 	struct dma_fence *f = NULL;
451 
452 	unsigned index;
453 	uint64_t gpu_addr;
454 	uint32_t tmp;
455 	long r;
456 
457 	r = amdgpu_device_wb_get(adev, &index);
458 	if (r)
459 		return r;
460 
461 	gpu_addr = adev->wb.gpu_addr + (index * 4);
462 	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
463 	memset(&ib, 0, sizeof(ib));
464 
465 	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
466 	if (r)
467 		goto err1;
468 
469 	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
470 	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
471 	ib.ptr[2] = lower_32_bits(gpu_addr);
472 	ib.ptr[3] = upper_32_bits(gpu_addr);
473 	ib.ptr[4] = 0xDEADBEEF;
474 	ib.length_dw = 5;
475 
476 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
477 	if (r)
478 		goto err2;
479 
480 	r = dma_fence_wait_timeout(f, false, timeout);
481 	if (r == 0) {
482 		r = -ETIMEDOUT;
483 		goto err2;
484 	} else if (r < 0) {
485 		goto err2;
486 	}
487 
488 	tmp = adev->wb.wb[index];
489 	if (tmp == 0xDEADBEEF)
490 		r = 0;
491 	else
492 		r = -EINVAL;
493 
494 err2:
495 	amdgpu_ib_free(&ib, NULL);
496 	dma_fence_put(f);
497 err1:
498 	amdgpu_device_wb_free(adev, index);
499 	return r;
500 }
501 
502 
503 /* This value might differs per partition */
gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device * adev)504 static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
505 {
506 	uint64_t clock;
507 
508 	mutex_lock(&adev->gfx.gpu_clock_mutex);
509 	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
510 	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
511 		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
512 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
513 
514 	return clock;
515 }
516 
gfx_v9_4_3_free_microcode(struct amdgpu_device * adev)517 static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
518 {
519 	amdgpu_ucode_release(&adev->gfx.pfp_fw);
520 	amdgpu_ucode_release(&adev->gfx.me_fw);
521 	amdgpu_ucode_release(&adev->gfx.ce_fw);
522 	amdgpu_ucode_release(&adev->gfx.rlc_fw);
523 	amdgpu_ucode_release(&adev->gfx.mec_fw);
524 	amdgpu_ucode_release(&adev->gfx.mec2_fw);
525 
526 	kfree(adev->gfx.rlc.register_list_format);
527 }
528 
gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device * adev,const char * chip_name)529 static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
530 					  const char *chip_name)
531 {
532 	int err;
533 	const struct rlc_firmware_header_v2_0 *rlc_hdr;
534 	uint16_t version_major;
535 	uint16_t version_minor;
536 
537 
538 	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
539 				   AMDGPU_UCODE_REQUIRED,
540 				   "amdgpu/%s_rlc.bin", chip_name);
541 	if (err)
542 		goto out;
543 	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
544 
545 	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
546 	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
547 	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
548 out:
549 	if (err)
550 		amdgpu_ucode_release(&adev->gfx.rlc_fw);
551 
552 	return err;
553 }
554 
gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device * adev,const char * chip_name)555 static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
556 					  const char *chip_name)
557 {
558 	int err;
559 
560 	if (amdgpu_sriov_vf(adev)) {
561 		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
562 					   AMDGPU_UCODE_REQUIRED,
563 					   "amdgpu/%s_sjt_mec.bin", chip_name);
564 
565 		if (err)
566 			err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
567 							AMDGPU_UCODE_REQUIRED,
568 							"amdgpu/%s_mec.bin", chip_name);
569 	} else
570 		err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw,
571 					   AMDGPU_UCODE_REQUIRED,
572 					   "amdgpu/%s_mec.bin", chip_name);
573 	if (err)
574 		goto out;
575 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
576 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
577 
578 	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
579 	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
580 
581 out:
582 	if (err)
583 		amdgpu_ucode_release(&adev->gfx.mec_fw);
584 	return err;
585 }
586 
gfx_v9_4_3_init_microcode(struct amdgpu_device * adev)587 static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
588 {
589 	char ucode_prefix[15];
590 	int r;
591 
592 	amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
593 
594 	r = gfx_v9_4_3_init_rlc_microcode(adev, ucode_prefix);
595 	if (r)
596 		return r;
597 
598 	r = gfx_v9_4_3_init_cp_compute_microcode(adev, ucode_prefix);
599 	if (r)
600 		return r;
601 
602 	return r;
603 }
604 
gfx_v9_4_3_mec_fini(struct amdgpu_device * adev)605 static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
606 {
607 	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
608 	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
609 }
610 
gfx_v9_4_3_mec_init(struct amdgpu_device * adev)611 static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
612 {
613 	int r, i, num_xcc;
614 	u32 *hpd;
615 	const __le32 *fw_data;
616 	unsigned fw_size;
617 	u32 *fw;
618 	size_t mec_hpd_size;
619 
620 	const struct gfx_firmware_header_v1_0 *mec_hdr;
621 
622 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
623 	for (i = 0; i < num_xcc; i++)
624 		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
625 			AMDGPU_MAX_COMPUTE_QUEUES);
626 
627 	/* take ownership of the relevant compute queues */
628 	amdgpu_gfx_compute_queue_acquire(adev);
629 	mec_hpd_size =
630 		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
631 	if (mec_hpd_size) {
632 		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
633 					      AMDGPU_GEM_DOMAIN_VRAM |
634 					      AMDGPU_GEM_DOMAIN_GTT,
635 					      &adev->gfx.mec.hpd_eop_obj,
636 					      &adev->gfx.mec.hpd_eop_gpu_addr,
637 					      (void **)&hpd);
638 		if (r) {
639 			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
640 			gfx_v9_4_3_mec_fini(adev);
641 			return r;
642 		}
643 
644 		if (amdgpu_emu_mode == 1) {
645 			for (i = 0; i < mec_hpd_size / 4; i++) {
646 				memset((void *)(hpd + i), 0, 4);
647 				if (i % 50 == 0)
648 					msleep(1);
649 			}
650 		} else {
651 			memset(hpd, 0, mec_hpd_size);
652 		}
653 
654 		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
655 		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
656 	}
657 
658 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
659 
660 	fw_data = (const __le32 *)
661 		(adev->gfx.mec_fw->data +
662 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
663 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);
664 
665 	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
666 				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
667 				      &adev->gfx.mec.mec_fw_obj,
668 				      &adev->gfx.mec.mec_fw_gpu_addr,
669 				      (void **)&fw);
670 	if (r) {
671 		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
672 		gfx_v9_4_3_mec_fini(adev);
673 		return r;
674 	}
675 
676 	memcpy(fw, fw_data, fw_size);
677 
678 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
679 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
680 
681 	return 0;
682 }
683 
gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device * adev,u32 se_num,u32 sh_num,u32 instance,int xcc_id)684 static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
685 					u32 sh_num, u32 instance, int xcc_id)
686 {
687 	u32 data;
688 
689 	if (instance == 0xffffffff)
690 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
691 				     INSTANCE_BROADCAST_WRITES, 1);
692 	else
693 		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
694 				     INSTANCE_INDEX, instance);
695 
696 	if (se_num == 0xffffffff)
697 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
698 				     SE_BROADCAST_WRITES, 1);
699 	else
700 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
701 
702 	if (sh_num == 0xffffffff)
703 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
704 				     SH_BROADCAST_WRITES, 1);
705 	else
706 		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
707 
708 	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
709 }
710 
wave_read_ind(struct amdgpu_device * adev,uint32_t xcc_id,uint32_t simd,uint32_t wave,uint32_t address)711 static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
712 {
713 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
714 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
715 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
716 		(address << SQ_IND_INDEX__INDEX__SHIFT) |
717 		(SQ_IND_INDEX__FORCE_READ_MASK));
718 	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
719 }
720 
wave_read_regs(struct amdgpu_device * adev,uint32_t xcc_id,uint32_t simd,uint32_t wave,uint32_t thread,uint32_t regno,uint32_t num,uint32_t * out)721 static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
722 			   uint32_t wave, uint32_t thread,
723 			   uint32_t regno, uint32_t num, uint32_t *out)
724 {
725 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
726 		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
727 		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
728 		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
729 		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
730 		(SQ_IND_INDEX__FORCE_READ_MASK) |
731 		(SQ_IND_INDEX__AUTO_INCR_MASK));
732 	while (num--)
733 		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
734 }
735 
gfx_v9_4_3_read_wave_data(struct amdgpu_device * adev,uint32_t xcc_id,uint32_t simd,uint32_t wave,uint32_t * dst,int * no_fields)736 static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
737 				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
738 				      uint32_t *dst, int *no_fields)
739 {
740 	/* type 1 wave data */
741 	dst[(*no_fields)++] = 1;
742 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
743 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
744 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
745 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
746 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
747 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
748 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
749 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
750 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
751 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
752 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
753 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
754 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
755 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
756 	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
757 }
758 
gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device * adev,uint32_t xcc_id,uint32_t simd,uint32_t wave,uint32_t start,uint32_t size,uint32_t * dst)759 static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
760 				       uint32_t wave, uint32_t start,
761 				       uint32_t size, uint32_t *dst)
762 {
763 	wave_read_regs(adev, xcc_id, simd, wave, 0,
764 		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
765 }
766 
gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device * adev,uint32_t xcc_id,uint32_t simd,uint32_t wave,uint32_t thread,uint32_t start,uint32_t size,uint32_t * dst)767 static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
768 				       uint32_t wave, uint32_t thread,
769 				       uint32_t start, uint32_t size,
770 				       uint32_t *dst)
771 {
772 	wave_read_regs(adev, xcc_id, simd, wave, thread,
773 		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
774 }
775 
gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device * adev,u32 me,u32 pipe,u32 q,u32 vm,u32 xcc_id)776 static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
777 					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
778 {
779 	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
780 }
781 
gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device * adev)782 static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
783 {
784 	u32 xcp_ctl;
785 
786 	/* Value is expected to be the same on all, fetch from first instance */
787 	xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);
788 
789 	return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
790 }
791 
gfx_v9_4_3_switch_compute_partition(struct amdgpu_device * adev,int num_xccs_per_xcp)792 static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
793 						int num_xccs_per_xcp)
794 {
795 	int ret, i, num_xcc;
796 	u32 tmp = 0;
797 
798 	if (adev->psp.funcs) {
799 		ret = psp_spatial_partition(&adev->psp,
800 					    NUM_XCC(adev->gfx.xcc_mask) /
801 						    num_xccs_per_xcp);
802 		if (ret)
803 			return ret;
804 	} else {
805 		num_xcc = NUM_XCC(adev->gfx.xcc_mask);
806 
807 		for (i = 0; i < num_xcc; i++) {
808 			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
809 					    num_xccs_per_xcp);
810 			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
811 					    i % num_xccs_per_xcp);
812 			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
813 				     tmp);
814 		}
815 		ret = 0;
816 	}
817 
818 	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;
819 
820 	return ret;
821 }
822 
gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device * adev,int ih_node)823 static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
824 {
825 	int xcc;
826 
827 	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
828 	if (!xcc) {
829 		dev_err(adev->dev, "Couldn't find xcc mapping from IH node");
830 		return -EINVAL;
831 	}
832 
833 	return xcc - 1;
834 }
835 
836 static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
837 	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
838 	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
839 	.read_wave_data = &gfx_v9_4_3_read_wave_data,
840 	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
841 	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
842 	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
843 	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
844 	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
845 	.get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
846 };
847 
gfx_v9_4_3_aca_bank_parser(struct aca_handle * handle,struct aca_bank * bank,enum aca_smu_type type,void * data)848 static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
849 				      struct aca_bank *bank, enum aca_smu_type type,
850 				      void *data)
851 {
852 	struct aca_bank_info info;
853 	u64 misc0;
854 	u32 instlo;
855 	int ret;
856 
857 	ret = aca_bank_info_decode(bank, &info);
858 	if (ret)
859 		return ret;
860 
861 	/* NOTE: overwrite info.die_id with xcd id for gfx */
862 	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
863 	instlo &= GENMASK(31, 1);
864 	info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;
865 
866 	misc0 = bank->regs[ACA_REG_IDX_MISC0];
867 
868 	switch (type) {
869 	case ACA_SMU_TYPE_UE:
870 		bank->aca_err_type = ACA_ERROR_TYPE_UE;
871 		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, 1ULL);
872 		break;
873 	case ACA_SMU_TYPE_CE:
874 		bank->aca_err_type = ACA_ERROR_TYPE_CE;
875 		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
876 						     ACA_REG__MISC0__ERRCNT(misc0));
877 		break;
878 	default:
879 		return -EINVAL;
880 	}
881 
882 	return ret;
883 }
884 
gfx_v9_4_3_aca_bank_is_valid(struct aca_handle * handle,struct aca_bank * bank,enum aca_smu_type type,void * data)885 static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
886 					 enum aca_smu_type type, void *data)
887 {
888 	u32 instlo;
889 
890 	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
891 	instlo &= GENMASK(31, 1);
892 	switch (instlo) {
893 	case mmSMNAID_XCD0_MCA_SMU:
894 	case mmSMNAID_XCD1_MCA_SMU:
895 	case mmSMNXCD_XCD0_MCA_SMU:
896 		return true;
897 	default:
898 		break;
899 	}
900 
901 	return false;
902 }
903 
904 static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
905 	.aca_bank_parser = gfx_v9_4_3_aca_bank_parser,
906 	.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
907 };
908 
909 static const struct aca_info gfx_v9_4_3_aca_info = {
910 	.hwip = ACA_HWIP_TYPE_SMU,
911 	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
912 	.bank_ops = &gfx_v9_4_3_aca_bank_ops,
913 };
914 
gfx_v9_4_3_gpu_early_init(struct amdgpu_device * adev)915 static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
916 {
917 	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
918 	adev->gfx.ras = &gfx_v9_4_3_ras;
919 
920 	adev->gfx.config.max_hw_contexts = 8;
921 	adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
922 	adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
923 	adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
924 	adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
925 	adev->gfx.config.gb_addr_config = GOLDEN_GB_ADDR_CONFIG;
926 
927 	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
928 			REG_GET_FIELD(
929 					adev->gfx.config.gb_addr_config,
930 					GB_ADDR_CONFIG,
931 					NUM_PIPES);
932 
933 	adev->gfx.config.max_tile_pipes =
934 		adev->gfx.config.gb_addr_config_fields.num_pipes;
935 
936 	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
937 			REG_GET_FIELD(
938 					adev->gfx.config.gb_addr_config,
939 					GB_ADDR_CONFIG,
940 					NUM_BANKS);
941 	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
942 			REG_GET_FIELD(
943 					adev->gfx.config.gb_addr_config,
944 					GB_ADDR_CONFIG,
945 					MAX_COMPRESSED_FRAGS);
946 	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
947 			REG_GET_FIELD(
948 					adev->gfx.config.gb_addr_config,
949 					GB_ADDR_CONFIG,
950 					NUM_RB_PER_SE);
951 	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
952 			REG_GET_FIELD(
953 					adev->gfx.config.gb_addr_config,
954 					GB_ADDR_CONFIG,
955 					NUM_SHADER_ENGINES);
956 	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
957 			REG_GET_FIELD(
958 					adev->gfx.config.gb_addr_config,
959 					GB_ADDR_CONFIG,
960 					PIPE_INTERLEAVE_SIZE));
961 
962 	return 0;
963 }
964 
gfx_v9_4_3_compute_ring_init(struct amdgpu_device * adev,int ring_id,int xcc_id,int mec,int pipe,int queue)965 static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
966 				        int xcc_id, int mec, int pipe, int queue)
967 {
968 	unsigned irq_type;
969 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
970 	unsigned int hw_prio;
971 	uint32_t xcc_doorbell_start;
972 
973 	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
974 				       ring_id];
975 
976 	/* mec0 is me1 */
977 	ring->xcc_id = xcc_id;
978 	ring->me = mec + 1;
979 	ring->pipe = pipe;
980 	ring->queue = queue;
981 
982 	ring->ring_obj = NULL;
983 	ring->use_doorbell = true;
984 	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
985 			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
986 	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
987 	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
988 			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
989 				     GFX9_MEC_HPD_SIZE;
990 	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
991 	sprintf(ring->name, "comp_%d.%d.%d.%d",
992 			ring->xcc_id, ring->me, ring->pipe, ring->queue);
993 
994 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
995 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
996 		+ ring->pipe;
997 	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
998 			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
999 	/* type-2 packets are deprecated on MEC, use type-3 instead */
1000 	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1001 				hw_prio, NULL);
1002 }
1003 
gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device * adev)1004 static void gfx_v9_4_3_alloc_ip_dump(struct amdgpu_device *adev)
1005 {
1006 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
1007 	uint32_t *ptr, num_xcc, inst;
1008 
1009 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1010 
1011 	ptr = kcalloc(reg_count * num_xcc, sizeof(uint32_t), GFP_KERNEL);
1012 	if (!ptr) {
1013 		DRM_ERROR("Failed to allocate memory for GFX IP Dump\n");
1014 		adev->gfx.ip_dump_core = NULL;
1015 	} else {
1016 		adev->gfx.ip_dump_core = ptr;
1017 	}
1018 
1019 	/* Allocate memory for compute queue registers for all the instances */
1020 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
1021 	inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
1022 		adev->gfx.mec.num_queue_per_pipe;
1023 
1024 	ptr = kcalloc(reg_count * inst * num_xcc, sizeof(uint32_t), GFP_KERNEL);
1025 	if (!ptr) {
1026 		DRM_ERROR("Failed to allocate memory for Compute Queues IP Dump\n");
1027 		adev->gfx.ip_dump_compute_queues = NULL;
1028 	} else {
1029 		adev->gfx.ip_dump_compute_queues = ptr;
1030 	}
1031 }
1032 
gfx_v9_4_3_sw_init(struct amdgpu_ip_block * ip_block)1033 static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
1034 {
1035 	int i, j, k, r, ring_id, xcc_id, num_xcc;
1036 	struct amdgpu_device *adev = ip_block->adev;
1037 
1038 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1039 	case IP_VERSION(9, 4, 3):
1040 	case IP_VERSION(9, 4, 4):
1041 		adev->gfx.cleaner_shader_ptr = gfx_9_4_3_cleaner_shader_hex;
1042 		adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_3_cleaner_shader_hex);
1043 		if (adev->gfx.mec_fw_version >= 153) {
1044 			adev->gfx.enable_cleaner_shader = true;
1045 			r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
1046 			if (r) {
1047 				adev->gfx.enable_cleaner_shader = false;
1048 				dev_err(adev->dev, "Failed to initialize cleaner shader\n");
1049 			}
1050 		}
1051 		break;
1052 	default:
1053 		adev->gfx.enable_cleaner_shader = false;
1054 		break;
1055 	}
1056 
1057 	adev->gfx.mec.num_mec = 2;
1058 	adev->gfx.mec.num_pipe_per_mec = 4;
1059 	adev->gfx.mec.num_queue_per_pipe = 8;
1060 
1061 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1062 
1063 	/* EOP Event */
1064 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
1065 	if (r)
1066 		return r;
1067 
1068 	/* Bad opcode Event */
1069 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP,
1070 			      GFX_9_0__SRCID__CP_BAD_OPCODE_ERROR,
1071 			      &adev->gfx.bad_op_irq);
1072 	if (r)
1073 		return r;
1074 
1075 	/* Privileged reg */
1076 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
1077 			      &adev->gfx.priv_reg_irq);
1078 	if (r)
1079 		return r;
1080 
1081 	/* Privileged inst */
1082 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
1083 			      &adev->gfx.priv_inst_irq);
1084 	if (r)
1085 		return r;
1086 
1087 	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1088 
1089 	r = adev->gfx.rlc.funcs->init(adev);
1090 	if (r) {
1091 		DRM_ERROR("Failed to init rlc BOs!\n");
1092 		return r;
1093 	}
1094 
1095 	r = gfx_v9_4_3_mec_init(adev);
1096 	if (r) {
1097 		DRM_ERROR("Failed to init MEC BOs!\n");
1098 		return r;
1099 	}
1100 
1101 	/* set up the compute queues - allocate horizontally across pipes */
1102 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1103 		ring_id = 0;
1104 		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
1105 			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
1106 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
1107 				     k++) {
1108 					if (!amdgpu_gfx_is_mec_queue_enabled(
1109 							adev, xcc_id, i, k, j))
1110 						continue;
1111 
1112 					r = gfx_v9_4_3_compute_ring_init(adev,
1113 								       ring_id,
1114 								       xcc_id,
1115 								       i, k, j);
1116 					if (r)
1117 						return r;
1118 
1119 					ring_id++;
1120 				}
1121 			}
1122 		}
1123 
1124 		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
1125 		if (r) {
1126 			DRM_ERROR("Failed to init KIQ BOs!\n");
1127 			return r;
1128 		}
1129 
1130 		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
1131 		if (r)
1132 			return r;
1133 
1134 		/* create MQD for all compute queues as wel as KIQ for SRIOV case */
1135 		r = amdgpu_gfx_mqd_sw_init(adev,
1136 				sizeof(struct v9_mqd_allocation), xcc_id);
1137 		if (r)
1138 			return r;
1139 	}
1140 
1141 	adev->gfx.compute_supported_reset =
1142 		amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
1143 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1144 	case IP_VERSION(9, 4, 3):
1145 	case IP_VERSION(9, 4, 4):
1146 		if (adev->gfx.mec_fw_version >= 155) {
1147 			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
1148 			adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
1149 		}
1150 		break;
1151 	default:
1152 		break;
1153 	}
1154 	r = gfx_v9_4_3_gpu_early_init(adev);
1155 	if (r)
1156 		return r;
1157 
1158 	r = amdgpu_gfx_ras_sw_init(adev);
1159 	if (r)
1160 		return r;
1161 
1162 	r = amdgpu_gfx_sysfs_init(adev);
1163 	if (r)
1164 		return r;
1165 
1166 	gfx_v9_4_3_alloc_ip_dump(adev);
1167 
1168 	return 0;
1169 }
1170 
gfx_v9_4_3_sw_fini(struct amdgpu_ip_block * ip_block)1171 static int gfx_v9_4_3_sw_fini(struct amdgpu_ip_block *ip_block)
1172 {
1173 	int i, num_xcc;
1174 	struct amdgpu_device *adev = ip_block->adev;
1175 
1176 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1177 	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
1178 		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
1179 
1180 	for (i = 0; i < num_xcc; i++) {
1181 		amdgpu_gfx_mqd_sw_fini(adev, i);
1182 		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
1183 		amdgpu_gfx_kiq_fini(adev, i);
1184 	}
1185 
1186 	amdgpu_gfx_cleaner_shader_sw_fini(adev);
1187 
1188 	gfx_v9_4_3_mec_fini(adev);
1189 	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
1190 	gfx_v9_4_3_free_microcode(adev);
1191 	amdgpu_gfx_sysfs_fini(adev);
1192 
1193 	kfree(adev->gfx.ip_dump_core);
1194 	kfree(adev->gfx.ip_dump_compute_queues);
1195 
1196 	return 0;
1197 }
1198 
1199 #define DEFAULT_SH_MEM_BASES	(0x6000)
gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device * adev,int xcc_id)1200 static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
1201 					     int xcc_id)
1202 {
1203 	int i;
1204 	uint32_t sh_mem_config;
1205 	uint32_t sh_mem_bases;
1206 	uint32_t data;
1207 
1208 	/*
1209 	 * Configure apertures:
1210 	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
1211 	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
1212 	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
1213 	 */
1214 	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
1215 
1216 	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
1217 			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
1218 			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
1219 
1220 	mutex_lock(&adev->srbm_mutex);
1221 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1222 		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1223 		/* CP and shaders */
1224 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
1225 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);
1226 
1227 		/* Enable trap for each kfd vmid. */
1228 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
1229 		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
1230 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
1231 	}
1232 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
1233 	mutex_unlock(&adev->srbm_mutex);
1234 
1235 	/*
1236 	 * Initialize all compute VMIDs to have no GDS, GWS, or OA
1237 	 * access. These should be enabled by FW for target VMIDs.
1238 	 */
1239 	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
1240 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
1241 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
1242 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
1243 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
1244 	}
1245 }
1246 
gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device * adev,int xcc_id)1247 static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
1248 {
1249 	int vmid;
1250 
1251 	/*
1252 	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
1253 	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
1254 	 * the driver can enable them for graphics. VMID0 should maintain
1255 	 * access so that HWS firmware can save/restore entries.
1256 	 */
1257 	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
1258 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
1259 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
1260 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
1261 		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
1262 	}
1263 }
1264 
gfx_v9_4_3_xcc_constants_init(struct amdgpu_device * adev,int xcc_id)1265 static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
1266 					  int xcc_id)
1267 {
1268 	u32 tmp;
1269 	int i;
1270 
1271 	/* XXX SH_MEM regs */
1272 	/* where to put LDS, scratch, GPUVM in FSA64 space */
1273 	mutex_lock(&adev->srbm_mutex);
1274 	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
1275 		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
1276 		/* CP and shaders */
1277 		if (i == 0) {
1278 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1279 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1280 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
1281 					    !!adev->gmc.noretry);
1282 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1283 					 regSH_MEM_CONFIG, tmp);
1284 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1285 					 regSH_MEM_BASES, 0);
1286 		} else {
1287 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
1288 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
1289 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
1290 					    !!adev->gmc.noretry);
1291 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1292 					 regSH_MEM_CONFIG, tmp);
1293 			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
1294 					    (adev->gmc.private_aperture_start >>
1295 					     48));
1296 			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
1297 					    (adev->gmc.shared_aperture_start >>
1298 					     48));
1299 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
1300 					 regSH_MEM_BASES, tmp);
1301 		}
1302 	}
1303 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));
1304 
1305 	mutex_unlock(&adev->srbm_mutex);
1306 
1307 	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
1308 	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
1309 }
1310 
gfx_v9_4_3_constants_init(struct amdgpu_device * adev)1311 static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
1312 {
1313 	int i, num_xcc;
1314 
1315 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1316 
1317 	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
1318 	adev->gfx.config.db_debug2 =
1319 		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);
1320 
1321 	for (i = 0; i < num_xcc; i++)
1322 		gfx_v9_4_3_xcc_constants_init(adev, i);
1323 }
1324 
1325 static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device * adev,int xcc_id)1326 gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
1327 					   int xcc_id)
1328 {
1329 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
1330 }
1331 
gfx_v9_4_3_xcc_init_pg(struct amdgpu_device * adev,int xcc_id)1332 static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
1333 {
1334 	/*
1335 	 * Rlc save restore list is workable since v2_1.
1336 	 */
1337 	gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
1338 }
1339 
gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device * adev,int xcc_id)1340 static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
1341 {
1342 	uint32_t data;
1343 
1344 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
1345 	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
1346 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
1347 }
1348 
gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device * adev)1349 static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
1350 {
1351 	uint32_t rlc_setting;
1352 
1353 	/* if RLC is not enabled, do nothing */
1354 	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
1355 	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
1356 		return false;
1357 
1358 	return true;
1359 }
1360 
gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device * adev,int xcc_id)1361 static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
1362 {
1363 	uint32_t data;
1364 	unsigned i;
1365 
1366 	data = RLC_SAFE_MODE__CMD_MASK;
1367 	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
1368 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1369 
1370 	/* wait for RLC_SAFE_MODE */
1371 	for (i = 0; i < adev->usec_timeout; i++) {
1372 		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
1373 			break;
1374 		udelay(1);
1375 	}
1376 }
1377 
gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device * adev,int xcc_id)1378 static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
1379 					   int xcc_id)
1380 {
1381 	uint32_t data;
1382 
1383 	data = RLC_SAFE_MODE__CMD_MASK;
1384 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
1385 }
1386 
gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device * adev)1387 static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
1388 {
1389 	int xcc_id, num_xcc;
1390 	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
1391 
1392 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1393 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
1394 		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
1395 		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
1396 		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
1397 		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
1398 		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
1399 		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
1400 		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
1401 		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
1402 	}
1403 	adev->gfx.rlc.rlcg_reg_access_supported = true;
1404 }
1405 
gfx_v9_4_3_rlc_init(struct amdgpu_device * adev)1406 static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
1407 {
1408 	/* init spm vmid with 0xf */
1409 	if (adev->gfx.rlc.funcs->update_spm_vmid)
1410 		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);
1411 
1412 	return 0;
1413 }
1414 
gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device * adev,int xcc_id)1415 static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
1416 					       int xcc_id)
1417 {
1418 	u32 i, j, k;
1419 	u32 mask;
1420 
1421 	mutex_lock(&adev->grbm_idx_mutex);
1422 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1423 		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1424 			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
1425 						    xcc_id);
1426 			for (k = 0; k < adev->usec_timeout; k++) {
1427 				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
1428 					break;
1429 				udelay(1);
1430 			}
1431 			if (k == adev->usec_timeout) {
1432 				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
1433 							    0xffffffff,
1434 							    0xffffffff, xcc_id);
1435 				mutex_unlock(&adev->grbm_idx_mutex);
1436 				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
1437 					 i, j);
1438 				return;
1439 			}
1440 		}
1441 	}
1442 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
1443 				    xcc_id);
1444 	mutex_unlock(&adev->grbm_idx_mutex);
1445 
1446 	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
1447 		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
1448 		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
1449 		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
1450 	for (k = 0; k < adev->usec_timeout; k++) {
1451 		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
1452 			break;
1453 		udelay(1);
1454 	}
1455 }
1456 
gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device * adev,bool enable,int xcc_id)1457 static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
1458 						     bool enable, int xcc_id)
1459 {
1460 	u32 tmp;
1461 
1462 	/* These interrupts should be enabled to drive DS clock */
1463 
1464 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);
1465 
1466 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
1467 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
1468 	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
1469 
1470 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
1471 }
1472 
gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device * adev,int xcc_id)1473 static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
1474 {
1475 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1476 			      RLC_ENABLE_F32, 0);
1477 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1478 	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
1479 }
1480 
gfx_v9_4_3_rlc_stop(struct amdgpu_device * adev)1481 static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
1482 {
1483 	int i, num_xcc;
1484 
1485 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1486 	for (i = 0; i < num_xcc; i++)
1487 		gfx_v9_4_3_xcc_rlc_stop(adev, i);
1488 }
1489 
gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device * adev,int xcc_id)1490 static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
1491 {
1492 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1493 			      SOFT_RESET_RLC, 1);
1494 	udelay(50);
1495 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
1496 			      SOFT_RESET_RLC, 0);
1497 	udelay(50);
1498 }
1499 
gfx_v9_4_3_rlc_reset(struct amdgpu_device * adev)1500 static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
1501 {
1502 	int i, num_xcc;
1503 
1504 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1505 	for (i = 0; i < num_xcc; i++)
1506 		gfx_v9_4_3_xcc_rlc_reset(adev, i);
1507 }
1508 
gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device * adev,int xcc_id)1509 static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
1510 {
1511 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
1512 			      RLC_ENABLE_F32, 1);
1513 	udelay(50);
1514 
1515 	/* carrizo do enable cp interrupt after cp inited */
1516 	if (!(adev->flags & AMD_IS_APU)) {
1517 		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
1518 		udelay(50);
1519 	}
1520 }
1521 
gfx_v9_4_3_rlc_start(struct amdgpu_device * adev)1522 static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
1523 {
1524 #ifdef AMDGPU_RLC_DEBUG_RETRY
1525 	u32 rlc_ucode_ver;
1526 #endif
1527 	int i, num_xcc;
1528 
1529 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1530 	for (i = 0; i < num_xcc; i++) {
1531 		gfx_v9_4_3_xcc_rlc_start(adev, i);
1532 #ifdef AMDGPU_RLC_DEBUG_RETRY
1533 		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
1534 		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
1535 		if (rlc_ucode_ver == 0x108) {
1536 			dev_info(adev->dev,
1537 				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 ==0x08%x / fw_ver == %i \n",
1538 				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
1539 			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
1540 			 * default is 0x9C4 to create a 100us interval */
1541 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
1542 			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
1543 			 * to disable the page fault retry interrupts, default is
1544 			 * 0x100 (256) */
1545 			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
1546 		}
1547 #endif
1548 	}
1549 }
1550 
gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device * adev,int xcc_id)1551 static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
1552 					     int xcc_id)
1553 {
1554 	const struct rlc_firmware_header_v2_0 *hdr;
1555 	const __le32 *fw_data;
1556 	unsigned i, fw_size;
1557 
1558 	if (!adev->gfx.rlc_fw)
1559 		return -EINVAL;
1560 
1561 	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1562 	amdgpu_ucode_print_rlc_hdr(&hdr->header);
1563 
1564 	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
1565 			   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1566 	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
1567 
1568 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
1569 			RLCG_UCODE_LOADING_START_ADDRESS);
1570 	for (i = 0; i < fw_size; i++) {
1571 		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
1572 			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
1573 			msleep(1);
1574 		}
1575 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
1576 	}
1577 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
1578 
1579 	return 0;
1580 }
1581 
gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device * adev,int xcc_id)1582 static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
1583 {
1584 	int r;
1585 
1586 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1587 		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
1588 		/* legacy rlc firmware loading */
1589 		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
1590 		if (r)
1591 			return r;
1592 		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
1593 	}
1594 
1595 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
1596 	/* disable CG */
1597 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
1598 	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
1599 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
1600 
1601 	return 0;
1602 }
1603 
gfx_v9_4_3_rlc_resume(struct amdgpu_device * adev)1604 static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
1605 {
1606 	int r, i, num_xcc;
1607 
1608 	if (amdgpu_sriov_vf(adev))
1609 		return 0;
1610 
1611 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
1612 	for (i = 0; i < num_xcc; i++) {
1613 		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
1614 		if (r)
1615 			return r;
1616 	}
1617 
1618 	return 0;
1619 }
1620 
gfx_v9_4_3_update_spm_vmid(struct amdgpu_device * adev,struct amdgpu_ring * ring,unsigned vmid)1621 static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
1622 				       unsigned vmid)
1623 {
1624 	u32 reg, pre_data, data;
1625 
1626 	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
1627 	if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev))
1628 		pre_data = RREG32_NO_KIQ(reg);
1629 	else
1630 		pre_data = RREG32(reg);
1631 
1632 	data =	pre_data & (~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK);
1633 	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
1634 
1635 	if (pre_data != data) {
1636 		if (amdgpu_sriov_is_pp_one_vf(adev) && !amdgpu_sriov_runtime(adev)) {
1637 			WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1638 		} else
1639 			WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
1640 	}
1641 }
1642 
1643 static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
1644 	{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
1645 	{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
1646 };
1647 
gfx_v9_4_3_check_rlcg_range(struct amdgpu_device * adev,uint32_t offset,struct soc15_reg_rlcg * entries,int arr_size)1648 static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
1649 					uint32_t offset,
1650 					struct soc15_reg_rlcg *entries, int arr_size)
1651 {
1652 	int i, inst;
1653 	uint32_t reg;
1654 
1655 	if (!entries)
1656 		return false;
1657 
1658 	for (i = 0; i < arr_size; i++) {
1659 		const struct soc15_reg_rlcg *entry;
1660 
1661 		entry = &entries[i];
1662 		inst = adev->ip_map.logical_to_dev_inst ?
1663 			       adev->ip_map.logical_to_dev_inst(
1664 				       adev, entry->hwip, entry->instance) :
1665 			       entry->instance;
1666 		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
1667 		      entry->reg;
1668 		if (offset == reg)
1669 			return true;
1670 	}
1671 
1672 	return false;
1673 }
1674 
gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device * adev,u32 offset)1675 static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
1676 {
1677 	return gfx_v9_4_3_check_rlcg_range(adev, offset,
1678 					(void *)rlcg_access_gc_9_4_3,
1679 					ARRAY_SIZE(rlcg_access_gc_9_4_3));
1680 }
1681 
gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device * adev,bool enable,int xcc_id)1682 static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
1683 					     bool enable, int xcc_id)
1684 {
1685 	if (enable) {
1686 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
1687 	} else {
1688 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
1689 			(CP_MEC_CNTL__MEC_INVALIDATE_ICACHE_MASK |
1690 			 CP_MEC_CNTL__MEC_ME1_PIPE0_RESET_MASK |
1691 			 CP_MEC_CNTL__MEC_ME1_PIPE1_RESET_MASK |
1692 			 CP_MEC_CNTL__MEC_ME1_PIPE2_RESET_MASK |
1693 			 CP_MEC_CNTL__MEC_ME1_PIPE3_RESET_MASK |
1694 			 CP_MEC_CNTL__MEC_ME2_PIPE0_RESET_MASK |
1695 			 CP_MEC_CNTL__MEC_ME2_PIPE1_RESET_MASK |
1696 			 CP_MEC_CNTL__MEC_ME1_HALT_MASK |
1697 			 CP_MEC_CNTL__MEC_ME2_HALT_MASK));
1698 		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
1699 	}
1700 	udelay(50);
1701 }
1702 
gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device * adev,int xcc_id)1703 static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
1704 						    int xcc_id)
1705 {
1706 	const struct gfx_firmware_header_v1_0 *mec_hdr;
1707 	const __le32 *fw_data;
1708 	unsigned i;
1709 	u32 tmp;
1710 	u32 mec_ucode_addr_offset;
1711 	u32 mec_ucode_data_offset;
1712 
1713 	if (!adev->gfx.mec_fw)
1714 		return -EINVAL;
1715 
1716 	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
1717 
1718 	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1719 	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
1720 
1721 	fw_data = (const __le32 *)
1722 		(adev->gfx.mec_fw->data +
1723 		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
1724 	tmp = 0;
1725 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
1726 	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
1727 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);
1728 
1729 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
1730 		adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
1731 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
1732 		upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));
1733 
1734 	mec_ucode_addr_offset =
1735 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
1736 	mec_ucode_data_offset =
1737 		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);
1738 
1739 	/* MEC1 */
1740 	WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
1741 	for (i = 0; i < mec_hdr->jt_size; i++)
1742 		WREG32(mec_ucode_data_offset,
1743 		       le32_to_cpup(fw_data + mec_hdr->jt_offset + i));
1744 
1745 	WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
1746 	/* Todo : Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */
1747 
1748 	return 0;
1749 }
1750 
1751 /* KIQ functions */
gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring * ring,int xcc_id)1752 static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id)
1753 {
1754 	uint32_t tmp;
1755 	struct amdgpu_device *adev = ring->adev;
1756 
1757 	/* tell RLC which is KIQ queue */
1758 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS);
1759 	tmp &= 0xffffff00;
1760 	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
1761 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp | 0x80);
1762 }
1763 
gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring * ring,struct v9_mqd * mqd)1764 static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
1765 {
1766 	struct amdgpu_device *adev = ring->adev;
1767 
1768 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
1769 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) {
1770 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
1771 			mqd->cp_hqd_queue_priority =
1772 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
1773 		}
1774 	}
1775 }
1776 
gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring * ring,int xcc_id)1777 static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id)
1778 {
1779 	struct amdgpu_device *adev = ring->adev;
1780 	struct v9_mqd *mqd = ring->mqd_ptr;
1781 	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
1782 	uint32_t tmp;
1783 
1784 	mqd->header = 0xC0310800;
1785 	mqd->compute_pipelinestat_enable = 0x00000001;
1786 	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
1787 	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
1788 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
1789 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
1790 	mqd->compute_misc_reserved = 0x00000003;
1791 
1792 	mqd->dynamic_cu_mask_addr_lo =
1793 		lower_32_bits(ring->mqd_gpu_addr
1794 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1795 	mqd->dynamic_cu_mask_addr_hi =
1796 		upper_32_bits(ring->mqd_gpu_addr
1797 			      + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
1798 
1799 	eop_base_addr = ring->eop_gpu_addr >> 8;
1800 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
1801 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
1802 
1803 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1804 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL);
1805 	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
1806 			(order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1));
1807 
1808 	mqd->cp_hqd_eop_control = tmp;
1809 
1810 	/* enable doorbell? */
1811 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL);
1812 
1813 	if (ring->use_doorbell) {
1814 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1815 				    DOORBELL_OFFSET, ring->doorbell_index);
1816 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1817 				    DOORBELL_EN, 1);
1818 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1819 				    DOORBELL_SOURCE, 0);
1820 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1821 				    DOORBELL_HIT, 0);
1822 		if (amdgpu_sriov_multi_vf_mode(adev))
1823 			tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1824 					    DOORBELL_MODE, 1);
1825 	} else {
1826 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
1827 					 DOORBELL_EN, 0);
1828 	}
1829 
1830 	mqd->cp_hqd_pq_doorbell_control = tmp;
1831 
1832 	/* disable the queue if it's active */
1833 	ring->wptr = 0;
1834 	mqd->cp_hqd_dequeue_request = 0;
1835 	mqd->cp_hqd_pq_rptr = 0;
1836 	mqd->cp_hqd_pq_wptr_lo = 0;
1837 	mqd->cp_hqd_pq_wptr_hi = 0;
1838 
1839 	/* set the pointer to the MQD */
1840 	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
1841 	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);
1842 
1843 	/* set MQD vmid to 0 */
1844 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL);
1845 	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
1846 	mqd->cp_mqd_control = tmp;
1847 
1848 	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
1849 	hqd_gpu_addr = ring->gpu_addr >> 8;
1850 	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
1851 	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);
1852 
1853 	/* set up the HQD, this is similar to CP_RB0_CNTL */
1854 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL);
1855 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
1856 			    (order_base_2(ring->ring_size / 4) - 1));
1857 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
1858 			((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
1859 #ifdef __BIG_ENDIAN
1860 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
1861 #endif
1862 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
1863 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0);
1864 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
1865 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
1866 	mqd->cp_hqd_pq_control = tmp;
1867 
1868 	/* set the wb address whether it's enabled or not */
1869 	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
1870 	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
1871 	mqd->cp_hqd_pq_rptr_report_addr_hi =
1872 		upper_32_bits(wb_gpu_addr) & 0xffff;
1873 
1874 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1875 	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
1876 	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
1877 	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;
1878 
1879 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
1880 	ring->wptr = 0;
1881 	mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR);
1882 
1883 	/* set the vmid for the queue */
1884 	mqd->cp_hqd_vmid = 0;
1885 
1886 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE);
1887 	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53);
1888 	mqd->cp_hqd_persistent_state = tmp;
1889 
1890 	/* set MIN_IB_AVAIL_SIZE */
1891 	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL);
1892 	tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
1893 	mqd->cp_hqd_ib_control = tmp;
1894 
1895 	/* set static priority for a queue/ring */
1896 	gfx_v9_4_3_mqd_set_priority(ring, mqd);
1897 	mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM);
1898 
1899 	/* map_queues packet doesn't need activate the queue,
1900 	 * so only kiq need set this field.
1901 	 */
1902 	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
1903 		mqd->cp_hqd_active = 1;
1904 
1905 	return 0;
1906 }
1907 
gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring * ring,int xcc_id)1908 static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring,
1909 					    int xcc_id)
1910 {
1911 	struct amdgpu_device *adev = ring->adev;
1912 	struct v9_mqd *mqd = ring->mqd_ptr;
1913 	int j;
1914 
1915 	/* disable wptr polling */
1916 	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
1917 
1918 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR,
1919 	       mqd->cp_hqd_eop_base_addr_lo);
1920 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI,
1921 	       mqd->cp_hqd_eop_base_addr_hi);
1922 
1923 	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
1924 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL,
1925 	       mqd->cp_hqd_eop_control);
1926 
1927 	/* enable doorbell? */
1928 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1929 	       mqd->cp_hqd_pq_doorbell_control);
1930 
1931 	/* disable the queue if it's active */
1932 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
1933 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
1934 		for (j = 0; j < adev->usec_timeout; j++) {
1935 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
1936 				break;
1937 			udelay(1);
1938 		}
1939 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
1940 		       mqd->cp_hqd_dequeue_request);
1941 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR,
1942 		       mqd->cp_hqd_pq_rptr);
1943 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
1944 		       mqd->cp_hqd_pq_wptr_lo);
1945 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
1946 		       mqd->cp_hqd_pq_wptr_hi);
1947 	}
1948 
1949 	/* set the pointer to the MQD */
1950 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR,
1951 	       mqd->cp_mqd_base_addr_lo);
1952 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI,
1953 	       mqd->cp_mqd_base_addr_hi);
1954 
1955 	/* set MQD vmid to 0 */
1956 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL,
1957 	       mqd->cp_mqd_control);
1958 
1959 	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
1960 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE,
1961 	       mqd->cp_hqd_pq_base_lo);
1962 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI,
1963 	       mqd->cp_hqd_pq_base_hi);
1964 
1965 	/* set up the HQD, this is similar to CP_RB0_CNTL */
1966 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL,
1967 	       mqd->cp_hqd_pq_control);
1968 
1969 	/* set the wb address whether it's enabled or not */
1970 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR,
1971 				mqd->cp_hqd_pq_rptr_report_addr_lo);
1972 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
1973 				mqd->cp_hqd_pq_rptr_report_addr_hi);
1974 
1975 	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
1976 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR,
1977 	       mqd->cp_hqd_pq_wptr_poll_addr_lo);
1978 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
1979 	       mqd->cp_hqd_pq_wptr_poll_addr_hi);
1980 
1981 	/* enable the doorbell if requested */
1982 	if (ring->use_doorbell) {
1983 		WREG32_SOC15(
1984 			GC, GET_INST(GC, xcc_id),
1985 			regCP_MEC_DOORBELL_RANGE_LOWER,
1986 			((adev->doorbell_index.kiq +
1987 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1988 			 2) << 2);
1989 		WREG32_SOC15(
1990 			GC, GET_INST(GC, xcc_id),
1991 			regCP_MEC_DOORBELL_RANGE_UPPER,
1992 			((adev->doorbell_index.userqueue_end +
1993 			  xcc_id * adev->doorbell_index.xcc_doorbell_range) *
1994 			 2) << 2);
1995 	}
1996 
1997 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL,
1998 	       mqd->cp_hqd_pq_doorbell_control);
1999 
2000 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
2001 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO,
2002 	       mqd->cp_hqd_pq_wptr_lo);
2003 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI,
2004 	       mqd->cp_hqd_pq_wptr_hi);
2005 
2006 	/* set the vmid for the queue */
2007 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid);
2008 
2009 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE,
2010 	       mqd->cp_hqd_persistent_state);
2011 
2012 	/* activate the queue */
2013 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE,
2014 	       mqd->cp_hqd_active);
2015 
2016 	if (ring->use_doorbell)
2017 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1);
2018 
2019 	return 0;
2020 }
2021 
gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring * ring,int xcc_id)2022 static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
2023 					    int xcc_id)
2024 {
2025 	struct amdgpu_device *adev = ring->adev;
2026 	int j;
2027 
2028 	/* disable the queue if it's active */
2029 	if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) {
2030 
2031 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1);
2032 
2033 		for (j = 0; j < adev->usec_timeout; j++) {
2034 			if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
2035 				break;
2036 			udelay(1);
2037 		}
2038 
2039 		if (j == AMDGPU_MAX_USEC_TIMEOUT) {
2040 			DRM_DEBUG("%s dequeue request failed.\n", ring->name);
2041 
2042 			/* Manual disable if dequeue request times out */
2043 			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0);
2044 		}
2045 
2046 		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST,
2047 		      0);
2048 	}
2049 
2050 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
2051 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
2052 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
2053 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
2054 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
2055 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);
2056 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0);
2057 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0);
2058 
2059 	return 0;
2060 }
2061 
gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring * ring,int xcc_id)2062 static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
2063 {
2064 	struct amdgpu_device *adev = ring->adev;
2065 	struct v9_mqd *mqd = ring->mqd_ptr;
2066 	struct v9_mqd *tmp_mqd;
2067 
2068 	gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id);
2069 
2070 	/* GPU could be in bad state during probe, driver trigger the reset
2071 	 * after load the SMU, in this case , the mqd is not be initialized.
2072 	 * driver need to re-init the mqd.
2073 	 * check mqd->cp_hqd_pq_control since this value should not be 0
2074 	 */
2075 	tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup;
2076 	if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
2077 		/* for GPU_RESET case , reset MQD to a clean status */
2078 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2079 			memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation));
2080 
2081 		/* reset ring buffer */
2082 		ring->wptr = 0;
2083 		amdgpu_ring_clear_ring(ring);
2084 		mutex_lock(&adev->srbm_mutex);
2085 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2086 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2087 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2088 		mutex_unlock(&adev->srbm_mutex);
2089 	} else {
2090 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2091 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2092 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2093 		mutex_lock(&adev->srbm_mutex);
2094 		if (amdgpu_sriov_vf(adev) && adev->in_suspend)
2095 			amdgpu_ring_clear_ring(ring);
2096 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2097 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2098 		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
2099 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2100 		mutex_unlock(&adev->srbm_mutex);
2101 
2102 		if (adev->gfx.kiq[xcc_id].mqd_backup)
2103 			memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation));
2104 	}
2105 
2106 	return 0;
2107 }
2108 
gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring * ring,int xcc_id,bool restore)2109 static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
2110 {
2111 	struct amdgpu_device *adev = ring->adev;
2112 	struct v9_mqd *mqd = ring->mqd_ptr;
2113 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
2114 	struct v9_mqd *tmp_mqd;
2115 
2116 	/* Same as above kiq init, driver need to re-init the mqd if mqd->cp_hqd_pq_control
2117 	 * is not be initialized before
2118 	 */
2119 	tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
2120 
2121 	if (!restore && (!tmp_mqd->cp_hqd_pq_control ||
2122 	    (!amdgpu_in_reset(adev) && !adev->in_suspend))) {
2123 		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
2124 		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
2125 		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
2126 		mutex_lock(&adev->srbm_mutex);
2127 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
2128 		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
2129 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2130 		mutex_unlock(&adev->srbm_mutex);
2131 
2132 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2133 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
2134 	} else {
2135 		/* restore MQD to a clean status */
2136 		if (adev->gfx.mec.mqd_backup[mqd_idx])
2137 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
2138 		/* reset ring buffer */
2139 		ring->wptr = 0;
2140 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
2141 		amdgpu_ring_clear_ring(ring);
2142 	}
2143 
2144 	return 0;
2145 }
2146 
gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device * adev,int xcc_id)2147 static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
2148 {
2149 	struct amdgpu_ring *ring;
2150 	int j;
2151 
2152 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2153 		ring = &adev->gfx.compute_ring[j +  xcc_id * adev->gfx.num_compute_rings];
2154 		if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2155 			mutex_lock(&adev->srbm_mutex);
2156 			soc15_grbm_select(adev, ring->me,
2157 					ring->pipe,
2158 					ring->queue, 0, GET_INST(GC, xcc_id));
2159 			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
2160 			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2161 			mutex_unlock(&adev->srbm_mutex);
2162 		}
2163 	}
2164 
2165 	return 0;
2166 }
2167 
gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device * adev,int xcc_id)2168 static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
2169 {
2170 	gfx_v9_4_3_xcc_kiq_init_queue(&adev->gfx.kiq[xcc_id].ring, xcc_id);
2171 	return 0;
2172 }
2173 
gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device * adev,int xcc_id)2174 static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
2175 {
2176 	struct amdgpu_ring *ring;
2177 	int i, r;
2178 
2179 	gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
2180 
2181 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
2182 		ring = &adev->gfx.compute_ring[i + xcc_id *
2183 			adev->gfx.num_compute_rings];
2184 
2185 		r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
2186 		if (r)
2187 			return r;
2188 	}
2189 
2190 	return amdgpu_gfx_enable_kcq(adev, xcc_id);
2191 }
2192 
gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device * adev,int xcc_id)2193 static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
2194 {
2195 	struct amdgpu_ring *ring;
2196 	int r, j;
2197 
2198 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
2199 
2200 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2201 		gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id);
2202 
2203 		r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id);
2204 		if (r)
2205 			return r;
2206 	} else {
2207 		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
2208 	}
2209 
2210 	r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id);
2211 	if (r)
2212 		return r;
2213 
2214 	r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id);
2215 	if (r)
2216 		return r;
2217 
2218 	for (j = 0; j < adev->gfx.num_compute_rings; j++) {
2219 		ring = &adev->gfx.compute_ring
2220 				[j + xcc_id * adev->gfx.num_compute_rings];
2221 		r = amdgpu_ring_test_helper(ring);
2222 		if (r)
2223 			return r;
2224 	}
2225 
2226 	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2227 
2228 	return 0;
2229 }
2230 
gfx_v9_4_3_cp_resume(struct amdgpu_device * adev)2231 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
2232 {
2233 	int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;
2234 
2235 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2236 	if (amdgpu_sriov_vf(adev)) {
2237 		enum amdgpu_gfx_partition mode;
2238 
2239 		mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2240 						       AMDGPU_XCP_FL_NONE);
2241 		if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2242 			return -EINVAL;
2243 		num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
2244 		adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
2245 		num_xcp = num_xcc / num_xcc_per_xcp;
2246 		r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
2247 
2248 	} else {
2249 		if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
2250 						    AMDGPU_XCP_FL_NONE) ==
2251 		    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
2252 			r = amdgpu_xcp_switch_partition_mode(
2253 				adev->xcp_mgr, amdgpu_user_partt_mode);
2254 	}
2255 	if (r)
2256 		return r;
2257 
2258 	for (i = 0; i < num_xcc; i++) {
2259 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
2260 		if (r)
2261 			return r;
2262 	}
2263 
2264 	return 0;
2265 }
2266 
gfx_v9_4_3_xcc_fini(struct amdgpu_device * adev,int xcc_id)2267 static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id)
2268 {
2269 	if (amdgpu_gfx_disable_kcq(adev, xcc_id))
2270 		DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
2271 
2272 	if (amdgpu_sriov_vf(adev)) {
2273 		/* must disable polling for SRIOV when hw finished, otherwise
2274 		 * CPC engine may still keep fetching WB address which is already
2275 		 * invalid after sw finished and trigger DMAR reading error in
2276 		 * hypervisor side.
2277 		 */
2278 		WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0);
2279 		return;
2280 	}
2281 
2282 	/* Use deinitialize sequence from CAIL when unbinding device
2283 	 * from driver, otherwise KIQ is hanging when binding back
2284 	 */
2285 	if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
2286 		mutex_lock(&adev->srbm_mutex);
2287 		soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
2288 				  adev->gfx.kiq[xcc_id].ring.pipe,
2289 				  adev->gfx.kiq[xcc_id].ring.queue, 0,
2290 				  GET_INST(GC, xcc_id));
2291 		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
2292 						 xcc_id);
2293 		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
2294 		mutex_unlock(&adev->srbm_mutex);
2295 	}
2296 
2297 	gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id);
2298 	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);
2299 }
2300 
gfx_v9_4_3_hw_init(struct amdgpu_ip_block * ip_block)2301 static int gfx_v9_4_3_hw_init(struct amdgpu_ip_block *ip_block)
2302 {
2303 	int r;
2304 	struct amdgpu_device *adev = ip_block->adev;
2305 
2306 	amdgpu_gfx_cleaner_shader_init(adev, adev->gfx.cleaner_shader_size,
2307 				       adev->gfx.cleaner_shader_ptr);
2308 
2309 	if (!amdgpu_sriov_vf(adev))
2310 		gfx_v9_4_3_init_golden_registers(adev);
2311 
2312 	gfx_v9_4_3_constants_init(adev);
2313 
2314 	r = adev->gfx.rlc.funcs->resume(adev);
2315 	if (r)
2316 		return r;
2317 
2318 	r = gfx_v9_4_3_cp_resume(adev);
2319 	if (r)
2320 		return r;
2321 
2322 	return r;
2323 }
2324 
gfx_v9_4_3_hw_fini(struct amdgpu_ip_block * ip_block)2325 static int gfx_v9_4_3_hw_fini(struct amdgpu_ip_block *ip_block)
2326 {
2327 	struct amdgpu_device *adev = ip_block->adev;
2328 	int i, num_xcc;
2329 
2330 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
2331 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2332 	amdgpu_irq_put(adev, &adev->gfx.bad_op_irq, 0);
2333 
2334 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2335 	for (i = 0; i < num_xcc; i++) {
2336 		gfx_v9_4_3_xcc_fini(adev, i);
2337 	}
2338 
2339 	return 0;
2340 }
2341 
gfx_v9_4_3_suspend(struct amdgpu_ip_block * ip_block)2342 static int gfx_v9_4_3_suspend(struct amdgpu_ip_block *ip_block)
2343 {
2344 	return gfx_v9_4_3_hw_fini(ip_block);
2345 }
2346 
gfx_v9_4_3_resume(struct amdgpu_ip_block * ip_block)2347 static int gfx_v9_4_3_resume(struct amdgpu_ip_block *ip_block)
2348 {
2349 	return gfx_v9_4_3_hw_init(ip_block);
2350 }
2351 
gfx_v9_4_3_is_idle(struct amdgpu_ip_block * ip_block)2352 static bool gfx_v9_4_3_is_idle(struct amdgpu_ip_block *ip_block)
2353 {
2354 	struct amdgpu_device *adev = ip_block->adev;
2355 	int i, num_xcc;
2356 
2357 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2358 	for (i = 0; i < num_xcc; i++) {
2359 		if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS),
2360 					GRBM_STATUS, GUI_ACTIVE))
2361 			return false;
2362 	}
2363 	return true;
2364 }
2365 
gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block * ip_block)2366 static int gfx_v9_4_3_wait_for_idle(struct amdgpu_ip_block *ip_block)
2367 {
2368 	unsigned i;
2369 	struct amdgpu_device *adev = ip_block->adev;
2370 
2371 	for (i = 0; i < adev->usec_timeout; i++) {
2372 		if (gfx_v9_4_3_is_idle(ip_block))
2373 			return 0;
2374 		udelay(1);
2375 	}
2376 	return -ETIMEDOUT;
2377 }
2378 
gfx_v9_4_3_soft_reset(struct amdgpu_ip_block * ip_block)2379 static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
2380 {
2381 	u32 grbm_soft_reset = 0;
2382 	u32 tmp;
2383 	struct amdgpu_device *adev = ip_block->adev;
2384 
2385 	/* GRBM_STATUS */
2386 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS);
2387 	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
2388 		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
2389 		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
2390 		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
2391 		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
2392 		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) {
2393 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2394 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2395 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2396 						GRBM_SOFT_RESET, SOFT_RESET_GFX, 1);
2397 	}
2398 
2399 	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
2400 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2401 						GRBM_SOFT_RESET, SOFT_RESET_CP, 1);
2402 	}
2403 
2404 	/* GRBM_STATUS2 */
2405 	tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2);
2406 	if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY))
2407 		grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
2408 						GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
2409 
2410 
2411 	if (grbm_soft_reset) {
2412 		/* stop the rlc */
2413 		adev->gfx.rlc.funcs->stop(adev);
2414 
2415 		/* Disable MEC parsing/prefetching */
2416 		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
2417 
2418 		if (grbm_soft_reset) {
2419 			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2420 			tmp |= grbm_soft_reset;
2421 			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2422 			WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2423 			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2424 
2425 			udelay(50);
2426 
2427 			tmp &= ~grbm_soft_reset;
2428 			WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
2429 			tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2430 		}
2431 
2432 		/* Wait a little for things to settle down */
2433 		udelay(50);
2434 	}
2435 	return 0;
2436 }
2437 
gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring * ring,uint32_t vmid,uint32_t gds_base,uint32_t gds_size,uint32_t gws_base,uint32_t gws_size,uint32_t oa_base,uint32_t oa_size)2438 static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring,
2439 					  uint32_t vmid,
2440 					  uint32_t gds_base, uint32_t gds_size,
2441 					  uint32_t gws_base, uint32_t gws_size,
2442 					  uint32_t oa_base, uint32_t oa_size)
2443 {
2444 	struct amdgpu_device *adev = ring->adev;
2445 
2446 	/* GDS Base */
2447 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2448 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
2449 				   gds_base);
2450 
2451 	/* GDS Size */
2452 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2453 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
2454 				   gds_size);
2455 
2456 	/* GWS */
2457 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2458 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
2459 				   gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2460 
2461 	/* OA */
2462 	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
2463 				   SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
2464 				   (1 << (oa_size + oa_base)) - (1 << oa_base));
2465 }
2466 
gfx_v9_4_3_early_init(struct amdgpu_ip_block * ip_block)2467 static int gfx_v9_4_3_early_init(struct amdgpu_ip_block *ip_block)
2468 {
2469 	struct amdgpu_device *adev = ip_block->adev;
2470 
2471 	adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
2472 					  AMDGPU_MAX_COMPUTE_RINGS);
2473 	gfx_v9_4_3_set_kiq_pm4_funcs(adev);
2474 	gfx_v9_4_3_set_ring_funcs(adev);
2475 	gfx_v9_4_3_set_irq_funcs(adev);
2476 	gfx_v9_4_3_set_gds_init(adev);
2477 	gfx_v9_4_3_set_rlc_funcs(adev);
2478 
2479 	/* init rlcg reg access ctrl */
2480 	gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev);
2481 
2482 	return gfx_v9_4_3_init_microcode(adev);
2483 }
2484 
gfx_v9_4_3_late_init(struct amdgpu_ip_block * ip_block)2485 static int gfx_v9_4_3_late_init(struct amdgpu_ip_block *ip_block)
2486 {
2487 	struct amdgpu_device *adev = ip_block->adev;
2488 	int r;
2489 
2490 	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2491 	if (r)
2492 		return r;
2493 
2494 	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2495 	if (r)
2496 		return r;
2497 
2498 	r = amdgpu_irq_get(adev, &adev->gfx.bad_op_irq, 0);
2499 	if (r)
2500 		return r;
2501 
2502 	if (adev->gfx.ras &&
2503 	    adev->gfx.ras->enable_watchdog_timer)
2504 		adev->gfx.ras->enable_watchdog_timer(adev);
2505 
2506 	return 0;
2507 }
2508 
gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device * adev,bool enable,int xcc_id)2509 static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
2510 					    bool enable, int xcc_id)
2511 {
2512 	uint32_t def, data;
2513 
2514 	if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG))
2515 		return;
2516 
2517 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2518 				  regRLC_CGTT_MGCG_OVERRIDE);
2519 
2520 	if (enable)
2521 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2522 	else
2523 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK;
2524 
2525 	if (def != data)
2526 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2527 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2528 
2529 }
2530 
gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device * adev,bool enable,int xcc_id)2531 static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
2532 						bool enable, int xcc_id)
2533 {
2534 	uint32_t def, data;
2535 
2536 	if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG))
2537 		return;
2538 
2539 	def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id),
2540 				  regRLC_CGTT_MGCG_OVERRIDE);
2541 
2542 	if (enable)
2543 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2544 	else
2545 		data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK;
2546 
2547 	if (def != data)
2548 		WREG32_SOC15(GC, GET_INST(GC, xcc_id),
2549 			     regRLC_CGTT_MGCG_OVERRIDE, data);
2550 }
2551 
2552 static void
gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device * adev,bool enable,int xcc_id)2553 gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev,
2554 						bool enable, int xcc_id)
2555 {
2556 	uint32_t data, def;
2557 
2558 	/* It is disabled by HW by default */
2559 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
2560 		/* 1 - RLC_CGTT_MGCG_OVERRIDE */
2561 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2562 
2563 		data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2564 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2565 			  RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2566 			  RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2567 
2568 		if (def != data)
2569 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2570 
2571 		/* MGLS is a global flag to control all MGLS in GFX */
2572 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
2573 			/* 2 - RLC memory Light sleep */
2574 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
2575 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2576 				data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2577 				if (def != data)
2578 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2579 			}
2580 			/* 3 - CP memory Light sleep */
2581 			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
2582 				def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2583 				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2584 				if (def != data)
2585 					WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2586 			}
2587 		}
2588 	} else {
2589 		/* 1 - MGCG_OVERRIDE */
2590 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2591 
2592 		data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
2593 			 RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
2594 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
2595 			 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
2596 
2597 		if (def != data)
2598 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2599 
2600 		/* 2 - disable MGLS in RLC */
2601 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL);
2602 		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
2603 			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
2604 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data);
2605 		}
2606 
2607 		/* 3 - disable MGLS in CP */
2608 		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL);
2609 		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
2610 			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
2611 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data);
2612 		}
2613 	}
2614 
2615 }
2616 
2617 static void
gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device * adev,bool enable,int xcc_id)2618 gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
2619 						bool enable, int xcc_id)
2620 {
2621 	uint32_t def, data;
2622 
2623 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
2624 
2625 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE);
2626 		/* unset CGCG override */
2627 		data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK;
2628 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2629 			data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2630 		else
2631 			data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK;
2632 		/* update CGCG and CGLS override bits */
2633 		if (def != data)
2634 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data);
2635 
2636 		/* CGCG Hysteresis: 400us */
2637 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2638 
2639 		data = (0x2710
2640 			<< RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
2641 		       RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
2642 		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS)
2643 			data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
2644 				RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
2645 		if (def != data)
2646 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2647 
2648 		/* set IDLE_POLL_COUNT(0x33450100)*/
2649 		def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL);
2650 		data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) |
2651 			(0x3345 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
2652 		if (def != data)
2653 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data);
2654 	} else {
2655 		def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL);
2656 		/* reset CGCG/CGLS bits */
2657 		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
2658 		/* disable cgcg and cgls in FSM */
2659 		if (def != data)
2660 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data);
2661 	}
2662 
2663 }
2664 
gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device * adev,bool enable,int xcc_id)2665 static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev,
2666 						  bool enable, int xcc_id)
2667 {
2668 	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
2669 
2670 	if (enable) {
2671 		/* FGCG */
2672 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2673 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2674 
2675 		/* CGCG/CGLS should be enabled after MGCG/MGLS
2676 		 * ===  MGCG + MGLS ===
2677 		 */
2678 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2679 								xcc_id);
2680 		/* ===  CGCG + CGLS === */
2681 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2682 								xcc_id);
2683 	} else {
2684 		/* CGCG/CGLS should be disabled before MGCG/MGLS
2685 		 * ===  CGCG + CGLS ===
2686 		 */
2687 		gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable,
2688 								xcc_id);
2689 		/* ===  MGCG + MGLS === */
2690 		gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable,
2691 								xcc_id);
2692 
2693 		/* FGCG */
2694 		gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id);
2695 		gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id);
2696 	}
2697 
2698 	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);
2699 
2700 	return 0;
2701 }
2702 
2703 static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = {
2704 	.is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled,
2705 	.set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode,
2706 	.unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode,
2707 	.init = gfx_v9_4_3_rlc_init,
2708 	.resume = gfx_v9_4_3_rlc_resume,
2709 	.stop = gfx_v9_4_3_rlc_stop,
2710 	.reset = gfx_v9_4_3_rlc_reset,
2711 	.start = gfx_v9_4_3_rlc_start,
2712 	.update_spm_vmid = gfx_v9_4_3_update_spm_vmid,
2713 	.is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range,
2714 };
2715 
gfx_v9_4_3_set_powergating_state(struct amdgpu_ip_block * ip_block,enum amd_powergating_state state)2716 static int gfx_v9_4_3_set_powergating_state(struct amdgpu_ip_block *ip_block,
2717 					  enum amd_powergating_state state)
2718 {
2719 	return 0;
2720 }
2721 
gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block * ip_block,enum amd_clockgating_state state)2722 static int gfx_v9_4_3_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2723 					  enum amd_clockgating_state state)
2724 {
2725 	struct amdgpu_device *adev = ip_block->adev;
2726 	int i, num_xcc;
2727 
2728 	if (amdgpu_sriov_vf(adev))
2729 		return 0;
2730 
2731 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
2732 	for (i = 0; i < num_xcc; i++)
2733 		gfx_v9_4_3_xcc_update_gfx_clock_gating(
2734 			adev, state == AMD_CG_STATE_GATE, i);
2735 
2736 	return 0;
2737 }
2738 
gfx_v9_4_3_get_clockgating_state(struct amdgpu_ip_block * ip_block,u64 * flags)2739 static void gfx_v9_4_3_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
2740 {
2741 	struct amdgpu_device *adev = ip_block->adev;
2742 	int data;
2743 
2744 	if (amdgpu_sriov_vf(adev))
2745 		*flags = 0;
2746 
2747 	/* AMD_CG_SUPPORT_GFX_MGCG */
2748 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE));
2749 	if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK))
2750 		*flags |= AMD_CG_SUPPORT_GFX_MGCG;
2751 
2752 	/* AMD_CG_SUPPORT_GFX_CGCG */
2753 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL));
2754 	if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK)
2755 		*flags |= AMD_CG_SUPPORT_GFX_CGCG;
2756 
2757 	/* AMD_CG_SUPPORT_GFX_CGLS */
2758 	if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK)
2759 		*flags |= AMD_CG_SUPPORT_GFX_CGLS;
2760 
2761 	/* AMD_CG_SUPPORT_GFX_RLC_LS */
2762 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL));
2763 	if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK)
2764 		*flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS;
2765 
2766 	/* AMD_CG_SUPPORT_GFX_CP_LS */
2767 	data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL));
2768 	if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
2769 		*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
2770 }
2771 
gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring * ring)2772 static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring)
2773 {
2774 	struct amdgpu_device *adev = ring->adev;
2775 	u32 ref_and_mask, reg_mem_engine;
2776 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
2777 
2778 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
2779 		switch (ring->me) {
2780 		case 1:
2781 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe;
2782 			break;
2783 		case 2:
2784 			ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe;
2785 			break;
2786 		default:
2787 			return;
2788 		}
2789 		reg_mem_engine = 0;
2790 	} else {
2791 		ref_and_mask = nbio_hf_reg->ref_and_mask_cp0;
2792 		reg_mem_engine = 1; /* pfp */
2793 	}
2794 
2795 	gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
2796 			      adev->nbio.funcs->get_hdp_flush_req_offset(adev),
2797 			      adev->nbio.funcs->get_hdp_flush_done_offset(adev),
2798 			      ref_and_mask, ref_and_mask, 0x20);
2799 }
2800 
gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring * ring,struct amdgpu_job * job,struct amdgpu_ib * ib,uint32_t flags)2801 static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring,
2802 					  struct amdgpu_job *job,
2803 					  struct amdgpu_ib *ib,
2804 					  uint32_t flags)
2805 {
2806 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
2807 	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
2808 
2809 	/* Currently, there is a high possibility to get wave ID mismatch
2810 	 * between ME and GDS, leading to a hw deadlock, because ME generates
2811 	 * different wave IDs than the GDS expects. This situation happens
2812 	 * randomly when at least 5 compute pipes use GDS ordered append.
2813 	 * The wave IDs generated by ME are also wrong after suspend/resume.
2814 	 * Those are probably bugs somewhere else in the kernel driver.
2815 	 *
2816 	 * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and
2817 	 * GDS to 0 for this ring (me/pipe).
2818 	 */
2819 	if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) {
2820 		amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2821 		amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID);
2822 		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2823 	}
2824 
2825 	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2826 	BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
2827 	amdgpu_ring_write(ring,
2828 #ifdef __BIG_ENDIAN
2829 				(2 << 0) |
2830 #endif
2831 				lower_32_bits(ib->gpu_addr));
2832 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
2833 	amdgpu_ring_write(ring, control);
2834 }
2835 
gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring * ring,u64 addr,u64 seq,unsigned flags)2836 static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
2837 				     u64 seq, unsigned flags)
2838 {
2839 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
2840 	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
2841 	bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY;
2842 
2843 	/* RELEASE_MEM - flush caches, send int */
2844 	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6));
2845 	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
2846 					       EOP_TC_NC_ACTION_EN) :
2847 					      (EOP_TCL1_ACTION_EN |
2848 					       EOP_TC_ACTION_EN |
2849 					       EOP_TC_WB_ACTION_EN |
2850 					       EOP_TC_MD_ACTION_EN)) |
2851 				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
2852 				 EVENT_INDEX(5)));
2853 	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
2854 
2855 	/*
2856 	 * the address should be Qword aligned if 64bit write, Dword
2857 	 * aligned if only send 32bit data low (discard data high)
2858 	 */
2859 	if (write64bit)
2860 		BUG_ON(addr & 0x7);
2861 	else
2862 		BUG_ON(addr & 0x3);
2863 	amdgpu_ring_write(ring, lower_32_bits(addr));
2864 	amdgpu_ring_write(ring, upper_32_bits(addr));
2865 	amdgpu_ring_write(ring, lower_32_bits(seq));
2866 	amdgpu_ring_write(ring, upper_32_bits(seq));
2867 	amdgpu_ring_write(ring, 0);
2868 }
2869 
gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring * ring)2870 static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
2871 {
2872 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
2873 	uint32_t seq = ring->fence_drv.sync_seq;
2874 	uint64_t addr = ring->fence_drv.gpu_addr;
2875 
2876 	gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
2877 			      lower_32_bits(addr), upper_32_bits(addr),
2878 			      seq, 0xffffffff, 4);
2879 }
2880 
gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring * ring,unsigned vmid,uint64_t pd_addr)2881 static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring,
2882 					unsigned vmid, uint64_t pd_addr)
2883 {
2884 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
2885 }
2886 
gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring * ring)2887 static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring)
2888 {
2889 	return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */
2890 }
2891 
gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring * ring)2892 static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring)
2893 {
2894 	u64 wptr;
2895 
2896 	/* XXX check if swapping is necessary on BE */
2897 	if (ring->use_doorbell)
2898 		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
2899 	else
2900 		BUG();
2901 	return wptr;
2902 }
2903 
gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring * ring)2904 static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring)
2905 {
2906 	struct amdgpu_device *adev = ring->adev;
2907 
2908 	/* XXX check if swapping is necessary on BE */
2909 	if (ring->use_doorbell) {
2910 		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2911 		WDOORBELL64(ring->doorbell_index, ring->wptr);
2912 	} else {
2913 		BUG(); /* only DOORBELL method supported on gfx9 now */
2914 	}
2915 }
2916 
gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring * ring,u64 addr,u64 seq,unsigned int flags)2917 static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr,
2918 					 u64 seq, unsigned int flags)
2919 {
2920 	struct amdgpu_device *adev = ring->adev;
2921 
2922 	/* we only allocate 32bit for each seq wb address */
2923 	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
2924 
2925 	/* write fence seq to the "addr" */
2926 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2927 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2928 				 WRITE_DATA_DST_SEL(5) | WR_CONFIRM));
2929 	amdgpu_ring_write(ring, lower_32_bits(addr));
2930 	amdgpu_ring_write(ring, upper_32_bits(addr));
2931 	amdgpu_ring_write(ring, lower_32_bits(seq));
2932 
2933 	if (flags & AMDGPU_FENCE_FLAG_INT) {
2934 		/* set register to trigger INT */
2935 		amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2936 		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2937 					 WRITE_DATA_DST_SEL(0) | WR_CONFIRM));
2938 		amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS));
2939 		amdgpu_ring_write(ring, 0);
2940 		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
2941 	}
2942 }
2943 
gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring * ring,uint32_t reg,uint32_t reg_val_offs)2944 static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
2945 				    uint32_t reg_val_offs)
2946 {
2947 	struct amdgpu_device *adev = ring->adev;
2948 
2949 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
2950 
2951 	amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
2952 	amdgpu_ring_write(ring, 0 |	/* src: register*/
2953 				(5 << 8) |	/* dst: memory */
2954 				(1 << 20));	/* write confirm */
2955 	amdgpu_ring_write(ring, reg);
2956 	amdgpu_ring_write(ring, 0);
2957 	amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
2958 				reg_val_offs * 4));
2959 	amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
2960 				reg_val_offs * 4));
2961 }
2962 
gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring * ring,uint32_t reg,uint32_t val)2963 static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
2964 				    uint32_t val)
2965 {
2966 	uint32_t cmd = 0;
2967 
2968 	reg = gfx_v9_4_3_normalize_xcc_reg_offset(reg);
2969 
2970 	switch (ring->funcs->type) {
2971 	case AMDGPU_RING_TYPE_GFX:
2972 		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
2973 		break;
2974 	case AMDGPU_RING_TYPE_KIQ:
2975 		cmd = (1 << 16); /* no inc addr */
2976 		break;
2977 	default:
2978 		cmd = WR_CONFIRM;
2979 		break;
2980 	}
2981 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2982 	amdgpu_ring_write(ring, cmd);
2983 	amdgpu_ring_write(ring, reg);
2984 	amdgpu_ring_write(ring, 0);
2985 	amdgpu_ring_write(ring, val);
2986 }
2987 
gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring * ring,uint32_t reg,uint32_t val,uint32_t mask)2988 static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
2989 					uint32_t val, uint32_t mask)
2990 {
2991 	gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
2992 }
2993 
gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring * ring,uint32_t reg0,uint32_t reg1,uint32_t ref,uint32_t mask)2994 static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
2995 						  uint32_t reg0, uint32_t reg1,
2996 						  uint32_t ref, uint32_t mask)
2997 {
2998 	amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
2999 						   ref, mask);
3000 }
3001 
gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring * ring,unsigned vmid)3002 static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
3003 					  unsigned vmid)
3004 {
3005 	struct amdgpu_device *adev = ring->adev;
3006 	uint32_t value = 0;
3007 
3008 	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
3009 	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
3010 	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
3011 	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
3012 	amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
3013 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regSQ_CMD, value);
3014 	amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
3015 }
3016 
gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(struct amdgpu_device * adev,int me,int pipe,enum amdgpu_interrupt_state state,int xcc_id)3017 static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3018 	struct amdgpu_device *adev, int me, int pipe,
3019 	enum amdgpu_interrupt_state state, int xcc_id)
3020 {
3021 	u32 mec_int_cntl, mec_int_cntl_reg;
3022 
3023 	/*
3024 	 * amdgpu controls only the first MEC. That's why this function only
3025 	 * handles the setting of interrupts for this specific MEC. All other
3026 	 * pipes' interrupts are set by amdkfd.
3027 	 */
3028 
3029 	if (me == 1) {
3030 		switch (pipe) {
3031 		case 0:
3032 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3033 			break;
3034 		case 1:
3035 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3036 			break;
3037 		case 2:
3038 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3039 			break;
3040 		case 3:
3041 			mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3042 			break;
3043 		default:
3044 			DRM_DEBUG("invalid pipe %d\n", pipe);
3045 			return;
3046 		}
3047 	} else {
3048 		DRM_DEBUG("invalid me %d\n", me);
3049 		return;
3050 	}
3051 
3052 	switch (state) {
3053 	case AMDGPU_IRQ_STATE_DISABLE:
3054 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3055 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3056 					     TIME_STAMP_INT_ENABLE, 0);
3057 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3058 		break;
3059 	case AMDGPU_IRQ_STATE_ENABLE:
3060 		mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
3061 		mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3062 					     TIME_STAMP_INT_ENABLE, 1);
3063 		WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
3064 		break;
3065 	default:
3066 		break;
3067 	}
3068 }
3069 
gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device * adev,int xcc_id,int me,int pipe)3070 static u32 gfx_v9_4_3_get_cpc_int_cntl(struct amdgpu_device *adev,
3071 				     int xcc_id, int me, int pipe)
3072 {
3073 	/*
3074 	 * amdgpu controls only the first MEC. That's why this function only
3075 	 * handles the setting of interrupts for this specific MEC. All other
3076 	 * pipes' interrupts are set by amdkfd.
3077 	 */
3078 	if (me != 1)
3079 		return 0;
3080 
3081 	switch (pipe) {
3082 	case 0:
3083 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL);
3084 	case 1:
3085 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL);
3086 	case 2:
3087 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL);
3088 	case 3:
3089 		return SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL);
3090 	default:
3091 		return 0;
3092 	}
3093 }
3094 
gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned type,enum amdgpu_interrupt_state state)3095 static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev,
3096 					     struct amdgpu_irq_src *source,
3097 					     unsigned type,
3098 					     enum amdgpu_interrupt_state state)
3099 {
3100 	u32 mec_int_cntl_reg, mec_int_cntl;
3101 	int i, j, k, num_xcc;
3102 
3103 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3104 	switch (state) {
3105 	case AMDGPU_IRQ_STATE_DISABLE:
3106 	case AMDGPU_IRQ_STATE_ENABLE:
3107 		for (i = 0; i < num_xcc; i++) {
3108 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3109 					      PRIV_REG_INT_ENABLE,
3110 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3111 			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3112 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3113 					/* MECs start at 1 */
3114 					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3115 
3116 					if (mec_int_cntl_reg) {
3117 						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3118 						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3119 									     PRIV_REG_INT_ENABLE,
3120 									     state == AMDGPU_IRQ_STATE_ENABLE ?
3121 									     1 : 0);
3122 						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3123 					}
3124 				}
3125 			}
3126 		}
3127 		break;
3128 	default:
3129 		break;
3130 	}
3131 
3132 	return 0;
3133 }
3134 
gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned type,enum amdgpu_interrupt_state state)3135 static int gfx_v9_4_3_set_bad_op_fault_state(struct amdgpu_device *adev,
3136 					     struct amdgpu_irq_src *source,
3137 					     unsigned type,
3138 					     enum amdgpu_interrupt_state state)
3139 {
3140 	u32 mec_int_cntl_reg, mec_int_cntl;
3141 	int i, j, k, num_xcc;
3142 
3143 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3144 	switch (state) {
3145 	case AMDGPU_IRQ_STATE_DISABLE:
3146 	case AMDGPU_IRQ_STATE_ENABLE:
3147 		for (i = 0; i < num_xcc; i++) {
3148 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3149 					      OPCODE_ERROR_INT_ENABLE,
3150 					      state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3151 			for (j = 0; j < adev->gfx.mec.num_mec; j++) {
3152 				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
3153 					/* MECs start at 1 */
3154 					mec_int_cntl_reg = gfx_v9_4_3_get_cpc_int_cntl(adev, i, j + 1, k);
3155 
3156 					if (mec_int_cntl_reg) {
3157 						mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, i);
3158 						mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
3159 									     OPCODE_ERROR_INT_ENABLE,
3160 									     state == AMDGPU_IRQ_STATE_ENABLE ?
3161 									     1 : 0);
3162 						WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, i);
3163 					}
3164 				}
3165 			}
3166 		}
3167 		break;
3168 	default:
3169 		break;
3170 	}
3171 
3172 	return 0;
3173 }
3174 
gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device * adev,struct amdgpu_irq_src * source,unsigned type,enum amdgpu_interrupt_state state)3175 static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev,
3176 					      struct amdgpu_irq_src *source,
3177 					      unsigned type,
3178 					      enum amdgpu_interrupt_state state)
3179 {
3180 	int i, num_xcc;
3181 
3182 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3183 	switch (state) {
3184 	case AMDGPU_IRQ_STATE_DISABLE:
3185 	case AMDGPU_IRQ_STATE_ENABLE:
3186 		for (i = 0; i < num_xcc; i++)
3187 			WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0,
3188 				PRIV_INSTR_INT_ENABLE,
3189 				state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
3190 		break;
3191 	default:
3192 		break;
3193 	}
3194 
3195 	return 0;
3196 }
3197 
gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device * adev,struct amdgpu_irq_src * src,unsigned type,enum amdgpu_interrupt_state state)3198 static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev,
3199 					    struct amdgpu_irq_src *src,
3200 					    unsigned type,
3201 					    enum amdgpu_interrupt_state state)
3202 {
3203 	int i, num_xcc;
3204 
3205 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
3206 	for (i = 0; i < num_xcc; i++) {
3207 		switch (type) {
3208 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
3209 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3210 				adev, 1, 0, state, i);
3211 			break;
3212 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
3213 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3214 				adev, 1, 1, state, i);
3215 			break;
3216 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
3217 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3218 				adev, 1, 2, state, i);
3219 			break;
3220 		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
3221 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3222 				adev, 1, 3, state, i);
3223 			break;
3224 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
3225 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3226 				adev, 2, 0, state, i);
3227 			break;
3228 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
3229 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3230 				adev, 2, 1, state, i);
3231 			break;
3232 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
3233 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3234 				adev, 2, 2, state, i);
3235 			break;
3236 		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
3237 			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
3238 				adev, 2, 3, state, i);
3239 			break;
3240 		default:
3241 			break;
3242 		}
3243 	}
3244 
3245 	return 0;
3246 }
3247 
gfx_v9_4_3_eop_irq(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)3248 static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev,
3249 			    struct amdgpu_irq_src *source,
3250 			    struct amdgpu_iv_entry *entry)
3251 {
3252 	int i, xcc_id;
3253 	u8 me_id, pipe_id, queue_id;
3254 	struct amdgpu_ring *ring;
3255 
3256 	DRM_DEBUG("IH: CP EOP\n");
3257 	me_id = (entry->ring_id & 0x0c) >> 2;
3258 	pipe_id = (entry->ring_id & 0x03) >> 0;
3259 	queue_id = (entry->ring_id & 0x70) >> 4;
3260 
3261 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3262 
3263 	if (xcc_id == -EINVAL)
3264 		return -EINVAL;
3265 
3266 	switch (me_id) {
3267 	case 0:
3268 	case 1:
3269 	case 2:
3270 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3271 			ring = &adev->gfx.compute_ring
3272 					[i +
3273 					 xcc_id * adev->gfx.num_compute_rings];
3274 			/* Per-queue interrupt is supported for MEC starting from VI.
3275 			  * The interrupt can only be enabled/disabled per pipe instead of per queue.
3276 			  */
3277 
3278 			if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id))
3279 				amdgpu_fence_process(ring);
3280 		}
3281 		break;
3282 	}
3283 	return 0;
3284 }
3285 
gfx_v9_4_3_fault(struct amdgpu_device * adev,struct amdgpu_iv_entry * entry)3286 static void gfx_v9_4_3_fault(struct amdgpu_device *adev,
3287 			   struct amdgpu_iv_entry *entry)
3288 {
3289 	u8 me_id, pipe_id, queue_id;
3290 	struct amdgpu_ring *ring;
3291 	int i, xcc_id;
3292 
3293 	me_id = (entry->ring_id & 0x0c) >> 2;
3294 	pipe_id = (entry->ring_id & 0x03) >> 0;
3295 	queue_id = (entry->ring_id & 0x70) >> 4;
3296 
3297 	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
3298 
3299 	if (xcc_id == -EINVAL)
3300 		return;
3301 
3302 	switch (me_id) {
3303 	case 0:
3304 	case 1:
3305 	case 2:
3306 		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
3307 			ring = &adev->gfx.compute_ring
3308 					[i +
3309 					 xcc_id * adev->gfx.num_compute_rings];
3310 			if (ring->me == me_id && ring->pipe == pipe_id &&
3311 			    ring->queue == queue_id)
3312 				drm_sched_fault(&ring->sched);
3313 		}
3314 		break;
3315 	}
3316 }
3317 
gfx_v9_4_3_priv_reg_irq(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)3318 static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev,
3319 				 struct amdgpu_irq_src *source,
3320 				 struct amdgpu_iv_entry *entry)
3321 {
3322 	DRM_ERROR("Illegal register access in command stream\n");
3323 	gfx_v9_4_3_fault(adev, entry);
3324 	return 0;
3325 }
3326 
gfx_v9_4_3_bad_op_irq(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)3327 static int gfx_v9_4_3_bad_op_irq(struct amdgpu_device *adev,
3328 				 struct amdgpu_irq_src *source,
3329 				 struct amdgpu_iv_entry *entry)
3330 {
3331 	DRM_ERROR("Illegal opcode in command stream\n");
3332 	gfx_v9_4_3_fault(adev, entry);
3333 	return 0;
3334 }
3335 
gfx_v9_4_3_priv_inst_irq(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)3336 static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev,
3337 				  struct amdgpu_irq_src *source,
3338 				  struct amdgpu_iv_entry *entry)
3339 {
3340 	DRM_ERROR("Illegal instruction in command stream\n");
3341 	gfx_v9_4_3_fault(adev, entry);
3342 	return 0;
3343 }
3344 
gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring * ring)3345 static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring)
3346 {
3347 	const unsigned int cp_coher_cntl =
3348 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
3349 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
3350 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
3351 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
3352 			PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
3353 
3354 	/* ACQUIRE_MEM -make one or more surfaces valid for use by the subsequent operations */
3355 	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
3356 	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
3357 	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
3358 	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
3359 	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
3360 	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
3361 	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3362 }
3363 
gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring * ring,uint32_t pipe,bool enable)3364 static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring,
3365 					uint32_t pipe, bool enable)
3366 {
3367 	struct amdgpu_device *adev = ring->adev;
3368 	uint32_t val;
3369 	uint32_t wcl_cs_reg;
3370 
3371 	/* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */
3372 	val = enable ? 0x1 : 0x7f;
3373 
3374 	switch (pipe) {
3375 	case 0:
3376 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0);
3377 		break;
3378 	case 1:
3379 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1);
3380 		break;
3381 	case 2:
3382 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2);
3383 		break;
3384 	case 3:
3385 		wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3);
3386 		break;
3387 	default:
3388 		DRM_DEBUG("invalid pipe %d\n", pipe);
3389 		return;
3390 	}
3391 
3392 	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
3393 
3394 }
gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring * ring,bool enable)3395 static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
3396 {
3397 	struct amdgpu_device *adev = ring->adev;
3398 	uint32_t val;
3399 	int i;
3400 
3401 	/* regSPI_WCL_PIPE_PERCENT_GFX is 7 bit multiplier register to limit
3402 	 * number of gfx waves. Setting 5 bit will make sure gfx only gets
3403 	 * around 25% of gpu resources.
3404 	 */
3405 	val = enable ? 0x1f : 0x07ffffff;
3406 	amdgpu_ring_emit_wreg(ring,
3407 			      SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX),
3408 			      val);
3409 
3410 	/* Restrict waves for normal/low priority compute queues as well
3411 	 * to get best QoS for high priority compute jobs.
3412 	 *
3413 	 * amdgpu controls only 1st ME(0-3 CS pipes).
3414 	 */
3415 	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
3416 		if (i != ring->pipe)
3417 			gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
3418 
3419 	}
3420 }
3421 
gfx_v9_4_3_unmap_done(struct amdgpu_device * adev,uint32_t me,uint32_t pipe,uint32_t queue,uint32_t xcc_id)3422 static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
3423 				uint32_t pipe, uint32_t queue,
3424 				uint32_t xcc_id)
3425 {
3426 	int i, r;
3427 	/* make sure dequeue is complete*/
3428 	gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
3429 	mutex_lock(&adev->srbm_mutex);
3430 	soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
3431 	for (i = 0; i < adev->usec_timeout; i++) {
3432 		if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
3433 			break;
3434 		udelay(1);
3435 	}
3436 	if (i >= adev->usec_timeout)
3437 		r = -ETIMEDOUT;
3438 	else
3439 		r = 0;
3440 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
3441 	mutex_unlock(&adev->srbm_mutex);
3442 	gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
3443 
3444 	return r;
3445 
3446 }
3447 
gfx_v9_4_3_pipe_reset_support(struct amdgpu_device * adev)3448 static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
3449 {
3450 	/*TODO: Need check gfx9.4.4 mec fw whether supports pipe reset as well.*/
3451 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
3452 			adev->gfx.mec_fw_version >= 0x0000009b)
3453 		return true;
3454 	else
3455 		dev_warn_once(adev->dev, "Please use the latest MEC version to see whether support pipe reset\n");
3456 
3457 	return false;
3458 }
3459 
gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring * ring)3460 static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
3461 {
3462 	struct amdgpu_device *adev = ring->adev;
3463 	uint32_t reset_pipe, clean_pipe;
3464 	int r;
3465 
3466 	if (!gfx_v9_4_3_pipe_reset_support(adev))
3467 		return -EINVAL;
3468 
3469 	gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
3470 	mutex_lock(&adev->srbm_mutex);
3471 
3472 	reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
3473 	clean_pipe = reset_pipe;
3474 
3475 	if (ring->me == 1) {
3476 		switch (ring->pipe) {
3477 		case 0:
3478 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3479 						   MEC_ME1_PIPE0_RESET, 1);
3480 			break;
3481 		case 1:
3482 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3483 						   MEC_ME1_PIPE1_RESET, 1);
3484 			break;
3485 		case 2:
3486 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3487 						   MEC_ME1_PIPE2_RESET, 1);
3488 			break;
3489 		case 3:
3490 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3491 						   MEC_ME1_PIPE3_RESET, 1);
3492 			break;
3493 		default:
3494 			break;
3495 		}
3496 	} else {
3497 		if (ring->pipe)
3498 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3499 						   MEC_ME2_PIPE1_RESET, 1);
3500 		else
3501 			reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
3502 						   MEC_ME2_PIPE0_RESET, 1);
3503 	}
3504 
3505 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
3506 	WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
3507 	mutex_unlock(&adev->srbm_mutex);
3508 	gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
3509 
3510 	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3511 	return r;
3512 }
3513 
gfx_v9_4_3_reset_kcq(struct amdgpu_ring * ring,unsigned int vmid)3514 static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
3515 				unsigned int vmid)
3516 {
3517 	struct amdgpu_device *adev = ring->adev;
3518 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
3519 	struct amdgpu_ring *kiq_ring = &kiq->ring;
3520 	unsigned long flags;
3521 	int r;
3522 
3523 	if (amdgpu_sriov_vf(adev))
3524 		return -EINVAL;
3525 
3526 	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
3527 		return -EINVAL;
3528 
3529 	spin_lock_irqsave(&kiq->ring_lock, flags);
3530 
3531 	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
3532 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
3533 		return -ENOMEM;
3534 	}
3535 
3536 	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
3537 				   0, 0);
3538 	amdgpu_ring_commit(kiq_ring);
3539 
3540 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
3541 
3542 	r = amdgpu_ring_test_ring(kiq_ring);
3543 	if (r) {
3544 		dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n",
3545 				ring->name);
3546 		goto pipe_reset;
3547 	}
3548 
3549 	r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
3550 	if (r)
3551 		dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
3552 
3553 pipe_reset:
3554 	if(r) {
3555 		r = gfx_v9_4_3_reset_hw_pipe(ring);
3556 		dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
3557 				r ? "failed" : "successfully");
3558 		if (r)
3559 			return r;
3560 	}
3561 
3562 	r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
3563 	if (r) {
3564 		dev_err(adev->dev, "fail to init kcq\n");
3565 		return r;
3566 	}
3567 	spin_lock_irqsave(&kiq->ring_lock, flags);
3568 	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
3569 	if (r) {
3570 		spin_unlock_irqrestore(&kiq->ring_lock, flags);
3571 		return -ENOMEM;
3572 	}
3573 	kiq->pmf->kiq_map_queues(kiq_ring, ring);
3574 	amdgpu_ring_commit(kiq_ring);
3575 	spin_unlock_irqrestore(&kiq->ring_lock, flags);
3576 
3577 	r = amdgpu_ring_test_ring(kiq_ring);
3578 	if (r) {
3579 		dev_err(adev->dev, "fail to remap queue\n");
3580 		return r;
3581 	}
3582 	return amdgpu_ring_test_ring(ring);
3583 }
3584 
3585 enum amdgpu_gfx_cp_ras_mem_id {
3586 	AMDGPU_GFX_CP_MEM1 = 1,
3587 	AMDGPU_GFX_CP_MEM2,
3588 	AMDGPU_GFX_CP_MEM3,
3589 	AMDGPU_GFX_CP_MEM4,
3590 	AMDGPU_GFX_CP_MEM5,
3591 };
3592 
3593 enum amdgpu_gfx_gcea_ras_mem_id {
3594 	AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4,
3595 	AMDGPU_GFX_GCEA_IORD_CMDMEM,
3596 	AMDGPU_GFX_GCEA_GMIWR_CMDMEM,
3597 	AMDGPU_GFX_GCEA_GMIRD_CMDMEM,
3598 	AMDGPU_GFX_GCEA_DRAMWR_CMDMEM,
3599 	AMDGPU_GFX_GCEA_DRAMRD_CMDMEM,
3600 	AMDGPU_GFX_GCEA_MAM_DMEM0,
3601 	AMDGPU_GFX_GCEA_MAM_DMEM1,
3602 	AMDGPU_GFX_GCEA_MAM_DMEM2,
3603 	AMDGPU_GFX_GCEA_MAM_DMEM3,
3604 	AMDGPU_GFX_GCEA_MAM_AMEM0,
3605 	AMDGPU_GFX_GCEA_MAM_AMEM1,
3606 	AMDGPU_GFX_GCEA_MAM_AMEM2,
3607 	AMDGPU_GFX_GCEA_MAM_AMEM3,
3608 	AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER,
3609 	AMDGPU_GFX_GCEA_WRET_TAGMEM,
3610 	AMDGPU_GFX_GCEA_RRET_TAGMEM,
3611 	AMDGPU_GFX_GCEA_IOWR_DATAMEM,
3612 	AMDGPU_GFX_GCEA_GMIWR_DATAMEM,
3613 	AMDGPU_GFX_GCEA_DRAM_DATAMEM,
3614 };
3615 
3616 enum amdgpu_gfx_gc_cane_ras_mem_id {
3617 	AMDGPU_GFX_GC_CANE_MEM0 = 0,
3618 };
3619 
3620 enum amdgpu_gfx_gcutcl2_ras_mem_id {
3621 	AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160,
3622 };
3623 
3624 enum amdgpu_gfx_gds_ras_mem_id {
3625 	AMDGPU_GFX_GDS_MEM0 = 0,
3626 };
3627 
3628 enum amdgpu_gfx_lds_ras_mem_id {
3629 	AMDGPU_GFX_LDS_BANK0 = 0,
3630 	AMDGPU_GFX_LDS_BANK1,
3631 	AMDGPU_GFX_LDS_BANK2,
3632 	AMDGPU_GFX_LDS_BANK3,
3633 	AMDGPU_GFX_LDS_BANK4,
3634 	AMDGPU_GFX_LDS_BANK5,
3635 	AMDGPU_GFX_LDS_BANK6,
3636 	AMDGPU_GFX_LDS_BANK7,
3637 	AMDGPU_GFX_LDS_BANK8,
3638 	AMDGPU_GFX_LDS_BANK9,
3639 	AMDGPU_GFX_LDS_BANK10,
3640 	AMDGPU_GFX_LDS_BANK11,
3641 	AMDGPU_GFX_LDS_BANK12,
3642 	AMDGPU_GFX_LDS_BANK13,
3643 	AMDGPU_GFX_LDS_BANK14,
3644 	AMDGPU_GFX_LDS_BANK15,
3645 	AMDGPU_GFX_LDS_BANK16,
3646 	AMDGPU_GFX_LDS_BANK17,
3647 	AMDGPU_GFX_LDS_BANK18,
3648 	AMDGPU_GFX_LDS_BANK19,
3649 	AMDGPU_GFX_LDS_BANK20,
3650 	AMDGPU_GFX_LDS_BANK21,
3651 	AMDGPU_GFX_LDS_BANK22,
3652 	AMDGPU_GFX_LDS_BANK23,
3653 	AMDGPU_GFX_LDS_BANK24,
3654 	AMDGPU_GFX_LDS_BANK25,
3655 	AMDGPU_GFX_LDS_BANK26,
3656 	AMDGPU_GFX_LDS_BANK27,
3657 	AMDGPU_GFX_LDS_BANK28,
3658 	AMDGPU_GFX_LDS_BANK29,
3659 	AMDGPU_GFX_LDS_BANK30,
3660 	AMDGPU_GFX_LDS_BANK31,
3661 	AMDGPU_GFX_LDS_SP_BUFFER_A,
3662 	AMDGPU_GFX_LDS_SP_BUFFER_B,
3663 };
3664 
3665 enum amdgpu_gfx_rlc_ras_mem_id {
3666 	AMDGPU_GFX_RLC_GPMF32 = 1,
3667 	AMDGPU_GFX_RLC_RLCVF32,
3668 	AMDGPU_GFX_RLC_SCRATCH,
3669 	AMDGPU_GFX_RLC_SRM_ARAM,
3670 	AMDGPU_GFX_RLC_SRM_DRAM,
3671 	AMDGPU_GFX_RLC_TCTAG,
3672 	AMDGPU_GFX_RLC_SPM_SE,
3673 	AMDGPU_GFX_RLC_SPM_GRBMT,
3674 };
3675 
3676 enum amdgpu_gfx_sp_ras_mem_id {
3677 	AMDGPU_GFX_SP_SIMDID0 = 0,
3678 };
3679 
3680 enum amdgpu_gfx_spi_ras_mem_id {
3681 	AMDGPU_GFX_SPI_MEM0 = 0,
3682 	AMDGPU_GFX_SPI_MEM1,
3683 	AMDGPU_GFX_SPI_MEM2,
3684 	AMDGPU_GFX_SPI_MEM3,
3685 };
3686 
3687 enum amdgpu_gfx_sqc_ras_mem_id {
3688 	AMDGPU_GFX_SQC_INST_CACHE_A = 100,
3689 	AMDGPU_GFX_SQC_INST_CACHE_B = 101,
3690 	AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102,
3691 	AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103,
3692 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104,
3693 	AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105,
3694 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106,
3695 	AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107,
3696 	AMDGPU_GFX_SQC_DATA_CACHE_A = 200,
3697 	AMDGPU_GFX_SQC_DATA_CACHE_B = 201,
3698 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202,
3699 	AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203,
3700 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204,
3701 	AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205,
3702 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206,
3703 	AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207,
3704 	AMDGPU_GFX_SQC_DIRTY_BIT_A = 208,
3705 	AMDGPU_GFX_SQC_DIRTY_BIT_B = 209,
3706 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210,
3707 	AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211,
3708 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212,
3709 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213,
3710 	AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108,
3711 };
3712 
3713 enum amdgpu_gfx_sq_ras_mem_id {
3714 	AMDGPU_GFX_SQ_SGPR_MEM0 = 0,
3715 	AMDGPU_GFX_SQ_SGPR_MEM1,
3716 	AMDGPU_GFX_SQ_SGPR_MEM2,
3717 	AMDGPU_GFX_SQ_SGPR_MEM3,
3718 };
3719 
3720 enum amdgpu_gfx_ta_ras_mem_id {
3721 	AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1,
3722 	AMDGPU_GFX_TA_FS_AFIFO_RAM_HI,
3723 	AMDGPU_GFX_TA_FS_CFIFO_RAM,
3724 	AMDGPU_GFX_TA_FSX_LFIFO,
3725 	AMDGPU_GFX_TA_FS_DFIFO_RAM,
3726 };
3727 
3728 enum amdgpu_gfx_tcc_ras_mem_id {
3729 	AMDGPU_GFX_TCC_MEM1 = 1,
3730 };
3731 
3732 enum amdgpu_gfx_tca_ras_mem_id {
3733 	AMDGPU_GFX_TCA_MEM1 = 1,
3734 };
3735 
3736 enum amdgpu_gfx_tci_ras_mem_id {
3737 	AMDGPU_GFX_TCIW_MEM = 1,
3738 };
3739 
3740 enum amdgpu_gfx_tcp_ras_mem_id {
3741 	AMDGPU_GFX_TCP_LFIFO0 = 1,
3742 	AMDGPU_GFX_TCP_SET0BANK0_RAM,
3743 	AMDGPU_GFX_TCP_SET0BANK1_RAM,
3744 	AMDGPU_GFX_TCP_SET0BANK2_RAM,
3745 	AMDGPU_GFX_TCP_SET0BANK3_RAM,
3746 	AMDGPU_GFX_TCP_SET1BANK0_RAM,
3747 	AMDGPU_GFX_TCP_SET1BANK1_RAM,
3748 	AMDGPU_GFX_TCP_SET1BANK2_RAM,
3749 	AMDGPU_GFX_TCP_SET1BANK3_RAM,
3750 	AMDGPU_GFX_TCP_SET2BANK0_RAM,
3751 	AMDGPU_GFX_TCP_SET2BANK1_RAM,
3752 	AMDGPU_GFX_TCP_SET2BANK2_RAM,
3753 	AMDGPU_GFX_TCP_SET2BANK3_RAM,
3754 	AMDGPU_GFX_TCP_SET3BANK0_RAM,
3755 	AMDGPU_GFX_TCP_SET3BANK1_RAM,
3756 	AMDGPU_GFX_TCP_SET3BANK2_RAM,
3757 	AMDGPU_GFX_TCP_SET3BANK3_RAM,
3758 	AMDGPU_GFX_TCP_VM_FIFO,
3759 	AMDGPU_GFX_TCP_DB_TAGRAM0,
3760 	AMDGPU_GFX_TCP_DB_TAGRAM1,
3761 	AMDGPU_GFX_TCP_DB_TAGRAM2,
3762 	AMDGPU_GFX_TCP_DB_TAGRAM3,
3763 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0,
3764 	AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1,
3765 	AMDGPU_GFX_TCP_CMD_FIFO,
3766 };
3767 
3768 enum amdgpu_gfx_td_ras_mem_id {
3769 	AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1,
3770 	AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM,
3771 	AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM,
3772 };
3773 
3774 enum amdgpu_gfx_tcx_ras_mem_id {
3775 	AMDGPU_GFX_TCX_FIFOD0 = 0,
3776 	AMDGPU_GFX_TCX_FIFOD1,
3777 	AMDGPU_GFX_TCX_FIFOD2,
3778 	AMDGPU_GFX_TCX_FIFOD3,
3779 	AMDGPU_GFX_TCX_FIFOD4,
3780 	AMDGPU_GFX_TCX_FIFOD5,
3781 	AMDGPU_GFX_TCX_FIFOD6,
3782 	AMDGPU_GFX_TCX_FIFOD7,
3783 	AMDGPU_GFX_TCX_FIFOB0,
3784 	AMDGPU_GFX_TCX_FIFOB1,
3785 	AMDGPU_GFX_TCX_FIFOB2,
3786 	AMDGPU_GFX_TCX_FIFOB3,
3787 	AMDGPU_GFX_TCX_FIFOB4,
3788 	AMDGPU_GFX_TCX_FIFOB5,
3789 	AMDGPU_GFX_TCX_FIFOB6,
3790 	AMDGPU_GFX_TCX_FIFOB7,
3791 	AMDGPU_GFX_TCX_FIFOA0,
3792 	AMDGPU_GFX_TCX_FIFOA1,
3793 	AMDGPU_GFX_TCX_FIFOA2,
3794 	AMDGPU_GFX_TCX_FIFOA3,
3795 	AMDGPU_GFX_TCX_FIFOA4,
3796 	AMDGPU_GFX_TCX_FIFOA5,
3797 	AMDGPU_GFX_TCX_FIFOA6,
3798 	AMDGPU_GFX_TCX_FIFOA7,
3799 	AMDGPU_GFX_TCX_CFIFO0,
3800 	AMDGPU_GFX_TCX_CFIFO1,
3801 	AMDGPU_GFX_TCX_CFIFO2,
3802 	AMDGPU_GFX_TCX_CFIFO3,
3803 	AMDGPU_GFX_TCX_CFIFO4,
3804 	AMDGPU_GFX_TCX_CFIFO5,
3805 	AMDGPU_GFX_TCX_CFIFO6,
3806 	AMDGPU_GFX_TCX_CFIFO7,
3807 	AMDGPU_GFX_TCX_FIFO_ACKB0,
3808 	AMDGPU_GFX_TCX_FIFO_ACKB1,
3809 	AMDGPU_GFX_TCX_FIFO_ACKB2,
3810 	AMDGPU_GFX_TCX_FIFO_ACKB3,
3811 	AMDGPU_GFX_TCX_FIFO_ACKB4,
3812 	AMDGPU_GFX_TCX_FIFO_ACKB5,
3813 	AMDGPU_GFX_TCX_FIFO_ACKB6,
3814 	AMDGPU_GFX_TCX_FIFO_ACKB7,
3815 	AMDGPU_GFX_TCX_FIFO_ACKD0,
3816 	AMDGPU_GFX_TCX_FIFO_ACKD1,
3817 	AMDGPU_GFX_TCX_FIFO_ACKD2,
3818 	AMDGPU_GFX_TCX_FIFO_ACKD3,
3819 	AMDGPU_GFX_TCX_FIFO_ACKD4,
3820 	AMDGPU_GFX_TCX_FIFO_ACKD5,
3821 	AMDGPU_GFX_TCX_FIFO_ACKD6,
3822 	AMDGPU_GFX_TCX_FIFO_ACKD7,
3823 	AMDGPU_GFX_TCX_DST_FIFOA0,
3824 	AMDGPU_GFX_TCX_DST_FIFOA1,
3825 	AMDGPU_GFX_TCX_DST_FIFOA2,
3826 	AMDGPU_GFX_TCX_DST_FIFOA3,
3827 	AMDGPU_GFX_TCX_DST_FIFOA4,
3828 	AMDGPU_GFX_TCX_DST_FIFOA5,
3829 	AMDGPU_GFX_TCX_DST_FIFOA6,
3830 	AMDGPU_GFX_TCX_DST_FIFOA7,
3831 	AMDGPU_GFX_TCX_DST_FIFOB0,
3832 	AMDGPU_GFX_TCX_DST_FIFOB1,
3833 	AMDGPU_GFX_TCX_DST_FIFOB2,
3834 	AMDGPU_GFX_TCX_DST_FIFOB3,
3835 	AMDGPU_GFX_TCX_DST_FIFOB4,
3836 	AMDGPU_GFX_TCX_DST_FIFOB5,
3837 	AMDGPU_GFX_TCX_DST_FIFOB6,
3838 	AMDGPU_GFX_TCX_DST_FIFOB7,
3839 	AMDGPU_GFX_TCX_DST_FIFOD0,
3840 	AMDGPU_GFX_TCX_DST_FIFOD1,
3841 	AMDGPU_GFX_TCX_DST_FIFOD2,
3842 	AMDGPU_GFX_TCX_DST_FIFOD3,
3843 	AMDGPU_GFX_TCX_DST_FIFOD4,
3844 	AMDGPU_GFX_TCX_DST_FIFOD5,
3845 	AMDGPU_GFX_TCX_DST_FIFOD6,
3846 	AMDGPU_GFX_TCX_DST_FIFOD7,
3847 	AMDGPU_GFX_TCX_DST_FIFO_ACKB0,
3848 	AMDGPU_GFX_TCX_DST_FIFO_ACKB1,
3849 	AMDGPU_GFX_TCX_DST_FIFO_ACKB2,
3850 	AMDGPU_GFX_TCX_DST_FIFO_ACKB3,
3851 	AMDGPU_GFX_TCX_DST_FIFO_ACKB4,
3852 	AMDGPU_GFX_TCX_DST_FIFO_ACKB5,
3853 	AMDGPU_GFX_TCX_DST_FIFO_ACKB6,
3854 	AMDGPU_GFX_TCX_DST_FIFO_ACKB7,
3855 	AMDGPU_GFX_TCX_DST_FIFO_ACKD0,
3856 	AMDGPU_GFX_TCX_DST_FIFO_ACKD1,
3857 	AMDGPU_GFX_TCX_DST_FIFO_ACKD2,
3858 	AMDGPU_GFX_TCX_DST_FIFO_ACKD3,
3859 	AMDGPU_GFX_TCX_DST_FIFO_ACKD4,
3860 	AMDGPU_GFX_TCX_DST_FIFO_ACKD5,
3861 	AMDGPU_GFX_TCX_DST_FIFO_ACKD6,
3862 	AMDGPU_GFX_TCX_DST_FIFO_ACKD7,
3863 };
3864 
3865 enum amdgpu_gfx_atc_l2_ras_mem_id {
3866 	AMDGPU_GFX_ATC_L2_MEM0 = 0,
3867 };
3868 
3869 enum amdgpu_gfx_utcl2_ras_mem_id {
3870 	AMDGPU_GFX_UTCL2_MEM0 = 0,
3871 };
3872 
3873 enum amdgpu_gfx_vml2_ras_mem_id {
3874 	AMDGPU_GFX_VML2_MEM0 = 0,
3875 };
3876 
3877 enum amdgpu_gfx_vml2_walker_ras_mem_id {
3878 	AMDGPU_GFX_VML2_WALKER_MEM0 = 0,
3879 };
3880 
3881 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = {
3882 	{AMDGPU_GFX_CP_MEM1, "CP_MEM1"},
3883 	{AMDGPU_GFX_CP_MEM2, "CP_MEM2"},
3884 	{AMDGPU_GFX_CP_MEM3, "CP_MEM3"},
3885 	{AMDGPU_GFX_CP_MEM4, "CP_MEM4"},
3886 	{AMDGPU_GFX_CP_MEM5, "CP_MEM5"},
3887 };
3888 
3889 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = {
3890 	{AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM"},
3891 	{AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM"},
3892 	{AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM"},
3893 	{AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM"},
3894 	{AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM"},
3895 	{AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM"},
3896 	{AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0"},
3897 	{AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1"},
3898 	{AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2"},
3899 	{AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3"},
3900 	{AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0"},
3901 	{AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1"},
3902 	{AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2"},
3903 	{AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3"},
3904 	{AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER"},
3905 	{AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM"},
3906 	{AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM"},
3907 	{AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM"},
3908 	{AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM"},
3909 	{AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM"},
3910 };
3911 
3912 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = {
3913 	{AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0"},
3914 };
3915 
3916 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = {
3917 	{AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95"},
3918 };
3919 
3920 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = {
3921 	{AMDGPU_GFX_GDS_MEM0, "GDS_MEM"},
3922 };
3923 
3924 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = {
3925 	{AMDGPU_GFX_LDS_BANK0, "LDS_BANK0"},
3926 	{AMDGPU_GFX_LDS_BANK1, "LDS_BANK1"},
3927 	{AMDGPU_GFX_LDS_BANK2, "LDS_BANK2"},
3928 	{AMDGPU_GFX_LDS_BANK3, "LDS_BANK3"},
3929 	{AMDGPU_GFX_LDS_BANK4, "LDS_BANK4"},
3930 	{AMDGPU_GFX_LDS_BANK5, "LDS_BANK5"},
3931 	{AMDGPU_GFX_LDS_BANK6, "LDS_BANK6"},
3932 	{AMDGPU_GFX_LDS_BANK7, "LDS_BANK7"},
3933 	{AMDGPU_GFX_LDS_BANK8, "LDS_BANK8"},
3934 	{AMDGPU_GFX_LDS_BANK9, "LDS_BANK9"},
3935 	{AMDGPU_GFX_LDS_BANK10, "LDS_BANK10"},
3936 	{AMDGPU_GFX_LDS_BANK11, "LDS_BANK11"},
3937 	{AMDGPU_GFX_LDS_BANK12, "LDS_BANK12"},
3938 	{AMDGPU_GFX_LDS_BANK13, "LDS_BANK13"},
3939 	{AMDGPU_GFX_LDS_BANK14, "LDS_BANK14"},
3940 	{AMDGPU_GFX_LDS_BANK15, "LDS_BANK15"},
3941 	{AMDGPU_GFX_LDS_BANK16, "LDS_BANK16"},
3942 	{AMDGPU_GFX_LDS_BANK17, "LDS_BANK17"},
3943 	{AMDGPU_GFX_LDS_BANK18, "LDS_BANK18"},
3944 	{AMDGPU_GFX_LDS_BANK19, "LDS_BANK19"},
3945 	{AMDGPU_GFX_LDS_BANK20, "LDS_BANK20"},
3946 	{AMDGPU_GFX_LDS_BANK21, "LDS_BANK21"},
3947 	{AMDGPU_GFX_LDS_BANK22, "LDS_BANK22"},
3948 	{AMDGPU_GFX_LDS_BANK23, "LDS_BANK23"},
3949 	{AMDGPU_GFX_LDS_BANK24, "LDS_BANK24"},
3950 	{AMDGPU_GFX_LDS_BANK25, "LDS_BANK25"},
3951 	{AMDGPU_GFX_LDS_BANK26, "LDS_BANK26"},
3952 	{AMDGPU_GFX_LDS_BANK27, "LDS_BANK27"},
3953 	{AMDGPU_GFX_LDS_BANK28, "LDS_BANK28"},
3954 	{AMDGPU_GFX_LDS_BANK29, "LDS_BANK29"},
3955 	{AMDGPU_GFX_LDS_BANK30, "LDS_BANK30"},
3956 	{AMDGPU_GFX_LDS_BANK31, "LDS_BANK31"},
3957 	{AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A"},
3958 	{AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B"},
3959 };
3960 
3961 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = {
3962 	{AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32"},
3963 	{AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32"},
3964 	{AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH"},
3965 	{AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM"},
3966 	{AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM"},
3967 	{AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG"},
3968 	{AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE"},
3969 	{AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT"},
3970 };
3971 
3972 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = {
3973 	{AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0"},
3974 };
3975 
3976 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = {
3977 	{AMDGPU_GFX_SPI_MEM0, "SPI_MEM0"},
3978 	{AMDGPU_GFX_SPI_MEM1, "SPI_MEM1"},
3979 	{AMDGPU_GFX_SPI_MEM2, "SPI_MEM2"},
3980 	{AMDGPU_GFX_SPI_MEM3, "SPI_MEM3"},
3981 };
3982 
3983 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = {
3984 	{AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A"},
3985 	{AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B"},
3986 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A"},
3987 	{AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B"},
3988 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A"},
3989 	{AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B"},
3990 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A"},
3991 	{AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B"},
3992 	{AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A"},
3993 	{AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B"},
3994 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A"},
3995 	{AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B"},
3996 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A"},
3997 	{AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B"},
3998 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A"},
3999 	{AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B"},
4000 	{AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A"},
4001 	{AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B"},
4002 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0"},
4003 	{AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1"},
4004 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A"},
4005 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B"},
4006 	{AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE"},
4007 };
4008 
4009 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = {
4010 	{AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0"},
4011 	{AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1"},
4012 	{AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2"},
4013 	{AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3"},
4014 };
4015 
4016 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = {
4017 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO"},
4018 	{AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI"},
4019 	{AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM"},
4020 	{AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO"},
4021 	{AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM"},
4022 };
4023 
4024 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = {
4025 	{AMDGPU_GFX_TCC_MEM1, "TCC_MEM1"},
4026 };
4027 
4028 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = {
4029 	{AMDGPU_GFX_TCA_MEM1, "TCA_MEM1"},
4030 };
4031 
4032 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = {
4033 	{AMDGPU_GFX_TCIW_MEM, "TCIW_MEM"},
4034 };
4035 
4036 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = {
4037 	{AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0"},
4038 	{AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM"},
4039 	{AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM"},
4040 	{AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM"},
4041 	{AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM"},
4042 	{AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM"},
4043 	{AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM"},
4044 	{AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM"},
4045 	{AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM"},
4046 	{AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM"},
4047 	{AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM"},
4048 	{AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM"},
4049 	{AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM"},
4050 	{AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM"},
4051 	{AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM"},
4052 	{AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM"},
4053 	{AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM"},
4054 	{AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO"},
4055 	{AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0"},
4056 	{AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1"},
4057 	{AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2"},
4058 	{AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3"},
4059 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0"},
4060 	{AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1"},
4061 	{AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO"},
4062 };
4063 
4064 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = {
4065 	{AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM"},
4066 	{AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM"},
4067 	{AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM"},
4068 };
4069 
4070 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = {
4071 	{AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0"},
4072 	{AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1"},
4073 	{AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2"},
4074 	{AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3"},
4075 	{AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4"},
4076 	{AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5"},
4077 	{AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6"},
4078 	{AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7"},
4079 	{AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0"},
4080 	{AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1"},
4081 	{AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2"},
4082 	{AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3"},
4083 	{AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4"},
4084 	{AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5"},
4085 	{AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6"},
4086 	{AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7"},
4087 	{AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0"},
4088 	{AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1"},
4089 	{AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2"},
4090 	{AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3"},
4091 	{AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4"},
4092 	{AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5"},
4093 	{AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6"},
4094 	{AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7"},
4095 	{AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0"},
4096 	{AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1"},
4097 	{AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2"},
4098 	{AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3"},
4099 	{AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4"},
4100 	{AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5"},
4101 	{AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6"},
4102 	{AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7"},
4103 	{AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0"},
4104 	{AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1"},
4105 	{AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2"},
4106 	{AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3"},
4107 	{AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4"},
4108 	{AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5"},
4109 	{AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6"},
4110 	{AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7"},
4111 	{AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0"},
4112 	{AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1"},
4113 	{AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2"},
4114 	{AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3"},
4115 	{AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4"},
4116 	{AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5"},
4117 	{AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6"},
4118 	{AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7"},
4119 	{AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0"},
4120 	{AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1"},
4121 	{AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2"},
4122 	{AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3"},
4123 	{AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4"},
4124 	{AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5"},
4125 	{AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6"},
4126 	{AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7"},
4127 	{AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0"},
4128 	{AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1"},
4129 	{AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2"},
4130 	{AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3"},
4131 	{AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4"},
4132 	{AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5"},
4133 	{AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6"},
4134 	{AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7"},
4135 	{AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0"},
4136 	{AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1"},
4137 	{AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2"},
4138 	{AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3"},
4139 	{AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4"},
4140 	{AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5"},
4141 	{AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6"},
4142 	{AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7"},
4143 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0"},
4144 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1"},
4145 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2"},
4146 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3"},
4147 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4"},
4148 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5"},
4149 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6"},
4150 	{AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7"},
4151 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0"},
4152 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1"},
4153 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2"},
4154 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3"},
4155 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4"},
4156 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5"},
4157 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6"},
4158 	{AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7"},
4159 };
4160 
4161 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = {
4162 	{AMDGPU_GFX_ATC_L2_MEM, "ATC_L2_MEM"},
4163 };
4164 
4165 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = {
4166 	{AMDGPU_GFX_UTCL2_MEM, "UTCL2_MEM"},
4167 };
4168 
4169 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = {
4170 	{AMDGPU_GFX_VML2_MEM, "VML2_MEM"},
4171 };
4172 
4173 static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = {
4174 	{AMDGPU_GFX_VML2_WALKER_MEM, "VML2_WALKER_MEM"},
4175 };
4176 
4177 static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = {
4178 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list)
4179 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list)
4180 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list)
4181 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list)
4182 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list)
4183 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list)
4184 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list)
4185 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list)
4186 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list)
4187 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list)
4188 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list)
4189 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list)
4190 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list)
4191 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list)
4192 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list)
4193 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list)
4194 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list)
4195 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list)
4196 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list)
4197 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list)
4198 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list)
4199 	AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list)
4200 };
4201 
4202 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = {
4203 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH),
4204 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4205 	    AMDGPU_GFX_RLC_MEM, 1},
4206 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI),
4207 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4208 	    AMDGPU_GFX_CP_MEM, 1},
4209 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI),
4210 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4211 	    AMDGPU_GFX_CP_MEM, 1},
4212 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI),
4213 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4214 	    AMDGPU_GFX_CP_MEM, 1},
4215 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI),
4216 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4217 	    AMDGPU_GFX_GDS_MEM, 1},
4218 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI),
4219 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4220 	    AMDGPU_GFX_GC_CANE_MEM, 1},
4221 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI),
4222 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4223 	    AMDGPU_GFX_SPI_MEM, 1},
4224 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI),
4225 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4226 	    AMDGPU_GFX_SP_MEM, 4},
4227 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI),
4228 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4229 	    AMDGPU_GFX_SP_MEM, 4},
4230 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI),
4231 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4232 	    AMDGPU_GFX_SQ_MEM, 4},
4233 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI),
4234 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4235 	    AMDGPU_GFX_SQC_MEM, 4},
4236 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI),
4237 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4238 	    AMDGPU_GFX_TCX_MEM, 1},
4239 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI),
4240 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4241 	    AMDGPU_GFX_TCC_MEM, 1},
4242 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI),
4243 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4244 	    AMDGPU_GFX_TA_MEM, 4},
4245 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG),
4246 	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4247 	    AMDGPU_GFX_TCI_MEM, 1},
4248 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG),
4249 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4250 	    AMDGPU_GFX_TCP_MEM, 4},
4251 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI),
4252 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4253 	    AMDGPU_GFX_TD_MEM, 4},
4254 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI),
4255 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4256 	    AMDGPU_GFX_GCEA_MEM, 1},
4257 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI),
4258 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4259 	    AMDGPU_GFX_LDS_MEM, 4},
4260 };
4261 
4262 static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = {
4263 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH),
4264 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC"},
4265 	    AMDGPU_GFX_RLC_MEM, 1},
4266 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI),
4267 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC"},
4268 	    AMDGPU_GFX_CP_MEM, 1},
4269 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI),
4270 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF"},
4271 	    AMDGPU_GFX_CP_MEM, 1},
4272 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI),
4273 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG"},
4274 	    AMDGPU_GFX_CP_MEM, 1},
4275 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI),
4276 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS"},
4277 	    AMDGPU_GFX_GDS_MEM, 1},
4278 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI),
4279 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE"},
4280 	    AMDGPU_GFX_GC_CANE_MEM, 1},
4281 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI),
4282 	    1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI"},
4283 	    AMDGPU_GFX_SPI_MEM, 1},
4284 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI),
4285 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0"},
4286 	    AMDGPU_GFX_SP_MEM, 4},
4287 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI),
4288 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1"},
4289 	    AMDGPU_GFX_SP_MEM, 4},
4290 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI),
4291 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ"},
4292 	    AMDGPU_GFX_SQ_MEM, 4},
4293 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI),
4294 	    5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC"},
4295 	    AMDGPU_GFX_SQC_MEM, 4},
4296 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI),
4297 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX"},
4298 	    AMDGPU_GFX_TCX_MEM, 1},
4299 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI),
4300 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC"},
4301 	    AMDGPU_GFX_TCC_MEM, 1},
4302 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI),
4303 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA"},
4304 	    AMDGPU_GFX_TA_MEM, 4},
4305 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG),
4306 	    27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI"},
4307 	    AMDGPU_GFX_TCI_MEM, 1},
4308 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG),
4309 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP"},
4310 	    AMDGPU_GFX_TCP_MEM, 4},
4311 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI),
4312 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD"},
4313 	    AMDGPU_GFX_TD_MEM, 4},
4314 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI),
4315 	    2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA"},
4316 	    AMDGPU_GFX_TCA_MEM, 1},
4317 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI),
4318 	    16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA"},
4319 	    AMDGPU_GFX_GCEA_MEM, 1},
4320 	{{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI),
4321 	    10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS"},
4322 	    AMDGPU_GFX_LDS_MEM, 4},
4323 };
4324 
gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device * adev,void * ras_error_status,int xcc_id)4325 static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
4326 					void *ras_error_status, int xcc_id)
4327 {
4328 	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
4329 	unsigned long ce_count = 0, ue_count = 0;
4330 	uint32_t i, j, k;
4331 
4332 	/* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */
4333 	struct amdgpu_smuio_mcm_config_info mcm_info = {
4334 		.socket_id = adev->smuio.funcs->get_socket_id(adev),
4335 		.die_id = xcc_id & 0x01 ? 1 : 0,
4336 	};
4337 
4338 	mutex_lock(&adev->grbm_idx_mutex);
4339 
4340 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4341 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4342 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4343 				/* no need to select if instance number is 1 */
4344 				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4345 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4346 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4347 
4348 				amdgpu_ras_inst_query_ras_error_count(adev,
4349 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4350 					1,
4351 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
4352 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
4353 					GET_INST(GC, xcc_id),
4354 					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
4355 					&ce_count);
4356 
4357 				amdgpu_ras_inst_query_ras_error_count(adev,
4358 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4359 					1,
4360 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4361 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4362 					GET_INST(GC, xcc_id),
4363 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4364 					&ue_count);
4365 			}
4366 		}
4367 	}
4368 
4369 	/* handle extra register entries of UE */
4370 	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4371 		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4372 			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4373 				/* no need to select if instance number is 1 */
4374 				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4375 					gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4376 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4377 
4378 				amdgpu_ras_inst_query_ras_error_count(adev,
4379 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4380 					1,
4381 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
4382 					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
4383 					GET_INST(GC, xcc_id),
4384 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
4385 					&ue_count);
4386 			}
4387 		}
4388 	}
4389 
4390 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4391 			xcc_id);
4392 	mutex_unlock(&adev->grbm_idx_mutex);
4393 
4394 	/* the caller should make sure initialize value of
4395 	 * err_data->ue_count and err_data->ce_count
4396 	 */
4397 	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
4398 	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, ce_count);
4399 }
4400 
gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device * adev,void * ras_error_status,int xcc_id)4401 static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
4402 					void *ras_error_status, int xcc_id)
4403 {
4404 	uint32_t i, j, k;
4405 
4406 	mutex_lock(&adev->grbm_idx_mutex);
4407 
4408 	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
4409 		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
4410 			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
4411 				/* no need to select if instance number is 1 */
4412 				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
4413 				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
4414 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4415 
4416 				amdgpu_ras_inst_reset_ras_error_count(adev,
4417 					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
4418 					1,
4419 					GET_INST(GC, xcc_id));
4420 
4421 				amdgpu_ras_inst_reset_ras_error_count(adev,
4422 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4423 					1,
4424 					GET_INST(GC, xcc_id));
4425 			}
4426 		}
4427 	}
4428 
4429 	/* handle extra register entries of UE */
4430 	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
4431 		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
4432 			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
4433 				/* no need to select if instance number is 1 */
4434 				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
4435 					gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
4436 					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
4437 
4438 				amdgpu_ras_inst_reset_ras_error_count(adev,
4439 					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
4440 					1,
4441 					GET_INST(GC, xcc_id));
4442 			}
4443 		}
4444 	}
4445 
4446 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4447 			xcc_id);
4448 	mutex_unlock(&adev->grbm_idx_mutex);
4449 }
4450 
gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device * adev,void * ras_error_status,int xcc_id)4451 static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
4452 					void *ras_error_status, int xcc_id)
4453 {
4454 	uint32_t i;
4455 	uint32_t data;
4456 
4457 	if (amdgpu_sriov_vf(adev))
4458 		return;
4459 
4460 	data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
4461 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
4462 			     amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
4463 
4464 	if (amdgpu_watchdog_timer.timeout_fatal_disable &&
4465 	    (amdgpu_watchdog_timer.period < 1 ||
4466 	     amdgpu_watchdog_timer.period > 0x23)) {
4467 		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
4468 		amdgpu_watchdog_timer.period = 0x23;
4469 	}
4470 	data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
4471 			     amdgpu_watchdog_timer.period);
4472 
4473 	mutex_lock(&adev->grbm_idx_mutex);
4474 	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4475 		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
4476 		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
4477 	}
4478 	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4479 			xcc_id);
4480 	mutex_unlock(&adev->grbm_idx_mutex);
4481 }
4482 
gfx_v9_4_3_query_ras_error_count(struct amdgpu_device * adev,void * ras_error_status)4483 static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev,
4484 					void *ras_error_status)
4485 {
4486 	amdgpu_gfx_ras_error_func(adev, ras_error_status,
4487 			gfx_v9_4_3_inst_query_ras_err_count);
4488 }
4489 
gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device * adev)4490 static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev)
4491 {
4492 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
4493 }
4494 
gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device * adev)4495 static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev)
4496 {
4497 	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
4498 }
4499 
gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring * ring,uint32_t num_nop)4500 static void gfx_v9_4_3_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
4501 {
4502 	/* Header itself is a NOP packet */
4503 	if (num_nop == 1) {
4504 		amdgpu_ring_write(ring, ring->funcs->nop);
4505 		return;
4506 	}
4507 
4508 	/* Max HW optimization till 0x3ffe, followed by remaining one NOP at a time*/
4509 	amdgpu_ring_write(ring, PACKET3(PACKET3_NOP, min(num_nop - 2, 0x3ffe)));
4510 
4511 	/* Header is at index 0, followed by num_nops - 1 NOP packet's */
4512 	amdgpu_ring_insert_nop(ring, num_nop - 1);
4513 }
4514 
gfx_v9_4_3_ip_print(struct amdgpu_ip_block * ip_block,struct drm_printer * p)4515 static void gfx_v9_4_3_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
4516 {
4517 	struct amdgpu_device *adev = ip_block->adev;
4518 	uint32_t i, j, k;
4519 	uint32_t xcc_id, xcc_offset, inst_offset;
4520 	uint32_t num_xcc, reg, num_inst;
4521 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4522 
4523 	if (!adev->gfx.ip_dump_core)
4524 		return;
4525 
4526 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4527 	drm_printf(p, "Number of Instances:%d\n", num_xcc);
4528 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4529 		xcc_offset = xcc_id * reg_count;
4530 		drm_printf(p, "\nInstance id:%d\n", xcc_id);
4531 		for (i = 0; i < reg_count; i++)
4532 			drm_printf(p, "%-50s \t 0x%08x\n",
4533 				   gc_reg_list_9_4_3[i].reg_name,
4534 				   adev->gfx.ip_dump_core[xcc_offset + i]);
4535 	}
4536 
4537 	/* print compute queue registers for all instances */
4538 	if (!adev->gfx.ip_dump_compute_queues)
4539 		return;
4540 
4541 	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4542 		adev->gfx.mec.num_queue_per_pipe;
4543 
4544 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4545 	drm_printf(p, "\nnum_xcc: %d num_mec: %d num_pipe: %d num_queue: %d\n",
4546 		   num_xcc,
4547 		   adev->gfx.mec.num_mec,
4548 		   adev->gfx.mec.num_pipe_per_mec,
4549 		   adev->gfx.mec.num_queue_per_pipe);
4550 
4551 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4552 		xcc_offset = xcc_id * reg_count * num_inst;
4553 		inst_offset = 0;
4554 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4555 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4556 				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4557 					drm_printf(p,
4558 						   "\nxcc:%d mec:%d, pipe:%d, queue:%d\n",
4559 						    xcc_id, i, j, k);
4560 					for (reg = 0; reg < reg_count; reg++) {
4561 						drm_printf(p,
4562 							   "%-50s \t 0x%08x\n",
4563 							   gc_cp_reg_list_9_4_3[reg].reg_name,
4564 							   adev->gfx.ip_dump_compute_queues
4565 								[xcc_offset + inst_offset +
4566 								reg]);
4567 					}
4568 					inst_offset += reg_count;
4569 				}
4570 			}
4571 		}
4572 	}
4573 }
4574 
gfx_v9_4_3_ip_dump(struct amdgpu_ip_block * ip_block)4575 static void gfx_v9_4_3_ip_dump(struct amdgpu_ip_block *ip_block)
4576 {
4577 	struct amdgpu_device *adev = ip_block->adev;
4578 	uint32_t i, j, k;
4579 	uint32_t num_xcc, reg, num_inst;
4580 	uint32_t xcc_id, xcc_offset, inst_offset;
4581 	uint32_t reg_count = ARRAY_SIZE(gc_reg_list_9_4_3);
4582 
4583 	if (!adev->gfx.ip_dump_core)
4584 		return;
4585 
4586 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4587 
4588 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4589 		xcc_offset = xcc_id * reg_count;
4590 		for (i = 0; i < reg_count; i++)
4591 			adev->gfx.ip_dump_core[xcc_offset + i] =
4592 				RREG32(SOC15_REG_ENTRY_OFFSET_INST(gc_reg_list_9_4_3[i],
4593 								   GET_INST(GC, xcc_id)));
4594 	}
4595 
4596 	/* dump compute queue registers for all instances */
4597 	if (!adev->gfx.ip_dump_compute_queues)
4598 		return;
4599 
4600 	num_inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
4601 		adev->gfx.mec.num_queue_per_pipe;
4602 	reg_count = ARRAY_SIZE(gc_cp_reg_list_9_4_3);
4603 	mutex_lock(&adev->srbm_mutex);
4604 	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
4605 		xcc_offset = xcc_id * reg_count * num_inst;
4606 		inst_offset = 0;
4607 		for (i = 0; i < adev->gfx.mec.num_mec; i++) {
4608 			for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
4609 				for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
4610 					/* ME0 is for GFX so start from 1 for CP */
4611 					soc15_grbm_select(adev, 1 + i, j, k, 0,
4612 							  GET_INST(GC, xcc_id));
4613 
4614 					for (reg = 0; reg < reg_count; reg++) {
4615 						adev->gfx.ip_dump_compute_queues
4616 							[xcc_offset +
4617 							 inst_offset + reg] =
4618 							RREG32(SOC15_REG_ENTRY_OFFSET_INST(
4619 								gc_cp_reg_list_9_4_3[reg],
4620 								GET_INST(GC, xcc_id)));
4621 					}
4622 					inst_offset += reg_count;
4623 				}
4624 			}
4625 		}
4626 	}
4627 	soc15_grbm_select(adev, 0, 0, 0, 0, 0);
4628 	mutex_unlock(&adev->srbm_mutex);
4629 }
4630 
gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring * ring)4631 static void gfx_v9_4_3_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
4632 {
4633 	/* Emit the cleaner shader */
4634 	amdgpu_ring_write(ring, PACKET3(PACKET3_RUN_CLEANER_SHADER, 0));
4635 	amdgpu_ring_write(ring, 0);  /* RESERVED field, programmed to zero */
4636 }
4637 
4638 static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = {
4639 	.name = "gfx_v9_4_3",
4640 	.early_init = gfx_v9_4_3_early_init,
4641 	.late_init = gfx_v9_4_3_late_init,
4642 	.sw_init = gfx_v9_4_3_sw_init,
4643 	.sw_fini = gfx_v9_4_3_sw_fini,
4644 	.hw_init = gfx_v9_4_3_hw_init,
4645 	.hw_fini = gfx_v9_4_3_hw_fini,
4646 	.suspend = gfx_v9_4_3_suspend,
4647 	.resume = gfx_v9_4_3_resume,
4648 	.is_idle = gfx_v9_4_3_is_idle,
4649 	.wait_for_idle = gfx_v9_4_3_wait_for_idle,
4650 	.soft_reset = gfx_v9_4_3_soft_reset,
4651 	.set_clockgating_state = gfx_v9_4_3_set_clockgating_state,
4652 	.set_powergating_state = gfx_v9_4_3_set_powergating_state,
4653 	.get_clockgating_state = gfx_v9_4_3_get_clockgating_state,
4654 	.dump_ip_state = gfx_v9_4_3_ip_dump,
4655 	.print_ip_state = gfx_v9_4_3_ip_print,
4656 };
4657 
4658 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = {
4659 	.type = AMDGPU_RING_TYPE_COMPUTE,
4660 	.align_mask = 0xff,
4661 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4662 	.support_64bit_ptrs = true,
4663 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4664 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4665 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4666 	.emit_frame_size =
4667 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4668 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4669 		5 + /* hdp invalidate */
4670 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4671 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4672 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4673 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4674 		8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */
4675 		7 + /* gfx_v9_4_3_emit_mem_sync */
4676 		5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */
4677 		15 + /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */
4678 		2, /* gfx_v9_4_3_ring_emit_cleaner_shader */
4679 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4680 	.emit_ib = gfx_v9_4_3_ring_emit_ib_compute,
4681 	.emit_fence = gfx_v9_4_3_ring_emit_fence,
4682 	.emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync,
4683 	.emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush,
4684 	.emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch,
4685 	.emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
4686 	.test_ring = gfx_v9_4_3_ring_test_ring,
4687 	.test_ib = gfx_v9_4_3_ring_test_ib,
4688 	.insert_nop = gfx_v9_4_3_ring_insert_nop,
4689 	.pad_ib = amdgpu_ring_generic_pad_ib,
4690 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4691 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4692 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4693 	.soft_recovery = gfx_v9_4_3_ring_soft_recovery,
4694 	.emit_mem_sync = gfx_v9_4_3_emit_mem_sync,
4695 	.emit_wave_limit = gfx_v9_4_3_emit_wave_limit,
4696 	.reset = gfx_v9_4_3_reset_kcq,
4697 	.emit_cleaner_shader = gfx_v9_4_3_ring_emit_cleaner_shader,
4698 	.begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
4699 	.end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
4700 };
4701 
4702 static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
4703 	.type = AMDGPU_RING_TYPE_KIQ,
4704 	.align_mask = 0xff,
4705 	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
4706 	.support_64bit_ptrs = true,
4707 	.get_rptr = gfx_v9_4_3_ring_get_rptr_compute,
4708 	.get_wptr = gfx_v9_4_3_ring_get_wptr_compute,
4709 	.set_wptr = gfx_v9_4_3_ring_set_wptr_compute,
4710 	.emit_frame_size =
4711 		20 + /* gfx_v9_4_3_ring_emit_gds_switch */
4712 		7 + /* gfx_v9_4_3_ring_emit_hdp_flush */
4713 		5 + /* hdp invalidate */
4714 		7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */
4715 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
4716 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
4717 		2 + /* gfx_v9_4_3_ring_emit_vm_flush */
4718 		8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */
4719 	.emit_ib_size =	7, /* gfx_v9_4_3_ring_emit_ib_compute */
4720 	.emit_fence = gfx_v9_4_3_ring_emit_fence_kiq,
4721 	.test_ring = gfx_v9_4_3_ring_test_ring,
4722 	.insert_nop = amdgpu_ring_insert_nop,
4723 	.pad_ib = amdgpu_ring_generic_pad_ib,
4724 	.emit_rreg = gfx_v9_4_3_ring_emit_rreg,
4725 	.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
4726 	.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
4727 	.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
4728 };
4729 
gfx_v9_4_3_set_ring_funcs(struct amdgpu_device * adev)4730 static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
4731 {
4732 	int i, j, num_xcc;
4733 
4734 	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
4735 	for (i = 0; i < num_xcc; i++) {
4736 		adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq;
4737 
4738 		for (j = 0; j < adev->gfx.num_compute_rings; j++)
4739 			adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs
4740 					= &gfx_v9_4_3_ring_funcs_compute;
4741 	}
4742 }
4743 
4744 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = {
4745 	.set = gfx_v9_4_3_set_eop_interrupt_state,
4746 	.process = gfx_v9_4_3_eop_irq,
4747 };
4748 
4749 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = {
4750 	.set = gfx_v9_4_3_set_priv_reg_fault_state,
4751 	.process = gfx_v9_4_3_priv_reg_irq,
4752 };
4753 
4754 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_bad_op_irq_funcs = {
4755 	.set = gfx_v9_4_3_set_bad_op_fault_state,
4756 	.process = gfx_v9_4_3_bad_op_irq,
4757 };
4758 
4759 static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = {
4760 	.set = gfx_v9_4_3_set_priv_inst_fault_state,
4761 	.process = gfx_v9_4_3_priv_inst_irq,
4762 };
4763 
gfx_v9_4_3_set_irq_funcs(struct amdgpu_device * adev)4764 static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev)
4765 {
4766 	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
4767 	adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs;
4768 
4769 	adev->gfx.priv_reg_irq.num_types = 1;
4770 	adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs;
4771 
4772 	adev->gfx.bad_op_irq.num_types = 1;
4773 	adev->gfx.bad_op_irq.funcs = &gfx_v9_4_3_bad_op_irq_funcs;
4774 
4775 	adev->gfx.priv_inst_irq.num_types = 1;
4776 	adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs;
4777 }
4778 
gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device * adev)4779 static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev)
4780 {
4781 	adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs;
4782 }
4783 
4784 
gfx_v9_4_3_set_gds_init(struct amdgpu_device * adev)4785 static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
4786 {
4787 	/* 9.4.3 variants removed all the GDS internal memory,
4788 	 * only support GWS opcode in kernel, like barrier
4789 	 * semaphore.etc */
4790 
4791 	/* init asic gds info */
4792 	adev->gds.gds_size = 0;
4793 	adev->gds.gds_compute_max_wave_id = 0;
4794 	adev->gds.gws_size = 64;
4795 	adev->gds.oa_size = 16;
4796 }
4797 
gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device * adev,u32 bitmap,int xcc_id)4798 static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
4799 						 u32 bitmap, int xcc_id)
4800 {
4801 	u32 data;
4802 
4803 	if (!bitmap)
4804 		return;
4805 
4806 	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4807 	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4808 
4809 	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
4810 }
4811 
gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device * adev,int xcc_id)4812 static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
4813 {
4814 	u32 data, mask;
4815 
4816 	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
4817 	data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
4818 
4819 	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
4820 	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
4821 
4822 	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4823 
4824 	return (~data) & mask;
4825 }
4826 
gfx_v9_4_3_get_cu_info(struct amdgpu_device * adev,struct amdgpu_cu_info * cu_info)4827 static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
4828 				 struct amdgpu_cu_info *cu_info)
4829 {
4830 	int i, j, k, prev_counter, counter, xcc_id, active_cu_number = 0;
4831 	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0, tmp;
4832 	unsigned disable_masks[4 * 4];
4833 	bool is_symmetric_cus;
4834 
4835 	if (!adev || !cu_info)
4836 		return -EINVAL;
4837 
4838 	/*
4839 	 * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs
4840 	 */
4841 	if (adev->gfx.config.max_shader_engines *
4842 		adev->gfx.config.max_sh_per_se > 16)
4843 		return -EINVAL;
4844 
4845 	amdgpu_gfx_parse_disable_cu(disable_masks,
4846 				    adev->gfx.config.max_shader_engines,
4847 				    adev->gfx.config.max_sh_per_se);
4848 
4849 	mutex_lock(&adev->grbm_idx_mutex);
4850 	for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
4851 		is_symmetric_cus = true;
4852 		for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
4853 			for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
4854 				mask = 1;
4855 				ao_bitmap = 0;
4856 				counter = 0;
4857 				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
4858 				gfx_v9_4_3_set_user_cu_inactive_bitmap(
4859 					adev,
4860 					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
4861 					xcc_id);
4862 				bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
4863 
4864 				cu_info->bitmap[xcc_id][i][j] = bitmap;
4865 
4866 				for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
4867 					if (bitmap & mask) {
4868 						if (counter < adev->gfx.config.max_cu_per_sh)
4869 							ao_bitmap |= mask;
4870 						counter++;
4871 					}
4872 					mask <<= 1;
4873 				}
4874 				active_cu_number += counter;
4875 				if (i < 2 && j < 2)
4876 					ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
4877 				cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
4878 			}
4879 			if (i && is_symmetric_cus && prev_counter != counter)
4880 				is_symmetric_cus = false;
4881 			prev_counter = counter;
4882 		}
4883 		if (is_symmetric_cus) {
4884 			tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG);
4885 			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_RELAUNCH_DISABLE, 1);
4886 			tmp = REG_SET_FIELD(tmp, CP_CPC_DEBUG, CPC_HARVESTING_DISPATCH_DISABLE, 1);
4887 			WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_DEBUG, tmp);
4888 		}
4889 		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
4890 					    xcc_id);
4891 	}
4892 	mutex_unlock(&adev->grbm_idx_mutex);
4893 
4894 	cu_info->number = active_cu_number;
4895 	cu_info->ao_cu_mask = ao_cu_mask;
4896 	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
4897 
4898 	return 0;
4899 }
4900 
4901 const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
4902 	.type = AMD_IP_BLOCK_TYPE_GFX,
4903 	.major = 9,
4904 	.minor = 4,
4905 	.rev = 3,
4906 	.funcs = &gfx_v9_4_3_ip_funcs,
4907 };
4908 
gfx_v9_4_3_xcp_resume(void * handle,uint32_t inst_mask)4909 static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask)
4910 {
4911 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4912 	uint32_t tmp_mask;
4913 	int i, r;
4914 
4915 	/* TODO : Initialize golden regs */
4916 	/* gfx_v9_4_3_init_golden_registers(adev); */
4917 
4918 	tmp_mask = inst_mask;
4919 	for_each_inst(i, tmp_mask)
4920 		gfx_v9_4_3_xcc_constants_init(adev, i);
4921 
4922 	if (!amdgpu_sriov_vf(adev)) {
4923 		tmp_mask = inst_mask;
4924 		for_each_inst(i, tmp_mask) {
4925 			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
4926 			if (r)
4927 				return r;
4928 		}
4929 	}
4930 
4931 	tmp_mask = inst_mask;
4932 	for_each_inst(i, tmp_mask) {
4933 		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
4934 		if (r)
4935 			return r;
4936 	}
4937 
4938 	return 0;
4939 }
4940 
gfx_v9_4_3_xcp_suspend(void * handle,uint32_t inst_mask)4941 static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask)
4942 {
4943 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4944 	int i;
4945 
4946 	for_each_inst(i, inst_mask)
4947 		gfx_v9_4_3_xcc_fini(adev, i);
4948 
4949 	return 0;
4950 }
4951 
4952 struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = {
4953 	.suspend = &gfx_v9_4_3_xcp_suspend,
4954 	.resume = &gfx_v9_4_3_xcp_resume
4955 };
4956 
4957 struct amdgpu_ras_block_hw_ops  gfx_v9_4_3_ras_ops = {
4958 	.query_ras_error_count = &gfx_v9_4_3_query_ras_error_count,
4959 	.reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count,
4960 };
4961 
gfx_v9_4_3_ras_late_init(struct amdgpu_device * adev,struct ras_common_if * ras_block)4962 static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
4963 {
4964 	int r;
4965 
4966 	r = amdgpu_ras_block_late_init(adev, ras_block);
4967 	if (r)
4968 		return r;
4969 
4970 	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
4971 				&gfx_v9_4_3_aca_info,
4972 				NULL);
4973 	if (r)
4974 		goto late_fini;
4975 
4976 	return 0;
4977 
4978 late_fini:
4979 	amdgpu_ras_block_late_fini(adev, ras_block);
4980 
4981 	return r;
4982 }
4983 
4984 struct amdgpu_gfx_ras gfx_v9_4_3_ras = {
4985 	.ras_block = {
4986 		.hw_ops = &gfx_v9_4_3_ras_ops,
4987 		.ras_late_init = &gfx_v9_4_3_ras_late_init,
4988 	},
4989 	.enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer,
4990 };
4991