/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_GFX_H__
#define __AMDGPU_GFX_H__

/*
 * GFX stuff
 */
#include "clearstate_defs.h"
#include "amdgpu_ring.h"
#include "amdgpu_rlc.h"
#include "amdgpu_imu.h"
#include "soc15.h"
#include "amdgpu_ras.h"
#include "amdgpu_ring_mux.h"
#include "amdgpu_xcp.h"

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE		0x00000000L
#define AMDGPU_GFX_SAFE_MODE		0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE	0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE	0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE	0x00000008L
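
/*
 * The non-zero status values above are distinct bits, so they can be tested
 * individually on the cached status word. Illustrative sketch only (not part
 * of this header), assuming a valid adev:
 *
 *         if (adev->gfx.gfx_current_status & AMDGPU_GFX_SAFE_MODE)
 *                 return; // e.g. skip an operation while safe mode is active
 */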

#define AMDGPU_MAX_GC_INSTANCES		8
#define AMDGPU_MAX_QUEUES		128

#define AMDGPU_MAX_GFX_QUEUES		AMDGPU_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES	AMDGPU_MAX_QUEUES

enum amdgpu_gfx_pipe_priority {
        AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1,
        AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2
};

#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM  0
#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM  15

/* 1 second timeout */
#define GFX_PROFILE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

enum amdgpu_gfx_partition {
        AMDGPU_SPX_PARTITION_MODE = 0,
        AMDGPU_DPX_PARTITION_MODE = 1,
        AMDGPU_TPX_PARTITION_MODE = 2,
        AMDGPU_QPX_PARTITION_MODE = 3,
        AMDGPU_CPX_PARTITION_MODE = 4,
        AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE = -1,
        /* Automatically choose the right mode */
        AMDGPU_AUTO_COMPUTE_PARTITION_MODE = -2,
};

#define NUM_XCC(x) hweight16(x)
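
/*
 * NUM_XCC() counts the bits set in an XCC mask (hweight16() is the kernel's
 * 16-bit population count). Illustrative example, not part of the driver:
 *
 *         u16 xcc_mask = 0x2f;                     // XCCs 0-3 and 5 present
 *         int num_xcc = NUM_XCC(xcc_mask);         // num_xcc == 5
 */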

enum amdgpu_gfx_ras_mem_id_type {
        AMDGPU_GFX_CP_MEM = 0,
        AMDGPU_GFX_GCEA_MEM,
        AMDGPU_GFX_GC_CANE_MEM,
        AMDGPU_GFX_GCUTCL2_MEM,
        AMDGPU_GFX_GDS_MEM,
        AMDGPU_GFX_LDS_MEM,
        AMDGPU_GFX_RLC_MEM,
        AMDGPU_GFX_SP_MEM,
        AMDGPU_GFX_SPI_MEM,
        AMDGPU_GFX_SQC_MEM,
        AMDGPU_GFX_SQ_MEM,
        AMDGPU_GFX_TA_MEM,
        AMDGPU_GFX_TCC_MEM,
        AMDGPU_GFX_TCA_MEM,
        AMDGPU_GFX_TCI_MEM,
        AMDGPU_GFX_TCP_MEM,
        AMDGPU_GFX_TD_MEM,
        AMDGPU_GFX_TCX_MEM,
        AMDGPU_GFX_ATC_L2_MEM,
        AMDGPU_GFX_UTCL2_MEM,
        AMDGPU_GFX_VML2_MEM,
        AMDGPU_GFX_VML2_WALKER_MEM,
        AMDGPU_GFX_MEM_TYPE_NUM
};

struct amdgpu_mec {
        struct amdgpu_bo *hpd_eop_obj;
        u64 hpd_eop_gpu_addr;
        struct amdgpu_bo *mec_fw_obj;
        u64 mec_fw_gpu_addr;
        struct amdgpu_bo *mec_fw_data_obj;
        u64 mec_fw_data_gpu_addr;

        u32 num_mec;
        u32 num_pipe_per_mec;
        u32 num_queue_per_pipe;
        void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES];
};

struct amdgpu_mec_bitmap {
        /* These are the resources for which amdgpu takes ownership */
        DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};

enum amdgpu_unmap_queues_action {
        PREEMPT_QUEUES = 0,
        RESET_QUEUES,
        DISABLE_PROCESS_QUEUES,
        PREEMPT_QUEUES_NO_UNMAP,
};

struct kiq_pm4_funcs {
        /* Support ASIC-specific kiq pm4 packets */
        void (*kiq_set_resources)(struct amdgpu_ring *kiq_ring,
                        uint64_t queue_mask);
        void (*kiq_map_queues)(struct amdgpu_ring *kiq_ring,
                        struct amdgpu_ring *ring);
        void (*kiq_unmap_queues)(struct amdgpu_ring *kiq_ring,
                        struct amdgpu_ring *ring,
                        enum amdgpu_unmap_queues_action action,
                        u64 gpu_addr, u64 seq);
        void (*kiq_query_status)(struct amdgpu_ring *kiq_ring,
                        struct amdgpu_ring *ring,
                        u64 addr,
                        u64 seq);
        void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
                        uint16_t pasid, uint32_t flush_type,
                        bool all_hub);
        void (*kiq_reset_hw_queue)(struct amdgpu_ring *kiq_ring,
                        uint32_t queue_type, uint32_t me_id,
                        uint32_t pipe_id, uint32_t queue_id,
                        uint32_t xcc_id, uint32_t vmid);
        /* Packet sizes */
        int set_resources_size;
        int map_queues_size;
        int unmap_queues_size;
        int query_status_size;
        int invalidate_tlbs_size;
};
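
/*
 * The *_size members above report how much ring space (in dwords) each of
 * the corresponding PM4 packets consumes, so a caller can reserve space
 * before emitting them. A minimal sketch, assuming kiq_ring, kiq->pmf, ring
 * and queue_mask are already set up (illustrative only, not the driver's
 * exact code):
 *
 *         r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->set_resources_size +
 *                               kiq->pmf->map_queues_size);
 *         if (!r) {
 *                 kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
 *                 kiq->pmf->kiq_map_queues(kiq_ring, ring);
 *                 amdgpu_ring_commit(kiq_ring);
 *         }
 */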

struct amdgpu_kiq {
        u64 eop_gpu_addr;
        struct amdgpu_bo *eop_obj;
        spinlock_t ring_lock;
        struct amdgpu_ring ring;
        struct amdgpu_irq_src irq;
        const struct kiq_pm4_funcs *pmf;
        void *mqd_backup;
};

/*
 * GFX configurations
 */
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2

/**
 * amdgpu_rb_config - Configure a single Render Backend (RB)
 *
 * Bad RBs are fused off. The driver reads a harvest register to determine
 * which RB(s) are fused off, so that it can program the hardware state to
 * send nothing to them. There are also user harvest registers that the
 * driver can program to disable additional RBs, e.g. for testing purposes.
 */
struct amdgpu_rb_config {
        /**
         * @rb_backend_disable:
         *
         * The value captured from register RB_BACKEND_DISABLE indicates if
         * the RB backend is disabled or not.
         */
        uint32_t rb_backend_disable;

        /**
         * @user_rb_backend_disable:
         *
         * The value captured from register USER_RB_BACKEND_DISABLE indicates
         * if the User RB backend is disabled or not.
         */
        uint32_t user_rb_backend_disable;

        /**
         * @raster_config:
         *
         * Setting up all of the required state takes two registers; this
         * field holds the first of them.
         */
        uint32_t raster_config;

        /**
         * @raster_config_1:
         *
         * Setting up all of the required state takes two registers; this
         * field holds the second of them.
         */
        uint32_t raster_config_1;
};

struct gb_addr_config {
        uint16_t pipe_interleave_size;
        uint8_t num_pipes;
        uint8_t max_compress_frags;
        uint8_t num_banks;
        uint8_t num_se;
        uint8_t num_rb_per_se;
        uint8_t num_pkrs;
};

struct amdgpu_gfx_config {
        unsigned max_shader_engines;
        unsigned max_tile_pipes;
        unsigned max_cu_per_sh;
        unsigned max_sh_per_se;
        unsigned max_backends_per_se;
        unsigned max_texture_channel_caches;
        unsigned max_gprs;
        unsigned max_gs_threads;
        unsigned max_hw_contexts;
        unsigned sc_prim_fifo_size_frontend;
        unsigned sc_prim_fifo_size_backend;
        unsigned sc_hiz_tile_fifo_size;
        unsigned sc_earlyz_tile_fifo_size;

        unsigned num_tile_pipes;
        unsigned backend_enable_mask;
        unsigned mem_max_burst_length_bytes;
        unsigned mem_row_size_in_kb;
        unsigned shader_engine_tile_size;
        unsigned num_gpus;
        unsigned multi_gpu_tile_size;
        unsigned mc_arb_ramcfg;
        unsigned num_banks;
        unsigned num_ranks;
        unsigned gb_addr_config;
        unsigned num_rbs;
        unsigned gs_vgt_table_depth;
        unsigned gs_prim_buffer_depth;

        uint32_t tile_mode_array[32];
        uint32_t macrotile_mode_array[16];

        struct gb_addr_config gb_addr_config_fields;

        /**
         * @rb_config:
         *
         * Matrix that keeps all the Render Backend (color and depth buffer
         * handling) configuration on the 3D engine.
         */
        struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];

        /* gfx configure feature */
        uint32_t double_offchip_lds_buf;
        /* cached value of DB_DEBUG2 */
        uint32_t db_debug2;
        /* gfx10 specific config */
        uint32_t num_sc_per_sh;
        uint32_t num_packer_per_sc;
        uint32_t pa_sc_tile_steering_override;
        /* Whether texture coordinate truncation is conformant. */
        bool ta_cntl2_truncate_coord_mode;
        uint64_t tcc_disabled_mask;
        uint32_t gc_num_tcp_per_sa;
        uint32_t gc_num_sdp_interface;
        uint32_t gc_num_tcps;
        uint32_t gc_num_tcp_per_wpg;
        uint32_t gc_tcp_l1_size;
        uint32_t gc_num_sqc_per_wgp;
        uint32_t gc_l1_instruction_cache_size_per_sqc;
        uint32_t gc_l1_data_cache_size_per_sqc;
        uint32_t gc_gl1c_per_sa;
        uint32_t gc_gl1c_size_per_instance;
        uint32_t gc_gl2c_per_gpu;
        uint32_t gc_tcp_size_per_cu;
        uint32_t gc_num_cu_per_sqc;
        uint32_t gc_tcc_size;
        uint32_t gc_tcp_cache_line_size;
        uint32_t gc_instruction_cache_size_per_sqc;
        uint32_t gc_instruction_cache_line_size;
        uint32_t gc_scalar_data_cache_size_per_sqc;
        uint32_t gc_scalar_data_cache_line_size;
        uint32_t gc_tcc_cache_line_size;
};

struct amdgpu_cu_info {
        uint32_t simd_per_cu;
        uint32_t max_waves_per_simd;
        uint32_t wave_front_size;
        uint32_t max_scratch_slots_per_cu;
        uint32_t lds_size;

        /* total active CU number */
        uint32_t number;
        uint32_t ao_cu_mask;
        uint32_t ao_cu_bitmap[4][4];
        uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
};

struct amdgpu_gfx_ras {
        struct amdgpu_ras_block_object ras_block;
        void (*enable_watchdog_timer)(struct amdgpu_device *adev);
        int (*rlc_gc_fed_irq)(struct amdgpu_device *adev,
                        struct amdgpu_irq_src *source,
                        struct amdgpu_iv_entry *entry);
        int (*poison_consumption_handler)(struct amdgpu_device *adev,
                        struct amdgpu_iv_entry *entry);
};

struct amdgpu_gfx_shadow_info {
        u32 shadow_size;
        u32 shadow_alignment;
        u32 csa_size;
        u32 csa_alignment;
};

struct amdgpu_gfx_funcs {
        /* get the gpu clock counter */
        uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
        void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num,
                        u32 sh_num, u32 instance, int xcc_id);
        void (*read_wave_data)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
                        uint32_t wave, uint32_t *dst, int *no_fields);
        void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
                        uint32_t wave, uint32_t thread, uint32_t start,
                        uint32_t size, uint32_t *dst);
        void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
                        uint32_t wave, uint32_t start, uint32_t size,
                        uint32_t *dst);
        void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
                        u32 queue, u32 vmid, u32 xcc_id);
        void (*init_spm_golden)(struct amdgpu_device *adev);
        void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
        int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
                        struct amdgpu_gfx_shadow_info *shadow_info,
                        bool skip_check);
        enum amdgpu_gfx_partition
                        (*query_partition_mode)(struct amdgpu_device *adev);
        int (*switch_partition_mode)(struct amdgpu_device *adev,
                        int num_xccs_per_xcp);
        int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node);
        int (*get_xccs_per_xcp)(struct amdgpu_device *adev);
};

struct sq_work {
        struct work_struct work;
        unsigned ih_data;
};

struct amdgpu_pfp {
        struct amdgpu_bo *pfp_fw_obj;
        uint64_t pfp_fw_gpu_addr;
        uint32_t *pfp_fw_ptr;

        struct amdgpu_bo *pfp_fw_data_obj;
        uint64_t pfp_fw_data_gpu_addr;
        uint32_t *pfp_fw_data_ptr;
};

struct amdgpu_ce {
        struct amdgpu_bo *ce_fw_obj;
        uint64_t ce_fw_gpu_addr;
        uint32_t *ce_fw_ptr;
};

struct amdgpu_me {
        struct amdgpu_bo *me_fw_obj;
        uint64_t me_fw_gpu_addr;
        uint32_t *me_fw_ptr;

        struct amdgpu_bo *me_fw_data_obj;
        uint64_t me_fw_data_gpu_addr;
        uint32_t *me_fw_data_ptr;

        uint32_t num_me;
        uint32_t num_pipe_per_me;
        uint32_t num_queue_per_pipe;
        void *mqd_backup[AMDGPU_MAX_GFX_RINGS];

        /* These are the resources for which amdgpu takes ownership */
        DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
};

struct amdgpu_isolation_work {
        struct amdgpu_device *adev;
        u32 xcp_id;
        struct delayed_work work;
};

struct amdgpu_gfx {
        struct mutex gpu_clock_mutex;
        struct amdgpu_gfx_config config;
        struct amdgpu_rlc rlc;
        struct amdgpu_pfp pfp;
        struct amdgpu_ce ce;
        struct amdgpu_me me;
        struct amdgpu_mec mec;
        struct amdgpu_mec_bitmap mec_bitmap[AMDGPU_MAX_GC_INSTANCES];
        struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES];
        struct amdgpu_imu imu;
        bool rs64_enable; /* firmware format */
        const struct firmware *me_fw; /* ME firmware */
        uint32_t me_fw_version;
        const struct firmware *pfp_fw; /* PFP firmware */
        uint32_t pfp_fw_version;
        const struct firmware *ce_fw; /* CE firmware */
        uint32_t ce_fw_version;
        const struct firmware *rlc_fw; /* RLC firmware */
        uint32_t rlc_fw_version;
        const struct firmware *mec_fw; /* MEC firmware */
        uint32_t mec_fw_version;
        const struct firmware *mec2_fw; /* MEC2 firmware */
        uint32_t mec2_fw_version;
        const struct firmware *imu_fw; /* IMU firmware */
        uint32_t imu_fw_version;
        uint32_t me_feature_version;
        uint32_t ce_feature_version;
        uint32_t pfp_feature_version;
        uint32_t rlc_feature_version;
        uint32_t rlc_srlc_fw_version;
        uint32_t rlc_srlc_feature_version;
        uint32_t rlc_srlg_fw_version;
        uint32_t rlc_srlg_feature_version;
        uint32_t rlc_srls_fw_version;
        uint32_t rlc_srls_feature_version;
        uint32_t rlcp_ucode_version;
        uint32_t rlcp_ucode_feature_version;
        uint32_t rlcv_ucode_version;
        uint32_t rlcv_ucode_feature_version;
        uint32_t mec_feature_version;
        uint32_t mec2_feature_version;
        bool mec_fw_write_wait;
        bool me_fw_write_wait;
        bool cp_fw_write_wait;
        struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
        unsigned num_gfx_rings;
        struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES];
        unsigned num_compute_rings;
        struct amdgpu_irq_src eop_irq;
        struct amdgpu_irq_src priv_reg_irq;
        struct amdgpu_irq_src priv_inst_irq;
        struct amdgpu_irq_src bad_op_irq;
        struct amdgpu_irq_src cp_ecc_error_irq;
        struct amdgpu_irq_src sq_irq;
        struct amdgpu_irq_src rlc_gc_fed_irq;
        struct sq_work sq_work;

        /* gfx status */
        uint32_t gfx_current_status;
        /* ce ram size */
        unsigned ce_ram_size;
        struct amdgpu_cu_info cu_info;
        const struct amdgpu_gfx_funcs *funcs;

        /* reset mask */
        uint32_t grbm_soft_reset;
        uint32_t srbm_soft_reset;
        uint32_t gfx_supported_reset;
        uint32_t compute_supported_reset;

        /* gfx off */
        bool gfx_off_state; /* true: enabled, false: disabled */
        struct mutex gfx_off_mutex; /* mutex to change gfxoff state */
        uint32_t gfx_off_req_count; /* default 1; decremented to enable GFXOFF, incremented to disable it */
        struct delayed_work gfx_off_delay_work; /* async work to set gfx block off */
        uint32_t gfx_off_residency; /* last logged residency */
        uint64_t gfx_off_entrycount; /* count of times the GPU has entered the GFXOFF state */

        /* pipe reservation */
        struct mutex pipe_reserve_mutex;
        DECLARE_BITMAP(pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

        /* ras */
        struct ras_common_if *ras_if;
        struct amdgpu_gfx_ras *ras;

        bool is_poweron;

        struct amdgpu_ring sw_gfx_ring[AMDGPU_MAX_SW_GFX_RINGS];
        struct amdgpu_ring_mux muxer;

        bool cp_gfx_shadow; /* for gfx11 */

        uint16_t xcc_mask;
        uint32_t num_xcc_per_xcp;
        struct mutex partition_mutex;
        bool mcbp; /* mid command buffer preemption */

        /* IP reg dump */
        uint32_t *ip_dump_core;
        uint32_t *ip_dump_compute_queues;
        uint32_t *ip_dump_gfx_queues;

        struct mutex reset_sem_mutex;

        /* cleaner shader */
        struct amdgpu_bo *cleaner_shader_obj;
        unsigned int cleaner_shader_size;
        u64 cleaner_shader_gpu_addr;
        void *cleaner_shader_cpu_ptr;
        const void *cleaner_shader_ptr;
        bool enable_cleaner_shader;
        struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
        /* Mutex for synchronizing KFD scheduler operations */
        struct mutex userq_sch_mutex;
        u64 userq_sch_req_count[MAX_XCP];
        bool userq_sch_inactive[MAX_XCP];
        unsigned long enforce_isolation_jiffies[MAX_XCP];
        unsigned long enforce_isolation_time[MAX_XCP];

        atomic_t total_submission_cnt;
        struct delayed_work idle_work;
        bool workload_profile_active;
        struct mutex workload_profile_mutex;

        bool disable_kq;
        bool disable_uq;
};

struct amdgpu_gfx_ras_reg_entry {
        struct amdgpu_ras_err_status_reg_entry reg_entry;
        enum amdgpu_gfx_ras_mem_id_type mem_id_type;
        uint32_t se_num;
};

struct amdgpu_gfx_ras_mem_id_entry {
        const struct amdgpu_ras_memory_id_entry *mem_id_ent;
        uint32_t size;
};

#define AMDGPU_GFX_MEMID_ENT(x) {(x), ARRAY_SIZE(x)},
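
/*
 * AMDGPU_GFX_MEMID_ENT() pairs a memory-ID table with its length via
 * ARRAY_SIZE(), so it must be applied to a real array, not a pointer.
 * Hypothetical usage sketch (gfx_ras_cp_mem_list stands in for an
 * ASIC-specific table):
 *
 *         static const struct amdgpu_gfx_ras_mem_id_entry gfx_ras_mem_list[] = {
 *                 AMDGPU_GFX_MEMID_ENT(gfx_ras_cp_mem_list)
 *         };
 */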

#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id)))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id)))
#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si), false))
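
/*
 * The wrappers above dispatch through adev->gfx.funcs, so callers need not
 * know which ASIC-specific implementation is installed. Illustrative use,
 * assuming a valid adev (not part of this header):
 *
 *         uint64_t clock = amdgpu_gfx_get_gpu_clock_counter(adev);
 *         struct amdgpu_gfx_shadow_info si;
 *         int r = amdgpu_gfx_get_gfx_shadow_info(adev, &si);
 */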

/**
 * amdgpu_gfx_create_bitmask - create a bitmask
 *
 * @bit_width: length of the mask
 *
 * create a variable length bit mask.
 * Returns the bitmask.
 */
static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width)
{
        return (u32)((1ULL << bit_width) - 1);
}
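
/*
 * Example: amdgpu_gfx_create_bitmask(4) evaluates to 0xf (the four lowest
 * bits set), and bit_width == 32 yields 0xffffffff without undefined
 * behaviour because the shift is performed on a 64-bit value first.
 */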

void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
                unsigned max_sh);

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id);

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
                unsigned hpd_size, int xcc_id);

int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
                unsigned mqd_size, int xcc_id);
void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id);

void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);

int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
                int pipe, int queue);
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
                int *mec, int *pipe, int *queue);
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int xcc_id,
                int mec, int pipe, int queue);
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
                struct amdgpu_ring *ring);
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
                struct amdgpu_ring *ring);
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
                int pipe, int queue);
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable);
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value);
int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *residency);
int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value);
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
                void *err_data,
                struct amdgpu_iv_entry *entry);
int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
                struct amdgpu_irq_src *source,
                struct amdgpu_iv_entry *entry);
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);

int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev);
int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
                struct amdgpu_iv_entry *entry);

bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id);
int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev);
void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
                void *ras_error_status,
                void (*func)(struct amdgpu_device *adev, void *ras_error_status,
                                int xcc_id));
int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
                unsigned int cleaner_shader_size);
void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev);
void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
                unsigned int cleaner_shader_size,
                const void *cleaner_shader_ptr);
void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);

void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer);
u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count);
void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count);

void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);

static inline const char *amdgpu_gfx_compute_mode_desc(int mode)
{
        switch (mode) {
        case AMDGPU_SPX_PARTITION_MODE:
                return "SPX";
        case AMDGPU_DPX_PARTITION_MODE:
                return "DPX";
        case AMDGPU_TPX_PARTITION_MODE:
                return "TPX";
        case AMDGPU_QPX_PARTITION_MODE:
                return "QPX";
        case AMDGPU_CPX_PARTITION_MODE:
                return "CPX";
        default:
                return "UNKNOWN";
        }
}

#endif