/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "amdgpu: " fmt

#ifdef dev_fmt
#undef dev_fmt
#endif

#define dev_fmt(fmt) "amdgpu: " fmt

#include "amdgpu_ctx.h"

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
#include <linux/pci.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>

#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
#include "kgd_pp_interface.h"

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_vpe.h"
#include "amdgpu_umsch_mm.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_sdma.h"
#include "amdgpu_lsdma.h"
#include "amdgpu_nbio.h"
#include "amdgpu_hdp.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_csa.h"
#include "amdgpu_mes_ctx.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_discovery.h"
#include "amdgpu_mes.h"
#include "amdgpu_umc.h"
#include "amdgpu_mmhub.h"
#include "amdgpu_gfxhub.h"
#include "amdgpu_df.h"
#include "amdgpu_smuio.h"
#include "amdgpu_fdinfo.h"
#include "amdgpu_mca.h"
#include "amdgpu_aca.h"
#include "amdgpu_ras.h"
#include "amdgpu_cper.h"
#include "amdgpu_xcp.h"
#include "amdgpu_seq64.h"
#include "amdgpu_reg_state.h"
#if defined(CONFIG_DRM_AMD_ISP)
#include "amdgpu_isp.h"
#endif

#define MAX_GPU_INSTANCE	64

#define GFX_SLICE_PERIOD_MS	250

struct amdgpu_gpu_instance {
	struct amdgpu_device *adev;
	int mgpu_fan_enabled;
};

struct amdgpu_mgpu_info {
	struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE];
	struct mutex mutex;
	uint32_t num_gpu;
	uint32_t num_dgpu;
	uint32_t num_apu;
};

enum amdgpu_ss {
	AMDGPU_SS_DRV_LOAD,
	AMDGPU_SS_DEV_D0,
	AMDGPU_SS_DEV_D3,
	AMDGPU_SS_DRV_UNLOAD
};

struct amdgpu_hwip_reg_entry {
	u32 hwip;
	u32 inst;
	u32 seg;
	u32 reg_offset;
	const char *reg_name;
};

struct amdgpu_watchdog_timer {
	bool timeout_fatal_disable;
	uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
};

#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH	256

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern unsigned int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_exp_hw_support;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern u64 amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern uint amdgpu_force_long_training;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern int amdgpu_dm_abm_level;
extern int amdgpu_backlight;
extern int amdgpu_damage_clips;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
extern int amdgpu_bad_page_threshold;
extern bool amdgpu_ignore_bad_page_threshold;
extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
extern int amdgpu_mes;
extern int amdgpu_mes_log_enable;
extern int amdgpu_mes_kiq;
extern int amdgpu_uni_mes;
extern int amdgpu_noretry;
extern int amdgpu_force_asic_type;
extern int amdgpu_smartshift_bias;
extern int amdgpu_use_xgmi_p2p;
extern int amdgpu_mtype_local;
extern bool enforce_isolation;
#ifdef CONFIG_HSA_AMD
extern int sched_policy;
extern bool debug_evictions;
extern bool no_system_mem_limit;
extern int halt_if_hws_hang;
extern uint amdgpu_svm_default_granularity;
#else
static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
static const bool __maybe_unused debug_evictions; /* = false */
static const bool __maybe_unused no_system_mem_limit;
static const int __maybe_unused halt_if_hws_hang;
#endif
#ifdef CONFIG_HSA_AMD_P2P
extern bool pcie_p2p;
#endif

extern int amdgpu_tmz;
extern int amdgpu_reset_method;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif
extern int amdgpu_num_kcq;

#define AMDGPU_VCNFW_LOG_SIZE	(32 * 1024)
#define AMDGPU_UMSCHFW_LOG_SIZE	(32 * 1024)
extern int amdgpu_vcnfw_log;
extern int amdgpu_sg_display;
extern int amdgpu_umsch_mm;
extern int amdgpu_seamless;
extern int amdgpu_umsch_mm_fwlog;

extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;

extern int amdgpu_wbrf;

#define AMDGPU_VM_MAX_NUM_CTX		4096
#define AMDGPU_SG_THRESHOLD		(256*1024*1024)
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
#define AMDGPU_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
#define AMDGPU_DEBUGFS_MAX_COMPONENTS	32
#define AMDGPUFB_CONN_LIMIT		4
#define AMDGPU_BIOS_NUM_SCRATCH		16

#define AMDGPU_VBIOS_VGA_ALLOCATION	(9 * 1024 * 1024)	/* reserve 8MB for vga emulator and 1 MB for FB */

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA		0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX		(1 << 0)
#define AMDGPU_RESET_COMPUTE		(1 << 1)
#define AMDGPU_RESET_DMA		(1 << 2)
#define AMDGPU_RESET_CP			(1 << 3)
#define AMDGPU_RESET_GRBM		(1 << 4)
#define AMDGPU_RESET_DMA1		(1 << 5)
#define AMDGPU_RESET_RLC		(1 << 6)
#define AMDGPU_RESET_SEM		(1 << 7)
#define AMDGPU_RESET_IH			(1 << 8)
#define AMDGPU_RESET_VMC		(1 << 9)
#define AMDGPU_RESET_MC			(1 << 10)
#define AMDGPU_RESET_DISPLAY		(1 << 11)
#define AMDGPU_RESET_UVD		(1 << 12)
#define AMDGPU_RESET_VCE		(1 << 13)
#define AMDGPU_RESET_VCE1		(1 << 14)

/* reset mask */
#define AMDGPU_RESET_TYPE_FULL		(1 << 0)	/* full adapter reset, mode1/mode2/BACO/etc. */
#define AMDGPU_RESET_TYPE_SOFT_RESET	(1 << 1)	/* IP level soft reset */
#define AMDGPU_RESET_TYPE_PER_QUEUE	(1 << 2)	/* per queue */
#define AMDGPU_RESET_TYPE_PER_PIPE	(1 << 3)	/* per pipe */

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH	128
#define CIK_CURSOR_HEIGHT	128

/* smart shift bias level limits */
#define AMDGPU_SMARTSHIFT_MAX_BIAS	(100)
#define AMDGPU_SMARTSHIFT_MIN_BIAS	(-100)

/* Extra delay (in ms) to eliminate the influence of momentary temperature fluctuations */
#define AMDGPU_SWCTF_EXTRA_DELAY	50

struct amdgpu_xcp_mgr;
struct amdgpu_device;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct kfd_vm_fault_info;
struct amdgpu_hive_info;
struct amdgpu_reset_context;
struct amdgpu_reset_control;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
	AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
	AMDGPU_CP_KIQ_IRQ_LAST
};
#define MAX_KIQ_REG_WAIT		5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY			1000

int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
			       enum amd_ip_block_type block_type);
int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);

int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);

#define AMDGPU_MAX_IP_NUM	16

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_ip_block_version {
	const enum amd_ip_block_type type;
	const u32 major;
	const u32 minor;
	const u32 rev;
	const struct amd_ip_funcs *funcs;
};

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
	struct amdgpu_device *adev;
};

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor);

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type);

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version);

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);
bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes);
void amdgpu_bios_release(struct amdgpu_device *adev);
/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, like the
 * indirect buffers or semaphores, which all have their own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object, we first check whether there is room
 * at the end, i.e. total_size - (last_object_offset + last_object_size)
 * >= alloc_size. If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we wait for each sub object
 * in turn until we reach object_offset + object_size >= alloc_size; that
 * object's place then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation, to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */

struct amdgpu_sa_manager {
	struct drm_suballoc_manager base;
	struct amdgpu_bo *bo;
	uint64_t gpu_addr;
	void *cpu_ptr;
};
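
/*
 * Illustrative sketch only (not part of this header's contract): the IB
 * pools in struct amdgpu_device below are amdgpu_sa_manager instances, and
 * users typically sub-allocate against them and free against a fence,
 * assuming the amdgpu_sa_bo_new()/amdgpu_sa_bo_free() helpers declared
 * elsewhere in the driver:
 *
 *	struct drm_suballoc *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
 *			     &sa_bo, size);
 *	if (!r)
 *		// drm_suballoc_soffset(sa_bo) is the offset into the BO
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence); // reclaimed once fence signals
 */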

int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct delayed_work flip_work;
	struct work_struct unpin_work;
	struct amdgpu_device *adev;
	int crtc_id;
	u32 target_vblank;
	uint64_t base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo *old_abo;
	unsigned shared_count;
	struct dma_fence **shared;
	struct dma_fence_cb cb;
	bool async;
};


/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm vm;
	struct amdgpu_bo_va *prt_va;
	struct amdgpu_bo_va *csa_va;
	struct amdgpu_bo_va *seq64_va;
	struct mutex bo_list_lock;
	struct idr bo_list_handles;
	struct amdgpu_ctx_mgr ctx_mgr;
	/** GPU partition selection */
	uint32_t xcp_id;
};

int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
	u32 num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
	spinlock_t lock;
};

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);

/*
 * Benchmarking
 */
int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool grbm_indexed;
};

/**
 * enum amd_reset_method - Methods for resetting AMD GPU devices
 *
 * @AMD_RESET_METHOD_NONE: The device will not be reset.
 * @AMD_RESET_METHOD_LEGACY: Method reserved for SI, CIK and VI ASICs.
 * @AMD_RESET_METHOD_MODE0: Reset the entire ASIC. Not currently available
 *                          for any device.
 * @AMD_RESET_METHOD_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN,
 *                          etc.) individually. Suitable only for some
 *                          discrete GPUs, not available for all ASICs.
 * @AMD_RESET_METHOD_MODE2: Resets a lesser level of IPs compared to MODE1.
 *                          Which IPs are reset depends on the ASIC. Notably
 *                          doesn't reset IPs shared with the CPU on APUs or
 *                          the memory controllers (so VRAM is not lost). Not
 *                          available on all ASICs.
 * @AMD_RESET_METHOD_BACO: BACO (Bus Alive, Chip Off) method powers off and on
 *                         the card but without powering off the PCI bus.
 *                         Suitable only for discrete GPUs.
 * @AMD_RESET_METHOD_PCI: Does a full bus reset using the core Linux PCI reset
 *                        subsystem and does a secondary bus reset or FLR,
 *                        depending on what the underlying hardware supports.
 * @AMD_RESET_METHOD_ON_INIT: The reset, if any, is performed during driver
 *                            initialization rather than on demand.
 *
 * Methods available to the AMD GPU driver for resetting the device. Not all
 * methods are suitable for every device. The user can override the method
 * using the module parameter `reset_method`.
 */
enum amd_reset_method {
	AMD_RESET_METHOD_NONE = -1,
	AMD_RESET_METHOD_LEGACY = 0,
	AMD_RESET_METHOD_MODE0,
	AMD_RESET_METHOD_MODE1,
	AMD_RESET_METHOD_MODE2,
	AMD_RESET_METHOD_BACO,
	AMD_RESET_METHOD_PCI,
	AMD_RESET_METHOD_ON_INIT,
};

struct amdgpu_video_codec_info {
	u32 codec_type;
	u32 max_width;
	u32 max_height;
	u32 max_pixels_per_frame;
	u32 max_level;
};

#define codec_info_build(type, width, height, level) \
	.codec_type = type,\
	.max_width = width,\
	.max_height = height,\
	.max_pixels_per_frame = height * width,\
	.max_level = level,
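
/*
 * Hedged example of how an ASIC file might use codec_info_build() to
 * populate a codec table (the codec index is from amdgpu_drm.h; the
 * width, height and level values here are purely illustrative):
 *
 *	static const struct amdgpu_video_codec_info codecs[] = {
 *		{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
 *				  4096, 4096, 52)},
 *	};
 */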

struct amdgpu_video_codecs {
	const u32 codec_count;
	const struct amdgpu_video_codec_info *codec_array;
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
	/* get config memsize register */
	u32 (*get_config_memsize)(struct amdgpu_device *adev);
	/* flush hdp write queue */
	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	/* invalidate hdp read cache */
	void (*invalidate_hdp)(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring);
	/* check if the asic needs a full reset or if soft reset will work */
	bool (*need_full_reset)(struct amdgpu_device *adev);
	/* initialize doorbell layout for specific asic */
	void (*init_doorbell_index)(struct amdgpu_device *adev);
	/* PCIe bandwidth usage */
	void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
			       uint64_t *count1);
	/* do we need to reset the asic at init time (e.g., kexec) */
	bool (*need_reset_on_init)(struct amdgpu_device *adev);
	/* PCIe replay counter */
	uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
	/* device supports BACO */
	int (*supports_baco)(struct amdgpu_device *adev);
	/* pre asic_init quirks */
	void (*pre_asic_init)(struct amdgpu_device *adev);
	/* enter/exit umd stable pstate */
	int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
	/* query video codecs */
	int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
				  const struct amdgpu_video_codecs **codecs);
	/* encode SMN addressing above 32 bits */
	u64 (*encode_ext_smn_addressing)(int ext_id);

	ssize_t (*get_reg_state)(struct amdgpu_device *adev,
				 enum amdgpu_reg_state reg_state, void *buf,
				 size_t max_size);
};

/*
 * IOCTL.
 */
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_mem_scratch {
	struct amdgpu_bo *robj;
	volatile uint32_t *ptr;
	u64 gpu_addr;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t);
typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t);

typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);

typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device*, uint64_t);
typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device*, uint64_t, uint64_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_mmio_remap {
	u32 reg_offset;
	resource_size_t bus_addr;
};

/* Define the HW IP blocks used in the driver; add more if necessary */
enum amd_hw_ip_block_type {
	GC_HWIP = 1,
	HDP_HWIP,
	SDMA0_HWIP,
	SDMA1_HWIP,
	SDMA2_HWIP,
	SDMA3_HWIP,
	SDMA4_HWIP,
	SDMA5_HWIP,
	SDMA6_HWIP,
	SDMA7_HWIP,
	LSDMA_HWIP,
	MMHUB_HWIP,
	ATHUB_HWIP,
	NBIO_HWIP,
	MP0_HWIP,
	MP1_HWIP,
	UVD_HWIP,
	VCN_HWIP = UVD_HWIP,
	JPEG_HWIP = VCN_HWIP,
	VCN1_HWIP,
	VCE_HWIP,
	VPE_HWIP,
	DF_HWIP,
	DCE_HWIP,
	OSSSYS_HWIP,
	SMUIO_HWIP,
	PWR_HWIP,
	NBIF_HWIP,
	THM_HWIP,
	CLK_HWIP,
	UMC_HWIP,
	RSMU_HWIP,
	XGMI_HWIP,
	DCI_HWIP,
	PCIE_HWIP,
	ISP_HWIP,
	MAX_HWIP
};

#define HWIP_MAX_INSTANCE	44

#define HW_ID_MAX		300
#define IP_VERSION_FULL(mj, mn, rv, var, srev) \
	(((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
#define IP_VERSION(mj, mn, rv)		IP_VERSION_FULL(mj, mn, rv, 0, 0)
#define IP_VERSION_MAJ(ver)		((ver) >> 24)
#define IP_VERSION_MIN(ver)		(((ver) >> 16) & 0xFF)
#define IP_VERSION_REV(ver)		(((ver) >> 8) & 0xFF)
#define IP_VERSION_VARIANT(ver)		(((ver) >> 4) & 0xF)
#define IP_VERSION_SUBREV(ver)		((ver) & 0xF)
#define IP_VERSION_MAJ_MIN_REV(ver)	((ver) >> 8)
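
/*
 * Worked example: IP_VERSION(9, 4, 3) encodes to 0x09040300, and
 * IP_VERSION_MAJ_MIN_REV() drops the variant and subrevision nibbles,
 * so full versions 0x09040310 and 0x09040300 both compare as 0x090403
 * at the major/minor/rev level.
 */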

struct amdgpu_ip_map_info {
	/* Map of logical to actual dev instances/mask */
	uint32_t dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE];
	int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev,
				      enum amd_hw_ip_block_type block,
				      int8_t inst);
	uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev,
					enum amd_hw_ip_block_type block,
					uint32_t mask);
};

struct amd_powerplay {
	void *pp_handle;
	const struct amd_pm_funcs *pp_funcs;
};

struct ip_discovery_top;

/* polaris10 kickers */
#define ASICID_IS_P20(did, rid)		(((did == 0x67DF) && \
					  ((rid == 0xE3) || \
					   (rid == 0xE4) || \
					   (rid == 0xE5) || \
					   (rid == 0xE7) || \
					   (rid == 0xEF))) || \
					 ((did == 0x6FDF) && \
					  ((rid == 0xE7) || \
					   (rid == 0xEF) || \
					   (rid == 0xFF))))

#define ASICID_IS_P30(did, rid)		((did == 0x67DF) && \
					 ((rid == 0xE1) || \
					  (rid == 0xF7)))

/* polaris11 kickers */
#define ASICID_IS_P21(did, rid)		(((did == 0x67EF) && \
					  ((rid == 0xE0) || \
					   (rid == 0xE5))) || \
					 ((did == 0x67FF) && \
					  ((rid == 0xCF) || \
					   (rid == 0xEF) || \
					   (rid == 0xFF))))

#define ASICID_IS_P31(did, rid)		((did == 0x67EF) && \
					 ((rid == 0xE2)))

/* polaris12 kickers */
#define ASICID_IS_P23(did, rid)		(((did == 0x6987) && \
					  ((rid == 0xC0) || \
					   (rid == 0xC1) || \
					   (rid == 0xC3) || \
					   (rid == 0xC7))) || \
					 ((did == 0x6981) && \
					  ((rid == 0x00) || \
					   (rid == 0x01) || \
					   (rid == 0x10))))

struct amdgpu_mqd_prop {
	uint64_t mqd_gpu_addr;
	uint64_t hqd_base_gpu_addr;
	uint64_t rptr_gpu_addr;
	uint64_t wptr_gpu_addr;
	uint32_t queue_size;
	bool use_doorbell;
	uint32_t doorbell_index;
	uint64_t eop_gpu_addr;
	uint32_t hqd_pipe_priority;
	uint32_t hqd_queue_priority;
	bool allow_tunneling;
	bool hqd_active;
};

struct amdgpu_mqd {
	unsigned mqd_size;
	int (*init_mqd)(struct amdgpu_device *adev, void *mqd,
			struct amdgpu_mqd_prop *p);
};

/*
 * Custom init levels can be defined for situations where a full
 * initialization of all hardware blocks is not expected. Sample cases are
 * custom init sequences after resume from S0i3/S3, reset on initialization,
 * partial reset of blocks, etc. Each level is described alongside the
 * corresponding struct definitions - amdgpu_init_default,
 * amdgpu_init_minimal_xgmi.
 */
enum amdgpu_init_lvl_id {
	AMDGPU_INIT_LEVEL_DEFAULT,
	AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
	AMDGPU_INIT_LEVEL_RESET_RECOVERY,
};

struct amdgpu_init_level {
	enum amdgpu_init_lvl_id level;
	uint32_t hwini_ip_block_mask;
};

#define AMDGPU_RESET_MAGIC_NUM	64
#define AMDGPU_MAX_DF_PERFMONS	4
struct amdgpu_reset_domain;
struct amdgpu_fru_info;

/*
 * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
 */
#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)

struct amdgpu_device {
	struct device *dev;
	struct pci_dev *pdev;
	struct drm_device ddev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp acp;
#endif
	struct amdgpu_hive_info *hive;
	struct amdgpu_xcp_mgr *xcp_mgr;
	/* ASIC */
	enum amd_asic_type asic_type;
	uint32_t family;
	uint32_t rev_id;
	uint32_t external_rev_id;
	unsigned long flags;
	unsigned long apu_flags;
	int usec_timeout;
	const struct amdgpu_asic_funcs *asic_funcs;
	bool shutdown;
	bool need_swiotlb;
	bool accel_working;
	struct notifier_block acpi_nb;
	struct notifier_block pm_nb;
	struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct debugfs_blob_wrapper debugfs_vbios_blob;
	struct debugfs_blob_wrapper debugfs_discovery_blob;
	struct mutex srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex grbm_idx_mutex;
	struct dev_pm_domain vga_pm_domain;
	bool have_disp_power_ref;
	bool have_atomics_support;

	/* BIOS */
	bool is_atom_fw;
	uint8_t *bios;
	uint32_t bios_size;
	uint32_t bios_scratch_reg_offset;
	uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t rmmio_base;
	resource_size_t rmmio_size;
	void __iomem *rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t mmio_idx_lock;
	struct amdgpu_mmio_remap rmmio_remap;
	/* protects concurrent SMC based register access */
	spinlock_t smc_idx_lock;
	amdgpu_rreg_t smc_rreg;
	amdgpu_wreg_t smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t pcie_idx_lock;
	amdgpu_rreg_t pcie_rreg;
	amdgpu_wreg_t pcie_wreg;
	amdgpu_rreg_t pciep_rreg;
	amdgpu_wreg_t pciep_wreg;
	amdgpu_rreg_ext_t pcie_rreg_ext;
	amdgpu_wreg_ext_t pcie_wreg_ext;
	amdgpu_rreg64_t pcie_rreg64;
	amdgpu_wreg64_t pcie_wreg64;
	amdgpu_rreg64_ext_t pcie_rreg64_ext;
	amdgpu_wreg64_ext_t pcie_wreg64_ext;
	/* protects concurrent UVD register access */
	spinlock_t uvd_ctx_idx_lock;
	amdgpu_rreg_t uvd_ctx_rreg;
	amdgpu_wreg_t uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t didt_idx_lock;
	amdgpu_rreg_t didt_rreg;
	amdgpu_wreg_t didt_wreg;
	/* protects concurrent gc_cac register access */
	spinlock_t gc_cac_idx_lock;
	amdgpu_rreg_t gc_cac_rreg;
	amdgpu_wreg_t gc_cac_wreg;
	/* protects concurrent se_cac register access */
	spinlock_t se_cac_idx_lock;
	amdgpu_rreg_t se_cac_rreg;
	amdgpu_wreg_t se_cac_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t audio_endpt_idx_lock;
	amdgpu_block_rreg_t audio_endpt_rreg;
	amdgpu_block_wreg_t audio_endpt_wreg;
	struct amdgpu_doorbell doorbell;

	/* clock/pll info */
	struct amdgpu_clock clock;

	/* MC */
	struct amdgpu_gmc gmc;
	struct amdgpu_gart gart;
	dma_addr_t dummy_page_addr;
	struct amdgpu_vm_manager vm_manager;
	struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];
	DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS);

	/* memory management */
	struct amdgpu_mman mman;
	struct amdgpu_mem_scratch mem_scratch;
	struct amdgpu_wb wb;
	atomic64_t num_bytes_moved;
	atomic64_t num_evictions;
	atomic64_t num_vram_cpu_page_faults;
	atomic_t gpu_reset_counter;
	atomic_t vram_lost_counter;

	/* data for buffer migration throttling */
	struct {
		spinlock_t lock;
		s64 last_update_us;
		s64 accum_us; /* accumulated microseconds */
		s64 accum_us_vis; /* for visible VRAM */
		u32 log2_max_MBps;
	} mm_stats;

	/* display */
	bool enable_virtual_display;
	struct amdgpu_vkms_output *amdgpu_vkms_output;
	struct amdgpu_mode_info mode_info;
	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
	struct delayed_work hotplug_work;
	struct amdgpu_irq_src crtc_irq;
	struct amdgpu_irq_src vline0_irq;
	struct amdgpu_irq_src vupdate_irq;
	struct amdgpu_irq_src pageflip_irq;
	struct amdgpu_irq_src hpd_irq;
	struct amdgpu_irq_src dmub_trace_irq;
	struct amdgpu_irq_src dmub_outbox_irq;

	/* rings */
	u64 fence_context;
	unsigned num_rings;
	struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
	struct dma_fence __rcu *gang_submit;
	bool ib_pool_ready;
	struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX];
	struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];

	/* interrupts */
	struct amdgpu_irq irq;

	/* powerplay */
	struct amd_powerplay powerplay;
	struct amdgpu_pm pm;
	u64 cg_flags;
	u32 pg_flags;

	/* nbio */
	struct amdgpu_nbio nbio;

	/* hdp */
	struct amdgpu_hdp hdp;

	/* smuio */
	struct amdgpu_smuio smuio;

	/* mmhub */
	struct amdgpu_mmhub mmhub;

	/* gfxhub */
	struct amdgpu_gfxhub gfxhub;

	/* gfx */
	struct amdgpu_gfx gfx;

	/* sdma */
	struct amdgpu_sdma sdma;

	/* lsdma */
	struct amdgpu_lsdma lsdma;

	/* uvd */
	struct amdgpu_uvd uvd;

	/* vce */
	struct amdgpu_vce vce;

	/* vcn */
	struct amdgpu_vcn vcn;

	/* jpeg */
	struct amdgpu_jpeg jpeg;

	/* vpe */
	struct amdgpu_vpe vpe;

	/* umsch */
	struct amdgpu_umsch_mm umsch_mm;
	bool enable_umsch_mm;

	/* firmwares */
	struct amdgpu_firmware firmware;

	/* PSP */
	struct psp_context psp;

	/* GDS */
	struct amdgpu_gds gds;

	/* for userq and VM fences */
	struct amdgpu_seq64 seq64;

	/* KFD */
	struct amdgpu_kfd_dev kfd;

	/* UMC */
	struct amdgpu_umc umc;

	/* display related functionality */
	struct amdgpu_display_manager dm;

#if defined(CONFIG_DRM_AMD_ISP)
	/* isp */
	struct amdgpu_isp isp;
#endif

	/* mes */
	bool enable_mes;
	bool enable_mes_kiq;
	bool enable_uni_mes;
	struct amdgpu_mes mes;
	struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM];

	/* df */
	struct amdgpu_df df;

	/* MCA */
	struct amdgpu_mca mca;

	/* ACA */
	struct amdgpu_aca aca;

	/* CPER */
	struct amdgpu_cper cper;

	struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
	uint32_t harvest_ip_mask;
	int num_ip_blocks;
	struct mutex mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	atomic64_t vram_pin_size;
	atomic64_t visible_pin_size;
	atomic64_t gart_pin_size;

	/* soc15 register offset based on ip, instance and segment */
	uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
	struct amdgpu_ip_map_info ip_map;

	/* delayed work_func for deferring clockgating during resume */
	struct delayed_work delayed_init_work;

	struct amdgpu_virt virt;

	/* record hw reset is performed */
	bool has_hw_reset;
	u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];

	/* s3/s4 mask */
	bool in_suspend;
	bool in_s3;
	bool in_s4;
	bool in_s0ix;
	suspend_state_t last_suspend_state;

	enum pp_mp1_state mp1_state;
	struct amdgpu_doorbell_index doorbell_index;

	struct mutex notifier_lock;

	int asic_reset_res;
	struct work_struct xgmi_reset_work;
	struct list_head reset_list;

	long gfx_timeout;
	long sdma_timeout;
	long video_timeout;
	long compute_timeout;
	long psp_timeout;

	uint64_t unique_id;
	uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];

	/* enable runtime pm on the device */
	bool in_runpm;
	bool has_pr3;

	bool ucode_sysfs_en;

	struct amdgpu_fru_info *fru_info;
	atomic_t throttling_logging_enabled;
	struct ratelimit_state throttling_logging_rs;
	uint32_t ras_hw_enabled;
	uint32_t ras_enabled;
	bool ras_default_ecc_enabled;

	bool no_hw_access;
	struct pci_saved_state *pci_state;
	pci_channel_state_t pci_channel_state;

	/* Track auto wait count on s_barrier settings */
	bool barrier_has_auto_waitcnt;

	struct amdgpu_reset_control *reset_cntl;
	uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];

	bool ram_is_direct_mapped;

	struct list_head ras_list;

	struct ip_discovery_top *ip_top;

	struct amdgpu_reset_domain *reset_domain;

	struct mutex benchmark_mutex;

	bool scpm_enabled;
	uint32_t scpm_status;

	struct work_struct reset_work;

	bool dc_enabled;
	/* Mask of active clusters */
	uint32_t aid_mask;

	/* Debug */
	bool debug_vm;
	bool debug_largebar;
	bool debug_disable_soft_recovery;
	bool debug_use_vram_fw_buf;
	bool debug_enable_ras_aca;
	bool debug_exp_resets;
	bool debug_disable_gpu_ring_reset;

	/* Protection for the following isolation structure */
	struct mutex enforce_isolation_mutex;
	bool enforce_isolation[MAX_XCP];
	struct amdgpu_isolation {
		void *owner;
		struct dma_fence *spearhead;
		struct amdgpu_sync active;
		struct amdgpu_sync prev;
	} isolation[MAX_XCP];

	struct amdgpu_init_level *init_lvl;

	/* This flag is used to determine how VRAM allocations are handled for
	 * APUs in KFD: VRAM or GTT.
	 */
	bool apu_prefer_gtt;
};

static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
					 uint8_t ip, uint8_t inst)
{
	/* This considers only major/minor/rev and ignores
	 * subrevision/variant fields.
	 */
	return adev->ip_versions[ip][inst] & ~0xFFU;
}
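
/*
 * Callers typically compare the result against IP_VERSION(); e.g.
 * (illustrative):
 *
 *	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
 *		// GC 9.4.3 specific path
 */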

static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
					      uint8_t ip, uint8_t inst)
{
	/* This returns full version - major/minor/rev/variant/subrevision */
	return adev->ip_versions[ip][inst];
}

static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
{
	return container_of(ddev, struct amdgpu_device, ddev);
}

static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
{
	return &adev->ddev;
}

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
{
	return container_of(bdev, struct amdgpu_device, mman.bdev);
}

int amdgpu_device_init(struct amdgpu_device *adev,
		       uint32_t flags);
void amdgpu_device_fini_hw(struct amdgpu_device *adev);
void amdgpu_device_fini_sw(struct amdgpu_device *adev);

int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
			     void *buf, size_t size, bool write);
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write);

void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       void *buf, size_t size, bool write);
uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
				    uint32_t inst, uint32_t reg_addr, char reg_name[],
				    uint32_t expected_value, uint32_t mask);
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags);
u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
				    u64 reg_addr);
uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
				uint32_t reg, uint32_t acc_flags,
				uint32_t xcc_id);
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags);
void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
				     u64 reg_addr, u32 reg_data);
void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t v,
			    uint32_t acc_flags,
			    uint32_t xcc_id);
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v, uint32_t xcc_id);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);

u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 reg_addr);
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 reg_addr);
u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
				      u64 reg_addr);
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 reg_addr, u32 reg_data);
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 reg_addr, u64 reg_data);
void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
				       u64 reg_addr, u64 reg_data);
u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);

void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);

int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
				 struct amdgpu_reset_context *reset_context);

int amdgpu_do_asic_reset(struct list_head *device_list_handle,
			 struct amdgpu_reset_context *reset_context);

int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context);

int emu_soc_asic_init(struct amdgpu_device *adev);

/*
 * Registers read & write functions.
 */
#define AMDGPU_REGS_NO_KIQ	(1<<1)
#define AMDGPU_REGS_RLC		(1<<2)

#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0)
#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0)

#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))

#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst)
#define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg))
#define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v))
#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
#define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg))
#define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32(reg);		\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32(reg, tmp_);			\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
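
/*
 * Note the polarity of WREG32_P() above: bits set in @mask are preserved
 * from the current register value, and @val supplies the bits outside
 * @mask. That is why WREG32_AND() passes the AND mask as @mask, while
 * WREG32_OR() passes ~(or) so that only the OR bits are overwritten.
 */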
#define WREG32_PLL_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32_PLL(reg);	\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32_PLL(reg, tmp_);			\
	} while (0)

#define WREG32_SMC_P(_Reg, _Val, _Mask)			\
	do {						\
		u32 tmp = RREG32_SMC(_Reg);		\
		tmp &= (_Mask);				\
		tmp |= ((_Val) & ~(_Mask));		\
		WREG32_SMC(_Reg, tmp);			\
	} while (0)

#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
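
/*
 * Typical read-modify-write with these helpers (the register and field
 * names here are illustrative only):
 *
 *	u32 tmp = RREG32(mmFOO_CNTL);
 *	tmp = REG_SET_FIELD(tmp, FOO_CNTL, ENABLE, 1);
 *	WREG32(mmFOO_CNTL, tmp);
 */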

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l))
/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
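
/*
 * RBIOS16()/RBIOS32() assemble multi-byte values from the BIOS image in
 * little-endian byte order, as the macro bodies above show.
 */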

/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) \
	((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0)
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_asic_flush_hdp(adev, r) \
	((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
#define amdgpu_asic_invalidate_hdp(adev, r) \
	((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
	 ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
#define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
	((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
#define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))

#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter))

#define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
#define for_each_inst(i, inst_mask)        \
	for (i = ffs(inst_mask); i-- != 0; \
	     i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))
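
/*
 * for_each_inst() visits @i over the set bit positions of @inst_mask in
 * ascending order; e.g. inst_mask == 0xb visits i = 0, 1 and 3.
 */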

/* Common functions */
bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job,
			      struct amdgpu_reset_context *reset_context);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size);

int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_px(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
int amdgpu_device_supports_baco(struct drm_device *dev);
void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
				      struct amdgpu_device *peer_adev);
int amdgpu_device_baco_enter(struct drm_device *dev);
int amdgpu_device_baco_exit(struct drm_device *dev);

void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring);
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring);

void amdgpu_device_halt(struct amdgpu_device *adev);
u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
				 u32 reg);
void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				  u32 reg, u32 v);
struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang);
struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
						  struct amdgpu_ring *ring,
						  struct amdgpu_job *job);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void amdgpu_driver_release_kms(struct drm_device *dev);

int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
int amdgpu_info_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *filp);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;

};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */

struct amdgpu_numa_info {
	uint64_t size;
	int pxm;
	int nid;
};

/* ATCS Device/Driver State */
#define AMDGPU_ATCS_PSC_DEV_STATE_D0		0
#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT	3
#define AMDGPU_ATCS_PSC_DRV_STATE_OPR		0
#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR	1

#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
bool amdgpu_acpi_is_power_shift_control_supported(void);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
				    u8 dev_state, bool drv_state);
int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
			     u64 *tmr_size);
int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
			     struct amdgpu_numa_info *numa_info);

void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
void amdgpu_acpi_release(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev,
					   u64 *tmr_offset, u64 *tmr_size)
{
	return -EINVAL;
}
static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
					   int xcc_id,
					   struct amdgpu_numa_info *numa_info)
{
	return -EINVAL;
}
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline void amdgpu_acpi_release(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
						  u8 dev_state, bool drv_state) { return 0; }
static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
						 enum amdgpu_ss ss_state) { return 0; }
static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
#else
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif

void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);

pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state);
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
void amdgpu_pci_resume(struct pci_dev *pdev);

bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
bool amdgpu_device_load_pci_state(struct pci_dev *pdev);

bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);

int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
			       enum amd_clockgating_state state);
int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
			       enum amd_powergating_state state);

static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
{
	return amdgpu_gpu_recovery != 0 &&
		adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
		adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
		adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
		adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
}

#include "amdgpu_object.h"

static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
{
	return adev->gmc.tmz_enabled;
}

int amdgpu_in_reset(struct amdgpu_device *adev);

extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
extern const struct attribute_group amdgpu_flash_attr_group;

void amdgpu_set_init_level(struct amdgpu_device *adev,
			   enum amdgpu_init_lvl_id lvl);
#endif