1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22 
23 #ifndef KFD_IOCTL_H_INCLUDED
24 #define KFD_IOCTL_H_INCLUDED
25 
26 #include <drm/drm.h>
27 #include <linux/ioctl.h>
28 
29 /*
30  * - 1.1 - initial version
31  * - 1.3 - Add SMI events support
32  * - 1.4 - Indicate new SRAM EDC bit in device properties
33  * - 1.5 - Add SVM API
34  * - 1.6 - Query clear flags in SVM get_attr API
35  * - 1.7 - Checkpoint Restore (CRIU) API
36  * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
37  * - 1.9 - Add available memory ioctl
38  * - 1.10 - Add SMI profiler event log
39  * - 1.11 - Add unified memory for ctx save/restore area
40  * - 1.12 - Add DMA buf export ioctl
41  * - 1.13 - Add debugger API
42  * - 1.14 - Update kfd_event_data
43  */
44 #define KFD_IOCTL_MAJOR_VERSION 1
45 #define KFD_IOCTL_MINOR_VERSION 14
46 
47 struct kfd_ioctl_get_version_args {
48 	__u32 major_version;	/* from KFD */
49 	__u32 minor_version;	/* from KFD */
50 };
51 
52 /* For kfd_ioctl_create_queue_args.queue_type. */
53 #define KFD_IOC_QUEUE_TYPE_COMPUTE		0x0
54 #define KFD_IOC_QUEUE_TYPE_SDMA			0x1
55 #define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL		0x2
56 #define KFD_IOC_QUEUE_TYPE_SDMA_XGMI		0x3
57 
58 #define KFD_MAX_QUEUE_PERCENTAGE	100
59 #define KFD_MAX_QUEUE_PRIORITY		15
60 
61 struct kfd_ioctl_create_queue_args {
62 	__u64 ring_base_address;	/* to KFD */
63 	__u64 write_pointer_address;	/* from KFD */
64 	__u64 read_pointer_address;	/* from KFD */
65 	__u64 doorbell_offset;	/* from KFD */
66 
67 	__u32 ring_size;		/* to KFD */
68 	__u32 gpu_id;		/* to KFD */
69 	__u32 queue_type;		/* to KFD */
70 	__u32 queue_percentage;	/* to KFD */
71 	__u32 queue_priority;	/* to KFD */
72 	__u32 queue_id;		/* from KFD */
73 
74 	__u64 eop_buffer_address;	/* to KFD */
75 	__u64 eop_buffer_size;	/* to KFD */
76 	__u64 ctx_save_restore_address; /* to KFD */
77 	__u32 ctx_save_restore_size;	/* to KFD */
78 	__u32 ctl_stack_size;		/* to KFD */
79 };
80 
81 struct kfd_ioctl_destroy_queue_args {
82 	__u32 queue_id;		/* to KFD */
83 	__u32 pad;
84 };
85 
86 struct kfd_ioctl_update_queue_args {
87 	__u64 ring_base_address;	/* to KFD */
88 
89 	__u32 queue_id;		/* to KFD */
90 	__u32 ring_size;		/* to KFD */
91 	__u32 queue_percentage;	/* to KFD */
92 	__u32 queue_priority;	/* to KFD */
93 };
94 
95 struct kfd_ioctl_set_cu_mask_args {
96 	__u32 queue_id;		/* to KFD */
97 	__u32 num_cu_mask;		/* to KFD */
98 	__u64 cu_mask_ptr;		/* to KFD */
99 };
100 
101 struct kfd_ioctl_get_queue_wave_state_args {
102 	__u64 ctl_stack_address;	/* to KFD */
103 	__u32 ctl_stack_used_size;	/* from KFD */
104 	__u32 save_area_used_size;	/* from KFD */
105 	__u32 queue_id;			/* to KFD */
106 	__u32 pad;
107 };
108 
109 struct kfd_ioctl_get_available_memory_args {
110 	__u64 available;	/* from KFD */
111 	__u32 gpu_id;		/* to KFD */
112 	__u32 pad;
113 };
114 
115 struct kfd_dbg_device_info_entry {
116 	__u64 exception_status;
117 	__u64 lds_base;
118 	__u64 lds_limit;
119 	__u64 scratch_base;
120 	__u64 scratch_limit;
121 	__u64 gpuvm_base;
122 	__u64 gpuvm_limit;
123 	__u32 gpu_id;
124 	__u32 location_id;
125 	__u32 vendor_id;
126 	__u32 device_id;
127 	__u32 revision_id;
128 	__u32 subsystem_vendor_id;
129 	__u32 subsystem_device_id;
130 	__u32 fw_version;
131 	__u32 gfx_target_version;
132 	__u32 simd_count;
133 	__u32 max_waves_per_simd;
134 	__u32 array_count;
135 	__u32 simd_arrays_per_engine;
136 	__u32 num_xcc;
137 	__u32 capability;
138 	__u32 debug_prop;
139 };
140 
141 /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
142 #define KFD_IOC_CACHE_POLICY_COHERENT 0
143 #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
144 
145 struct kfd_ioctl_set_memory_policy_args {
146 	__u64 alternate_aperture_base;	/* to KFD */
147 	__u64 alternate_aperture_size;	/* to KFD */
148 
149 	__u32 gpu_id;			/* to KFD */
150 	__u32 default_policy;		/* to KFD */
151 	__u32 alternate_policy;		/* to KFD */
152 	__u32 pad;
153 };
154 
155 /*
156  * All counters are monotonic. They are used for profiling of compute jobs.
157  * The profiling is done by userspace.
158  *
 * In case of GPU reset, the counters should not be affected.
160  */
161 
162 struct kfd_ioctl_get_clock_counters_args {
163 	__u64 gpu_clock_counter;	/* from KFD */
164 	__u64 cpu_clock_counter;	/* from KFD */
165 	__u64 system_clock_counter;	/* from KFD */
166 	__u64 system_clock_freq;	/* from KFD */
167 
168 	__u32 gpu_id;		/* to KFD */
169 	__u32 pad;
170 };
171 
172 struct kfd_process_device_apertures {
173 	__u64 lds_base;		/* from KFD */
174 	__u64 lds_limit;		/* from KFD */
175 	__u64 scratch_base;		/* from KFD */
176 	__u64 scratch_limit;		/* from KFD */
177 	__u64 gpuvm_base;		/* from KFD */
178 	__u64 gpuvm_limit;		/* from KFD */
179 	__u32 gpu_id;		/* from KFD */
180 	__u32 pad;
181 };
182 
183 /*
184  * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
185  * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
186  * unlimited number of GPUs.
187  */
188 #define NUM_OF_SUPPORTED_GPUS 7
189 struct kfd_ioctl_get_process_apertures_args {
190 	struct kfd_process_device_apertures
191 			process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
192 
193 	/* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
194 	__u32 num_of_nodes;
195 	__u32 pad;
196 };
197 
198 struct kfd_ioctl_get_process_apertures_new_args {
199 	/* User allocated. Pointer to struct kfd_process_device_apertures
200 	 * filled in by Kernel
201 	 */
202 	__u64 kfd_process_device_apertures_ptr;
	/* to KFD - number of struct kfd_process_device_apertures entries
	 *  allocated at kfd_process_device_apertures_ptr
	 * from KFD - number of entries filled by KFD
	 */
207 	__u32 num_of_nodes;
208 	__u32 pad;
209 };
210 
211 #define MAX_ALLOWED_NUM_POINTS    100
212 #define MAX_ALLOWED_AW_BUFF_SIZE 4096
213 #define MAX_ALLOWED_WAC_BUFF_SIZE  128
214 
215 struct kfd_ioctl_dbg_register_args {
216 	__u32 gpu_id;		/* to KFD */
217 	__u32 pad;
218 };
219 
220 struct kfd_ioctl_dbg_unregister_args {
221 	__u32 gpu_id;		/* to KFD */
222 	__u32 pad;
223 };
224 
225 struct kfd_ioctl_dbg_address_watch_args {
226 	__u64 content_ptr;		/* a pointer to the actual content */
227 	__u32 gpu_id;		/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
229 };
230 
231 struct kfd_ioctl_dbg_wave_control_args {
232 	__u64 content_ptr;		/* a pointer to the actual content */
233 	__u32 gpu_id;		/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
235 };
236 
237 #define KFD_INVALID_FD     0xffffffff
238 
239 /* Matching HSA_EVENTTYPE */
240 #define KFD_IOC_EVENT_SIGNAL			0
241 #define KFD_IOC_EVENT_NODECHANGE		1
242 #define KFD_IOC_EVENT_DEVICESTATECHANGE		2
243 #define KFD_IOC_EVENT_HW_EXCEPTION		3
244 #define KFD_IOC_EVENT_SYSTEM_EVENT		4
245 #define KFD_IOC_EVENT_DEBUG_EVENT		5
246 #define KFD_IOC_EVENT_PROFILE_EVENT		6
247 #define KFD_IOC_EVENT_QUEUE_EVENT		7
248 #define KFD_IOC_EVENT_MEMORY			8
249 
250 #define KFD_IOC_WAIT_RESULT_COMPLETE		0
251 #define KFD_IOC_WAIT_RESULT_TIMEOUT		1
252 #define KFD_IOC_WAIT_RESULT_FAIL		2
253 
254 #define KFD_SIGNAL_EVENT_LIMIT			4096
255 
256 /* For kfd_event_data.hw_exception_data.reset_type. */
257 #define KFD_HW_EXCEPTION_WHOLE_GPU_RESET	0
258 #define KFD_HW_EXCEPTION_PER_ENGINE_RESET	1
259 
260 /* For kfd_event_data.hw_exception_data.reset_cause. */
261 #define KFD_HW_EXCEPTION_GPU_HANG	0
262 #define KFD_HW_EXCEPTION_ECC		1
263 
264 /* For kfd_hsa_memory_exception_data.ErrorType */
265 #define KFD_MEM_ERR_NO_RAS		0
266 #define KFD_MEM_ERR_SRAM_ECC		1
267 #define KFD_MEM_ERR_POISON_CONSUMED	2
268 #define KFD_MEM_ERR_GPU_HANG		3
269 
270 struct kfd_ioctl_create_event_args {
271 	__u64 event_page_offset;	/* from KFD */
272 	__u32 event_trigger_data;	/* from KFD - signal events only */
273 	__u32 event_type;		/* to KFD */
274 	__u32 auto_reset;		/* to KFD */
	__u32 node_id;		/* to KFD - only valid for certain
				 * event types
				 */
277 	__u32 event_id;		/* from KFD */
278 	__u32 event_slot_index;	/* from KFD */
279 };
280 
281 struct kfd_ioctl_destroy_event_args {
282 	__u32 event_id;		/* to KFD */
283 	__u32 pad;
284 };
285 
286 struct kfd_ioctl_set_event_args {
287 	__u32 event_id;		/* to KFD */
288 	__u32 pad;
289 };
290 
291 struct kfd_ioctl_reset_event_args {
292 	__u32 event_id;		/* to KFD */
293 	__u32 pad;
294 };
295 
296 struct kfd_memory_exception_failure {
297 	__u32 NotPresent;	/* Page not present or supervisor privilege */
298 	__u32 ReadOnly;	/* Write access to a read-only page */
299 	__u32 NoExecute;	/* Execute access to a page marked NX */
	__u32 imprecise;	/* Can't determine the exact fault address */
301 };
302 
303 /* memory exception data */
304 struct kfd_hsa_memory_exception_data {
305 	struct kfd_memory_exception_failure failure;
306 	__u64 va;
307 	__u32 gpu_id;
308 	__u32 ErrorType; /* 0 = no RAS error,
309 			  * 1 = ECC_SRAM,
310 			  * 2 = Link_SYNFLOOD (poison),
311 			  * 3 = GPU hang (not attributable to a specific cause),
312 			  * other values reserved
313 			  */
314 };
315 
316 /* hw exception data */
317 struct kfd_hsa_hw_exception_data {
318 	__u32 reset_type;
319 	__u32 reset_cause;
320 	__u32 memory_lost;
321 	__u32 gpu_id;
322 };
323 
324 /* hsa signal event data */
325 struct kfd_hsa_signal_event_data {
326 	__u64 last_event_age;	/* to and from KFD */
327 };
328 
329 /* Event data */
330 struct kfd_event_data {
331 	union {
332 		/* From KFD */
333 		struct kfd_hsa_memory_exception_data memory_exception_data;
334 		struct kfd_hsa_hw_exception_data hw_exception_data;
335 		/* To and From KFD */
336 		struct kfd_hsa_signal_event_data signal_event_data;
337 	};
338 	__u64 kfd_event_data_ext;	/* pointer to an extension structure
339 					   for future exception types */
340 	__u32 event_id;		/* to KFD */
341 	__u32 pad;
342 };
343 
344 struct kfd_ioctl_wait_events_args {
	__u64 events_ptr;		/* points to struct
					   kfd_event_data array, to KFD */
347 	__u32 num_events;		/* to KFD */
348 	__u32 wait_for_all;		/* to KFD */
349 	__u32 timeout;		/* to KFD */
350 	__u32 wait_result;		/* from KFD */
351 };
352 
353 struct kfd_ioctl_set_scratch_backing_va_args {
354 	__u64 va_addr;	/* to KFD */
355 	__u32 gpu_id;	/* to KFD */
356 	__u32 pad;
357 };
358 
359 struct kfd_ioctl_get_tile_config_args {
360 	/* to KFD: pointer to tile array */
361 	__u64 tile_config_ptr;
362 	/* to KFD: pointer to macro tile array */
363 	__u64 macro_tile_config_ptr;
364 	/* to KFD: array size allocated by user mode
365 	 * from KFD: array size filled by kernel
366 	 */
367 	__u32 num_tile_configs;
368 	/* to KFD: array size allocated by user mode
369 	 * from KFD: array size filled by kernel
370 	 */
371 	__u32 num_macro_tile_configs;
372 
373 	__u32 gpu_id;		/* to KFD */
374 	__u32 gb_addr_config;	/* from KFD */
375 	__u32 num_banks;		/* from KFD */
376 	__u32 num_ranks;		/* from KFD */
377 	/* struct size can be extended later if needed
378 	 * without breaking ABI compatibility
379 	 */
380 };
381 
382 struct kfd_ioctl_set_trap_handler_args {
383 	__u64 tba_addr;		/* to KFD */
384 	__u64 tma_addr;		/* to KFD */
385 	__u32 gpu_id;		/* to KFD */
386 	__u32 pad;
387 };
388 
389 struct kfd_ioctl_acquire_vm_args {
390 	__u32 drm_fd;	/* to KFD */
391 	__u32 gpu_id;	/* to KFD */
392 };
393 
394 /* Allocation flags: memory types */
395 #define KFD_IOC_ALLOC_MEM_FLAGS_VRAM		(1 << 0)
396 #define KFD_IOC_ALLOC_MEM_FLAGS_GTT		(1 << 1)
397 #define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
398 #define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
399 #define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP	(1 << 4)
400 /* Allocation flags: attributes/access options */
401 #define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
402 #define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
403 #define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
404 #define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28)
405 #define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
406 #define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT	(1 << 26)
407 #define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED	(1 << 25)
408 #define KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT	(1 << 24)
409 
410 /* Allocate memory for later SVM (shared virtual memory) mapping.
411  *
412  * @va_addr:     virtual address of the memory to be allocated
413  *               all later mappings on all GPUs will use this address
414  * @size:        size in bytes
415  * @handle:      buffer handle returned to user mode, used to refer to
416  *               this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node;
 *               for userptrs this is overloaded to specify the CPU address
419  * @gpu_id:      device identifier
420  * @flags:       memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
421  */
422 struct kfd_ioctl_alloc_memory_of_gpu_args {
423 	__u64 va_addr;		/* to KFD */
424 	__u64 size;		/* to KFD */
425 	__u64 handle;		/* from KFD */
426 	__u64 mmap_offset;	/* to KFD (userptr), from KFD (mmap offset) */
427 	__u32 gpu_id;		/* to KFD */
428 	__u32 flags;
429 };
430 
431 /* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
432  *
433  * @handle: memory handle returned by alloc
434  */
435 struct kfd_ioctl_free_memory_of_gpu_args {
436 	__u64 handle;		/* to KFD */
437 };
438 
439 /* Map memory to one or more GPUs
440  *
441  * @handle:                memory handle returned by alloc
442  * @device_ids_array_ptr:  array of gpu_ids (__u32 per device)
443  * @n_devices:             number of devices in the array
444  * @n_success:             number of devices mapped successfully
445  *
 * @n_success returns to the caller how many devices from
447  * the start of the array have mapped the buffer successfully. It can
448  * be passed into a subsequent retry call to skip those devices. For
449  * the first call the caller should initialize it to 0.
450  *
451  * If the ioctl completes with return code 0 (success), n_success ==
452  * n_devices.
453  */
454 struct kfd_ioctl_map_memory_to_gpu_args {
455 	__u64 handle;			/* to KFD */
456 	__u64 device_ids_array_ptr;	/* to KFD */
457 	__u32 n_devices;		/* to KFD */
458 	__u32 n_success;		/* to/from KFD */
459 };
460 
461 /* Unmap memory from one or more GPUs
462  *
463  * same arguments as for mapping
464  */
465 struct kfd_ioctl_unmap_memory_from_gpu_args {
466 	__u64 handle;			/* to KFD */
467 	__u64 device_ids_array_ptr;	/* to KFD */
468 	__u32 n_devices;		/* to KFD */
469 	__u32 n_success;		/* to/from KFD */
470 };
471 
472 /* Allocate GWS for specific queue
473  *
 * @queue_id:    id of the queue that GWS is allocated for
 * @num_gws:     how many GWS to allocate
 * @first_gws:   index of the first GWS allocated;
 *               only contiguous GWS allocation is supported
478  */
479 struct kfd_ioctl_alloc_queue_gws_args {
480 	__u32 queue_id;		/* to KFD */
481 	__u32 num_gws;		/* to KFD */
482 	__u32 first_gws;	/* from KFD */
483 	__u32 pad;
484 };
485 
486 struct kfd_ioctl_get_dmabuf_info_args {
487 	__u64 size;		/* from KFD */
488 	__u64 metadata_ptr;	/* to KFD */
489 	__u32 metadata_size;	/* to KFD (space allocated by user)
490 				 * from KFD (actual metadata size)
491 				 */
492 	__u32 gpu_id;	/* from KFD */
493 	__u32 flags;		/* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
494 	__u32 dmabuf_fd;	/* to KFD */
495 };
496 
497 struct kfd_ioctl_import_dmabuf_args {
498 	__u64 va_addr;	/* to KFD */
499 	__u64 handle;	/* from KFD */
500 	__u32 gpu_id;	/* to KFD */
501 	__u32 dmabuf_fd;	/* to KFD */
502 };
503 
504 struct kfd_ioctl_export_dmabuf_args {
505 	__u64 handle;		/* to KFD */
506 	__u32 flags;		/* to KFD */
507 	__u32 dmabuf_fd;	/* from KFD */
508 };
509 
510 /*
511  * KFD SMI(System Management Interface) events
512  */
513 enum kfd_smi_event {
514 	KFD_SMI_EVENT_NONE = 0, /* not used */
	KFD_SMI_EVENT_VMFAULT = 1, /* events start counting at 1 */
516 	KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
517 	KFD_SMI_EVENT_GPU_PRE_RESET = 3,
518 	KFD_SMI_EVENT_GPU_POST_RESET = 4,
519 	KFD_SMI_EVENT_MIGRATE_START = 5,
520 	KFD_SMI_EVENT_MIGRATE_END = 6,
521 	KFD_SMI_EVENT_PAGE_FAULT_START = 7,
522 	KFD_SMI_EVENT_PAGE_FAULT_END = 8,
523 	KFD_SMI_EVENT_QUEUE_EVICTION = 9,
524 	KFD_SMI_EVENT_QUEUE_RESTORE = 10,
525 	KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
526 
527 	/*
528 	 * max event number, as a flag bit to get events from all processes,
529 	 * this requires super user permission, otherwise will not be able to
530 	 * receive event from any process. Without this flag to receive events
531 	 * from same process.
532 	 */
533 	KFD_SMI_EVENT_ALL_PROCESS = 64
534 };
535 
536 enum KFD_MIGRATE_TRIGGERS {
537 	KFD_MIGRATE_TRIGGER_PREFETCH,
538 	KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
539 	KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
540 	KFD_MIGRATE_TRIGGER_TTM_EVICTION
541 };
542 
543 enum KFD_QUEUE_EVICTION_TRIGGERS {
544 	KFD_QUEUE_EVICTION_TRIGGER_SVM,
545 	KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
546 	KFD_QUEUE_EVICTION_TRIGGER_TTM,
547 	KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
548 	KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
549 	KFD_QUEUE_EVICTION_CRIU_RESTORE
550 };
551 
552 enum KFD_SVM_UNMAP_TRIGGERS {
553 	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
554 	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
555 	KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
556 };
557 
558 #define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
559 #define KFD_SMI_EVENT_MSG_SIZE	96
560 
561 struct kfd_ioctl_smi_events_args {
562 	__u32 gpuid;	/* to KFD */
563 	__u32 anon_fd;	/* from KFD */
564 };
565 
566 /**************************************************************************************************
567  * CRIU IOCTLs (Checkpoint Restore In Userspace)
568  *
569  * When checkpointing a process, the userspace application will perform:
570  * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts
571  *    all the queues.
572  * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
573  * 3. UNPAUSE op to un-evict all the queues
574  *
575  * When restoring a process, the CRIU userspace application will perform:
576  *
577  * 1. RESTORE op to restore process contents
578  * 2. RESUME op to start the process
579  *
580  * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. User
581  * application needs to perform an UNPAUSE operation after calling PROCESS_INFO.
582  */
583 
584 enum kfd_criu_op {
585 	KFD_CRIU_OP_PROCESS_INFO,
586 	KFD_CRIU_OP_CHECKPOINT,
587 	KFD_CRIU_OP_UNPAUSE,
588 	KFD_CRIU_OP_RESTORE,
589 	KFD_CRIU_OP_RESUME,
590 };
591 
592 /**
 * kfd_ioctl_criu_args - Arguments to perform a CRIU operation
594  * @devices:		[in/out] User pointer to memory location for devices information.
595  * 			This is an array of type kfd_criu_device_bucket.
596  * @bos:		[in/out] User pointer to memory location for BOs information
597  * 			This is an array of type kfd_criu_bo_bucket.
598  * @priv_data:		[in/out] User pointer to memory location for private data
599  * @priv_data_size:	[in/out] Size of priv_data in bytes
600  * @num_devices:	[in/out] Number of GPUs used by process. Size of @devices array.
 * @num_bos:		[in/out] Number of BOs used by process. Size of @bos array.
602  * @num_objects:	[in/out] Number of objects used by process. Objects are opaque to
603  *				 user application.
604  * @pid:		[in/out] PID of the process being checkpointed
 * @op:			[in] Type of operation (kfd_criu_op)
606  *
607  * Return: 0 on success, -errno on failure
608  */
609 struct kfd_ioctl_criu_args {
610 	__u64 devices;		/* Used during ops: CHECKPOINT, RESTORE */
611 	__u64 bos;		/* Used during ops: CHECKPOINT, RESTORE */
612 	__u64 priv_data;	/* Used during ops: CHECKPOINT, RESTORE */
613 	__u64 priv_data_size;	/* Used during ops: PROCESS_INFO, RESTORE */
614 	__u32 num_devices;	/* Used during ops: PROCESS_INFO, RESTORE */
615 	__u32 num_bos;		/* Used during ops: PROCESS_INFO, RESTORE */
616 	__u32 num_objects;	/* Used during ops: PROCESS_INFO, RESTORE */
617 	__u32 pid;		/* Used during ops: PROCESS_INFO, RESUME */
618 	__u32 op;
619 };
620 
621 struct kfd_criu_device_bucket {
622 	__u32 user_gpu_id;
623 	__u32 actual_gpu_id;
624 	__u32 drm_fd;
625 	__u32 pad;
626 };
627 
628 struct kfd_criu_bo_bucket {
629 	__u64 addr;
630 	__u64 size;
631 	__u64 offset;
632 	__u64 restored_offset;    /* During restore, updated offset for BO */
633 	__u32 gpu_id;             /* This is the user_gpu_id */
634 	__u32 alloc_flags;
635 	__u32 dmabuf_fd;
636 	__u32 pad;
637 };
638 
639 /* CRIU IOCTLs - END */
640 /**************************************************************************************************/
641 
642 /* Register offset inside the remapped mmio page
643  */
644 enum kfd_mmio_remap {
645 	KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
646 	KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
647 };
648 
649 /* Guarantee host access to memory */
650 #define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
651 /* Fine grained coherency between all devices with access */
652 #define KFD_IOCTL_SVM_FLAG_COHERENT    0x00000002
653 /* Use any GPU in same hive as preferred device */
654 #define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL  0x00000004
655 /* GPUs only read, allows replication */
656 #define KFD_IOCTL_SVM_FLAG_GPU_RO      0x00000008
657 /* Allow execution on GPU */
658 #define KFD_IOCTL_SVM_FLAG_GPU_EXEC    0x00000010
659 /* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
660 #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY     0x00000020
/* Keep GPU memory mapping always valid as if XNACK is disabled */
662 #define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED   0x00000040
663 /* Fine grained coherency between all devices using device-scope atomics */
664 #define KFD_IOCTL_SVM_FLAG_EXT_COHERENT        0x00000080
665 
666 /**
667  * kfd_ioctl_svm_op - SVM ioctl operations
668  *
669  * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
670  * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
671  */
672 enum kfd_ioctl_svm_op {
673 	KFD_IOCTL_SVM_OP_SET_ATTR,
674 	KFD_IOCTL_SVM_OP_GET_ATTR
675 };
676 
/**
 * kfd_ioctl_svm_location - Enum for preferred and prefetch locations
678  *
679  * GPU IDs are used to specify GPUs as preferred and prefetch locations.
680  * Below definitions are used for system memory or for leaving the preferred
681  * location unspecified.
682  */
683 enum kfd_ioctl_svm_location {
684 	KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
685 	KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
686 };
687 
688 /**
689  * kfd_ioctl_svm_attr_type - SVM attribute types
690  *
691  * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
692  *                                    system memory
693  * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
694  *                                   system memory. Setting this triggers an
695  *                                   immediate prefetch (migration).
696  * @KFD_IOCTL_SVM_ATTR_ACCESS:
697  * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
698  * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
699  *                                by the attribute value
700  * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
701  *                                KFD_IOCTL_SVM_FLAG_...)
702  * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
703  * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
704  *                                  (log2 num pages)
705  */
706 enum kfd_ioctl_svm_attr_type {
707 	KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
708 	KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
709 	KFD_IOCTL_SVM_ATTR_ACCESS,
710 	KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
711 	KFD_IOCTL_SVM_ATTR_NO_ACCESS,
712 	KFD_IOCTL_SVM_ATTR_SET_FLAGS,
713 	KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
714 	KFD_IOCTL_SVM_ATTR_GRANULARITY
715 };
716 
717 /**
718  * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
719  *
720  * The meaning of the @value depends on the attribute type.
721  *
722  * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
723  * @value: attribute value
724  */
725 struct kfd_ioctl_svm_attribute {
726 	__u32 type;
727 	__u32 value;
728 };
729 
730 /**
731  * kfd_ioctl_svm_args - Arguments for SVM ioctl
732  *
733  * @op specifies the operation to perform (see enum
734  * @kfd_ioctl_svm_op).  @start_addr and @size are common for all
735  * operations.
736  *
737  * A variable number of attributes can be given in @attrs.
738  * @nattr specifies the number of attributes. New attributes can be
739  * added in the future without breaking the ABI. If unknown attributes
740  * are given, the function returns -EINVAL.
741  *
742  * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
743  * range. It may overlap existing virtual address ranges. If it does,
744  * the existing ranges will be split such that the attribute changes
745  * only apply to the specified address range.
746  *
747  * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
748  * over all memory in the given range and returns the result as the
749  * attribute value. If different pages have different preferred or
750  * prefetch locations, 0xffffffff will be returned for
751  * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
 * aggregated by bitwise AND. That means a flag will be set in the
 * output if that flag is set for all pages in the range. For
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
 * aggregated by bitwise NOR. That means a flag will be set in the
 * output if that flag is clear for all pages in the range.
759  * The minimum migration granularity throughout the range will be
760  * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
761  *
762  * Querying of accessibility attributes works by initializing the
763  * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
764  * GPUID being queried. Multiple attributes can be given to allow
765  * querying multiple GPUIDs. The ioctl function overwrites the
766  * attribute type to indicate the access for the specified GPU.
767  */
768 struct kfd_ioctl_svm_args {
769 	__u64 start_addr;
770 	__u64 size;
771 	__u32 op;
772 	__u32 nattr;
773 	/* Variable length array of attributes */
774 	struct kfd_ioctl_svm_attribute attrs[];
775 };
776 
777 /**
778  * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
779  *
780  * @xnack_enabled:       [in/out] Whether to enable XNACK mode for this process
781  *
782  * @xnack_enabled indicates whether recoverable page faults should be
783  * enabled for the current process. 0 means disabled, positive means
784  * enabled, negative means leave unchanged. If enabled, virtual address
785  * translations on GFXv9 and later AMD GPUs can return XNACK and retry
786  * the access until a valid PTE is available. This is used to implement
787  * device page faults.
788  *
789  * On output, @xnack_enabled returns the (new) current mode (0 or
790  * positive). Therefore, a negative input value can be used to query
791  * the current mode without changing it.
792  *
793  * The XNACK mode fundamentally changes the way SVM managed memory works
794  * in the driver, with subtle effects on application performance and
795  * functionality.
796  *
797  * Enabling XNACK mode requires shader programs to be compiled
798  * differently. Furthermore, not all GPUs support changing the mode
799  * per-process. Therefore changing the mode is only allowed while no
 * user mode queues exist in the process. This ensures that no shader
 * code is running that may have been compiled for the wrong mode. GPUs
 * that cannot change to the requested mode will cause the mode switch
 * to fail. All GPUs used by the process must be in the
804  * same XNACK mode.
805  *
806  * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
807  * Therefore those GPUs are not considered for the XNACK mode switch.
808  *
809  * Return: 0 on success, -errno on failure
810  */
811 struct kfd_ioctl_set_xnack_mode_args {
812 	__s32 xnack_enabled;
813 };
814 
815 /* Wave launch override modes */
816 enum kfd_dbg_trap_override_mode {
817 	KFD_DBG_TRAP_OVERRIDE_OR = 0,
818 	KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
819 };
820 
821 /* Wave launch overrides */
822 enum kfd_dbg_trap_mask {
823 	KFD_DBG_TRAP_MASK_FP_INVALID = 1,
824 	KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
825 	KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
826 	KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
827 	KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
828 	KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
829 	KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
830 	KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
831 	KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
832 	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
833 	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
834 };
835 
836 /* Wave launch modes */
837 enum kfd_dbg_trap_wave_launch_mode {
838 	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
839 	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
840 	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
841 };
842 
843 /* Address watch modes */
844 enum kfd_dbg_trap_address_watch_mode {
845 	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
846 	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
847 	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
848 	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
849 };
850 
851 /* Additional wave settings */
852 enum kfd_dbg_trap_flags {
853 	KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
854 };
855 
856 /* Trap exceptions */
857 enum kfd_dbg_trap_exception_code {
858 	EC_NONE = 0,
859 	/* per queue */
860 	EC_QUEUE_WAVE_ABORT = 1,
861 	EC_QUEUE_WAVE_TRAP = 2,
862 	EC_QUEUE_WAVE_MATH_ERROR = 3,
863 	EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
864 	EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
865 	EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
866 	EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
867 	EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
868 	EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
869 	EC_QUEUE_PACKET_RESERVED = 19,
870 	EC_QUEUE_PACKET_UNSUPPORTED = 20,
871 	EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
872 	EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
873 	EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
874 	EC_QUEUE_PREEMPTION_ERROR = 30,
875 	EC_QUEUE_NEW = 31,
876 	/* per device */
877 	EC_DEVICE_QUEUE_DELETE = 32,
878 	EC_DEVICE_MEMORY_VIOLATION = 33,
879 	EC_DEVICE_RAS_ERROR = 34,
880 	EC_DEVICE_FATAL_HALT = 35,
881 	EC_DEVICE_NEW = 36,
882 	/* per process */
883 	EC_PROCESS_RUNTIME = 48,
884 	EC_PROCESS_DEVICE_REMOVE = 49,
885 	EC_MAX
886 };
887 
888 /* Mask generated by ecode in kfd_dbg_trap_exception_code */
#define KFD_EC_MASK(ecode)	(1ULL << ((ecode) - 1))
890 
891 /* Masks for exception code type checks below */
892 #define KFD_EC_MASK_QUEUE	(KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) |	\
893 				 KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |	\
894 				 KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) |	\
895 				 KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) |	\
896 				 KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) |	\
897 				 KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) |	\
898 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |	\
899 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |	\
900 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |	\
901 				 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |	\
902 				 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |	\
903 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |	\
904 				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |	\
905 				 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED)	|	\
906 				 KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR)	|	\
907 				 KFD_EC_MASK(EC_QUEUE_NEW))
908 #define KFD_EC_MASK_DEVICE	(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) |		\
909 				 KFD_EC_MASK(EC_DEVICE_RAS_ERROR) |		\
910 				 KFD_EC_MASK(EC_DEVICE_FATAL_HALT) |		\
911 				 KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) |	\
912 				 KFD_EC_MASK(EC_DEVICE_NEW))
913 #define KFD_EC_MASK_PROCESS	(KFD_EC_MASK(EC_PROCESS_RUNTIME) |	\
914 				 KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
915 
916 /* Checks for exception code types for KFD search */
917 #define KFD_DBG_EC_TYPE_IS_QUEUE(ecode)					\
918 			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
919 #define KFD_DBG_EC_TYPE_IS_DEVICE(ecode)				\
920 			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
921 #define KFD_DBG_EC_TYPE_IS_PROCESS(ecode)				\
922 			(!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
923 
924 
925 /* Runtime enable states */
926 enum kfd_dbg_runtime_state {
927 	DEBUG_RUNTIME_STATE_DISABLED = 0,
928 	DEBUG_RUNTIME_STATE_ENABLED = 1,
929 	DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
930 	DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
931 };
932 
933 /* Runtime enable status */
934 struct kfd_runtime_info {
935 	__u64 r_debug;
936 	__u32 runtime_state;
937 	__u32 ttmp_setup;
938 };
939 
940 /* Enable modes for runtime enable */
941 #define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK	1
942 #define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK	2
943 
944 /**
945  * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
946  *
947  * Coordinates debug exception signalling and debug device enablement with runtime.
948  *
 * @r_debug - pointer to user struct for sharing information between ROCr and the debugger
950  * @mode_mask - mask to set mode
951  *	KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
952  *	KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
953  * @capabilities_mask - mask to notify runtime on what KFD supports
954  *
955  * Return - 0 on SUCCESS.
956  *	  - EBUSY if runtime enable call already pending.
957  *	  - EEXIST if user queues already active prior to call.
958  *	    If process is debug enabled, runtime enable will enable debug devices and
959  *	    wait for debugger process to send runtime exception EC_PROCESS_RUNTIME
960  *	    to unblock - see kfd_ioctl_dbg_trap_args.
961  *
962  */
963 struct kfd_ioctl_runtime_enable_args {
964 	__u64 r_debug;
965 	__u32 mode_mask;
966 	__u32 capabilities_mask;
967 };
968 
969 /* Queue information */
970 struct kfd_queue_snapshot_entry {
971 	__u64 exception_status;
972 	__u64 ring_base_address;
973 	__u64 write_pointer_address;
974 	__u64 read_pointer_address;
975 	__u64 ctx_save_restore_address;
976 	__u32 queue_id;
977 	__u32 gpu_id;
978 	__u32 ring_size;
979 	__u32 queue_type;
980 	__u32 ctx_save_restore_area_size;
981 	__u32 reserved;
982 };
983 
984 /* Queue status return for suspend/resume */
985 #define KFD_DBG_QUEUE_ERROR_BIT		30
986 #define KFD_DBG_QUEUE_INVALID_BIT	31
987 #define KFD_DBG_QUEUE_ERROR_MASK	(1 << KFD_DBG_QUEUE_ERROR_BIT)
988 #define KFD_DBG_QUEUE_INVALID_MASK	(1 << KFD_DBG_QUEUE_INVALID_BIT)
989 
990 /* Context save area header information */
991 struct kfd_context_save_area_header {
992 	struct {
993 		__u32 control_stack_offset;
994 		__u32 control_stack_size;
995 		__u32 wave_state_offset;
996 		__u32 wave_state_size;
997 	} wave_state;
998 	__u32 debug_offset;
999 	__u32 debug_size;
1000 	__u64 err_payload_addr;
1001 	__u32 err_event_id;
1002 	__u32 reserved1;
1003 };
1004 
1005 /*
1006  * Debug operations
1007  *
1008  * For specifics on usage and return values, see documentation per operation
1009  * below.  Otherwise, generic error returns apply:
1010  *	- ESRCH if the process to debug does not exist.
1011  *
1012  *	- EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
1013  *		 KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
1014  *		 Also returns this error if GPU hardware scheduling is not supported.
1015  *
1016  *	- EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
1017  *		 PTRACE_ATTACHED.  KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
1018  *		 clean up of debug mode as long as process is debug enabled.
1019  *
1020  *	- EACCES if any DBG_HW_OP (debug hardware operation) is requested when
1021  *		 AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
1022  *
1023  *	- ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
1024  *
1025  *	- Other errors may be returned when a DBG_HW_OP occurs while the GPU
1026  *	  is in a fatal state.
1027  *
1028  */
1029 enum kfd_dbg_trap_operations {
1030 	KFD_IOC_DBG_TRAP_ENABLE = 0,
1031 	KFD_IOC_DBG_TRAP_DISABLE = 1,
1032 	KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
1033 	KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
1034 	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,  /* DBG_HW_OP */
1035 	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,      /* DBG_HW_OP */
1036 	KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,		/* DBG_HW_OP */
1037 	KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,		/* DBG_HW_OP */
1038 	KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,	/* DBG_HW_OP */
1039 	KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,	/* DBG_HW_OP */
1040 	KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
1041 	KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
1042 	KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
1043 	KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
1044 	KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
1045 };
1046 
1047 /**
1048  * kfd_ioctl_dbg_trap_enable_args
1049  *
1050  *     Arguments for KFD_IOC_DBG_TRAP_ENABLE.
1051  *
 *     Enables a debug session for the target process. Call @op
 *     KFD_IOC_DBG_TRAP_DISABLE in kfd_ioctl_dbg_trap_args to disable the
 *     debug session.
1054  *
1055  *     @exception_mask (IN)	- exceptions to raise to the debugger
1056  *     @rinfo_ptr      (IN)	- pointer to runtime info buffer (see kfd_runtime_info)
1057  *     @rinfo_size     (IN/OUT)	- size of runtime info buffer in bytes
 *     @dbg_fd	       (IN)	- fd on which KFD will notify the debugger
 *				  of raised exceptions set in @exception_mask
1060  *
1061  *     Generic errors apply (see kfd_dbg_trap_operations).
1062  *     Return - 0 on SUCCESS.
1063  *		Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
1064  *		Size of kfd_runtime saved by the KFD returned to @rinfo_size.
1065  *            - EBADF if KFD cannot get a reference to dbg_fd.
1066  *            - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
1067  *            - EINVAL if target process is already debug enabled.
1068  *
1069  */
1070 struct kfd_ioctl_dbg_trap_enable_args {
1071 	__u64 exception_mask;
1072 	__u64 rinfo_ptr;
1073 	__u32 rinfo_size;
1074 	__u32 dbg_fd;
1075 };
1076 
1077 /**
1078  * kfd_ioctl_dbg_trap_send_runtime_event_args
1079  *
1080  *
1081  *     Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
1082  *     Raises exceptions to runtime.
1083  *
1084  *     @exception_mask (IN) - exceptions to raise to runtime
1085  *     @gpu_id	       (IN) - target device id
1086  *     @queue_id       (IN) - target queue id
1087  *
1088  *     Generic errors apply (see kfd_dbg_trap_operations).
1089  *     Return - 0 on SUCCESS.
1090  *	      - ENODEV if gpu_id not found.
1091  *		If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
1092  *		AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
1093  *		All other exceptions are raised to runtime through err_payload_addr.
1094  *		See kfd_context_save_area_header.
1095  */
1096 struct kfd_ioctl_dbg_trap_send_runtime_event_args {
1097 	__u64 exception_mask;
1098 	__u32 gpu_id;
1099 	__u32 queue_id;
1100 };
1101 
1102 /**
1103  * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
1104  *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED
1106  *     Set new exceptions to be raised to the debugger.
1107  *
 *     @exception_mask (IN) - new exceptions to raise to the debugger
1109  *
1110  *     Generic errors apply (see kfd_dbg_trap_operations).
1111  *     Return - 0 on SUCCESS.
1112  */
1113 struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
1114 	__u64 exception_mask;
1115 };
1116 
1117 /**
1118  * kfd_ioctl_dbg_trap_set_wave_launch_override_args
1119  *
1120  *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
 *     Enable HW exceptions to raise a trap.
1122  *
1123  *     @override_mode	     (IN)     - see kfd_dbg_trap_override_mode
1124  *     @enable_mask	     (IN/OUT) - reference kfd_dbg_trap_mask.
1125  *					IN is the override modes requested to be enabled.
1126  *					OUT is referenced in Return below.
1127  *     @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
1128  *					IN is the override modes requested for support check.
1129  *					OUT is referenced in Return below.
1130  *
1131  *     Generic errors apply (see kfd_dbg_trap_operations).
1132  *     Return - 0 on SUCCESS.
1133  *		Previous enablement is returned in @enable_mask.
1134  *		Actual override support is returned in @support_request_mask.
1135  *	      - EINVAL if override mode is not supported.
1136  *	      - EACCES if trap support requested is not actually supported.
1137  *		i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
1138  *		Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
1139  */
1140 struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
1141 	__u32 override_mode;
1142 	__u32 enable_mask;
1143 	__u32 support_request_mask;
1144 	__u32 pad;
1145 };
1146 
1147 /**
1148  * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
1149  *
1150  *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
1151  *     Set wave launch mode.
1152  *
1153  *     @mode (IN) - see kfd_dbg_trap_wave_launch_mode
1154  *
1155  *     Generic errors apply (see kfd_dbg_trap_operations).
1156  *     Return - 0 on SUCCESS.
1157  */
1158 struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
1159 	__u32 launch_mode;
1160 	__u32 pad;
1161 };
1162 
1163 /**
 * kfd_ioctl_dbg_trap_suspend_queues_args
1165  *
1166  *     Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
1167  *     Suspend queues.
1168  *
1169  *     @exception_mask	(IN) - raised exceptions to clear
1170  *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
1171  *			       to suspend
1172  *     @num_queues	(IN) - number of queues to suspend in @queue_array_ptr
 *     @grace_period	(IN) - wave time allowance before preemption,
 *			       in units of 1K GPU clock cycles
1175  *
1176  *     Generic errors apply (see kfd_dbg_trap_operations).
1177  *     Destruction of a suspended queue is blocked until the queue is
1178  *     resumed.  This allows the debugger to access queue information and
 *     its context save area without running into a race condition on
1180  *     queue destruction.
1181  *     Automatically copies per queue context save area header information
1182  *     into the save area base
1183  *     (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
1184  *
1185  *     Return - Number of queues suspended on SUCCESS.
1186  *	.	KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
1187  *		for each queue id in @queue_array_ptr array reports unsuccessful
1188  *		suspend reason.
1189  *		KFD_DBG_QUEUE_ERROR_MASK = HW failure.
1190  *		KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
1191  *		is being destroyed.
1192  */
1193 struct kfd_ioctl_dbg_trap_suspend_queues_args {
1194 	__u64 exception_mask;
1195 	__u64 queue_array_ptr;
1196 	__u32 num_queues;
1197 	__u32 grace_period;
1198 };
1199 
1200 /**
1201  * kfd_ioctl_dbg_trap_resume_queues_args
1202  *
1203  *     Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
1204  *     Resume queues.
1205  *
1206  *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
1207  *			       to resume
1208  *     @num_queues	(IN) - number of queues to resume in @queue_array_ptr
1209  *
1210  *     Generic errors apply (see kfd_dbg_trap_operations).
1211  *     Return - Number of queues resumed on SUCCESS.
1212  *		KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK mask
1213  *		for each queue id in @queue_array_ptr array reports unsuccessful
1214  *		resume reason.
1215  *		KFD_DBG_QUEUE_ERROR_MASK = HW failure.
1216  *		KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
1217  */
1218 struct kfd_ioctl_dbg_trap_resume_queues_args {
1219 	__u64 queue_array_ptr;
1220 	__u32 num_queues;
1221 	__u32 pad;
1222 };
1223 
1224 /**
1225  * kfd_ioctl_dbg_trap_set_node_address_watch_args
1226  *
1227  *     Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
1228  *     Sets address watch for device.
1229  *
1230  *     @address	(IN)  - watch address to set
1231  *     @mode    (IN)  - see kfd_dbg_trap_address_watch_mode
1232  *     @mask    (IN)  - watch address mask
1233  *     @gpu_id  (IN)  - target gpu to set watch point
1234  *     @id      (OUT) - watch id allocated
1235  *
1236  *     Generic errors apply (see kfd_dbg_trap_operations).
1237  *     Return - 0 on SUCCESS.
1238  *		Allocated watch ID returned to @id.
1239  *	      - ENODEV if gpu_id not found.
1240  *	      - ENOMEM if watch IDs can be allocated
1241  */
1242 struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
1243 	__u64 address;
1244 	__u32 mode;
1245 	__u32 mask;
1246 	__u32 gpu_id;
1247 	__u32 id;
1248 };
1249 
1250 /**
1251  * kfd_ioctl_dbg_trap_clear_node_address_watch_args
1252  *
1253  *     Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
1254  *     Clear address watch for device.
1255  *
1256  *     @gpu_id  (IN)  - target device to clear watch point
1257  *     @id      (IN) - allocated watch id to clear
1258  *
1259  *     Generic errors apply (see kfd_dbg_trap_operations).
1260  *     Return - 0 on SUCCESS.
1261  *	      - ENODEV if gpu_id not found.
1262  *	      - EINVAL if watch ID has not been allocated.
1263  */
1264 struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
1265 	__u32 gpu_id;
1266 	__u32 id;
1267 };
1268 
1269 /**
1270  * kfd_ioctl_dbg_trap_set_flags_args
1271  *
1272  *     Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
1273  *     Sets flags for wave behaviour.
1274  *
1275  *     @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
1276  *
1277  *     Generic errors apply (see kfd_dbg_trap_operations).
1278  *     Return - 0 on SUCCESS.
1279  *	      - EACCESS if any debug device does not allow flag options.
1280  */
1281 struct kfd_ioctl_dbg_trap_set_flags_args {
1282 	__u32 flags;
1283 	__u32 pad;
1284 };
1285 
1286 /**
1287  * kfd_ioctl_dbg_trap_query_debug_event_args
1288  *
1289  *     Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
1290  *
1291  *     Find one or more raised exceptions. This function can return multiple
1292  *     exceptions from a single queue or a single device with one call. To find
1293  *     all raised exceptions, this function must be called repeatedly until it
1294  *     returns -EAGAIN. Returned exceptions can optionally be cleared by
1295  *     setting the corresponding bit in the @exception_mask input parameter.
1296  *     However, clearing an exception prevents retrieving further information
1297  *     about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
1298  *
1299  *     @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT)
1300  *     @gpu_id	       (OUT)    - gpu id of exceptions raised
1301  *     @queue_id       (OUT)    - queue id of exceptions raised
1302  *
1303  *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 if a raised exception is found.
 *              Raised exceptions found are returned in @exception_mask
 *              with the reported source id returned in @gpu_id or @queue_id.
1307  *            - EAGAIN if no raised exception has been found
1308  */
1309 struct kfd_ioctl_dbg_trap_query_debug_event_args {
1310 	__u64 exception_mask;
1311 	__u32 gpu_id;
1312 	__u32 queue_id;
1313 };
1314 
1315 /**
1316  * kfd_ioctl_dbg_trap_query_exception_info_args
1317  *
 *     Arguments for KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
1319  *     Get additional info on raised exception.
1320  *
1321  *     @info_ptr	(IN)	 - pointer to exception info buffer to copy to
1322  *     @info_size	(IN/OUT) - exception info buffer size (bytes)
1323  *     @source_id	(IN)     - target gpu or queue id
1324  *     @exception_code	(IN)     - target exception
1325  *     @clear_exception	(IN)     - clear raised @exception_code exception
1326  *				   (0 = false, 1 = true)
1327  *
1328  *     Generic errors apply (see kfd_dbg_trap_operations).
1329  *     Return - 0 on SUCCESS.
1330  *              If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
1331  *		bytes of memory exception data to @info_ptr.
1332  *              If @exception_code is EC_PROCESS_RUNTIME, copy saved
1333  *              kfd_runtime_info to @info_ptr.
1334  *              Actual required @info_ptr size (bytes) is returned in @info_size.
1335  */
1336 struct kfd_ioctl_dbg_trap_query_exception_info_args {
1337 	__u64 info_ptr;
1338 	__u32 info_size;
1339 	__u32 source_id;
1340 	__u32 exception_code;
1341 	__u32 clear_exception;
1342 };
1343 
1344 /**
1345  * kfd_ioctl_dbg_trap_get_queue_snapshot_args
1346  *
 *     Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
1348  *     Get queue information.
1349  *
1350  *     @exception_mask	 (IN)	  - exceptions raised to clear
1351  *     @snapshot_buf_ptr (IN)	  - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
1352  *     @num_queues	 (IN/OUT) - number of queue snapshot entries
1353  *         The debugger specifies the size of the array allocated in @num_queues.
1354  *         KFD returns the number of queues that actually existed. If this is
1355  *         larger than the size specified by the debugger, KFD will not overflow
1356  *         the array allocated by the debugger.
1357  *
1358  *     @entry_size	 (IN/OUT) - size per entry in bytes
1359  *         The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
1360  *         @entry_size. KFD returns the number of bytes actually populated per
 *         entry. The debugger should use the KFD_IOCTL_MINOR_VERSION to
 *         determine which fields in struct kfd_queue_snapshot_entry are
 *         valid. This allows growing the ABI in a backwards compatible
 *         manner.
 *         Note that entry_size(IN) should still be used to stride the
 *         snapshot buffer in the event that it is larger than the actual
 *         struct kfd_queue_snapshot_entry.
1366  *
1367  *     Generic errors apply (see kfd_dbg_trap_operations).
1368  *     Return - 0 on SUCCESS.
1369  *              Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
1370  *              into @snapshot_buf_ptr if @num_queues(IN) > 0.
1371  *              Otherwise return @num_queues(OUT) queue snapshot entries that exist.
1372  */
1373 struct kfd_ioctl_dbg_trap_queue_snapshot_args {
1374 	__u64 exception_mask;
1375 	__u64 snapshot_buf_ptr;
1376 	__u32 num_queues;
1377 	__u32 entry_size;
1378 };
1379 
1380 /**
1381  * kfd_ioctl_dbg_trap_get_device_snapshot_args
1382  *
1383  *     Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
1384  *     Get device information.
1385  *
1386  *     @exception_mask	 (IN)	  - exceptions raised to clear
1387  *     @snapshot_buf_ptr (IN)	  - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
1388  *     @num_devices	 (IN/OUT) - number of debug devices to snapshot
1389  *         The debugger specifies the size of the array allocated in @num_devices.
1390  *         KFD returns the number of devices that actually existed. If this is
1391  *         larger than the size specified by the debugger, KFD will not overflow
1392  *         the array allocated by the debugger.
1393  *
1394  *     @entry_size	 (IN/OUT) - size per entry in bytes
1395  *         The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
1396  *         @entry_size. KFD returns the number of bytes actually populated. The
 *         debugger should use KFD_IOCTL_MINOR_VERSION to determine which
 *         fields in struct kfd_dbg_device_info_entry are valid. This allows
 *         growing the ABI in a backwards compatible manner.
 *         Note that entry_size(IN) should still be used to stride the
 *         snapshot buffer in the event that it is larger than the actual
 *         struct kfd_dbg_device_info_entry.
1402  *
1403  *     Generic errors apply (see kfd_dbg_trap_operations).
1404  *     Return - 0 on SUCCESS.
1405  *              Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
1406  *              into @snapshot_buf_ptr if @num_devices(IN) > 0.
 *              Otherwise return @num_devices(OUT) device snapshot entries that exist.
1408  */
1409 struct kfd_ioctl_dbg_trap_device_snapshot_args {
1410 	__u64 exception_mask;
1411 	__u64 snapshot_buf_ptr;
1412 	__u32 num_devices;
1413 	__u32 entry_size;
1414 };
1415 
1416 /**
1417  * kfd_ioctl_dbg_trap_args
1418  *
1419  * Arguments to debug target process.
1420  *
1421  *     @pid - target process to debug
1422  *     @op  - debug operation (see kfd_dbg_trap_operations)
1423  *
1424  *     @op determines which union struct args to use.
 *     Refer to the documentation above for each kfd_ioctl_dbg_trap_*_args struct.
1426  */
1427 struct kfd_ioctl_dbg_trap_args {
1428 	__u32 pid;
1429 	__u32 op;
1430 
1431 	union {
1432 		struct kfd_ioctl_dbg_trap_enable_args enable;
1433 		struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
1434 		struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
1435 		struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
1436 		struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
1437 		struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
1438 		struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
1439 		struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
1440 		struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
1441 		struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
1442 		struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
1443 		struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
1444 		struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
1445 		struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
1446 	};
1447 };
1448 
1449 #define AMDKFD_IOCTL_BASE 'K'
1450 #define AMDKFD_IO(nr)			_IO(AMDKFD_IOCTL_BASE, nr)
1451 #define AMDKFD_IOR(nr, type)		_IOR(AMDKFD_IOCTL_BASE, nr, type)
1452 #define AMDKFD_IOW(nr, type)		_IOW(AMDKFD_IOCTL_BASE, nr, type)
1453 #define AMDKFD_IOWR(nr, type)		_IOWR(AMDKFD_IOCTL_BASE, nr, type)
1454 
1455 #define AMDKFD_IOC_GET_VERSION			\
1456 		AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
1457 
1458 #define AMDKFD_IOC_CREATE_QUEUE			\
1459 		AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
1460 
1461 #define AMDKFD_IOC_DESTROY_QUEUE		\
1462 		AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
1463 
1464 #define AMDKFD_IOC_SET_MEMORY_POLICY		\
1465 		AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
1466 
1467 #define AMDKFD_IOC_GET_CLOCK_COUNTERS		\
1468 		AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
1469 
1470 #define AMDKFD_IOC_GET_PROCESS_APERTURES	\
1471 		AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
1472 
1473 #define AMDKFD_IOC_UPDATE_QUEUE			\
1474 		AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
1475 
1476 #define AMDKFD_IOC_CREATE_EVENT			\
1477 		AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)
1478 
1479 #define AMDKFD_IOC_DESTROY_EVENT		\
1480 		AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)
1481 
1482 #define AMDKFD_IOC_SET_EVENT			\
1483 		AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)
1484 
1485 #define AMDKFD_IOC_RESET_EVENT			\
1486 		AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)
1487 
1488 #define AMDKFD_IOC_WAIT_EVENTS			\
1489 		AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)
1490 
1491 #define AMDKFD_IOC_DBG_REGISTER_DEPRECATED	\
1492 		AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)
1493 
1494 #define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED	\
1495 		AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)
1496 
1497 #define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED	\
1498 		AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)
1499 
1500 #define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED	\
1501 		AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)
1502 
1503 #define AMDKFD_IOC_SET_SCRATCH_BACKING_VA	\
1504 		AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)
1505 
1506 #define AMDKFD_IOC_GET_TILE_CONFIG                                      \
1507 		AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)
1508 
1509 #define AMDKFD_IOC_SET_TRAP_HANDLER		\
1510 		AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
1511 
1512 #define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW	\
1513 		AMDKFD_IOWR(0x14,		\
1514 			struct kfd_ioctl_get_process_apertures_new_args)
1515 
1516 #define AMDKFD_IOC_ACQUIRE_VM			\
1517 		AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)
1518 
1519 #define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU		\
1520 		AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)
1521 
1522 #define AMDKFD_IOC_FREE_MEMORY_OF_GPU		\
1523 		AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)
1524 
1525 #define AMDKFD_IOC_MAP_MEMORY_TO_GPU		\
1526 		AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)
1527 
1528 #define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU	\
1529 		AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
1530 
1531 #define AMDKFD_IOC_SET_CU_MASK		\
1532 		AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
1533 
1534 #define AMDKFD_IOC_GET_QUEUE_WAVE_STATE		\
1535 		AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
1536 
1537 #define AMDKFD_IOC_GET_DMABUF_INFO		\
1538 		AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
1539 
1540 #define AMDKFD_IOC_IMPORT_DMABUF		\
1541 		AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
1542 
1543 #define AMDKFD_IOC_ALLOC_QUEUE_GWS		\
1544 		AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)
1545 
1546 #define AMDKFD_IOC_SMI_EVENTS			\
1547 		AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)
1548 
1549 #define AMDKFD_IOC_SVM	AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)
1550 
1551 #define AMDKFD_IOC_SET_XNACK_MODE		\
1552 		AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)
1553 
1554 #define AMDKFD_IOC_CRIU_OP			\
1555 		AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
1556 
1557 #define AMDKFD_IOC_AVAILABLE_MEMORY		\
1558 		AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
1559 
1560 #define AMDKFD_IOC_EXPORT_DMABUF		\
1561 		AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)
1562 
1563 #define AMDKFD_IOC_RUNTIME_ENABLE		\
1564 		AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)
1565 
1566 #define AMDKFD_IOC_DBG_TRAP			\
1567 		AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
1568 
1569 #define AMDKFD_COMMAND_START		0x01
1570 #define AMDKFD_COMMAND_END		0x27
1571 
1572 #endif
1573