Lines Matching +full:wait +full:- +full:monitoring +full:- +full:ns

1 /* SPDX-License-Identifier: MIT */
17 * subject to backwards-compatibility constraints.
28 * The diagram below represents a high-level simplification of a discrete
68 * Xe related uAPI such as uevents and PMU (Platform Monitoring Unit) related
72 * - &DRM_IOCTL_XE_DEVICE_QUERY
73 * - &DRM_IOCTL_XE_GEM_CREATE
74 * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
75 * - &DRM_IOCTL_XE_VM_CREATE
76 * - &DRM_IOCTL_XE_VM_DESTROY
77 * - &DRM_IOCTL_XE_VM_BIND
78 * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
79 * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
80 * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
81 * - &DRM_IOCTL_XE_EXEC
82 * - &DRM_IOCTL_XE_WAIT_USER_FENCE
103 /* Must be kept compact -- no holes */
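All of the IOCTLs listed above are issued with ioctl(2) against a DRM file descriptor for the Xe device. A minimal sketch of the plumbing the later examples assume: the render-node path is an example only (real code should enumerate /dev/dri or use libdrm's helpers), the retry loop mirrors what libdrm's drmIoctl() does, and drm/xe_drm.h is assumed to be an installed kernel uAPI header. For brevity the later sketches call ioctl() directly.

.. code-block:: C

    #include <errno.h>
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>     /* kernel uAPI header, assumed installed */

    /* Retry transient failures, as libdrm's drmIoctl() does. */
    static int xe_ioctl(int fd, unsigned long request, void *arg)
    {
            int ret;

            do {
                    ret = ioctl(fd, request, arg);
            } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

            return ret;
    }

    /* Example only: a real application enumerates /dev/dri instead. */
    static int xe_open(void)
    {
            return open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
    }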
137 * .. code-block:: C
158 * struct drm_xe_user_extension - Base class for defining a chain of extensions
188 * struct drm_xe_ext_set_property - Generic set property extension
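A sketch of how a drm_xe_ext_set_property node is chained through @extensions, here lowering the priority of a new exec queue at creation time. The DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY / DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY names and the value 0 meaning "lowest" are assumptions about this header revision; the upper bound is queryable via DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int exec_queue_create_low_prio(int fd, uint32_t vm_id,
                                          struct drm_xe_engine_class_instance *eci,
                                          uint32_t *exec_queue_id)
    {
            struct drm_xe_ext_set_property prio = {
                    .base.next_extension = 0,       /* end of the chain */
                    .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
                    .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
                    .value = 0,                     /* assumed: lowest priority */
            };
            struct drm_xe_exec_queue_create create = {
                    .extensions = (uintptr_t)&prio, /* head of the chain */
                    .width = 1,
                    .num_placements = 1,
                    .vm_id = vm_id,
                    .instances = (uintptr_t)eci,
            };

            if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
                    return -1;

            *exec_queue_id = create.exec_queue_id;
            return 0;
    }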
211 * struct drm_xe_engine_class_instance - instance of an engine class
218 * - %DRM_XE_ENGINE_CLASS_RENDER
219 * - %DRM_XE_ENGINE_CLASS_COPY
220 * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
221 * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
222 * - %DRM_XE_ENGINE_CLASS_COMPUTE
223 * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel only classes (not actual
245 * struct drm_xe_engine - describe hardware engine
256 * struct drm_xe_query_engines - describe engines
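For reference, a filled-in drm_xe_engine_class_instance addressing the first render engine on GT 0; the same (class, instance, gt) triplet is what the engines query returns and what exec queue creation consumes. DRM_XE_ENGINE_CLASS_VM_BIND, as noted above, is a kernel-defined class rather than a hardware engine.

.. code-block:: C

    #include <drm/xe_drm.h>

    static const struct drm_xe_engine_class_instance render0 = {
            .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
            .engine_instance = 0,
            .gt_id = 0,
    };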
272 * enum drm_xe_memory_class - Supported memory classes.
286 * struct drm_xe_mem_region - Describes some region as known to
303 * @min_page_size: Min page-size in bytes for this region.
312 * Affected IOCTLS will return %-EINVAL if alignment restrictions are
362 * struct drm_xe_query_mem_regions - describe memory regions
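A sketch that walks an already-fetched DRM_XE_DEVICE_QUERY_MEM_REGIONS blob (see the two-call query example further down) and reports the VRAM regions. Field names follow this header; the %-EINVAL alignment note above is why @min_page_size matters when sizing and binding buffers.

.. code-block:: C

    #include <inttypes.h>
    #include <stdio.h>
    #include <drm/xe_drm.h>

    static void print_vram_regions(const struct drm_xe_query_mem_regions *regions)
    {
            for (uint32_t i = 0; i < regions->num_mem_regions; i++) {
                    const struct drm_xe_mem_region *r = &regions->mem_regions[i];

                    if (r->mem_class != DRM_XE_MEM_REGION_CLASS_VRAM)
                            continue;

                    printf("VRAM instance %u: min page size %u, %" PRIu64 "/%" PRIu64 " bytes used\n",
                           (unsigned)r->instance, (unsigned)r->min_page_size,
                           (uint64_t)r->used, (uint64_t)r->total_size);
            }
    }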
378 * struct drm_xe_query_config - describe the device configuration
385 * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
387 * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
390 * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
392 * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
394 * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
395 * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
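The config indices above land in the @info array of drm_xe_query_config; a sketch decoding the entries most programs need, again assuming the blob was fetched with the two-call pattern shown later:

.. code-block:: C

    #include <inttypes.h>
    #include <stdio.h>
    #include <drm/xe_drm.h>

    static void print_config(const struct drm_xe_query_config *config)
    {
            unsigned int device_id =
                    config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
            int has_vram = !!(config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
                              DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM);

            printf("device id 0x%04x, VA bits %" PRIu64 ", min alignment %" PRIu64 ", VRAM %s\n",
                   device_id,
                   (uint64_t)config->info[DRM_XE_QUERY_CONFIG_VA_BITS],
                   (uint64_t)config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT],
                   has_vram ? "yes" : "no");
    }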
416 * struct drm_xe_gt - describe an individual GT.
424 * - %DRM_XE_QUERY_GT_TYPE_MAIN
425 * - %DRM_XE_QUERY_GT_TYPE_MEDIA
467 * struct drm_xe_query_gt_list - A list with GT description items.
483 * struct drm_xe_query_topology_mask - describe the topology mask of a GT
493 * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
498 * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
503 * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
522 /** @mask: little-endian mask of @num_bytes */
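The topology query returns several of these variable-length records back to back, one per (GT, type) pair, with the total byte count reported by the query. A sketch that walks them, assuming the records are tightly packed as the kernel writes them:

.. code-block:: C

    #include <stdint.h>
    #include <stdio.h>
    #include <drm/xe_drm.h>

    static void print_topology(const void *data, size_t size)
    {
            size_t offset = 0;

            while (offset + sizeof(struct drm_xe_query_topology_mask) <= size) {
                    const struct drm_xe_query_topology_mask *topo =
                            (const void *)((const char *)data + offset);
                    unsigned int bits = 0;

                    for (uint32_t i = 0; i < topo->num_bytes; i++)
                            bits += __builtin_popcount(topo->mask[i]);

                    printf("GT%u type %u: %u enabled (mask is %u little-endian bytes)\n",
                           (unsigned)topo->gt_id, (unsigned)topo->type,
                           bits, (unsigned)topo->num_bytes);

                    offset += sizeof(*topo) + topo->num_bytes;
            }
    }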
527 * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
564 * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
571 * @cpu_delta: Time delta in ns captured around reading the lower dword
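A sketch of how the sampled values are typically combined: the engine cycle counter (masked to @width bits) is scaled by the GT's @reference_clock from the GT list query to get a GPU timestamp in ns, which can then be paired with @cpu_timestamp (plus roughly half of @cpu_delta) on the CPU side. The 128-bit intermediate is a GCC/Clang extension used only to avoid overflow; this helper is an illustration, not part of the uAPI.

.. code-block:: C

    #include <stdint.h>
    #include <drm/xe_drm.h>

    static uint64_t engine_cycles_to_ns(const struct drm_xe_query_engine_cycles *ec,
                                        uint32_t reference_clock /* Hz, from drm_xe_gt */)
    {
            uint64_t cycles = ec->engine_cycles;

            if (ec->width < 64)
                    cycles &= (1ull << ec->width) - 1;      /* counter width in bits */

            return (uint64_t)((unsigned __int128)cycles * 1000000000ull / reference_clock);
    }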
578 * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
586 * - %DRM_XE_DEVICE_QUERY_ENGINES
587 * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
588 * - %DRM_XE_DEVICE_QUERY_CONFIG
589 * - %DRM_XE_DEVICE_QUERY_GT_LIST
590 * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
594 * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
595 * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
601 * IOCTL call returns -EINVAL.
606 * .. code-block:: C
619 * for (int i = 0; i < engines->num_engines; i++) {
621 * engines->engines[i].instance.engine_class ==
623 * engines->engines[i].instance.engine_class ==
625 * engines->engines[i].instance.engine_class ==
627 * engines->engines[i].instance.engine_class ==
629 * engines->engines[i].instance.engine_class ==
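The fragments above come from the header's own example; spelled out in full, the two-call pattern looks like this. The first ioctl with @size == 0 only reports the buffer size needed, the second fills the caller-allocated buffer, and an unknown @query value fails with -EINVAL as noted above.

.. code-block:: C

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static struct drm_xe_query_engines *query_engines(int fd)
    {
            struct drm_xe_device_query query = {
                    .query = DRM_XE_DEVICE_QUERY_ENGINES,
            };
            struct drm_xe_query_engines *engines;

            if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
                    return NULL;            /* first call: size only */

            engines = malloc(query.size);
            if (!engines)
                    return NULL;

            query.data = (uintptr_t)engines;
            if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
                    free(engines);
                    return NULL;            /* second call: fill buffer */
            }

            for (uint32_t i = 0; i < engines->num_engines; i++)
                    printf("engine %u: class %u, instance %u, GT %u\n", i,
                           (unsigned)engines->engines[i].instance.engine_class,
                           (unsigned)engines->engines[i].instance.engine_instance,
                           (unsigned)engines->engines[i].instance.gt_id);

            return engines;         /* caller frees */
    }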
660 * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
664 * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
665 * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
666 * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
669 * for small-bar systems (on full-bar systems this gets turned into a
676 * Note2: For clear-color CCS surfaces the kernel needs to read the
677 * clear-color value stored in the buffer, and on discrete platforms we
680 * small-bar systems.
683 * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
686 * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
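A sketch of a plain system-memory allocation. The placement mask is assumed to be built from memory-region @instance numbers (bit 0 here for the SYSMEM region), @size is assumed to already respect that region's @min_page_size, and write-back CPU caching is only chosen because no VRAM placement is requested.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int gem_create_sysmem(int fd, uint64_t size, uint32_t *handle)
    {
            struct drm_xe_gem_create create = {
                    .size = size,                   /* multiple of min_page_size */
                    .placement = 1u << 0,           /* assumed SYSMEM instance 0 */
                    .cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
            };

            if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create))
                    return -1;

            *handle = create.handle;
            return 0;
    }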
750 * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
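This IOCTL only produces a fake offset; the actual CPU mapping is a normal mmap(2) of the DRM fd at that offset. A sketch:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/xe_drm.h>

    static void *gem_mmap(int fd, uint32_t handle, uint64_t size)
    {
            struct drm_xe_gem_mmap_offset mmo = {
                    .handle = handle,
            };
            void *ptr;

            if (ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo))
                    return NULL;

            ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                       fd, mmo.offset);
            return ptr == MAP_FAILED ? NULL : ptr;
    }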
770 * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
773 * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
774 * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running, VM accepts
779 * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
780 * LR VMs can be created in recoverable page-fault mode using
783 * different per-VM overcommit semantics that are enabled by
786 * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
788 * demand when accessed, and also allows per-VM overcommit of memory.
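A sketch of creating a long-running (LR) VM as described above, plus the matching destroy call; dropping the flag gives an ordinary dma-fence VM.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int vm_create_lr(int fd, uint32_t *vm_id)
    {
            struct drm_xe_vm_create create = {
                    .flags = DRM_XE_VM_CREATE_FLAG_LR_MODE,
            };

            if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
                    return -1;

            *vm_id = create.vm_id;
            return 0;
    }

    static void vm_destroy(int fd, uint32_t vm_id)
    {
            struct drm_xe_vm_destroy destroy = { .vm_id = vm_id };

            ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &destroy);
    }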
810 * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
824 * struct drm_xe_vm_bind_op - run bind operations
827 * - %DRM_XE_VM_BIND_OP_MAP
828 * - %DRM_XE_VM_BIND_OP_UNMAP
829 * - %DRM_XE_VM_BIND_OP_MAP_USERPTR
830 * - %DRM_XE_VM_BIND_OP_UNMAP_ALL
831 * - %DRM_XE_VM_BIND_OP_PREFETCH
834 * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
861 * there is a mismatch (see note below for pre-MTL platforms).
863 * Note: On pre-MTL platforms there is only a caching mode and no
865 * shared-LLC (or is dgpu) so all GT memory accesses are coherent with
870 * levels into the following coherency buckets on all pre-MTL platforms:
872 * ppGTT UC -> COH_NONE
873 * ppGTT WC -> COH_NONE
874 * ppGTT WT -> COH_NONE
875 * ppGTT WB -> COH_AT_LEAST_1WAY
878 * such platforms (or perhaps in general for dma-buf if shared with
881 * have a shared-LLC. On MTL+ this completely changes and the HW
885 * Note: For userptr and externally imported dma-buf the kernel expects
946 * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
953 * .. code-block:: C
1006 /** @num_syncs: number of syncs to wait on */
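A sketch of a single MAP bind: the whole BO is mapped at @addr, and a syncobj is signalled when the asynchronous bind completes (this is one of the syncs referred to by @num_syncs). @pat_index is taken as a parameter because valid PAT indexes are platform-specific, and @addr/@range must respect the device's minimum alignment.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int vm_bind_map(int fd, uint32_t vm_id, uint32_t bo_handle,
                           uint64_t bo_size, uint64_t gpu_addr,
                           uint16_t pat_index, uint32_t out_syncobj)
    {
            struct drm_xe_sync sync = {
                    .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
                    .flags = DRM_XE_SYNC_FLAG_SIGNAL,
                    .handle = out_syncobj,
            };
            struct drm_xe_vm_bind bind = {
                    .vm_id = vm_id,
                    .num_binds = 1,                 /* >1 uses .vector_of_binds */
                    .bind = {
                            .obj = bo_handle,
                            .obj_offset = 0,
                            .range = bo_size,
                            .addr = gpu_addr,
                            .op = DRM_XE_VM_BIND_OP_MAP,
                            .pat_index = pat_index, /* platform-specific */
                    },
                    .num_syncs = 1,
                    .syncs = (uintptr_t)&sync,
            };

            return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
    }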
1017 * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
1023 * .. code-block:: C
1062 * @instances: user pointer to a 2-d array of struct
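A sketch of creating a queue with one logical engine (@width == 1) but several placements: @instances points at a width x num_placements array of engine instances of the same class, and the kernel is then free to place the queue on any of them.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int exec_queue_create_balanced(int fd, uint32_t vm_id,
                                          struct drm_xe_engine_class_instance *eci,
                                          uint16_t num_placements,
                                          uint32_t *exec_queue_id)
    {
            struct drm_xe_exec_queue_create create = {
                    .width = 1,
                    .num_placements = num_placements,
                    .vm_id = vm_id,
                    .instances = (uintptr_t)eci,    /* 1 x num_placements entries */
            };

            if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
                    return -1;

            *exec_queue_id = create.exec_queue_id;
            return 0;
    }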
1075 * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
1089 * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
1092 * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
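Two small sketches: querying whether a queue has been banned (for example after repeated hangs) and tearing a queue down.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int exec_queue_banned(int fd, uint32_t exec_queue_id)
    {
            struct drm_xe_exec_queue_get_property prop = {
                    .exec_queue_id = exec_queue_id,
                    .property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
            };

            if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &prop))
                    return -1;

            return prop.value != 0;         /* 1 if the queue is banned */
    }

    static void exec_queue_destroy(int fd, uint32_t exec_queue_id)
    {
            struct drm_xe_exec_queue_destroy destroy = {
                    .exec_queue_id = exec_queue_id,
            };

            ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
    }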
1113 * struct drm_xe_sync - sync object
1116 * - %DRM_XE_SYNC_TYPE_SYNCOBJ
1117 * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
1118 * - %DRM_XE_SYNC_TYPE_USER_FENCE
1121 * - %DRM_XE_SYNC_FLAG_SIGNAL
1125 * .. code-block:: C
1137 * struct drm_syncobj_wait wait = {
1145 * ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
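The drm_syncobj_wait fragment above is the consumer side. A sketch of the producer side creates a binary syncobj, wraps it in a drm_xe_sync marked with DRM_XE_SYNC_FLAG_SIGNAL, hands that to a bind or exec, and later waits on it; the syncobj IOCTLs come from drm.h, not this header.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>
    #include <drm/xe_drm.h>

    static int make_out_fence(int fd, struct drm_xe_sync *sync)
    {
            struct drm_syncobj_create create = { 0 };

            if (ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create))
                    return -1;

            *sync = (struct drm_xe_sync) {
                    .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
                    .flags = DRM_XE_SYNC_FLAG_SIGNAL,       /* signal, don't wait */
                    .handle = create.handle,
            };
            return 0;
    }

    static int wait_out_fence(int fd, const struct drm_xe_sync *sync,
                              int64_t abs_timeout_ns)
    {
            struct drm_syncobj_wait wait = {
                    .handles = (uintptr_t)&sync->handle,
                    .timeout_nsec = abs_timeout_ns,         /* absolute, CLOCK_MONOTONIC */
                    .count_handles = 1,
            };

            return ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
    }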
1187 * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
1194 * .. code-block:: C
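The header's own submission example is not part of this excerpt; a minimal sketch of a single-batch submission, assuming @address points at a batch buffer already bound in the queue's VM and reusing the out-fence helper sketched above:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int exec_one(int fd, uint32_t exec_queue_id, uint64_t batch_addr,
                        struct drm_xe_sync *out_sync)
    {
            struct drm_xe_exec exec = {
                    .exec_queue_id = exec_queue_id,
                    .num_batch_buffer = 1,          /* one address per logical engine */
                    .address = batch_addr,
                    .num_syncs = 1,
                    .syncs = (uintptr_t)out_sync,
            };

            return ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
    }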
1239 * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
1241 * Wait on a user fence. XE will wake up on every HW engine interrupt in the
1249 * - %DRM_XE_UFENCE_WAIT_OP_EQ
1250 * - %DRM_XE_UFENCE_WAIT_OP_NEQ
1251 * - %DRM_XE_UFENCE_WAIT_OP_GT
1252 * - %DRM_XE_UFENCE_WAIT_OP_GTE
1253 * - %DRM_XE_UFENCE_WAIT_OP_LT
1254 * - %DRM_XE_UFENCE_WAIT_OP_LTE
1257 * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
1258 * - %DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP
1261 * - 0xffu for u8
1262 * - 0xffffu for u16
1263 * - 0xffffffffu for u32
1264 * - 0xffffffffffffffffu for u64
1271 * @addr: user pointer address to wait on, must be qword aligned
1281 /** @op: wait operation (type of comparison) */
1285 /** @flags: wait flags */
1298 * @timeout: how long to wait before bailing, value in nanoseconds.
1300 * it contains timeout expressed in nanoseconds to wait (fence will
1302 * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout) wait
1304 * Passing a negative timeout leads to a never-ending wait.
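A sketch of waiting for a 64-bit user fence to become equal to a value, with a relative timeout. The fence must live in memory the GPU can write (a mapped BO or userptr) and be qword aligned. DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP is used here on the assumption that it removes the need to name specific engine instances; depending on the header revision the struct may also want an engine list or exec queue id filled in.

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    static int wait_user_fence_eq(int fd, const uint64_t *fence, uint64_t value,
                                  int64_t timeout_ns)
    {
            struct drm_xe_wait_user_fence wait = {
                    .addr = (uintptr_t)fence,       /* qword aligned */
                    .op = DRM_XE_UFENCE_WAIT_OP_EQ,
                    .flags = DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP,       /* assumption, see above */
                    .value = value,
                    .mask = 0xffffffffffffffffull,  /* compare all 64 bits */
                    .timeout = timeout_ns,          /* relative; negative waits forever */
            };

            return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
    }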