/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_EXEC_QUEUE_TYPES_H_
#define _XE_EXEC_QUEUE_TYPES_H_

#include <linux/kref.h>

#include <drm/gpu_scheduler.h>

#include "xe_gpu_scheduler_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_lrc_types.h"

struct xe_execlist_exec_queue;
struct xe_gt;
struct xe_guc_exec_queue;
struct xe_hw_engine;
struct xe_vm;

enum xe_exec_queue_priority {
	XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
	XE_EXEC_QUEUE_PRIORITY_LOW = 0,
	XE_EXEC_QUEUE_PRIORITY_NORMAL,
	XE_EXEC_QUEUE_PRIORITY_HIGH,
	XE_EXEC_QUEUE_PRIORITY_KERNEL,

	XE_EXEC_QUEUE_PRIORITY_COUNT
};
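/*
 * Illustrative sketch, not part of the driver: the kind of check a queue
 * creation path might apply to a priority requested from userspace. It
 * assumes, per the comments above, that UNSET is internal to the execlist
 * backend and KERNEL is reserved for kernel-owned queues, so only
 * LOW..HIGH would be acceptable from a user request. The helper name is
 * hypothetical.
 */
static inline bool example_user_priority_is_valid(int prio)
{
	return prio >= XE_EXEC_QUEUE_PRIORITY_LOW &&
	       prio <= XE_EXEC_QUEUE_PRIORITY_HIGH;
}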
/**
 * struct xe_exec_queue - Execution queue
 *
 * Contains all state necessary for submissions. Can either be a user object or
 * a kernel object.
 */
struct xe_exec_queue {
	/** @gt: graphics tile this exec queue can submit to */
	struct xe_gt *gt;
	/**
	 * @hwe: A hardware engine of this class. May (physical engine) or may
	 * not (virtual engine) be the engine jobs actually end up running on.
	 * Should never really be used for submissions.
	 */
	struct xe_hw_engine *hwe;
	/** @refcount: ref count of this exec queue */
	struct kref refcount;
	/** @vm: VM (address space) for this exec queue */
	struct xe_vm *vm;
	/** @class: class of this exec queue */
	enum xe_engine_class class;
	/**
	 * @logical_mask: logical mask of engines where jobs submitted to this
	 * exec queue can run
	 */
	u32 logical_mask;
	/** @name: name of this exec queue */
	char name[MAX_FENCE_NAME_LEN];
	/**
	 * @width: width (number of batch buffers submitted per exec) of this
	 * exec queue
	 */
	u16 width;
	/** @fence_irq: fence IRQ used to signal job completion */
	struct xe_hw_fence_irq *fence_irq;

	/**
	 * @last_fence: last fence on the exec queue, protected by vm->lock in
	 * write mode for bind exec queues and by the dma-resv lock for
	 * non-bind exec queues
	 */
	struct dma_fence *last_fence;

/* queue no longer allowed to submit */
#define EXEC_QUEUE_FLAG_BANNED			BIT(0)
/* queue used for kernel submission only */
#define EXEC_QUEUE_FLAG_KERNEL			BIT(1)
/* kernel queue, only destroyed at driver unload */
#define EXEC_QUEUE_FLAG_PERMANENT		BIT(2)
/* queue keeps running pending jobs after destroy ioctl */
#define EXEC_QUEUE_FLAG_PERSISTENT		BIT(3)
/*
 * for VM jobs; the caller must hold an RPM ref when creating a queue
 * with this flag
 */
#define EXEC_QUEUE_FLAG_VM			BIT(4)
/* child of VM queue for multi-tile VM jobs */
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(5)
/* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY		BIT(6)

	/**
	 * @flags: flags for this exec queue, set up statically at creation and
	 * not expected to change aside from the banned bit
	 */
	unsigned long flags;

	union {
		/** @multi_gt_list: list head for VM bind engines if multi-GT */
		struct list_head multi_gt_list;
		/** @multi_gt_link: link for VM bind engines if multi-GT */
		struct list_head multi_gt_link;
	};

	union {
		/** @execlist: execlist backend specific state for exec queue */
		struct xe_execlist_exec_queue *execlist;
		/** @guc: GuC backend specific state for exec queue */
		struct xe_guc_exec_queue *guc;
	};

	union {
		/**
		 * @parallel: parallel submission state
		 */
		struct {
			/** @composite_fence_ctx: context of the composite fence */
			u64 composite_fence_ctx;
			/** @composite_fence_seqno: seqno for composite fence */
			u32 composite_fence_seqno;
		} parallel;
		/**
		 * @bind: bind submission state
		 */
		struct {
			/** @fence_ctx: context of the bind fence */
			u64 fence_ctx;
			/** @fence_seqno: seqno for bind fence */
			u32 fence_seqno;
		} bind;
	};

	/** @sched_props: scheduling properties */
	struct {
		/** @timeslice_us: timeslice period in microseconds */
		u32 timeslice_us;
		/** @preempt_timeout_us: preemption timeout in microseconds */
		u32 preempt_timeout_us;
		/** @priority: priority of this exec queue */
		enum xe_exec_queue_priority priority;
	} sched_props;

	/** @compute: compute exec queue state */
	struct {
		/** @pfence: preemption fence */
		struct dma_fence *pfence;
		/** @context: preemption fence context */
		u64 context;
		/** @seqno: preemption fence seqno */
		u32 seqno;
		/** @link: link into VM's list of exec queues */
		struct list_head link;
		/** @lock: preemption fences lock */
		spinlock_t lock;
	} compute;

	/** @ops: submission backend exec queue operations */
	const struct xe_exec_queue_ops *ops;

	/** @ring_ops: ring operations for this exec queue */
	const struct xe_ring_ops *ring_ops;
	/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
	struct drm_sched_entity *entity;
	/** @lrc: logical ring context for this exec queue */
	struct xe_lrc lrc[];
};

/**
 * struct xe_exec_queue_ops - Submission backend exec queue operations
 */
struct xe_exec_queue_ops {
	/** @init: Initialize exec queue for submission backend */
	int (*init)(struct xe_exec_queue *q);
	/** @kill: Kill in-flight submissions for backend */
	void (*kill)(struct xe_exec_queue *q);
	/** @fini: Fini exec queue for submission backend */
	void (*fini)(struct xe_exec_queue *q);
	/** @set_priority: Set priority for exec queue */
	int (*set_priority)(struct xe_exec_queue *q,
			    enum xe_exec_queue_priority priority);
	/** @set_timeslice: Set timeslice for exec queue */
	int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
	/** @set_preempt_timeout: Set preemption timeout for exec queue */
	int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
	/** @set_job_timeout: Set job timeout for exec queue */
	int (*set_job_timeout)(struct xe_exec_queue *q, u32 job_timeout_ms);
	/**
	 * @suspend: Suspend exec queue from executing, allowed to be called
	 * multiple times in a row before resume with the caveat that
	 * @suspend_wait must return before suspend is called again.
	 */
	int (*suspend)(struct xe_exec_queue *q);
	/**
	 * @suspend_wait: Wait for an exec queue to finish suspending, should
	 * be called after @suspend.
	 */
	void (*suspend_wait)(struct xe_exec_queue *q);
	/**
	 * @resume: Resume exec queue execution, the exec queue must be in a
	 * suspended state and the dma-fence returned from the most recent
	 * suspend call must be signalled when this function is called.
	 */
	void (*resume)(struct xe_exec_queue *q);
	/** @reset_status: check exec queue reset status */
	bool (*reset_status)(struct xe_exec_queue *q);
};
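/*
 * Illustrative sketch, not part of this header: holding a reference on a
 * queue via @refcount while driving the suspend / resume contract of
 * struct xe_exec_queue_ops above. example_exec_queue_put() is a
 * hypothetical stand-in for the driver's real put helper (declared in
 * xe_exec_queue.h), assumed here to drop @refcount and free the queue on
 * the last reference.
 */
void example_exec_queue_put(struct xe_exec_queue *q);

static inline int example_suspend_then_resume(struct xe_exec_queue *q)
{
	int err;

	/* Pin the queue so it cannot be freed while we operate on it. */
	kref_get(&q->refcount);

	err = q->ops->suspend(q);
	if (!err) {
		/* suspend_wait() must return before suspending again. */
		q->ops->suspend_wait(q);
		/* ... the queue is not executing at this point ... */
		q->ops->resume(q);
	}

	example_exec_queue_put(q);
	return err;
}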
#endif