// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <uapi/drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"
#include "xe_pxp.h"

enum xe_exec_queue_sched_prop {
	XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
	XE_EXEC_QUEUE_TIMESLICE = 1,
	XE_EXEC_QUEUE_PREEMPT_TIMEOUT = 2,
	XE_EXEC_QUEUE_SCHED_PROP_MAX = 3,
};

static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number);

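/*
 * Release everything taken at allocation time: remove the queue from the PXP
 * list if needed, drop the VM and xe_file references, then free the queue.
 * Counterpart of __xe_exec_queue_alloc().
 */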
static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
	if (q->vm)
		xe_vm_put(q->vm);

	if (q->xef)
		xe_file_put(q->xef);

	kfree(q);
}

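/*
 * Allocate an exec queue and initialize its software state, including any
 * user extensions. No hardware or LRC state is touched here; that happens
 * later in __xe_exec_queue_init().
 */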
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
						   struct xe_vm *vm,
						   u32 logical_mask,
						   u16 width, struct xe_hw_engine *hwe,
						   u32 flags, u64 extensions)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;

	/* only kernel queues can be permanent */
	XE_WARN_ON((flags & EXEC_QUEUE_FLAG_PERMANENT) && !(flags & EXEC_QUEUE_FLAG_KERNEL));

	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	q->class = hwe->class;
	q->width = width;
	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->lr.link);
	INIT_LIST_HEAD(&q->multi_gt_link);
	INIT_LIST_HEAD(&q->hw_engine_group_link);
	INIT_LIST_HEAD(&q->pxp.link);

	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
	q->sched_props.preempt_timeout_us =
				hwe->eclass->sched_props.preempt_timeout_us;
	q->sched_props.job_timeout_ms =
				hwe->eclass->sched_props.job_timeout_ms;
	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
	else
		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;

	if (vm)
		q->vm = xe_vm_get(vm);

	if (extensions) {
		/*
		 * may set q->usm, must come before xe_lrc_create(),
		 * may overwrite q->sched_props, must come before q->ops->init()
		 */
		err = exec_queue_user_extensions(xe, q, extensions, 0);
		if (err) {
			__xe_exec_queue_free(q);
			return ERR_PTR(err);
		}
	}

	return q;
}

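/*
 * Second stage of exec queue creation: create one LRC per queue slot (taking
 * the VM lock around LRC creation) and hand the queue over to the backend via
 * q->ops->init().
 */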
static int __xe_exec_queue_init(struct xe_exec_queue *q)
{
	struct xe_vm *vm = q->vm;
	int i, err;
	u32 flags = 0;

	/*
	 * PXP workloads executing on RCS or CCS must run in isolation (i.e. no
	 * other workload can use the EUs at the same time). On MTL this is done
	 * by setting the RUNALONE bit in the LRC, while starting on Xe2 there
	 * is a dedicated bit for it.
	 */
	if (xe_exec_queue_uses_pxp(q) &&
	    (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
		if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
			flags |= XE_LRC_CREATE_PXP;
		else
			flags |= XE_LRC_CREATE_RUNALONE;
	}

	if (vm) {
		err = xe_vm_lock(vm, true);
		if (err)
			return err;
	}

	for (i = 0; i < q->width; ++i) {
		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
		if (IS_ERR(q->lrc[i])) {
			err = PTR_ERR(q->lrc[i]);
			goto err_unlock;
		}
	}

	if (vm)
		xe_vm_unlock(vm);

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	return 0;

err_unlock:
	if (vm)
		xe_vm_unlock(vm);
err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_put(q->lrc[i]);
	return err;
}

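/**
 * xe_exec_queue_create() - Create an exec queue
 * @xe: Xe device.
 * @vm: The VM the queue operates on, or NULL for queues without a VM.
 * @logical_mask: Mask of logical engine instances the queue may be placed on.
 * @width: Number of engines the queue submits to in parallel.
 * @hwe: The primary hardware engine for the queue.
 * @flags: EXEC_QUEUE_FLAG_* creation flags.
 * @extensions: Pointer to the first user extension in the chain, or 0.
 *
 * Returns the new exec queue on success, ERR_PTR on failure.
 */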
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags,
					   u64 extensions)
{
	struct xe_exec_queue *q;
	int err;

	/* VMs for GSCCS queues (and only those) must have the XE_VM_FLAG_GSC flag */
	xe_assert(xe, !vm || (!!(vm->flags & XE_VM_FLAG_GSC) == !!(hwe->engine_id == XE_HW_ENGINE_GSCCS0)));

	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
				  extensions);
	if (IS_ERR(q))
		return q;

	err = __xe_exec_queue_init(q);
	if (err)
		goto err_post_alloc;

	/*
	 * We can only add the queue to the PXP list after the init is complete,
	 * because the PXP termination can call exec_queue_kill and that will
	 * go bad if the queue is only half-initialized. This means that we
	 * can't do it when we handle the PXP extension in __xe_exec_queue_alloc
	 * and we need to do it here instead.
	 */
	if (xe_exec_queue_uses_pxp(q)) {
		err = xe_pxp_exec_queue_add(xe->pxp, q);
		if (err)
			goto err_post_alloc;
	}

	return q;

err_post_alloc:
	__xe_exec_queue_free(q);
	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create, ERRNO);

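/**
 * xe_exec_queue_create_class() - Create an exec queue for an engine class
 * @xe: Xe device.
 * @gt: GT to select the engines from.
 * @vm: The VM the queue operates on, or NULL.
 * @class: Engine class to place the queue on.
 * @flags: EXEC_QUEUE_FLAG_* creation flags.
 * @extensions: Pointer to the first user extension in the chain, or 0.
 *
 * Builds a logical mask from all non-reserved engines of @class on @gt and
 * creates a single-width queue that can be placed on any of them.
 *
 * Returns the new exec queue on success, ERR_PTR(-ENODEV) if no suitable
 * engine exists, other ERR_PTR on failure.
 */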
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class,
						 u32 flags, u64 extensions)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions);
}

/**
 * xe_exec_queue_create_bind() - Create bind exec queue.
 * @xe: Xe device.
 * @tile: tile which bind exec queue belongs to.
 * @flags: exec queue creation flags
 * @extensions: exec queue creation extensions
 *
 * Normalize bind exec queue creation. A bind exec queue is tied to the
 * migration VM for access to the physical memory required for page table
 * programming. On faulting devices the reserved copy engine instance must be
 * used to avoid deadlocking (user binds cannot get stuck behind faults, as the
 * kernel binds which resolve those faults depend on user binds). On
 * non-faulting devices any copy engine can be used.
 *
 * Returns exec queue on success, ERR_PTR on failure
 */
struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
						struct xe_tile *tile,
						u32 flags, u64 extensions)
{
	struct xe_gt *gt = tile->primary_gt;
	struct xe_exec_queue *q;
	struct xe_vm *migrate_vm;

	migrate_vm = xe_migrate_get_vm(tile->migrate);
	if (xe->info.has_usm) {
		struct xe_hw_engine *hwe = xe_gt_hw_engine(gt,
							   XE_ENGINE_CLASS_COPY,
							   gt->usm.reserved_bcs_instance,
							   false);

		if (!hwe) {
			xe_vm_put(migrate_vm);
			return ERR_PTR(-EINVAL);
		}

		q = xe_exec_queue_create(xe, migrate_vm,
					 BIT(hwe->logical_instance), 1, hwe,
					 flags, extensions);
	} else {
		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
					       XE_ENGINE_CLASS_COPY, flags,
					       extensions);
	}
	xe_vm_put(migrate_vm);

	return q;
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);

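/**
 * xe_exec_queue_destroy() - Release function for the exec queue refcount
 * @ref: The queue's embedded kref.
 *
 * Removes the queue from the PXP list if needed, drops the last fence, puts
 * any child bind queues linked on other GTs and hands the queue to the
 * backend's fini() hook.
 */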
void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	if (xe_exec_queue_uses_pxp(q))
		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);

	xe_exec_queue_last_fence_put_unlocked(q);
	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}

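/**
 * xe_exec_queue_fini() - Final cleanup of an exec queue
 * @q: The exec queue
 *
 * Accumulates the queue's run ticks, wakes up anyone waiting on the xe_file's
 * pending removal count, puts the LRCs and frees the queue.
 */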
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	/*
	 * Before releasing our ref to lrc and xef, accumulate our run ticks
	 * and wake up any waiters.
	 */
	xe_exec_queue_update_run_ticks(q);
	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
		wake_up_var(&q->xef->exec_queue.pending_removal);

	for (i = 0; i < q->width; ++i)
		xe_lrc_put(q->lrc[i]);

	__xe_exec_queue_free(q);
}

void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
{
	switch (q->class) {
	case XE_ENGINE_CLASS_RENDER:
		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
		break;
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
		break;
	case XE_ENGINE_CLASS_COPY:
		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
		break;
	case XE_ENGINE_CLASS_COMPUTE:
		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
		break;
	case XE_ENGINE_CLASS_OTHER:
		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
		break;
	default:
		XE_WARN_ON(q->class);
	}
}

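/**
 * xe_exec_queue_lookup() - Look up an exec queue by user id
 * @xef: The xe_file that owns the queue.
 * @id: The exec queue id.
 *
 * Returns the exec queue with an extra reference taken, or NULL if no queue
 * with that id exists. The caller must drop the reference with
 * xe_exec_queue_put().
 */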
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}

enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	q->sched_props.priority = value;
	return 0;
}

static bool xe_exec_queue_enforce_schedule_limit(void)
{
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	return true;
#else
	return !capable(CAP_SYS_NICE);
#endif
}

static void
xe_exec_queue_get_prop_minmax(struct xe_hw_engine_class_intf *eclass,
			      enum xe_exec_queue_sched_prop prop,
			      u32 *min, u32 *max)
{
	switch (prop) {
	case XE_EXEC_QUEUE_JOB_TIMEOUT:
		*min = eclass->sched_props.job_timeout_min;
		*max = eclass->sched_props.job_timeout_max;
		break;
	case XE_EXEC_QUEUE_TIMESLICE:
		*min = eclass->sched_props.timeslice_min;
		*max = eclass->sched_props.timeslice_max;
		break;
	case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
		*min = eclass->sched_props.preempt_timeout_min;
		*max = eclass->sched_props.preempt_timeout_max;
		break;
	default:
		break;
	}
#if IS_ENABLED(CONFIG_DRM_XE_ENABLE_SCHEDTIMEOUT_LIMIT)
	if (capable(CAP_SYS_NICE)) {
		switch (prop) {
		case XE_EXEC_QUEUE_JOB_TIMEOUT:
			*min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
			break;
		case XE_EXEC_QUEUE_TIMESLICE:
			*min = XE_HW_ENGINE_TIMESLICE_MIN;
			*max = XE_HW_ENGINE_TIMESLICE_MAX;
			break;
		case XE_EXEC_QUEUE_PREEMPT_TIMEOUT:
			*min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
			*max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
			break;
		default:
			break;
		}
	}
#endif
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value)
{
	u32 min = 0, max = 0;

	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
				      XE_EXEC_QUEUE_TIMESLICE, &min, &max);

	if (xe_exec_queue_enforce_schedule_limit() &&
	    !xe_hw_engine_timeout_in_range(value, min, max))
		return -EINVAL;

	q->sched_props.timeslice_us = value;
	return 0;
}

static int
exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
{
	if (value == DRM_XE_PXP_TYPE_NONE)
		return 0;

	/* we only support HWDRM sessions right now */
	if (XE_IOCTL_DBG(xe, value != DRM_XE_PXP_TYPE_HWDRM))
		return -EINVAL;

	if (!xe_pxp_is_enabled(xe->pxp))
		return -ENODEV;

	return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE] = exec_queue_set_pxp_type,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE &&
			 ext.property != DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	if (!exec_queue_set_property_funcs[idx])
		return -EINVAL;

	return exec_queue_set_property_funcs[idx](xe, q, ext.value);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
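/*
 * Walk the user extension chain, dispatching each entry to its handler in
 * exec_queue_user_extension_funcs[]. Recurses on ext.next_extension and bails
 * out after MAX_USER_EXTENSIONS entries.
 */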
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct drm_xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number);

	return 0;
}

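/*
 * Validate the user-supplied width x num_placements engine matrix and collapse
 * it into a logical instance mask: all entries must share the same GT and
 * engine class, and parallel placements must be logically contiguous. Returns
 * 0 on any validation failure.
 */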
static u32 calc_validate_logical_mask(struct xe_device *xe,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_uc_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = xe_hw_engine_lookup(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}

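/**
 * xe_exec_queue_create_ioctl() - Handler for the exec queue create ioctl
 * @dev: DRM device.
 * @data: Pointer to struct drm_xe_exec_queue_create.
 * @file: DRM file.
 *
 * For the virtual DRM_XE_ENGINE_CLASS_VM_BIND class this creates one bind
 * queue per tile and links them together; otherwise it creates a regular exec
 * queue on the requested engines and VM. The new queue id is returned through
 * @data.
 *
 * Returns 0 on success, negative error code on failure.
 */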
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm;
	struct xe_tile *tile;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 flags = 0;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags & ~DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT)
		flags |= EXEC_QUEUE_FLAG_LOW_LATENCY;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		if (XE_IOCTL_DBG(xe, args->width != 1) ||
		    XE_IOCTL_DBG(xe, args->num_placements != 1) ||
		    XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
			return -EINVAL;

		for_each_tile(tile, xe, id) {
			struct xe_exec_queue *new;

			flags |= EXEC_QUEUE_FLAG_VM;
			if (id)
				flags |= EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD;

			new = xe_exec_queue_create_bind(xe, tile, flags,
							args->extensions);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		logical_mask = calc_validate_logical_mask(xe, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = xe_hw_engine_lookup(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe, flags,
					 args->extensions);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);

		if (xe_vm_in_preempt_fence_mode(vm)) {
			q->lr.context = dma_fence_context_alloc(1);

			err = xe_vm_add_compute_exec_queue(vm, q);
			if (XE_IOCTL_DBG(xe, err))
				goto put_exec_queue;
		}

		if (q->vm && q->hwe->hw_engine_group) {
			err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
			if (err)
				goto put_exec_queue;
		}
	}

	q->xef = xe_file_get(xef);

	/* user id alloc must always be last in ioctl to prevent UAF */
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto kill_exec_queue;

	args->exec_queue_id = id;

	return 0;

kill_exec_queue:
	xe_exec_queue_kill(q);
put_exec_queue:
	xe_exec_queue_put(q);
	return err;
}

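/**
 * xe_exec_queue_get_property_ioctl() - Handler for the exec queue get property ioctl
 * @dev: DRM device.
 * @data: Pointer to struct drm_xe_exec_queue_get_property.
 * @file: DRM file.
 *
 * Currently only DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN is supported, which
 * reports the queue's reset status.
 *
 * Returns 0 on success, negative error code on failure.
 */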
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = q->ops->reset_status(q);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_in_lr_mode(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

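/* Number of jobs submitted but not yet signaled, derived from the first LRC's seqnos */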
static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc[0];
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv, so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (xe_exec_queue_is_parallel(q)) {
		int i;

		for (i = 0; i < q->width; ++i) {
			if (xe_lrc_seqno(q->lrc[i]) !=
			    q->lrc[i]->fence_ctx.next_seqno - 1)
				return false;
		}

		return true;
	}

	return xe_lrc_seqno(q->lrc[0]) ==
		q->lrc[0]->fence_ctx.next_seqno - 1;
}

/**
 * xe_exec_queue_update_run_ticks() - Update run time in ticks for this exec queue
 * from hw
 * @q: The exec queue
 *
 * Update the timestamp saved by HW for this exec queue and save run ticks
 * calculated by using the delta from last update.
 */
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
{
	struct xe_device *xe = gt_to_xe(q->gt);
	struct xe_lrc *lrc;
	u64 old_ts, new_ts;
	int idx;

	/*
	 * Jobs that are executed by the kernel don't have a corresponding
	 * xe_file and thus are not accounted.
	 */
	if (!q->xef)
		return;

	/* Synchronize with unbind while holding the xe file open */
	if (!drm_dev_enter(&xe->drm, &idx))
		return;
	/*
	 * Only sample the first LRC. For parallel submission, all of them are
	 * scheduled together and we compensate that below by multiplying by
	 * width - this may introduce errors if that premise is not true and
	 * they don't exit 100% aligned. On the other hand, looping through
	 * the LRCs and reading them at different times could also introduce
	 * errors.
	 */
	lrc = q->lrc[0];
	new_ts = xe_lrc_update_timestamp(lrc, &old_ts);
	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;

	drm_dev_exit(idx);
}

/**
 * xe_exec_queue_kill - permanently stop all execution from an exec queue
 * @q: The exec queue
 *
 * This function permanently stops all activity on an exec queue. If the queue
 * is actively executing on the HW, it will be kicked off the engine; any
 * pending jobs are discarded and all future submissions are rejected.
 * This function is safe to call multiple times.
 */
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		xe_vm_remove_compute_exec_queue(q->vm, eq);
	}

	q->ops->kill(q);
	xe_vm_remove_compute_exec_queue(q->vm, q);
}

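/**
 * xe_exec_queue_destroy_ioctl() - Handler for the exec queue destroy ioctl
 * @dev: DRM device.
 * @data: Pointer to struct drm_xe_exec_queue_destroy.
 * @file: DRM file.
 *
 * Erases the queue from the xe_file's xarray, kills it and drops the
 * reference that the user id held.
 *
 * Returns 0 on success, negative error code on failure.
 */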
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	if (q)
		atomic_inc(&xef->exec_queue.pending_removal);
	mutex_unlock(&xef->exec_queue.lock);

	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (q->vm && q->hwe->hw_engine_group)
		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);

	xe_exec_queue_kill(q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}

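/*
 * The last fence of a VM bind queue is protected by vm->lock; for any other
 * queue it is protected by the VM's dma-resv and the hw engine group's mode
 * semaphore. Assert whichever applies.
 */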
static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
						    struct xe_vm *vm)
{
	if (q->flags & EXEC_QUEUE_FLAG_VM) {
		lockdep_assert_held(&vm->lock);
	} else {
		xe_vm_assert_held(vm);
		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);
	}
}

/**
 * xe_exec_queue_last_fence_put() - Drop ref to last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 */
void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put_unlocked(q);
}

/**
 * xe_exec_queue_last_fence_put_unlocked() - Drop ref to last fence unlocked
 * @q: The exec queue
 *
 * Only safe to be called from xe_exec_queue_destroy().
 */
void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
{
	if (q->last_fence) {
		dma_fence_put(q->last_fence);
		q->last_fence = NULL;
	}
}

/**
 * xe_exec_queue_last_fence_get() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
					       struct xe_vm *vm)
{
	struct dma_fence *fence;

	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put(q, vm);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_get_for_resume() - Get last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Get last fence, takes a ref. Only safe to be called in the context of
 * resuming the hw engine group's long-running exec queue, when the group
 * semaphore is held.
 *
 * Returns: last fence if not signaled, dma fence stub if signaled
 */
struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
							   struct xe_vm *vm)
{
	struct dma_fence *fence;

	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);

	if (q->last_fence &&
	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
		xe_exec_queue_last_fence_put_unlocked(q);

	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
	dma_fence_get(fence);
	return fence;
}

/**
 * xe_exec_queue_last_fence_set() - Set last fence
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 * @fence: The fence
 *
 * Set the last fence for the engine. Takes a reference to @fence;
 * xe_exec_queue_last_fence_put() should be called when closing the engine.
 */
void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
				  struct dma_fence *fence)
{
	xe_exec_queue_last_fence_lockdep_assert(q, vm);

	xe_exec_queue_last_fence_put(q, vm);
	q->last_fence = dma_fence_get(fence);
}

/**
 * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
 * @q: The exec queue
 * @vm: The VM the engine does a bind or exec for
 *
 * Returns:
 * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
 */
int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
{
	struct dma_fence *fence;
	int err = 0;

	fence = xe_exec_queue_last_fence_get(q, vm);
	if (fence) {
		err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
			0 : -ETIME;
		dma_fence_put(fence);
	}

	return err;
}