Lines Matching +full:render +full:- +full:max
2 * SPDX-License-Identifier: MIT
4 * Copyright © 2011-2012 Intel Corporation
13 * supports contexts for the render ring.
19 * would happen if a client ran and piggy-backed off another client's GPU state.
27 * store GPU state, and thus allow GPU clients to not re-emit state (and
31 * The context life cycle is semi-complicated in that context BOs may live
44 * S0->S1: client creates a context
45 * S1->S2: client submits execbuf with context
46 * S2->S3: other client submits execbuf with context
47 * S3->S1: context object was retired
48 * S3->S2: client submits another execbuf
49 * S2->S4: context destroy called with current context
50 * S3->S5->S0: destroy path
51 * S4->S5->S0: destroy path on current context
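
The state diagram above maps onto a small transition table. A minimal illustrative sketch, not driver code (the state names and their meanings are assumptions inferred from the transitions listed above):

enum ctx_state { CTX_S0, CTX_S1, CTX_S2, CTX_S3, CTX_S4, CTX_S5 };

struct ctx_transition {
	enum ctx_state from, to;
	const char *event;	/* what moves the context between states */
};

static const struct ctx_transition ctx_transitions[] = {
	{ CTX_S0, CTX_S1, "client creates a context" },
	{ CTX_S1, CTX_S2, "client submits execbuf with context" },
	{ CTX_S2, CTX_S3, "other client submits execbuf with context" },
	{ CTX_S3, CTX_S1, "context object retired" },
	{ CTX_S3, CTX_S2, "client submits another execbuf" },
	{ CTX_S2, CTX_S4, "context destroy called with current context" },
	{ CTX_S3, CTX_S5, "destroy path" },
	{ CTX_S4, CTX_S5, "destroy path on current context" },
	{ CTX_S5, CTX_S0, "final release of the context BO" },
};
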
89 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
108 mutex_lock(&ctx->lut_mutex); in lut_close()
110 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) { in lut_close()
112 struct drm_i915_gem_object *obj = vma->obj; in lut_close()
115 if (!kref_get_unless_zero(&obj->base.refcount)) in lut_close()
118 spin_lock(&obj->lut_lock); in lut_close()
119 list_for_each_entry(lut, &obj->lut_list, obj_link) { in lut_close()
120 if (lut->ctx != ctx) in lut_close()
123 if (lut->handle != iter.index) in lut_close()
126 list_del(&lut->obj_link); in lut_close()
129 spin_unlock(&obj->lut_lock); in lut_close()
131 if (&lut->obj_link != &obj->lut_list) { in lut_close()
133 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot); in lut_close()
141 mutex_unlock(&ctx->lut_mutex); in lut_close()
153 return ERR_PTR(-EINVAL); in lookup_user_engine()
158 engine = intel_engine_lookup_user(ctx->i915, in lookup_user_engine()
159 ci->engine_class, in lookup_user_engine()
160 ci->engine_instance); in lookup_user_engine()
162 return ERR_PTR(-EINVAL); in lookup_user_engine()
164 idx = engine->legacy_idx; in lookup_user_engine()
166 idx = ci->engine_instance; in lookup_user_engine()
175 s64 priority = args->value; in validate_priority()
177 if (args->size) in validate_priority()
178 return -EINVAL; in validate_priority()
180 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) in validate_priority()
181 return -ENODEV; in validate_priority()
185 return -EINVAL; in validate_priority()
189 return -EPERM; in validate_priority()
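
validate_priority() above rejects the request when args->size is non-zero, when the scheduler lacks priority support, when the value is out of range, and (-EPERM) when an unprivileged client asks for an above-default priority. A hedged userspace sketch of driving it through the context-setparam ioctl; 'fd' (an open DRM device node) and 'ctx_id' are assumptions of this example:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Request a lower priority for an existing context. Raising the priority
 * above the default typically requires CAP_SYS_NICE. */
static int set_ctx_priority(int fd, uint32_t ctx_id, int32_t prio)
{
	struct drm_i915_gem_context_param p;

	memset(&p, 0, sizeof(p));
	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_PRIORITY;
	p.size = 0;				/* validate_priority() insists on size == 0 */
	p.value = (uint64_t)(int64_t)prio;	/* the kernel reads this back as s64 */

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}
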
199 if (pc->pxp_wakeref) in proto_context_close()
200 intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref); in proto_context_close()
201 if (pc->vm) in proto_context_close()
202 i915_vm_put(pc->vm); in proto_context_close()
203 if (pc->user_engines) { in proto_context_close()
204 for (i = 0; i < pc->num_user_engines; i++) in proto_context_close()
205 kfree(pc->user_engines[i].siblings); in proto_context_close()
206 kfree(pc->user_engines); in proto_context_close()
217 * Only contexts that are short-lived [that will expire or be in proto_context_set_persistence()
221 if (!i915->params.enable_hangcheck) in proto_context_set_persistence()
222 return -EINVAL; in proto_context_set_persistence()
224 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); in proto_context_set_persistence()
226 /* To cancel a context we use "preempt-to-idle" */ in proto_context_set_persistence()
227 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) in proto_context_set_persistence()
228 return -ENODEV; in proto_context_set_persistence()
233 * If the per-engine reset fails, all hope is lost! We resort in proto_context_set_persistence()
244 return -ENODEV; in proto_context_set_persistence()
246 pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE); in proto_context_set_persistence()
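
The two branches above encode an asymmetric policy: enabling persistence only requires hangcheck, while disabling it additionally requires preemption and per-engine reset so an orphaned context can be cancelled cleanly. A hedged sketch of opting out from userspace, reusing the headers and setparam pattern of the earlier sketch (fd/ctx_id assumed as before):

static int set_non_persistent(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_PERSISTENCE,
		.size = 0,
		.value = 0,	/* 0 = cancel work on close, 1 = persist */
	};

	/* Expect -ENODEV on hardware without preemption or per-engine
	 * reset, matching the checks above. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}
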
259 pc->uses_protected_content = false; in proto_context_set_protected()
260 } else if (!intel_pxp_is_enabled(i915->pxp)) { in proto_context_set_protected()
261 ret = -ENODEV; in proto_context_set_protected()
262 } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) || in proto_context_set_protected()
263 !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) { in proto_context_set_protected()
264 ret = -EPERM; in proto_context_set_protected()
266 pc->uses_protected_content = true; in proto_context_set_protected()
272 pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); in proto_context_set_protected()
274 if (!intel_pxp_is_active(i915->pxp)) in proto_context_set_protected()
275 ret = intel_pxp_start(i915->pxp); in proto_context_set_protected()
288 return ERR_PTR(-ENOMEM); in proto_context_create()
290 pc->num_user_engines = -1; in proto_context_create()
291 pc->user_engines = NULL; in proto_context_create()
292 pc->user_flags = BIT(UCONTEXT_BANNABLE) | in proto_context_create()
294 if (i915->params.enable_hangcheck) in proto_context_create()
295 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE); in proto_context_create()
296 pc->sched.priority = I915_PRIORITY_NORMAL; in proto_context_create()
300 err = ERR_PTR(-EINVAL); in proto_context_create()
303 pc->single_timeline = true; in proto_context_create()
320 lockdep_assert_held(&fpriv->proto_context_lock); in proto_context_register_locked()
322 ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL); in proto_context_register_locked()
326 old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL); in proto_context_register_locked()
328 xa_erase(&fpriv->context_xa, *id); in proto_context_register_locked()
342 mutex_lock(&fpriv->proto_context_lock); in proto_context_register()
344 mutex_unlock(&fpriv->proto_context_lock); in proto_context_register()
354 xa_lock(&file_priv->vm_xa); in i915_gem_vm_lookup()
355 vm = xa_load(&file_priv->vm_xa, id); in i915_gem_vm_lookup()
357 kref_get(&vm->ref); in i915_gem_vm_lookup()
358 xa_unlock(&file_priv->vm_xa); in i915_gem_vm_lookup()
367 struct drm_i915_private *i915 = fpriv->i915; in set_proto_ctx_vm()
370 if (args->size) in set_proto_ctx_vm()
371 return -EINVAL; in set_proto_ctx_vm()
374 return -ENODEV; in set_proto_ctx_vm()
376 if (upper_32_bits(args->value)) in set_proto_ctx_vm()
377 return -ENOENT; in set_proto_ctx_vm()
379 vm = i915_gem_vm_lookup(fpriv, args->value); in set_proto_ctx_vm()
381 return -ENOENT; in set_proto_ctx_vm()
383 if (pc->vm) in set_proto_ctx_vm()
384 i915_vm_put(pc->vm); in set_proto_ctx_vm()
385 pc->vm = vm; in set_proto_ctx_vm()
403 struct drm_i915_private *i915 = set->i915; in set_proto_ctx_engines_balance()
410 return -ENODEV; in set_proto_ctx_engines_balance()
412 if (get_user(idx, &ext->engine_index)) in set_proto_ctx_engines_balance()
413 return -EFAULT; in set_proto_ctx_engines_balance()
415 if (idx >= set->num_engines) { in set_proto_ctx_engines_balance()
416 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", in set_proto_ctx_engines_balance()
417 idx, set->num_engines); in set_proto_ctx_engines_balance()
418 return -EINVAL; in set_proto_ctx_engines_balance()
421 idx = array_index_nospec(idx, set->num_engines); in set_proto_ctx_engines_balance()
422 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) { in set_proto_ctx_engines_balance()
423 drm_dbg(&i915->drm, in set_proto_ctx_engines_balance()
425 return -EEXIST; in set_proto_ctx_engines_balance()
428 if (get_user(num_siblings, &ext->num_siblings)) in set_proto_ctx_engines_balance()
429 return -EFAULT; in set_proto_ctx_engines_balance()
431 err = check_user_mbz(&ext->flags); in set_proto_ctx_engines_balance()
435 err = check_user_mbz(&ext->mbz64); in set_proto_ctx_engines_balance()
444 return -ENOMEM; in set_proto_ctx_engines_balance()
449 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { in set_proto_ctx_engines_balance()
450 err = -EFAULT; in set_proto_ctx_engines_balance()
458 drm_dbg(&i915->drm, in set_proto_ctx_engines_balance()
461 err = -EINVAL; in set_proto_ctx_engines_balance()
467 set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL; in set_proto_ctx_engines_balance()
468 set->engines[idx].engine = siblings[0]; in set_proto_ctx_engines_balance()
471 set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED; in set_proto_ctx_engines_balance()
472 set->engines[idx].num_siblings = num_siblings; in set_proto_ctx_engines_balance()
473 set->engines[idx].siblings = siblings; in set_proto_ctx_engines_balance()
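
set_proto_ctx_engines_balance() consumes the I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE extension of an engine map: the chosen slot becomes a virtual engine that the scheduler may place on any listed sibling. A hedged userspace sketch using the helper macros from i915_drm.h, balancing one slot across two video engines (fd/ctx_id assumed; the engine instances are illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_balanced_engines(int fd, uint32_t ctx_id)
{
	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
		.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
		.engine_index = 0,	/* which slot of the map to virtualize */
		.num_siblings = 2,
		.engines = {
			{ I915_ENGINE_CLASS_VIDEO, 0 },
			{ I915_ENGINE_CLASS_VIDEO, 1 },
		},
	};
	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
		.extensions = (uintptr_t)&balance,
		/* a balanced slot is marked invalid in the base map */
		.engines = { { I915_ENGINE_CLASS_INVALID,
			       I915_ENGINE_CLASS_INVALID_NONE } },
	};
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_ENGINES,
		.size = sizeof(engines),
		.value = (uintptr_t)&engines,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}
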
490 struct drm_i915_private *i915 = set->i915; in set_proto_ctx_engines_bond()
498 drm_dbg(&i915->drm, in set_proto_ctx_engines_bond()
500 return -ENODEV; in set_proto_ctx_engines_bond()
503 if (get_user(idx, &ext->virtual_index)) in set_proto_ctx_engines_bond()
504 return -EFAULT; in set_proto_ctx_engines_bond()
506 if (idx >= set->num_engines) { in set_proto_ctx_engines_bond()
507 drm_dbg(&i915->drm, in set_proto_ctx_engines_bond()
509 idx, set->num_engines); in set_proto_ctx_engines_bond()
510 return -EINVAL; in set_proto_ctx_engines_bond()
513 idx = array_index_nospec(idx, set->num_engines); in set_proto_ctx_engines_bond()
514 if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) { in set_proto_ctx_engines_bond()
515 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx); in set_proto_ctx_engines_bond()
516 return -EINVAL; in set_proto_ctx_engines_bond()
519 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) { in set_proto_ctx_engines_bond()
520 drm_dbg(&i915->drm, in set_proto_ctx_engines_bond()
522 return -EINVAL; in set_proto_ctx_engines_bond()
525 err = check_user_mbz(&ext->flags); in set_proto_ctx_engines_bond()
529 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { in set_proto_ctx_engines_bond()
530 err = check_user_mbz(&ext->mbz64[n]); in set_proto_ctx_engines_bond()
535 if (copy_from_user(&ci, &ext->master, sizeof(ci))) in set_proto_ctx_engines_bond()
536 return -EFAULT; in set_proto_ctx_engines_bond()
542 drm_dbg(&i915->drm, in set_proto_ctx_engines_bond()
545 return -EINVAL; in set_proto_ctx_engines_bond()
549 drm_dbg(&i915->drm, "bonding extension not supported with GuC submission"); in set_proto_ctx_engines_bond()
550 return -ENODEV; in set_proto_ctx_engines_bond()
553 if (get_user(num_bonds, &ext->num_bonds)) in set_proto_ctx_engines_bond()
554 return -EFAULT; in set_proto_ctx_engines_bond()
559 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) in set_proto_ctx_engines_bond()
560 return -EFAULT; in set_proto_ctx_engines_bond()
566 drm_dbg(&i915->drm, in set_proto_ctx_engines_bond()
569 return -EINVAL; in set_proto_ctx_engines_bond()
583 struct drm_i915_private *i915 = set->i915; in set_proto_ctx_engines_parallel_submit()
591 if (get_user(slot, &ext->engine_index)) in set_proto_ctx_engines_parallel_submit()
592 return -EFAULT; in set_proto_ctx_engines_parallel_submit()
594 if (get_user(width, &ext->width)) in set_proto_ctx_engines_parallel_submit()
595 return -EFAULT; in set_proto_ctx_engines_parallel_submit()
597 if (get_user(num_siblings, &ext->num_siblings)) in set_proto_ctx_engines_parallel_submit()
598 return -EFAULT; in set_proto_ctx_engines_parallel_submit()
600 if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) && in set_proto_ctx_engines_parallel_submit()
602 drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n", in set_proto_ctx_engines_parallel_submit()
604 return -EINVAL; in set_proto_ctx_engines_parallel_submit()
607 if (slot >= set->num_engines) { in set_proto_ctx_engines_parallel_submit()
608 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n", in set_proto_ctx_engines_parallel_submit()
609 slot, set->num_engines); in set_proto_ctx_engines_parallel_submit()
610 return -EINVAL; in set_proto_ctx_engines_parallel_submit()
613 if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) { in set_proto_ctx_engines_parallel_submit()
614 drm_dbg(&i915->drm, in set_proto_ctx_engines_parallel_submit()
616 return -EINVAL; in set_proto_ctx_engines_parallel_submit()
619 if (get_user(flags, &ext->flags)) in set_proto_ctx_engines_parallel_submit()
620 return -EFAULT; in set_proto_ctx_engines_parallel_submit()
623 drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags); in set_proto_ctx_engines_parallel_submit()
624 return -EINVAL; in set_proto_ctx_engines_parallel_submit()
627 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { in set_proto_ctx_engines_parallel_submit()
628 err = check_user_mbz(&ext->mbz64[n]); in set_proto_ctx_engines_parallel_submit()
634 drm_dbg(&i915->drm, "Width (%d) < 2\n", width); in set_proto_ctx_engines_parallel_submit()
635 return -EINVAL; in set_proto_ctx_engines_parallel_submit()
639 drm_dbg(&i915->drm, "Number siblings (%d) < 1\n", in set_proto_ctx_engines_parallel_submit()
641 return -EINVAL; in set_proto_ctx_engines_parallel_submit()
648 return -ENOMEM; in set_proto_ctx_engines_parallel_submit()
658 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { in set_proto_ctx_engines_parallel_submit()
659 err = -EFAULT; in set_proto_ctx_engines_parallel_submit()
667 drm_dbg(&i915->drm, in set_proto_ctx_engines_parallel_submit()
670 err = -EINVAL; in set_proto_ctx_engines_parallel_submit()
678 if (siblings[n]->class == RENDER_CLASS || in set_proto_ctx_engines_parallel_submit()
679 siblings[n]->class == COMPUTE_CLASS) { in set_proto_ctx_engines_parallel_submit()
680 err = -EINVAL; in set_proto_ctx_engines_parallel_submit()
687 drm_dbg(&i915->drm, in set_proto_ctx_engines_parallel_submit()
691 err = -EINVAL; in set_proto_ctx_engines_parallel_submit()
697 current_mask |= siblings[n]->logical_mask; in set_proto_ctx_engines_parallel_submit()
702 drm_dbg(&i915->drm, in set_proto_ctx_engines_parallel_submit()
705 err = -EINVAL; in set_proto_ctx_engines_parallel_submit()
712 set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL; in set_proto_ctx_engines_parallel_submit()
713 set->engines[slot].num_siblings = num_siblings; in set_proto_ctx_engines_parallel_submit()
714 set->engines[slot].width = width; in set_proto_ctx_engines_parallel_submit()
715 set->engines[slot].siblings = siblings; in set_proto_ctx_engines_parallel_submit()
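
The parallel-submit extension parsed above configures a slot that accepts 'width' batch buffers per execbuf, each placed on one of 'num_siblings' candidates; note the code rejects RENDER/COMPUTE classes and, outside GuC submission, more than one sibling. A hedged sketch of a two-wide video configuration, same includes and assumptions as the balancing sketch above:

static int set_parallel_engines(int fd, uint32_t ctx_id)
{
	I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(parallel, 2) = {
		.base.name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT,
		.engine_index = 0,
		.width = 2,		/* batches per submission */
		.num_siblings = 1,	/* placements per batch */
		.engines = {		/* width * num_siblings entries */
			{ I915_ENGINE_CLASS_VIDEO, 0 },
			{ I915_ENGINE_CLASS_VIDEO, 1 },
		},
	};
	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
		.extensions = (uintptr_t)&parallel,
		.engines = { { I915_ENGINE_CLASS_INVALID,
			       I915_ENGINE_CLASS_INVALID_NONE } },
	};
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_ENGINES,
		.size = sizeof(engines),
		.value = (uintptr_t)&engines,
	};

	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}
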
736 struct drm_i915_private *i915 = fpriv->i915; in set_proto_ctx_engines()
739 u64_to_user_ptr(args->value); in set_proto_ctx_engines()
744 if (pc->num_user_engines >= 0) { in set_proto_ctx_engines()
745 drm_dbg(&i915->drm, "Cannot set engines twice"); in set_proto_ctx_engines()
746 return -EINVAL; in set_proto_ctx_engines()
749 if (args->size < sizeof(*user) || in set_proto_ctx_engines()
750 !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) { in set_proto_ctx_engines()
751 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n", in set_proto_ctx_engines()
752 args->size); in set_proto_ctx_engines()
753 return -EINVAL; in set_proto_ctx_engines()
756 set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); in set_proto_ctx_engines()
759 return -EINVAL; in set_proto_ctx_engines()
763 return -ENOMEM; in set_proto_ctx_engines()
769 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { in set_proto_ctx_engines()
771 return -EFAULT; in set_proto_ctx_engines()
784 drm_dbg(&i915->drm, in set_proto_ctx_engines()
788 return -ENOENT; in set_proto_ctx_engines()
795 err = -EFAULT; in set_proto_ctx_engines()
796 if (!get_user(extensions, &user->extensions)) in set_proto_ctx_engines()
806 pc->num_user_engines = set.num_engines; in set_proto_ctx_engines()
807 pc->user_engines = set.engines; in set_proto_ctx_engines()
816 struct drm_i915_private *i915 = fpriv->i915; in set_proto_ctx_sseu()
821 if (args->size < sizeof(user_sseu)) in set_proto_ctx_sseu()
822 return -EINVAL; in set_proto_ctx_sseu()
825 return -ENODEV; in set_proto_ctx_sseu()
827 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), in set_proto_ctx_sseu()
829 return -EFAULT; in set_proto_ctx_sseu()
832 return -EINVAL; in set_proto_ctx_sseu()
835 return -EINVAL; in set_proto_ctx_sseu()
837 if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0)) in set_proto_ctx_sseu()
838 return -EINVAL; in set_proto_ctx_sseu()
840 if (pc->num_user_engines >= 0) { in set_proto_ctx_sseu()
844 if (idx >= pc->num_user_engines) in set_proto_ctx_sseu()
845 return -EINVAL; in set_proto_ctx_sseu()
847 pe = &pc->user_engines[idx]; in set_proto_ctx_sseu()
849 /* Only render engine supports RPCS configuration. */ in set_proto_ctx_sseu()
850 if (pe->engine->class != RENDER_CLASS) in set_proto_ctx_sseu()
851 return -EINVAL; in set_proto_ctx_sseu()
853 sseu = &pe->sseu; in set_proto_ctx_sseu()
855 /* Only render engine supports RPCS configuration. */ in set_proto_ctx_sseu()
857 return -EINVAL; in set_proto_ctx_sseu()
859 /* There is only one render engine */ in set_proto_ctx_sseu()
861 return -EINVAL; in set_proto_ctx_sseu()
863 sseu = &pc->legacy_rcs_sseu; in set_proto_ctx_sseu()
870 args->size = sizeof(user_sseu); in set_proto_ctx_sseu()
881 switch (args->param) { in set_proto_ctx_param()
883 if (args->size) in set_proto_ctx_param()
884 ret = -EINVAL; in set_proto_ctx_param()
885 else if (args->value) in set_proto_ctx_param()
886 pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE); in set_proto_ctx_param()
888 pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE); in set_proto_ctx_param()
892 if (args->size) in set_proto_ctx_param()
893 ret = -EINVAL; in set_proto_ctx_param()
894 else if (!capable(CAP_SYS_ADMIN) && !args->value) in set_proto_ctx_param()
895 ret = -EPERM; in set_proto_ctx_param()
896 else if (args->value) in set_proto_ctx_param()
897 pc->user_flags |= BIT(UCONTEXT_BANNABLE); in set_proto_ctx_param()
898 else if (pc->uses_protected_content) in set_proto_ctx_param()
899 ret = -EPERM; in set_proto_ctx_param()
901 pc->user_flags &= ~BIT(UCONTEXT_BANNABLE); in set_proto_ctx_param()
905 if (args->size) in set_proto_ctx_param()
906 ret = -EINVAL; in set_proto_ctx_param()
907 else if (!args->value) in set_proto_ctx_param()
908 pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE); in set_proto_ctx_param()
909 else if (pc->uses_protected_content) in set_proto_ctx_param()
910 ret = -EPERM; in set_proto_ctx_param()
912 pc->user_flags |= BIT(UCONTEXT_RECOVERABLE); in set_proto_ctx_param()
916 ret = validate_priority(fpriv->i915, args); in set_proto_ctx_param()
918 pc->sched.priority = args->value; in set_proto_ctx_param()
934 if (args->size) in set_proto_ctx_param()
935 ret = -EINVAL; in set_proto_ctx_param()
937 ret = proto_context_set_persistence(fpriv->i915, pc, in set_proto_ctx_param()
938 args->value); in set_proto_ctx_param()
942 ret = proto_context_set_protected(fpriv->i915, pc, in set_proto_ctx_param()
943 args->value); in set_proto_ctx_param()
950 ret = -EINVAL; in set_proto_ctx_param()
963 GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); in intel_context_set_gem()
964 RCU_INIT_POINTER(ce->gem_context, ctx); in intel_context_set_gem()
968 if (ce->engine->class == COMPUTE_CLASS) in intel_context_set_gem()
969 ce->ring_size = SZ_512K; in intel_context_set_gem()
971 ce->ring_size = SZ_16K; in intel_context_set_gem()
973 i915_vm_put(ce->vm); in intel_context_set_gem()
974 ce->vm = i915_gem_context_get_eb_vm(ctx); in intel_context_set_gem()
976 if (ctx->sched.priority >= I915_PRIORITY_NORMAL && in intel_context_set_gem()
977 intel_engine_has_timeslices(ce->engine) && in intel_context_set_gem()
978 intel_engine_has_semaphores(ce->engine)) in intel_context_set_gem()
979 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); in intel_context_set_gem()
982 ctx->i915->params.request_timeout_ms) { in intel_context_set_gem()
983 unsigned int timeout_ms = ctx->i915->params.request_timeout_ms; in intel_context_set_gem()
989 if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS)) in intel_context_set_gem()
997 while (count--) { in __unpin_engines()
998 struct intel_context *ce = e->engines[count], *child; in __unpin_engines()
1000 if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags)) in __unpin_engines()
1011 __unpin_engines(e, e->num_engines); in unpin_engines()
1016 while (count--) { in __free_engines()
1017 if (!e->engines[count]) in __free_engines()
1020 intel_context_put(e->engines[count]); in __free_engines()
1027 __free_engines(e, e->num_engines); in free_engines()
1035 i915_sw_fence_fini(&engines->fence); in free_engines_rcu()
1050 unsigned int class = ce->engine->uabi_class; in accumulate_runtime()
1052 GEM_BUG_ON(class >= ARRAY_SIZE(client->past_runtime)); in accumulate_runtime()
1054 &client->past_runtime[class]); in accumulate_runtime()
1063 struct i915_gem_context *ctx = engines->ctx; in engines_notify()
1067 if (!list_empty(&engines->link)) { in engines_notify()
1070 spin_lock_irqsave(&ctx->stale.lock, flags); in engines_notify()
1071 list_del(&engines->link); in engines_notify()
1072 spin_unlock_irqrestore(&ctx->stale.lock, flags); in engines_notify()
1074 accumulate_runtime(ctx->client, engines); in engines_notify()
1080 init_rcu_head(&engines->rcu); in engines_notify()
1081 call_rcu(&engines->rcu, free_engines_rcu); in engines_notify()
1096 i915_sw_fence_init(&e->fence, engines_notify); in alloc_engines()
1103 const unsigned int max = I915_NUM_ENGINES; in default_engines() local
1107 e = alloc_engines(max); in default_engines()
1109 return ERR_PTR(-ENOMEM); in default_engines()
1111 for_each_uabi_engine(engine, ctx->i915) { in default_engines()
1116 if (engine->legacy_idx == INVALID_ENGINE) in default_engines()
1119 GEM_BUG_ON(engine->legacy_idx >= max); in default_engines()
1120 GEM_BUG_ON(e->engines[engine->legacy_idx]); in default_engines()
1128 e->engines[engine->legacy_idx] = ce; in default_engines()
1129 e->num_engines = max(e->num_engines, engine->legacy_idx + 1); in default_engines()
1131 if (engine->class == RENDER_CLASS) in default_engines()
1167 set_bit(CONTEXT_PERMA_PIN, &ce->flags); in perma_pin_contexts()
1192 return ERR_PTR(-ENOMEM); in user_engines()
1193 e->num_engines = num_engines; in user_engines()
1226 e->engines[n] = ce; in user_engines()
1228 ret = intel_context_set_gem(ce, ctx, pe->sseu); in user_engines()
1234 ret = intel_context_set_gem(child, ctx, pe->sseu); in user_engines()
1247 * could move the perma-pin step to the backend function in user_engines()
1275 spin_lock(&ctx->i915->gem.contexts.lock); in i915_gem_context_release_work()
1276 list_del(&ctx->link); in i915_gem_context_release_work()
1277 spin_unlock(&ctx->i915->gem.contexts.lock); in i915_gem_context_release_work()
1279 if (ctx->syncobj) in i915_gem_context_release_work()
1280 drm_syncobj_put(ctx->syncobj); in i915_gem_context_release_work()
1282 vm = ctx->vm; in i915_gem_context_release_work()
1286 if (ctx->pxp_wakeref) in i915_gem_context_release_work()
1287 intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref); in i915_gem_context_release_work()
1289 if (ctx->client) in i915_gem_context_release_work()
1290 i915_drm_client_put(ctx->client); in i915_gem_context_release_work()
1292 mutex_destroy(&ctx->engines_mutex); in i915_gem_context_release_work()
1293 mutex_destroy(&ctx->lut_mutex); in i915_gem_context_release_work()
1295 put_pid(ctx->pid); in i915_gem_context_release_work()
1296 mutex_destroy(&ctx->mutex); in i915_gem_context_release_work()
1305 queue_work(ctx->i915->wq, &ctx->release_work); in i915_gem_context_release()
1311 return rcu_dereference_protected(ctx->engines, true); in __context_engines_static()
1317 intel_gt_handle_error(engine->gt, engine->mask, 0, in __reset_context()
1318 "context closure in %s", ctx->name); in __reset_context()
1348 if (!ce->timeline) in active_engine()
1352 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference in active_engine()
1354 * (and onto a new timeline->requests list). in active_engine()
1357 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { in active_engine()
1366 if (likely(rcu_access_pointer(rq->timeline) == ce->timeline)) in active_engine()
1386 * engine will be mapped to multiple engines, and using ctx->engine[] in kill_engines()
1413 __reset_context(engines->ctx, engine); in kill_engines()
1421 spin_lock_irq(&ctx->stale.lock); in kill_context()
1423 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) { in kill_context()
1424 if (!i915_sw_fence_await(&pos->fence)) { in kill_context()
1425 list_del_init(&pos->link); in kill_context()
1429 spin_unlock_irq(&ctx->stale.lock); in kill_context()
1431 kill_engines(pos, !ctx->i915->params.enable_hangcheck, in kill_context()
1434 spin_lock_irq(&ctx->stale.lock); in kill_context()
1435 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence)); in kill_context()
1437 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */ in kill_context()
1439 i915_sw_fence_complete(&pos->fence); in kill_context()
1441 spin_unlock_irq(&ctx->stale.lock); in kill_context()
1450 INIT_LIST_HEAD(&engines->link); in engines_idle_release()
1452 engines->ctx = i915_gem_context_get(ctx); in engines_idle_release()
1463 err = i915_sw_fence_await_active(&engines->fence, in engines_idle_release()
1464 &ce->active, in engines_idle_release()
1471 spin_lock_irq(&ctx->stale.lock); in engines_idle_release()
1473 list_add_tail(&engines->link, &ctx->stale.engines); in engines_idle_release()
1474 spin_unlock_irq(&ctx->stale.lock); in engines_idle_release()
1477 if (list_empty(&engines->link)) /* raced, already closed */ in engines_idle_release()
1481 i915_sw_fence_commit(&engines->fence); in engines_idle_release()
1490 s = strrchr(ctx->name, '['); in set_closed_name()
1506 mutex_lock(&ctx->engines_mutex); in context_close()
1508 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1)); in context_close()
1510 mutex_unlock(&ctx->engines_mutex); in context_close()
1512 mutex_lock(&ctx->mutex); in context_close()
1523 ctx->file_priv = ERR_PTR(-EBADF); in context_close()
1525 client = ctx->client; in context_close()
1527 spin_lock(&client->ctx_lock); in context_close()
1528 list_del_rcu(&ctx->client_link); in context_close()
1529 spin_unlock(&client->ctx_lock); in context_close()
1532 mutex_unlock(&ctx->mutex); in context_close()
1553 * Only contexts that are short-lived [that will expire or be in __context_set_persistence()
1557 if (!ctx->i915->params.enable_hangcheck) in __context_set_persistence()
1558 return -EINVAL; in __context_set_persistence()
1562 /* To cancel a context we use "preempt-to-idle" */ in __context_set_persistence()
1563 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) in __context_set_persistence()
1564 return -ENODEV; in __context_set_persistence()
1569 * If the per-engine reset fails, all hope is lost! We resort in __context_set_persistence()
1579 if (!intel_has_reset_engine(to_gt(ctx->i915))) in __context_set_persistence()
1580 return -ENODEV; in __context_set_persistence()
1600 return ERR_PTR(-ENOMEM); in i915_gem_create_context()
1602 kref_init(&ctx->ref); in i915_gem_create_context()
1603 ctx->i915 = i915; in i915_gem_create_context()
1604 ctx->sched = pc->sched; in i915_gem_create_context()
1605 mutex_init(&ctx->mutex); in i915_gem_create_context()
1606 INIT_LIST_HEAD(&ctx->link); in i915_gem_create_context()
1607 INIT_WORK(&ctx->release_work, i915_gem_context_release_work); in i915_gem_create_context()
1609 spin_lock_init(&ctx->stale.lock); in i915_gem_create_context()
1610 INIT_LIST_HEAD(&ctx->stale.engines); in i915_gem_create_context()
1612 if (pc->vm) { in i915_gem_create_context()
1613 vm = i915_vm_get(pc->vm); in i915_gem_create_context()
1619 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n", in i915_gem_create_context()
1624 vm = &ppgtt->vm; in i915_gem_create_context()
1627 ctx->vm = vm; in i915_gem_create_context()
1629 mutex_init(&ctx->engines_mutex); in i915_gem_create_context()
1630 if (pc->num_user_engines >= 0) { in i915_gem_create_context()
1632 e = user_engines(ctx, pc->num_user_engines, pc->user_engines); in i915_gem_create_context()
1635 e = default_engines(ctx, pc->legacy_rcs_sseu); in i915_gem_create_context()
1641 RCU_INIT_POINTER(ctx->engines, e); in i915_gem_create_context()
1643 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); in i915_gem_create_context()
1644 mutex_init(&ctx->lut_mutex); in i915_gem_create_context()
1649 ctx->remap_slice = ALL_L3_SLICES(i915); in i915_gem_create_context()
1651 ctx->user_flags = pc->user_flags; in i915_gem_create_context()
1653 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) in i915_gem_create_context()
1654 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; in i915_gem_create_context()
1656 if (pc->single_timeline) { in i915_gem_create_context()
1657 err = drm_syncobj_create(&ctx->syncobj, in i915_gem_create_context()
1664 if (pc->uses_protected_content) { in i915_gem_create_context()
1665 ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm); in i915_gem_create_context()
1666 ctx->uses_protected_content = true; in i915_gem_create_context()
1676 if (ctx->vm) in i915_gem_create_context()
1677 i915_vm_put(ctx->vm); in i915_gem_create_context()
1685 spin_lock_init(&gc->lock); in init_contexts()
1686 INIT_LIST_HEAD(&gc->list); in init_contexts()
1691 init_contexts(&i915->gem.contexts); in i915_gem_init__contexts()
1702 struct drm_i915_private *i915 = ctx->i915; in gem_context_register()
1705 ctx->file_priv = fpriv; in gem_context_register()
1707 ctx->pid = get_task_pid(current, PIDTYPE_PID); in gem_context_register()
1708 ctx->client = i915_drm_client_get(fpriv->client); in gem_context_register()
1710 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", in gem_context_register()
1711 current->comm, pid_nr(ctx->pid)); in gem_context_register()
1713 spin_lock(&ctx->client->ctx_lock); in gem_context_register()
1714 list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list); in gem_context_register()
1715 spin_unlock(&ctx->client->ctx_lock); in gem_context_register()
1717 spin_lock(&i915->gem.contexts.lock); in gem_context_register()
1718 list_add_tail(&ctx->link, &i915->gem.contexts.list); in gem_context_register()
1719 spin_unlock(&i915->gem.contexts.lock); in gem_context_register()
1722 old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL); in gem_context_register()
1729 struct drm_i915_file_private *file_priv = file->driver_priv; in i915_gem_context_open()
1734 mutex_init(&file_priv->proto_context_lock); in i915_gem_context_open()
1735 xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC); in i915_gem_context_open()
1738 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1); in i915_gem_context_open()
1741 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1); in i915_gem_context_open()
1761 xa_destroy(&file_priv->vm_xa); in i915_gem_context_open()
1762 xa_destroy(&file_priv->context_xa); in i915_gem_context_open()
1763 xa_destroy(&file_priv->proto_context_xa); in i915_gem_context_open()
1764 mutex_destroy(&file_priv->proto_context_lock); in i915_gem_context_open()
1770 struct drm_i915_file_private *file_priv = file->driver_priv; in i915_gem_context_close()
1776 xa_for_each(&file_priv->proto_context_xa, idx, pc) in i915_gem_context_close()
1777 proto_context_close(file_priv->i915, pc); in i915_gem_context_close()
1778 xa_destroy(&file_priv->proto_context_xa); in i915_gem_context_close()
1779 mutex_destroy(&file_priv->proto_context_lock); in i915_gem_context_close()
1781 xa_for_each(&file_priv->context_xa, idx, ctx) in i915_gem_context_close()
1783 xa_destroy(&file_priv->context_xa); in i915_gem_context_close()
1785 xa_for_each(&file_priv->vm_xa, idx, vm) in i915_gem_context_close()
1787 xa_destroy(&file_priv->vm_xa); in i915_gem_context_close()
1795 struct drm_i915_file_private *file_priv = file->driver_priv; in i915_gem_vm_create_ioctl()
1801 return -ENODEV; in i915_gem_vm_create_ioctl()
1803 if (args->flags) in i915_gem_vm_create_ioctl()
1804 return -EINVAL; in i915_gem_vm_create_ioctl()
1810 if (args->extensions) { in i915_gem_vm_create_ioctl()
1811 err = i915_user_extensions(u64_to_user_ptr(args->extensions), in i915_gem_vm_create_ioctl()
1818 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm, in i915_gem_vm_create_ioctl()
1824 args->vm_id = id; in i915_gem_vm_create_ioctl()
1828 i915_vm_put(&ppgtt->vm); in i915_gem_vm_create_ioctl()
1835 struct drm_i915_file_private *file_priv = file->driver_priv; in i915_gem_vm_destroy_ioctl()
1839 if (args->flags) in i915_gem_vm_destroy_ioctl()
1840 return -EINVAL; in i915_gem_vm_destroy_ioctl()
1842 if (args->extensions) in i915_gem_vm_destroy_ioctl()
1843 return -EINVAL; in i915_gem_vm_destroy_ioctl()
1845 vm = xa_erase(&file_priv->vm_xa, args->vm_id); in i915_gem_vm_destroy_ioctl()
1847 return -ENOENT; in i915_gem_vm_destroy_ioctl()
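
i915_gem_vm_create_ioctl()/i915_gem_vm_destroy_ioctl() manage standalone ppGTT address spaces that contexts can adopt via I915_CONTEXT_PARAM_VM (see set_proto_ctx_vm() above, which also explains the -ENOENT on a vm_id with non-zero upper 32 bits). A hedged sketch, same includes as the earlier sketches; note the VM assignment is only honored while the context is still in its proto-context state, i.e. before first use (fd/ctx_id assumed):

static int attach_new_vm(int fd, uint32_t ctx_id, uint32_t *vm_id)
{
	struct drm_i915_gem_vm_control vm = { .flags = 0 };
	struct drm_i915_gem_context_param p = { 0 };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm))
		return -1;
	*vm_id = vm.vm_id;

	p.ctx_id = ctx_id;
	p.param = I915_CONTEXT_PARAM_VM;
	p.value = vm.vm_id;
	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}
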
1862 return -ENODEV; in get_ppgtt()
1864 vm = ctx->vm; in get_ppgtt()
1875 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL); in get_ppgtt()
1882 args->value = id; in get_ppgtt()
1883 args->size = 0; in get_ppgtt()
1893 const struct sseu_dev_info *device = &gt->info.sseu; in i915_gem_user_to_context_sseu()
1894 struct drm_i915_private *i915 = gt->i915; in i915_gem_user_to_context_sseu()
1898 if (!user->slice_mask || !user->subslice_mask || in i915_gem_user_to_context_sseu()
1899 !user->min_eus_per_subslice || !user->max_eus_per_subslice) in i915_gem_user_to_context_sseu()
1900 return -EINVAL; in i915_gem_user_to_context_sseu()
1902 /* Max must not be less than min. */ in i915_gem_user_to_context_sseu()
1903 if (user->max_eus_per_subslice < user->min_eus_per_subslice) in i915_gem_user_to_context_sseu()
1904 return -EINVAL; in i915_gem_user_to_context_sseu()
1910 if (overflows_type(user->slice_mask, context->slice_mask) || in i915_gem_user_to_context_sseu()
1911 overflows_type(user->subslice_mask, context->subslice_mask) || in i915_gem_user_to_context_sseu()
1912 overflows_type(user->min_eus_per_subslice, in i915_gem_user_to_context_sseu()
1913 context->min_eus_per_subslice) || in i915_gem_user_to_context_sseu()
1914 overflows_type(user->max_eus_per_subslice, in i915_gem_user_to_context_sseu()
1915 context->max_eus_per_subslice)) in i915_gem_user_to_context_sseu()
1916 return -EINVAL; in i915_gem_user_to_context_sseu()
1919 if (user->slice_mask & ~device->slice_mask) in i915_gem_user_to_context_sseu()
1920 return -EINVAL; in i915_gem_user_to_context_sseu()
1922 if (user->subslice_mask & ~dev_subslice_mask) in i915_gem_user_to_context_sseu()
1923 return -EINVAL; in i915_gem_user_to_context_sseu()
1925 if (user->max_eus_per_subslice > device->max_eus_per_subslice) in i915_gem_user_to_context_sseu()
1926 return -EINVAL; in i915_gem_user_to_context_sseu()
1928 context->slice_mask = user->slice_mask; in i915_gem_user_to_context_sseu()
1929 context->subslice_mask = user->subslice_mask; in i915_gem_user_to_context_sseu()
1930 context->min_eus_per_subslice = user->min_eus_per_subslice; in i915_gem_user_to_context_sseu()
1931 context->max_eus_per_subslice = user->max_eus_per_subslice; in i915_gem_user_to_context_sseu()
1935 unsigned int hw_s = hweight8(device->slice_mask); in i915_gem_user_to_context_sseu()
1937 unsigned int req_s = hweight8(context->slice_mask); in i915_gem_user_to_context_sseu()
1938 unsigned int req_ss = hweight8(context->subslice_mask); in i915_gem_user_to_context_sseu()
1945 return -EINVAL; in i915_gem_user_to_context_sseu()
1952 return -EINVAL; in i915_gem_user_to_context_sseu()
1961 return -EINVAL; in i915_gem_user_to_context_sseu()
1963 /* ABI restriction - VME use case only. */ in i915_gem_user_to_context_sseu()
1967 return -EINVAL; in i915_gem_user_to_context_sseu()
1975 return -EINVAL; in i915_gem_user_to_context_sseu()
1978 if ((user->min_eus_per_subslice != in i915_gem_user_to_context_sseu()
1979 device->max_eus_per_subslice) || in i915_gem_user_to_context_sseu()
1980 (user->max_eus_per_subslice != in i915_gem_user_to_context_sseu()
1981 device->max_eus_per_subslice)) in i915_gem_user_to_context_sseu()
1982 return -EINVAL; in i915_gem_user_to_context_sseu()
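
i915_gem_user_to_context_sseu() validates a user-requested slice/subslice/EU configuration against the hardware topology, with the gen11 special cases noted above restricting it to the VME use case. A hedged userspace sketch, same includes as the earlier sketches, that reads the render engine's current SSEU configuration and then requests a single slice (fd/ctx_id assumed):

static int set_render_single_slice(int fd, uint32_t ctx_id)
{
	struct drm_i915_gem_context_param_sseu sseu = {
		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
	};
	struct drm_i915_gem_context_param p = {
		.ctx_id = ctx_id,
		.param = I915_CONTEXT_PARAM_SSEU,
		.size = sizeof(sseu),
		.value = (uintptr_t)&sseu,
	};

	/* Read back the current configuration first... */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p))
		return -1;

	/* ...then shrink to one slice, keeping the returned subslice and
	 * EU values so the min/max checks above are satisfied. */
	sseu.slice_mask = 0x1;
	return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}
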
1991 struct drm_i915_private *i915 = ctx->i915; in set_sseu()
1998 if (args->size < sizeof(user_sseu)) in set_sseu()
1999 return -EINVAL; in set_sseu()
2002 return -ENODEV; in set_sseu()
2004 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), in set_sseu()
2006 return -EFAULT; in set_sseu()
2009 return -EINVAL; in set_sseu()
2012 return -EINVAL; in set_sseu()
2022 /* Only render engine supports RPCS configuration. */ in set_sseu()
2023 if (ce->engine->class != RENDER_CLASS) { in set_sseu()
2024 ret = -ENODEV; in set_sseu()
2028 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu); in set_sseu()
2036 args->size = sizeof(user_sseu); in set_sseu()
2047 if (args->size) in set_persistence()
2048 return -EINVAL; in set_persistence()
2050 return __context_set_persistence(ctx, args->value); in set_persistence()
2060 err = validate_priority(ctx->i915, args); in set_priority()
2064 ctx->sched.priority = args->value; in set_priority()
2067 if (!intel_engine_has_timeslices(ce->engine)) in set_priority()
2070 if (ctx->sched.priority >= I915_PRIORITY_NORMAL && in set_priority()
2071 intel_engine_has_semaphores(ce->engine)) in set_priority()
2084 args->size = 0; in get_protected()
2085 args->value = i915_gem_context_uses_protected_content(ctx); in get_protected()
2096 switch (args->param) { in ctx_setparam()
2098 if (args->size) in ctx_setparam()
2099 ret = -EINVAL; in ctx_setparam()
2100 else if (args->value) in ctx_setparam()
2107 if (args->size) in ctx_setparam()
2108 ret = -EINVAL; in ctx_setparam()
2109 else if (!capable(CAP_SYS_ADMIN) && !args->value) in ctx_setparam()
2110 ret = -EPERM; in ctx_setparam()
2111 else if (args->value) in ctx_setparam()
2114 ret = -EPERM; /* can't clear this for protected contexts */ in ctx_setparam()
2120 if (args->size) in ctx_setparam()
2121 ret = -EINVAL; in ctx_setparam()
2122 else if (!args->value) in ctx_setparam()
2125 ret = -EPERM; /* can't set this for protected contexts */ in ctx_setparam()
2149 ret = -EINVAL; in ctx_setparam()
2167 return -EFAULT; in create_setparam()
2170 return -EINVAL; in create_setparam()
2172 return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param); in create_setparam()
2177 return -EINVAL; in invalid_ext()
2187 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; in client_is_banned()
2196 ctx = xa_load(&file_priv->context_xa, id); in __context_lookup()
2197 if (ctx && !kref_get_unless_zero(&ctx->ref)) in __context_lookup()
2211 lockdep_assert_held(&file_priv->proto_context_lock); in finalize_create_context_locked()
2213 ctx = i915_gem_create_context(file_priv->i915, pc); in finalize_create_context_locked()
2228 old = xa_erase(&file_priv->proto_context_xa, id); in finalize_create_context_locked()
2230 proto_context_close(file_priv->i915, pc); in finalize_create_context_locked()
2245 mutex_lock(&file_priv->proto_context_lock); in i915_gem_context_lookup()
2249 pc = xa_load(&file_priv->proto_context_xa, id); in i915_gem_context_lookup()
2251 ctx = ERR_PTR(-ENOENT); in i915_gem_context_lookup()
2255 mutex_unlock(&file_priv->proto_context_lock); in i915_gem_context_lookup()
2269 if (!DRIVER_CAPS(i915)->has_logical_contexts) in i915_gem_context_create_ioctl()
2270 return -ENODEV; in i915_gem_context_create_ioctl()
2272 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN) in i915_gem_context_create_ioctl()
2273 return -EINVAL; in i915_gem_context_create_ioctl()
2279 ext_data.fpriv = file->driver_priv; in i915_gem_context_create_ioctl()
2281 drm_dbg(&i915->drm, in i915_gem_context_create_ioctl()
2283 current->comm, task_pid_nr(current)); in i915_gem_context_create_ioctl()
2284 return -EIO; in i915_gem_context_create_ioctl()
2287 ext_data.pc = proto_context_create(i915, args->flags); in i915_gem_context_create_ioctl()
2291 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) { in i915_gem_context_create_ioctl()
2292 ret = i915_user_extensions(u64_to_user_ptr(args->extensions), in i915_gem_context_create_ioctl()
2304 ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL, in i915_gem_context_create_ioctl()
2323 args->ctx_id = id; in i915_gem_context_create_ioctl()
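
i915_gem_context_create_ioctl() accepts an extension chain when I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS is set; each I915_CONTEXT_CREATE_EXT_SETPARAM link is funneled through set_proto_ctx_param() before the context is finalized. A hedged sketch creating a protected-content context, which per proto_context_set_protected() above must be non-recoverable (same includes as before; PXP support assumed, else -ENODEV):

static int create_protected_ctx(int fd, uint32_t *ctx_id)
{
	struct drm_i915_gem_context_create_ext_setparam p_protected = {
		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
		.param = {
			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
			.value = 1,
		},
	};
	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
		.base = {
			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
			.next_extension = (uintptr_t)&p_protected,
		},
		.param = {
			.param = I915_CONTEXT_PARAM_RECOVERABLE,
			.value = 0,
		},
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uintptr_t)&p_norecover,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
		return -1;
	*ctx_id = create.ctx_id;
	return 0;
}
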
2336 struct drm_i915_file_private *file_priv = file->driver_priv; in i915_gem_context_destroy_ioctl()
2340 if (args->pad != 0) in i915_gem_context_destroy_ioctl()
2341 return -EINVAL; in i915_gem_context_destroy_ioctl()
2343 if (!args->ctx_id) in i915_gem_context_destroy_ioctl()
2344 return -ENOENT; in i915_gem_context_destroy_ioctl()
2346 /* We need to hold the proto-context lock here to prevent races in i915_gem_context_destroy_ioctl()
2349 mutex_lock(&file_priv->proto_context_lock); in i915_gem_context_destroy_ioctl()
2350 ctx = xa_erase(&file_priv->context_xa, args->ctx_id); in i915_gem_context_destroy_ioctl()
2351 pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id); in i915_gem_context_destroy_ioctl()
2352 mutex_unlock(&file_priv->proto_context_lock); in i915_gem_context_destroy_ioctl()
2355 return -ENOENT; in i915_gem_context_destroy_ioctl()
2359 proto_context_close(file_priv->i915, pc); in i915_gem_context_destroy_ioctl()
2375 if (args->size == 0) in get_sseu()
2377 else if (args->size < sizeof(user_sseu)) in get_sseu()
2378 return -EINVAL; in get_sseu()
2380 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), in get_sseu()
2382 return -EFAULT; in get_sseu()
2385 return -EINVAL; in get_sseu()
2388 return -EINVAL; in get_sseu()
2404 user_sseu.slice_mask = ce->sseu.slice_mask; in get_sseu()
2405 user_sseu.subslice_mask = ce->sseu.subslice_mask; in get_sseu()
2406 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice; in get_sseu()
2407 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice; in get_sseu()
2412 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, in get_sseu()
2414 return -EFAULT; in get_sseu()
2417 args->size = sizeof(user_sseu); in get_sseu()
2425 struct drm_i915_file_private *file_priv = file->driver_priv; in i915_gem_context_getparam_ioctl()
2431 ctx = i915_gem_context_lookup(file_priv, args->ctx_id); in i915_gem_context_getparam_ioctl()
2435 switch (args->param) { in i915_gem_context_getparam_ioctl()
2437 args->size = 0; in i915_gem_context_getparam_ioctl()
2439 args->value = vm->total; in i915_gem_context_getparam_ioctl()
2445 args->size = 0; in i915_gem_context_getparam_ioctl()
2446 args->value = i915_gem_context_no_error_capture(ctx); in i915_gem_context_getparam_ioctl()
2450 args->size = 0; in i915_gem_context_getparam_ioctl()
2451 args->value = i915_gem_context_is_bannable(ctx); in i915_gem_context_getparam_ioctl()
2455 args->size = 0; in i915_gem_context_getparam_ioctl()
2456 args->value = i915_gem_context_is_recoverable(ctx); in i915_gem_context_getparam_ioctl()
2460 args->size = 0; in i915_gem_context_getparam_ioctl()
2461 args->value = ctx->sched.priority; in i915_gem_context_getparam_ioctl()
2473 args->size = 0; in i915_gem_context_getparam_ioctl()
2474 args->value = i915_gem_context_is_persistent(ctx); in i915_gem_context_getparam_ioctl()
2486 ret = -EINVAL; in i915_gem_context_getparam_ioctl()
2497 struct drm_i915_file_private *file_priv = file->driver_priv; in i915_gem_context_setparam_ioctl()
2503 mutex_lock(&file_priv->proto_context_lock); in i915_gem_context_setparam_ioctl()
2504 ctx = __context_lookup(file_priv, args->ctx_id); in i915_gem_context_setparam_ioctl()
2506 pc = xa_load(&file_priv->proto_context_xa, args->ctx_id); in i915_gem_context_setparam_ioctl()
2512 WARN_ON(GRAPHICS_VER(file_priv->i915) > 12); in i915_gem_context_setparam_ioctl()
2515 ret = -ENOENT; in i915_gem_context_setparam_ioctl()
2518 mutex_unlock(&file_priv->proto_context_lock); in i915_gem_context_setparam_ioctl()
2535 if (args->flags || args->pad) in i915_gem_context_reset_stats_ioctl()
2536 return -EINVAL; in i915_gem_context_reset_stats_ioctl()
2538 ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id); in i915_gem_context_reset_stats_ioctl()
2550 args->reset_count = i915_reset_count(&i915->gpu_error); in i915_gem_context_reset_stats_ioctl()
2552 args->reset_count = 0; in i915_gem_context_reset_stats_ioctl()
2554 args->batch_active = atomic_read(&ctx->guilty_count); in i915_gem_context_reset_stats_ioctl()
2555 args->batch_pending = atomic_read(&ctx->active_count); in i915_gem_context_reset_stats_ioctl()
2561 /* GEM context-engines iterator: for_each_gem_engine() */
2565 const struct i915_gem_engines *e = it->engines; in i915_gem_engines_iter_next()
2572 if (it->idx >= e->num_engines) in i915_gem_engines_iter_next()
2575 ctx = e->engines[it->idx++]; in i915_gem_engines_iter_next()
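
i915_gem_engines_iter_next() backs the for_each_gem_engine() helper. A hedged sketch of its kernel-side usage, following the driver's own lock/iterate/unlock pattern (assumes a valid struct i915_gem_context *ctx):

struct i915_gem_engines_iter it;
struct intel_context *ce;

for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
	/* operate on each populated engine context 'ce' in the map */
}
i915_gem_context_unlock_engines(ctx);
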
2595 return -ENOMEM; in i915_gem_context_module_init()