Lines matching "cs", "-", and "out" in drivers/gpu/drm/i915/gt/selftest_workarounds.c
2 * SPDX-License-Identifier: MIT
41 err = -EIO; in request_add_sync()
54 err = -ETIMEDOUT; in request_add_spin()
68 wa_init_start(&lists->gt_wa_list, "GT_REF", "global"); in reference_lists_init()
69 gt_init_workarounds(gt->i915, &lists->gt_wa_list); in reference_lists_init()
70 wa_init_finish(&lists->gt_wa_list); in reference_lists_init()
73 struct i915_wa_list *wal = &lists->engine[id].wa_list; in reference_lists_init()
75 wa_init_start(wal, "REF", engine->name); in reference_lists_init()
80 &lists->engine[id].ctx_wa_list, in reference_lists_init()
92 intel_wa_list_free(&lists->engine[id].wa_list); in reference_lists_fini()
94 intel_wa_list_free(&lists->gt_wa_list); in reference_lists_fini()
100 const u32 base = engine->mmio_base; in read_nonprivs()
104 u32 srm, *cs; in read_nonprivs() local
108 result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); in read_nonprivs()
114 cs = i915_gem_object_pin_map(result, I915_MAP_WB); in read_nonprivs()
115 if (IS_ERR(cs)) { in read_nonprivs()
116 err = PTR_ERR(cs); in read_nonprivs()
119 memset(cs, 0xc5, PAGE_SIZE); in read_nonprivs()
123 vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL); in read_nonprivs()
140 err = i915_request_await_object(rq, vma->obj, true); in read_nonprivs()
148 if (INTEL_GEN(ctx->i915) >= 8) in read_nonprivs()
151 cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS); in read_nonprivs()
152 if (IS_ERR(cs)) { in read_nonprivs()
153 err = PTR_ERR(cs); in read_nonprivs()
158 *cs++ = srm; in read_nonprivs()
159 *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i)); in read_nonprivs()
160 *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i; in read_nonprivs()
161 *cs++ = 0; in read_nonprivs()
163 intel_ring_advance(rq, cs); in read_nonprivs()
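
The stream emitted above is the 4-dword gen8+ MI_STORE_REGISTER_MEM packet, one per NONPRIV slot: opcode, register offset, then a 64-bit store address split across two dwords, which is why intel_ring_begin() reserved 4 * RING_MAX_NONPRIV_SLOTS dwords at line 151. A minimal userspace model of the same emission; the MI_* encodings and the 0x4d0 slot base follow i915's headers as I recall them, while the mmio_base and GGTT offset are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define MI_INSTR(opcode, flags)     (((opcode) << 23) | (flags))
#define MI_STORE_REGISTER_MEM_GEN8  MI_INSTR(0x24, 2)
#define MI_SRM_LRM_GLOBAL_GTT       (1 << 22)
#define RING_MAX_NONPRIV_SLOTS      12

int main(void)
{
	uint32_t cmds[4 * RING_MAX_NONPRIV_SLOTS], *cs = cmds;
	uint32_t base = 0x2000;     /* hypothetical engine mmio_base */
	uint32_t scratch = 0x10000; /* hypothetical GGTT offset of the result page */
	uint32_t srm = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
	int i;

	for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
		*cs++ = srm;                            /* 4-dword SRM packet */
		*cs++ = base + 0x4d0 + i * 4;           /* RING_FORCE_TO_NONPRIV(base, i) */
		*cs++ = scratch + sizeof(uint32_t) * i; /* store target, low 32 bits */
		*cs++ = 0;                              /* store target, high 32 bits */
	}

	for (i = 0; i < 8; i++)
		printf("dw%d: 0x%08x\n", i, cmds[i]);
	return 0;
}
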
182 i915_reg_t reg = i < engine->whitelist.count ? in get_whitelist_reg()
183 engine->whitelist.list[i].reg : in get_whitelist_reg()
184 RING_NOPID(engine->mmio_base); in get_whitelist_reg()
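
get_whitelist_reg() pads reads past whitelist.count with RING_NOPID, so all RING_MAX_NONPRIV_SLOTS readbacks target a harmless register. A sketch of the idiom, assuming RING_NOPID sits at mmio_base + 0x94 as in i915_reg.h:

#include <stdint.h>
#include <stdio.h>

/* slots beyond the whitelist fall back to a harmless register */
static uint32_t whitelist_reg_or_nopid(const uint32_t *list, int count,
				       int i, uint32_t base)
{
	return i < count ? list[i] : base + 0x94; /* RING_NOPID(base), assumed */
}

int main(void)
{
	uint32_t wl[] = { 0x24d0, 0x24d4 };
	int i;

	for (i = 0; i < 4; i++)
		printf("slot%d: 0x%x\n", i, whitelist_reg_or_nopid(wl, 2, i, 0x2000));
	return 0;
}
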
218 intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */ in check_whitelist()
221 if (intel_gt_is_wedged(engine->gt)) in check_whitelist()
222 err = -EIO; in check_whitelist()
241 err = -EINVAL; in check_whitelist()
254 intel_gt_reset(engine->gt, engine->mask, "live_workarounds"); in do_device_reset()
296 struct drm_i915_private *i915 = engine->i915; in check_whitelist_across_reset()
303 engine->whitelist.count, engine->name, name); in check_whitelist_across_reset()
309 err = igt_spinner_init(&spin, engine->gt); in check_whitelist_across_reset()
323 with_intel_runtime_pm(engine->uncore->rpm, wakeref) in check_whitelist_across_reset()
368 obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE); in create_batch()
404 enum intel_platform platform = INTEL_INFO(engine->i915)->platform; in wo_register()
422 reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK; in timestamp()
445 int count = engine->whitelist.count; in whitelist_writable_count()
448 for (i = 0; i < engine->whitelist.count; i++) { in whitelist_writable_count()
449 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in whitelist_writable_count()
452 count--; in whitelist_writable_count()
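
whitelist_writable_count() starts from whitelist.count and subtracts the slots that are read-only, based on the access-permission flags packed into the high bits of each entry. A userspace sketch, assuming the RD flag is bit 28 per i915_reg.h's RING_FORCE_TO_NONPRIV_ACCESS_RD (the real ro_register() check is generation-dependent):

#include <stdint.h>
#include <stdio.h>

#define NONPRIV_ACCESS_RD (1u << 28) /* RING_FORCE_TO_NONPRIV_ACCESS_RD, assumed */

/* analogue of whitelist_writable_count(): read-only slots don't count */
static int writable_count(const uint32_t *list, int count)
{
	int i, n = count;

	for (i = 0; i < count; i++)
		if (list[i] & NONPRIV_ACCESS_RD) /* ro_register() analogue */
			n--;
	return n;
}

int main(void)
{
	uint32_t wl[] = { 0x24d0, 0x24d4 | NONPRIV_ACCESS_RD, 0x24d8 };

	printf("%d writable of 3\n", writable_count(wl, 3));
	return 0;
}
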
486 struct intel_engine_cs *engine = ce->engine; in check_dirty_whitelist()
490 u32 *cs, *results; in check_dirty_whitelist() local
492 scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1); in check_dirty_whitelist()
496 batch = create_batch(ce->vm); in check_dirty_whitelist()
502 for (i = 0; i < engine->whitelist.count; i++) { in check_dirty_whitelist()
503 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in check_dirty_whitelist()
504 u64 addr = scratch->node.start; in check_dirty_whitelist()
524 if (INTEL_GEN(engine->i915) >= 8) in check_dirty_whitelist()
528 engine->name, reg); in check_dirty_whitelist()
530 cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); in check_dirty_whitelist()
531 if (IS_ERR(cs)) { in check_dirty_whitelist()
532 err = PTR_ERR(cs); in check_dirty_whitelist()
537 *cs++ = srm; in check_dirty_whitelist()
538 *cs++ = reg; in check_dirty_whitelist()
539 *cs++ = lower_32_bits(addr); in check_dirty_whitelist()
540 *cs++ = upper_32_bits(addr); in check_dirty_whitelist()
545 *cs++ = MI_LOAD_REGISTER_IMM(1); in check_dirty_whitelist()
546 *cs++ = reg; in check_dirty_whitelist()
547 *cs++ = values[v]; in check_dirty_whitelist()
550 *cs++ = srm; in check_dirty_whitelist()
551 *cs++ = reg; in check_dirty_whitelist()
552 *cs++ = lower_32_bits(addr + sizeof(u32) * idx); in check_dirty_whitelist()
553 *cs++ = upper_32_bits(addr + sizeof(u32) * idx); in check_dirty_whitelist()
558 *cs++ = MI_LOAD_REGISTER_IMM(1); in check_dirty_whitelist()
559 *cs++ = reg; in check_dirty_whitelist()
560 *cs++ = ~values[v]; in check_dirty_whitelist()
563 *cs++ = srm; in check_dirty_whitelist()
564 *cs++ = reg; in check_dirty_whitelist()
565 *cs++ = lower_32_bits(addr + sizeof(u32) * idx); in check_dirty_whitelist()
566 *cs++ = upper_32_bits(addr + sizeof(u32) * idx); in check_dirty_whitelist()
569 GEM_BUG_ON(idx * sizeof(u32) > scratch->size); in check_dirty_whitelist()
571 /* LRM original -- don't leave garbage in the context! */ in check_dirty_whitelist()
572 *cs++ = lrm; in check_dirty_whitelist()
573 *cs++ = reg; in check_dirty_whitelist()
574 *cs++ = lower_32_bits(addr); in check_dirty_whitelist()
575 *cs++ = upper_32_bits(addr); in check_dirty_whitelist()
577 *cs++ = MI_BATCH_BUFFER_END; in check_dirty_whitelist()
579 i915_gem_object_flush_map(batch->obj); in check_dirty_whitelist()
580 i915_gem_object_unpin_map(batch->obj); in check_dirty_whitelist()
581 intel_gt_chipset_flush(engine->gt); in check_dirty_whitelist()
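
Per register, the loop above lays out: one SRM saving the original value to scratch[0]; for each entry of values[] an LRI/SRM pair followed by the same pair for the complement; an LRM restoring the original; and MI_BATCH_BUFFER_END. That layout is why the scratch buffer at line 492 is sized 2 * ARRAY_SIZE(values) + 1 slots. A runnable userspace model of the stream, shrunk to two probe values; the register offset and scratch address are hypothetical, and the MI_* encodings follow i915's intel_gpu_commands.h as I recall them:

#include <stdint.h>
#include <stdio.h>

#define MI_INSTR(op, flags)        (((op) << 23) | (flags))
#define MI_LOAD_REGISTER_IMM(n)    MI_INSTR(0x22, 2 * (n) - 1)
#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
#define MI_LOAD_REGISTER_MEM_GEN8  MI_INSTR(0x29, 2)
#define MI_BATCH_BUFFER_END        (0x0a << 23)

int main(void)
{
	static const uint32_t values[] = { 0x00000000, 0xffffffff }; /* kernel table is larger */
	uint32_t batch[64], *cs = batch;
	uint64_t addr = 0x10000; /* hypothetical scratch GGTT address */
	uint32_t reg = 0x24d0;   /* hypothetical whitelisted register */
	unsigned int idx = 1, v, i;

	/* SRM: save the original register value to scratch[0] */
	*cs++ = MI_STORE_REGISTER_MEM_GEN8; *cs++ = reg;
	*cs++ = (uint32_t)addr; *cs++ = (uint32_t)(addr >> 32);

	for (v = 0; v < sizeof(values) / sizeof(values[0]); v++) {
		/* LRI the probe value, then SRM what actually stuck */
		*cs++ = MI_LOAD_REGISTER_IMM(1); *cs++ = reg; *cs++ = values[v];
		*cs++ = MI_STORE_REGISTER_MEM_GEN8; *cs++ = reg;
		*cs++ = (uint32_t)(addr + 4 * idx); *cs++ = (uint32_t)((addr + 4 * idx) >> 32);
		idx++;

		/* repeat with the complement */
		*cs++ = MI_LOAD_REGISTER_IMM(1); *cs++ = reg; *cs++ = ~values[v];
		*cs++ = MI_STORE_REGISTER_MEM_GEN8; *cs++ = reg;
		*cs++ = (uint32_t)(addr + 4 * idx); *cs++ = (uint32_t)((addr + 4 * idx) >> 32);
		idx++;
	}

	/* LRM original -- don't leave garbage in the context */
	*cs++ = MI_LOAD_REGISTER_MEM_GEN8; *cs++ = reg;
	*cs++ = (uint32_t)addr; *cs++ = (uint32_t)(addr >> 32);
	*cs++ = MI_BATCH_BUFFER_END;

	for (i = 0; i < (unsigned int)(cs - batch); i++)
		printf("dw%02u: 0x%08x\n", i, batch[i]);
	return 0;
}
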
589 if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ in check_dirty_whitelist()
590 err = engine->emit_init_breadcrumb(rq); in check_dirty_whitelist()
596 err = i915_request_await_object(rq, batch->obj, false); in check_dirty_whitelist()
604 err = i915_request_await_object(rq, scratch->obj, true); in check_dirty_whitelist()
612 err = engine->emit_bb_start(rq, in check_dirty_whitelist()
613 batch->node.start, PAGE_SIZE, in check_dirty_whitelist()
622 engine->name, reg); in check_dirty_whitelist()
623 intel_gt_set_wedged(engine->gt); in check_dirty_whitelist()
627 results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); in check_dirty_whitelist()
633 GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff); in check_dirty_whitelist()
639 engine->name, reg); in check_dirty_whitelist()
640 err = -EINVAL; in check_dirty_whitelist()
671 engine->name, err, reg); in check_dirty_whitelist()
674 pr_info("%s: Whitelisted read-only register: %x, original value %08x\n", in check_dirty_whitelist()
675 engine->name, reg, results[0]); in check_dirty_whitelist()
678 engine->name, reg, results[0], rsvd); in check_dirty_whitelist()
705 err = -EINVAL; in check_dirty_whitelist()
708 i915_gem_object_unpin_map(scratch->obj); in check_dirty_whitelist()
713 if (igt_flush_test(engine->i915)) in check_dirty_whitelist()
714 err = -EIO; in check_dirty_whitelist()
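
The verification code is only sparsely matched above, but line 633 shows the anchor of the scheme: the last probe value must be 0xffffffff, and its readback yields rsvd, the mask of bits that actually stick. Expected readbacks are then computed with masked-write semantics. A sketch of that computation, reconstructed from memory of the selftest's reg_write() helper, so treat the exact semantics as approximate:

#include <stdint.h>
#include <stdio.h>

/* Approximate model: rsvd == 0x0000ffff flags a masked register, where
 * the top 16 bits of the written value select which of the low 16 bits
 * latch; otherwise rsvd is simply the mask of writable bits observed
 * from the 0xffffffff probe. */
static uint32_t reg_write(uint32_t old, uint32_t val, uint32_t rsvd)
{
	if (rsvd == 0x0000ffff) {
		old &= ~(val >> 16);
		old |= val & (val >> 16);
	} else {
		old &= ~rsvd;
		old |= val & rsvd;
	}
	return old;
}

int main(void)
{
	/* masked register: every write carries its own enable mask */
	printf("%08x\n", reg_write(0x00001234, 0xffffffff, 0x0000ffff));
	/* plain register with only the low byte writable */
	printf("%08x\n", reg_write(0x00001234, 0x000000ab, 0x000000ff));
	return 0;
}
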
730 if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */ in live_dirty_whitelist()
737 if (engine->whitelist.count == 0) in live_dirty_whitelist()
764 if (engine->whitelist.count == 0) in live_reset_whitelist()
772 goto out; in live_reset_whitelist()
780 goto out; in live_reset_whitelist()
784 out: in live_reset_whitelist()
795 u32 srm, *cs; in read_whitelisted_registers() local
802 err = i915_request_await_object(rq, results->obj, true); in read_whitelisted_registers()
810 if (INTEL_GEN(ctx->i915) >= 8) in read_whitelisted_registers()
813 cs = intel_ring_begin(rq, 4 * engine->whitelist.count); in read_whitelisted_registers()
814 if (IS_ERR(cs)) { in read_whitelisted_registers()
815 err = PTR_ERR(cs); in read_whitelisted_registers()
819 for (i = 0; i < engine->whitelist.count; i++) { in read_whitelisted_registers()
820 u64 offset = results->node.start + sizeof(u32) * i; in read_whitelisted_registers()
821 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in read_whitelisted_registers()
826 *cs++ = srm; in read_whitelisted_registers()
827 *cs++ = reg; in read_whitelisted_registers()
828 *cs++ = lower_32_bits(offset); in read_whitelisted_registers()
829 *cs++ = upper_32_bits(offset); in read_whitelisted_registers()
831 intel_ring_advance(rq, cs); in read_whitelisted_registers()
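
Two details worth noting in read_whitelisted_registers(): intel_ring_begin() reserves exactly 4 dwords per whitelist entry, matching the gen8+ SRM packet size, and each entry has its access-permission field cleared (in a line not matched above) before being used as an MMIO offset. A sketch of that masking, assuming the 2-bit access field at bits 29:28 per i915_reg.h:

#include <stdint.h>
#include <stdio.h>

#define NONPRIV_ACCESS_MASK (0x3u << 28) /* RING_FORCE_TO_NONPRIV_ACCESS_MASK, assumed */

int main(void)
{
	uint32_t entry = 0x24d0 | (1u << 28); /* hypothetical RD-flagged whitelist slot */

	/* clear the permission field to recover the raw register offset */
	printf("mmio offset: 0x%x\n", entry & ~NONPRIV_ACCESS_MASK);
	return 0;
}
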
844 u32 *cs; in scrub_whitelisted_registers() local
852 cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); in scrub_whitelisted_registers()
853 if (IS_ERR(cs)) { in scrub_whitelisted_registers()
854 err = PTR_ERR(cs); in scrub_whitelisted_registers()
858 *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine)); in scrub_whitelisted_registers()
859 for (i = 0; i < engine->whitelist.count; i++) { in scrub_whitelisted_registers()
860 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in scrub_whitelisted_registers()
868 *cs++ = reg; in scrub_whitelisted_registers()
869 *cs++ = 0xffffffff; in scrub_whitelisted_registers()
871 *cs++ = MI_BATCH_BUFFER_END; in scrub_whitelisted_registers()
873 i915_gem_object_flush_map(batch->obj); in scrub_whitelisted_registers()
874 intel_gt_chipset_flush(engine->gt); in scrub_whitelisted_registers()
882 if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ in scrub_whitelisted_registers()
883 err = engine->emit_init_breadcrumb(rq); in scrub_whitelisted_registers()
889 err = i915_request_await_object(rq, batch->obj, false); in scrub_whitelisted_registers()
897 err = engine->emit_bb_start(rq, batch->node.start, 0, 0); in scrub_whitelisted_registers()
903 i915_gem_object_unpin_map(batch->obj); in scrub_whitelisted_registers()
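
scrub_whitelisted_registers() emits a single MI_LOAD_REGISTER_IMM header whose operand count is whitelist_writable_count(), followed by one (register, 0xffffffff) pair per writable slot; read-only slots are skipped inside the loop (that check is not among the matched lines), so the header count and the number of emitted pairs must agree. A runnable model under the same assumed flag values as the earlier sketches:

#include <stdint.h>
#include <stdio.h>

#define MI_INSTR(op, flags)     (((op) << 23) | (flags))
#define MI_LOAD_REGISTER_IMM(n) MI_INSTR(0x22, 2 * (n) - 1)
#define MI_BATCH_BUFFER_END     (0x0a << 23)
#define NONPRIV_ACCESS_RD       (1u << 28)  /* read-only slot flag, assumed */
#define NONPRIV_ACCESS_MASK     (0x3u << 28)

int main(void)
{
	uint32_t wl[] = { 0x24d0, 0x24d4 | NONPRIV_ACCESS_RD, 0x24d8 };
	uint32_t batch[16], *cs = batch;
	int i, writable = 0, n = sizeof(wl) / sizeof(wl[0]);

	for (i = 0; i < n; i++)
		if (!(wl[i] & NONPRIV_ACCESS_RD))
			writable++;

	/* one LRI header for all writable slots: its operand count must
	 * match the number of (reg, value) pairs emitted below */
	*cs++ = MI_LOAD_REGISTER_IMM(writable);
	for (i = 0; i < n; i++) {
		if (wl[i] & NONPRIV_ACCESS_RD) /* skip read-only slots */
			continue;
		*cs++ = wl[i] & ~NONPRIV_ACCESS_MASK;
		*cs++ = 0xffffffff; /* scrub value */
	}
	*cs++ = MI_BATCH_BUFFER_END;

	for (i = 0; i < cs - batch; i++)
		printf("dw%d: 0x%08x\n", i, batch[i]);
	return 0;
}
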
921 while (count--) { in find_reg()
922 if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask && in find_reg()
923 i915_mmio_reg_offset(tbl->reg) == offset) in find_reg()
945 if (a != b && !pardon_reg(engine->i915, reg)) { in result_eq()
967 if (a == b && !writeonly_reg(engine->i915, reg)) { in result_neq()
987 a = i915_gem_object_pin_map(A->obj, I915_MAP_WB); in check_whitelisted_registers()
991 b = i915_gem_object_pin_map(B->obj, I915_MAP_WB); in check_whitelisted_registers()
998 for (i = 0; i < engine->whitelist.count; i++) { in check_whitelisted_registers()
999 const struct i915_wa *wa = &engine->whitelist.list[i]; in check_whitelisted_registers()
1001 if (i915_mmio_reg_offset(wa->reg) & in check_whitelisted_registers()
1005 if (!fn(engine, a[i], b[i], wa->reg)) in check_whitelisted_registers()
1006 err = -EINVAL; in check_whitelisted_registers()
1009 i915_gem_object_unpin_map(B->obj); in check_whitelisted_registers()
1011 i915_gem_object_unpin_map(A->obj); in check_whitelisted_registers()
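
check_whitelisted_registers() maps both result buffers and applies a comparison callback per whitelist entry: result_eq for isolation checks (a second context must read identical values) and result_neq after scrubbing (values must have changed). A self-contained sketch of that callback pattern; the function names mirror the selftest, while the buffers below are made-up example data:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef bool (*whitelist_cmp)(uint32_t a, uint32_t b, uint32_t reg);

static bool result_eq(uint32_t a, uint32_t b, uint32_t reg)
{
	if (a != b)
		fprintf(stderr, "reg %04x: %08x != %08x\n", reg, a, b);
	return a == b;
}

static bool result_neq(uint32_t a, uint32_t b, uint32_t reg)
{
	if (a == b)
		fprintf(stderr, "reg %04x: unchanged %08x\n", reg, a);
	return a != b;
}

static int check(const uint32_t *a, const uint32_t *b,
		 const uint32_t *regs, int count, whitelist_cmp fn)
{
	int i, err = 0;

	for (i = 0; i < count; i++)
		if (!fn(a[i], b[i], regs[i]))
			err = -22; /* -EINVAL, but keep checking remaining slots */
	return err;
}

int main(void)
{
	uint32_t before[] = { 0x1, 0x2 }, after[] = { 0x1, 0x3 };
	uint32_t regs[] = { 0x24d0, 0x24d8 };

	printf("isolation: %d\n", check(before, after, regs, 2, result_eq));
	printf("scrub:     %d\n", check(before, after, regs, 2, result_neq));
	return 0;
}
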
1031 if (!intel_engines_has_context_isolation(gt->i915)) in live_isolated_whitelist()
1038 c = kernel_context(gt->i915); in live_isolated_whitelist()
1068 if (!engine->kernel_context->vm) in live_isolated_whitelist()
1124 if (igt_flush_test(gt->i915)) in live_isolated_whitelist()
1125 err = -EIO; in live_isolated_whitelist()
1134 struct drm_i915_private *i915 = ctx->i915; in verify_wa_lists()
1139 ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str); in verify_wa_lists()
1142 enum intel_engine_id id = ce->engine->id; in verify_wa_lists()
1145 &lists->engine[id].wa_list, in verify_wa_lists()
1149 &lists->engine[id].ctx_wa_list, in verify_wa_lists()
1168 ctx = kernel_context(gt->i915); in live_gpu_reset_workarounds()
1177 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in live_gpu_reset_workarounds()
1183 goto out; in live_gpu_reset_workarounds()
1189 out: in live_gpu_reset_workarounds()
1193 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in live_gpu_reset_workarounds()
1196 return ok ? 0 : -ESRCH; in live_gpu_reset_workarounds()
1215 ctx = kernel_context(gt->i915); in live_engine_reset_workarounds()
1220 wakeref = intel_runtime_pm_get(gt->uncore->rpm); in live_engine_reset_workarounds()
1225 struct intel_engine_cs *engine = ce->engine; in live_engine_reset_workarounds()
1228 pr_info("Verifying after %s reset...\n", engine->name); in live_engine_reset_workarounds()
1232 ret = -ESRCH; in live_engine_reset_workarounds()
1240 ret = -ESRCH; in live_engine_reset_workarounds()
1244 ret = igt_spinner_init(&spin, engine->gt); in live_engine_reset_workarounds()
1269 ret = -ESRCH; in live_engine_reset_workarounds()
1276 intel_runtime_pm_put(gt->uncore->rpm, wakeref); in live_engine_reset_workarounds()
1280 igt_flush_test(gt->i915); in live_engine_reset_workarounds()
1295 if (intel_gt_is_wedged(&i915->gt)) in intel_workarounds_live_selftests()
1298 return intel_gt_live_subtests(tests, &i915->gt); in intel_workarounds_live_selftests()