Lines Matching full:engine
33 } engine[I915_NUM_ENGINES]; member
63 struct intel_engine_cs *engine; in reference_lists_init() local
72 for_each_engine(engine, gt, id) { in reference_lists_init()
73 struct i915_wa_list *wal = &lists->engine[id].wa_list; in reference_lists_init()
75 wa_init_start(wal, "REF", engine->name); in reference_lists_init()
76 engine_init_workarounds(engine, wal); in reference_lists_init()
79 __intel_engine_init_ctx_wa(engine, in reference_lists_init()
80 &lists->engine[id].ctx_wa_list, in reference_lists_init()
88 struct intel_engine_cs *engine; in reference_lists_fini() local
91 for_each_engine(engine, gt, id) in reference_lists_fini()
92 intel_wa_list_free(&lists->engine[id].wa_list); in reference_lists_fini()
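The matches above come from the reference workaround-list helpers (this listing appears to be taken from the i915 GT selftest selftest_workarounds.c, although the file name is not part of the output). A minimal reconstruction of the pattern they show; the gt_wa_list field, the wa_init_finish() call and the "CTX_REF" name are assumptions filled in around the listed lines:

struct wa_lists {
        struct i915_wa_list gt_wa_list;         /* assumed sibling of the per-engine lists */
        struct {
                struct i915_wa_list wa_list;
                struct i915_wa_list ctx_wa_list;
        } engine[I915_NUM_ENGINES];             /* matches line 33 above */
};

static void reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        memset(lists, 0, sizeof(*lists));

        for_each_engine(engine, gt, id) {
                struct i915_wa_list *wal = &lists->engine[id].wa_list;

                /* Build a reference copy of the engine workarounds ... */
                wa_init_start(wal, "REF", engine->name);
                engine_init_workarounds(engine, wal);
                wa_init_finish(wal);            /* assumed counterpart to wa_init_start() */

                /* ... and of the context workarounds, for later comparison */
                __intel_engine_init_ctx_wa(engine,
                                           &lists->engine[id].ctx_wa_list,
                                           "CTX_REF");  /* name string assumed */
        }
}

static void reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, gt, id)
                intel_wa_list_free(&lists->engine[id].wa_list);
}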
98 read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) in read_nonprivs() argument
100 const u32 base = engine->mmio_base; in read_nonprivs()
108 result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); in read_nonprivs()
123 vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL); in read_nonprivs()
133 rq = igt_request_alloc(ctx, engine); in read_nonprivs()
180 get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i) in get_whitelist_reg() argument
182 i915_reg_t reg = i < engine->whitelist.count ? in get_whitelist_reg()
183 engine->whitelist.list[i].reg : in get_whitelist_reg()
184 RING_NOPID(engine->mmio_base); in get_whitelist_reg()
190 print_results(const struct intel_engine_cs *engine, const u32 *results) in print_results() argument
195 u32 expected = get_whitelist_reg(engine, i); in print_results()
204 struct intel_engine_cs *engine) in check_whitelist() argument
212 results = read_nonprivs(ctx, engine); in check_whitelist()
218 intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */ in check_whitelist()
221 if (intel_gt_is_wedged(engine->gt)) in check_whitelist()
233 u32 expected = get_whitelist_reg(engine, i); in check_whitelist()
237 print_results(engine, vaddr); in check_whitelist()
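Taken together, these fragments trace the whitelist read-back path: read_nonprivs() allocates an internal object, binds it into the GGTT and submits a request on the engine that dumps the RING_NONPRIV slots; get_whitelist_reg() pads indices beyond whitelist.count with the harmless RING_NOPID register; check_whitelist() then compares each slot under an intel_wedge_on_timeout() safety net. A hedged sketch of the padding helper and the comparison step follows; the return type, the slot count and the hypothetical compare_whitelist() wrapper (inline in the real check_whitelist()) are assumptions:

static u32 get_whitelist_reg(const struct intel_engine_cs *engine,
                             unsigned int i)
{
        /* Unused slots are expected to be programmed with RING_NOPID */
        i915_reg_t reg = i < engine->whitelist.count ?
                         engine->whitelist.list[i].reg :
                         RING_NOPID(engine->mmio_base);

        return i915_mmio_reg_offset(reg);
}

static int compare_whitelist(const struct intel_engine_cs *engine,
                             const u32 *vaddr)       /* hypothetical helper */
{
        unsigned int i;

        for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {  /* slot count assumed */
                u32 expected = get_whitelist_reg(engine, i);
                u32 actual = vaddr[i];

                if (expected != actual) {
                        print_results(engine, vaddr);
                        pr_err("Invalid RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
                               i, expected, actual);
                        return -EINVAL;
                }
        }

        return 0;
}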
252 static int do_device_reset(struct intel_engine_cs *engine) in do_device_reset() argument
254 intel_gt_reset(engine->gt, engine->mask, "live_workarounds"); in do_device_reset()
258 static int do_engine_reset(struct intel_engine_cs *engine) in do_engine_reset() argument
260 return intel_engine_reset(engine, "live_workarounds"); in do_engine_reset()
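do_device_reset() and do_engine_reset() are the two reset callbacks handed to check_whitelist_across_reset() further down. They are almost fully visible above; only the trailing return of the device-reset wrapper is assumed here:

static int do_device_reset(struct intel_engine_cs *engine)
{
        intel_gt_reset(engine->gt, engine->mask, "live_workarounds");
        return 0;       /* intel_gt_reset() returns void, so assume success */
}

static int do_engine_reset(struct intel_engine_cs *engine)
{
        return intel_engine_reset(engine, "live_workarounds");
}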
264 switch_to_scratch_context(struct intel_engine_cs *engine, in switch_to_scratch_context() argument
271 ce = intel_context_create(engine); in switch_to_scratch_context()
292 static int check_whitelist_across_reset(struct intel_engine_cs *engine, in check_whitelist_across_reset() argument
296 struct drm_i915_private *i915 = engine->i915; in check_whitelist_across_reset()
303 engine->whitelist.count, engine->name, name); in check_whitelist_across_reset()
309 err = igt_spinner_init(&spin, engine->gt); in check_whitelist_across_reset()
313 err = check_whitelist(ctx, engine); in check_whitelist_across_reset()
319 err = switch_to_scratch_context(engine, &spin); in check_whitelist_across_reset()
323 with_intel_runtime_pm(engine->uncore->rpm, wakeref) in check_whitelist_across_reset()
324 err = reset(engine); in check_whitelist_across_reset()
333 err = check_whitelist(ctx, engine); in check_whitelist_across_reset()
348 err = check_whitelist(ctx, engine); in check_whitelist_across_reset()
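From the lines above, check_whitelist_across_reset() follows a before/after pattern: verify the whitelist in an ordinary context, park the engine on a scratch (spinning) context, perform the requested reset while holding a runtime-PM wakeref, then verify the whitelist again, both in the original context and in a freshly created one. A condensed sketch of that flow; the context-creation helpers, the pr_info format, the spinner teardown and the unwind labels are assumptions:

static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                                        int (*reset)(struct intel_engine_cs *),
                                        const char *name)
{
        struct drm_i915_private *i915 = engine->i915;
        struct i915_gem_context *ctx, *tmp;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err;

        pr_info("Checking %d whitelisted registers on %s [%s]\n",      /* format assumed */
                engine->whitelist.count, engine->name, name);

        ctx = kernel_context(i915);             /* helper name assumed */
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        err = igt_spinner_init(&spin, engine->gt);
        if (err)
                goto out_ctx;

        err = check_whitelist(ctx, engine);     /* baseline before reset */
        if (err)
                goto out_spin;

        err = switch_to_scratch_context(engine, &spin);
        if (err)
                goto out_spin;

        with_intel_runtime_pm(engine->uncore->rpm, wakeref)
                err = reset(engine);

        igt_spinner_end(&spin);                 /* teardown assumed */
        if (err)
                goto out_spin;

        err = check_whitelist(ctx, engine);     /* must survive the reset */
        if (err)
                goto out_spin;

        tmp = kernel_context(i915);             /* and in a brand new context */
        if (IS_ERR(tmp)) {
                err = PTR_ERR(tmp);
                goto out_spin;
        }

        err = check_whitelist(tmp, engine);
        kernel_context_close(tmp);              /* assumed */

out_spin:
        igt_spinner_fini(&spin);
out_ctx:
        kernel_context_close(ctx);              /* assumed */
        return err;
}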
402 static bool wo_register(struct intel_engine_cs *engine, u32 reg) in wo_register() argument
404 enum intel_platform platform = INTEL_INFO(engine->i915)->platform; in wo_register()
420 static bool timestamp(const struct intel_engine_cs *engine, u32 reg) in timestamp() argument
422 reg = (reg - engine->mmio_base) & ~RING_FORCE_TO_NONPRIV_ACCESS_MASK; in timestamp()
443 static int whitelist_writable_count(struct intel_engine_cs *engine) in whitelist_writable_count() argument
445 int count = engine->whitelist.count; in whitelist_writable_count()
448 for (i = 0; i < engine->whitelist.count; i++) { in whitelist_writable_count()
449 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in whitelist_writable_count()
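whitelist_writable_count() starts from engine->whitelist.count and walks the list, discounting entries that userspace cannot write. Only the loop header is visible above; the read-only test below, based on the RING_FORCE_TO_NONPRIV access field, is an assumption:

static bool ro_register(u32 reg)
{
        /* Assumed filter: slots whose access field marks them read-only */
        if ((reg & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
            RING_FORCE_TO_NONPRIV_ACCESS_RD)
                return true;

        return false;
}

static int whitelist_writable_count(struct intel_engine_cs *engine)
{
        int count = engine->whitelist.count;
        int i;

        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

                if (ro_register(reg))
                        count--;
        }

        return count;
}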
486 struct intel_engine_cs *engine = ce->engine; in check_dirty_whitelist() local
502 for (i = 0; i < engine->whitelist.count; i++) { in check_dirty_whitelist()
503 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in check_dirty_whitelist()
511 if (wo_register(engine, reg)) in check_dirty_whitelist()
514 if (timestamp(engine, reg)) in check_dirty_whitelist()
524 if (INTEL_GEN(engine->i915) >= 8) in check_dirty_whitelist()
528 engine->name, reg); in check_dirty_whitelist()
581 intel_gt_chipset_flush(engine->gt); in check_dirty_whitelist()
589 if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ in check_dirty_whitelist()
590 err = engine->emit_init_breadcrumb(rq); in check_dirty_whitelist()
612 err = engine->emit_bb_start(rq, in check_dirty_whitelist()
622 engine->name, reg); in check_dirty_whitelist()
623 intel_gt_set_wedged(engine->gt); in check_dirty_whitelist()
639 engine->name, reg); in check_dirty_whitelist()
671 engine->name, err, reg); in check_dirty_whitelist()
675 engine->name, reg, results[0]); in check_dirty_whitelist()
678 engine->name, reg, results[0], rsvd); in check_dirty_whitelist()
713 if (igt_flush_test(engine->i915)) in check_dirty_whitelist()
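check_dirty_whitelist() is only partially visible here, but the fragments outline its per-register skeleton: skip registers that cannot be verified by read-back (write-only or free-running timestamp registers), pick the SRM/LRM encoding by generation, build a batch, flush, and submit it with an init breadcrumb so a hang can be recovered cleanly. A heavily hedged sketch of that loop body; err, ce and batch come from the enclosing function, and the batch contents, request helper and emit_bb_start() arguments are assumptions:

for (i = 0; i < engine->whitelist.count; i++) {
        u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
        struct i915_request *rq;
        u32 srm, lrm;

        if (wo_register(engine, reg))
                continue;       /* write-only: cannot verify by read-back */

        if (timestamp(engine, reg))
                continue;       /* free-running: value changes under our feet */

        srm = MI_STORE_REGISTER_MEM;
        lrm = MI_LOAD_REGISTER_MEM;
        if (INTEL_GEN(engine->i915) >= 8) {
                srm++;          /* gen8+ SRM/LRM variants carry a 64b address */
                lrm++;
        }

        /* ... fill the batch with LRI/SRM pairs probing reg (not shown here) ... */

        intel_gt_chipset_flush(engine->gt);

        rq = i915_request_create(ce);           /* request helper assumed */
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                break;          /* real code unwinds via goto labels */
        }

        if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
                err = engine->emit_init_breadcrumb(rq);
                if (err)
                        break;
        }

        err = engine->emit_bb_start(rq,
                                    batch->node.start, PAGE_SIZE,       /* arguments assumed */
                                    0);

        i915_request_add(rq);   /* then wait; on timeout the GT is wedged (line 623) */

        /* comparison of results[] against the read-only mask omitted (lines 639-678) */
}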
725 struct intel_engine_cs *engine; in live_dirty_whitelist() local
733 for_each_engine(engine, gt, id) { in live_dirty_whitelist()
737 if (engine->whitelist.count == 0) in live_dirty_whitelist()
740 ce = intel_context_create(engine); in live_dirty_whitelist()
756 struct intel_engine_cs *engine; in live_reset_whitelist() local
763 for_each_engine(engine, gt, id) { in live_reset_whitelist()
764 if (engine->whitelist.count == 0) in live_reset_whitelist()
768 err = check_whitelist_across_reset(engine, in live_reset_whitelist()
770 "engine"); in live_reset_whitelist()
776 err = check_whitelist_across_reset(engine, in live_reset_whitelist()
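live_reset_whitelist() simply drives check_whitelist_across_reset() with both reset flavours for every engine that has a whitelist. The reset-capability guards below are assumptions; the two calls themselves mirror lines 768-776:

for_each_engine(engine, gt, id) {
        if (engine->whitelist.count == 0)
                continue;

        if (intel_has_reset_engine(gt)) {               /* guard assumed */
                err = check_whitelist_across_reset(engine,
                                                   do_engine_reset,
                                                   "engine");
                if (err)
                        goto out;
        }

        if (intel_has_gpu_reset(gt)) {                  /* guard assumed */
                err = check_whitelist_across_reset(engine,
                                                   do_device_reset,
                                                   "device");
                if (err)
                        goto out;
        }
}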
790 struct intel_engine_cs *engine, in read_whitelisted_registers() argument
797 rq = igt_request_alloc(ctx, engine); in read_whitelisted_registers()
813 cs = intel_ring_begin(rq, 4 * engine->whitelist.count); in read_whitelisted_registers()
819 for (i = 0; i < engine->whitelist.count; i++) { in read_whitelisted_registers()
821 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in read_whitelisted_registers()
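read_whitelisted_registers() reserves four dwords per whitelist entry (line 813), which fits one store-register-to-memory command per slot. The emission loop below is reconstructed on that assumption; the SRM opcode selection, the results vma and the address-mask name are not visible in this listing:

u32 srm = MI_STORE_REGISTER_MEM;
u32 *cs;
int i;

if (INTEL_GEN(engine->i915) >= 8)
        srm++;                                  /* gen8+ form takes a 64b address */

cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
if (IS_ERR(cs))
        return PTR_ERR(cs);                     /* real code unwinds the request first */

for (i = 0; i < engine->whitelist.count; i++) {
        u64 offset = results->node.start + sizeof(u32) * i;    /* results vma assumed */
        u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

        /* Strip the access-control bits so we emit a plain mmio address */
        reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;              /* mask name assumed */

        *cs++ = srm;
        *cs++ = reg;
        *cs++ = lower_32_bits(offset);
        *cs++ = upper_32_bits(offset);
}
intel_ring_advance(rq, cs);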
838 struct intel_engine_cs *engine) in scrub_whitelisted_registers() argument
858 *cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine)); in scrub_whitelisted_registers()
859 for (i = 0; i < engine->whitelist.count; i++) { in scrub_whitelisted_registers()
860 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); in scrub_whitelisted_registers()
874 intel_gt_chipset_flush(engine->gt); in scrub_whitelisted_registers()
876 rq = igt_request_alloc(ctx, engine); in scrub_whitelisted_registers()
882 if (engine->emit_init_breadcrumb) { /* Be nice if we hang */ in scrub_whitelisted_registers()
883 err = engine->emit_init_breadcrumb(rq); in scrub_whitelisted_registers()
897 err = engine->emit_bb_start(rq, batch->node.start, 0, 0); in scrub_whitelisted_registers()
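scrub_whitelisted_registers() builds a batch that overwrites every writable whitelist slot with a single MI_LOAD_REGISTER_IMM, flushes it, and then submits it the same way as the dirty-whitelist test (init breadcrumb, then emit_bb_start at line 897). The batch-filling portion below follows the lines above; cs points into the mapped scrub batch, and the read-only filter, scrub value and flush-map call are assumptions:

*cs++ = MI_LOAD_REGISTER_IMM(whitelist_writable_count(engine));
for (i = 0; i < engine->whitelist.count; i++) {
        u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);

        if (ro_register(reg))                   /* skip read-only slots (filter assumed) */
                continue;

        /* Strip the access bits, then clobber the register with a junk value */
        reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;      /* mask name assumed */

        *cs++ = reg;
        *cs++ = 0xffffffff;                     /* scrub value assumed */
}
*cs++ = MI_BATCH_BUFFER_END;

i915_gem_object_flush_map(batch->obj);          /* flush helper assumed */
intel_gt_chipset_flush(engine->gt);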
942 static bool result_eq(struct intel_engine_cs *engine, in result_eq() argument
945 if (a != b && !pardon_reg(engine->i915, reg)) { in result_eq()
964 static bool result_neq(struct intel_engine_cs *engine, in result_neq() argument
967 if (a == b && !writeonly_reg(engine->i915, reg)) { in result_neq()
977 check_whitelisted_registers(struct intel_engine_cs *engine, in check_whitelisted_registers() argument
980 bool (*fn)(struct intel_engine_cs *engine, in check_whitelisted_registers() argument
998 for (i = 0; i < engine->whitelist.count; i++) { in check_whitelisted_registers()
999 const struct i915_wa *wa = &engine->whitelist.list[i]; in check_whitelisted_registers()
1005 if (!fn(engine, a[i], b[i], wa->reg)) in check_whitelisted_registers()
1022 struct intel_engine_cs *engine; in live_isolated_whitelist() local
1067 for_each_engine(engine, gt, id) { in live_isolated_whitelist()
1068 if (!engine->kernel_context->vm) in live_isolated_whitelist()
1071 if (!whitelist_writable_count(engine)) in live_isolated_whitelist()
1075 err = read_whitelisted_registers(client[0].ctx, engine, in live_isolated_whitelist()
1081 err = scrub_whitelisted_registers(client[0].ctx, engine); in live_isolated_whitelist()
1086 err = read_whitelisted_registers(client[1].ctx, engine, in live_isolated_whitelist()
1092 err = check_whitelisted_registers(engine, in live_isolated_whitelist()
1100 err = read_whitelisted_registers(client[0].ctx, engine, in live_isolated_whitelist()
1106 err = check_whitelisted_registers(engine, in live_isolated_whitelist()
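live_isolated_whitelist() uses two client contexts: it records the default whitelist values through client[0], dirties them from client[0], and then checks that client[1] still observes the defaults (result_eq) while client[0] itself sees the dirtied values (result_neq). The per-engine body below is a sketch; the scratch-buffer field names and the comments on the skip conditions are partly assumed:

for_each_engine(engine, gt, id) {
        if (!engine->kernel_context->vm)        /* needs a per-context address space */
                continue;

        if (!whitelist_writable_count(engine))
                continue;

        /* Record the default values seen by client[0] */
        err = read_whitelisted_registers(client[0].ctx, engine,
                                         client[0].scratch[0]); /* field names assumed */
        if (err)
                goto err;

        /* Clobber every writable whitelisted register from client[0] */
        err = scrub_whitelisted_registers(client[0].ctx, engine);
        if (err)
                goto err;

        /* Read what the second, untouched client now observes */
        err = read_whitelisted_registers(client[1].ctx, engine,
                                         client[1].scratch[0]);
        if (err)
                goto err;

        /* Client[1] must still see the defaults: nothing leaked across contexts */
        err = check_whitelisted_registers(engine,
                                          client[0].scratch[0],
                                          client[1].scratch[0],
                                          result_eq);
        if (err)
                goto err;

        /* Re-read client[0]: its own view must have been dirtied */
        err = read_whitelisted_registers(client[0].ctx, engine,
                                         client[0].scratch[1]);
        if (err)
                goto err;

        err = check_whitelisted_registers(engine,
                                          client[0].scratch[0],
                                          client[0].scratch[1],
                                          result_neq);
        if (err)
                goto err;
}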
1142 enum intel_engine_id id = ce->engine->id; in verify_wa_lists()
1145 &lists->engine[id].wa_list, in verify_wa_lists()
1149 &lists->engine[id].ctx_wa_list, in verify_wa_lists()
1225 struct intel_engine_cs *engine = ce->engine; in live_engine_reset_workarounds() local
1228 pr_info("Verifying after %s reset...\n", engine->name); in live_engine_reset_workarounds()
1236 intel_engine_reset(engine, "live_workarounds"); in live_engine_reset_workarounds()
1244 ret = igt_spinner_init(&spin, engine->gt); in live_engine_reset_workarounds()
1262 intel_engine_reset(engine, "live_workarounds"); in live_engine_reset_workarounds()
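live_engine_reset_workarounds() verifies the workaround lists around two kinds of engine reset: one while the engine is idle, and one while a spinner request keeps it busy. Much of the function is missing from this listing, so the per-engine body below is a loose sketch; the verify_wa_lists() signature, the spinner request helper and the error handling are assumptions:

pr_info("Verifying after %s reset...\n", engine->name);

if (!verify_wa_lists(gt, &lists)) {             /* signature assumed */
        ret = -ESRCH;
        goto err;
}

/* Reset the idle engine, then check nothing was lost */
intel_engine_reset(engine, "live_workarounds");

if (!verify_wa_lists(gt, &lists)) {
        ret = -ESRCH;
        goto err;
}

/* Now keep the engine busy with a spinner and reset it while active */
ret = igt_spinner_init(&spin, engine->gt);
if (ret)
        goto err;

rq = igt_spinner_create_request(&spin, ce, MI_NOOP);    /* helper + flags assumed */
if (IS_ERR(rq)) {
        ret = PTR_ERR(rq);
        igt_spinner_fini(&spin);
        goto err;
}

i915_request_add(rq);
if (!igt_wait_for_spinner(&spin, rq)) {                 /* wait helper assumed */
        ret = -ETIMEDOUT;
        igt_spinner_fini(&spin);
        goto err;
}

intel_engine_reset(engine, "live_workarounds");

igt_spinner_end(&spin);
igt_spinner_fini(&spin);

if (!verify_wa_lists(gt, &lists)) {
        ret = -ESRCH;
        goto err;
}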