Lines matching +full:cs +full:- +full:x in drivers/gpu/drm/i915/gt/selftest_lrc.c (each entry: source line number, matching line, enclosing function)
2 * SPDX-License-Identifier: MIT
24 #define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4)
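A worked expansion of the macro above (the mmio_base value is assumed here for illustration):

    /* With engine->mmio_base == 0x2000 (the usual render-engine base),
     * CS_GPR(engine, 0) == 0x2600 and CS_GPR(engine, n) == 0x2600 + n * 4:
     * the CS general-purpose registers start at mmio_base + 0x600, one
     * dword per slot, with two consecutive slots forming a 64-bit GPR.
     */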
34 obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); in create_scratch()
40 vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); in create_scratch()
82 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) in wait_for_submit()
86 return -ETIME; in wait_for_submit()
102 if (READ_ONCE(engine->execlists.pending[0])) in wait_for_reset()
108 if (READ_ONCE(rq->fence.error)) in wait_for_reset()
114 if (rq->fence.error != -EIO) { in wait_for_reset()
116 engine->name, in wait_for_reset()
117 rq->fence.context, in wait_for_reset()
118 rq->fence.seqno); in wait_for_reset()
119 return -EINVAL; in wait_for_reset()
124 max(0l, (long)(timeout - jiffies)) + 1) < 0) { in wait_for_reset()
126 engine->name, in wait_for_reset()
127 rq->fence.context, in wait_for_reset()
128 rq->fence.seqno); in wait_for_reset()
129 return -ETIME; in wait_for_reset()
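Both wait_for_submit() and wait_for_reset() poll against a jiffies deadline and report -ETIME on expiry; a minimal sketch of that shape (the helper name and signature are invented here for illustration, not part of the file):

    /* assumes <linux/jiffies.h> and <linux/sched.h> */
    static int poll_until(bool (*done)(const struct i915_request *rq),
                          const struct i915_request *rq,
                          unsigned long timeout_jiffies)
    {
        const unsigned long timeout = jiffies + timeout_jiffies;

        do {
            if (done(rq))
                return 0;
            cond_resched(); /* let the submission tasklet run */
        } while (time_before(jiffies, timeout));

        return -ETIME;
    }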
143 if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915)) in live_sanitycheck()
147 return -ENOMEM; in live_sanitycheck()
170 err = -EIO; in live_sanitycheck()
175 if (igt_flush_test(gt->i915)) { in live_sanitycheck()
176 err = -EIO; in live_sanitycheck()
195 int err = -ENOMEM; in live_unlite_restore()
218 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { in live_unlite_restore()
219 err = -EIO; in live_unlite_restore()
241 * lite-restore using the RING_TAIL from ce[1] it in live_unlite_restore()
242 * will execute garbage from ce[0]->ring. in live_unlite_restore()
244 memset(tmp->ring->vaddr, in live_unlite_restore()
246 tmp->ring->vma->size); in live_unlite_restore()
250 GEM_BUG_ON(!ce[1]->ring->size); in live_unlite_restore()
251 intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2); in live_unlite_restore()
252 __execlists_update_reg_state(ce[1], engine, ce[1]->ring->head); in live_unlite_restore()
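Restating the trap set up in lines 241-252 (a descriptive sketch, not extra test code):

    /* Both contexts' rings are filled with poison, then ce[1]'s RING_TAIL
     * is parked at ring->size / 2. If submission wrongly lite-restored
     * using ce[1]'s stale RING_TAIL while ce[0] is resident, the CS would
     * execute the poison in ce[0]->ring instead of rq[0]'s commands;
     * the GEM_BUG_ON on rq[0]->postfix below sanity-checks the layout.
     */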
262 GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit); in live_unlite_restore()
281 * to a no-op (a wait on a request on the same engine in live_unlite_restore()
287 i915_request_await_dma_fence(rq[1], &rq[0]->fence); in live_unlite_restore()
292 GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); in live_unlite_restore()
301 engine->schedule(rq[1], &attr); in live_unlite_restore()
312 i915_request_await_dma_fence(rq[0], &rq[1]->fence); in live_unlite_restore()
315 GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); in live_unlite_restore()
332 err = -EIO; in live_unlite_restore()
362 * into emitting a forward lite-restore instead of the rollback. in live_unlite_ring()
366 return -ENOMEM; in live_unlite_ring()
380 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { in live_unlite_ring()
381 err = -EIO; in live_unlite_ring()
401 memset32(tmp->ring->vaddr, in live_unlite_ring()
403 tmp->ring->vma->size / sizeof(u32)); in live_unlite_ring()
416 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in live_unlite_ring()
422 err = -ETIME; in live_unlite_ring()
428 while (intel_ring_direction(ce[0]->ring, in live_unlite_ring()
429 rq->wa_tail, in live_unlite_ring()
430 ce[0]->ring->tail) <= 0) { in live_unlite_ring()
445 pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n", in live_unlite_ring()
446 engine->name, n, in live_unlite_ring()
447 ce[0]->ring->size, in live_unlite_ring()
448 ce[0]->ring->tail, in live_unlite_ring()
449 ce[0]->ring->emit, in live_unlite_ring()
450 rq->tail); in live_unlite_ring()
451 GEM_BUG_ON(intel_ring_direction(ce[0]->ring, in live_unlite_ring()
452 rq->tail, in live_unlite_ring()
453 ce[0]->ring->tail) <= 0); in live_unlite_ring()
463 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in live_unlite_ring()
471 engine->name); in live_unlite_ring()
472 err = -ETIME; in live_unlite_ring()
475 pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n", in live_unlite_ring()
476 engine->name, in live_unlite_ring()
477 ce[0]->ring->tail, ce[0]->ring->emit, in live_unlite_ring()
478 ce[1]->ring->tail, ce[1]->ring->emit); in live_unlite_ring()
492 err = -EIO; in live_unlite_ring()
510 * ring->head is updated upon retire which is out of sync with pinning in live_pin_rewind()
511 * the context. Thus we cannot use ring->head to set CTX_RING_HEAD, in live_pin_rewind()
523 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { in live_pin_rewind()
524 err = -EIO; in live_pin_rewind()
541 err = i915_active_acquire(&ce->active); in live_pin_rewind()
547 ring = ce->ring; in live_pin_rewind()
550 memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32)); in live_pin_rewind()
551 ring->emit = ring->size / 2; in live_pin_rewind()
552 ring->tail = ring->emit; in live_pin_rewind()
553 GEM_BUG_ON(ring->head); in live_pin_rewind()
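The hazard named in the comment at lines 510-511, in miniature (state as forced by the lines above):

    /* After the forced setup:
     *   ring->head == 0                        (stale: nothing retired)
     *   ring->tail == ring->emit == size / 2   (where emission resumes)
     *   ring contents: STACK_MAGIC poison throughout.
     * If pinning derived CTX_RING_HEAD from ring->head, the CS would
     * replay the poison from 0 to size / 2; hence GEM_BUG_ON(!rq->head)
     * below insists the new request starts past zero.
     */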
560 i915_active_release(&ce->active); /* e.g. async retire */ in live_pin_rewind()
566 GEM_BUG_ON(!rq->head); in live_pin_rewind()
571 err = -EIO; in live_pin_rewind()
597 return -ENOMEM; in live_hold_reset()
620 err = -ETIME; in live_hold_reset()
627 &gt->reset.flags)) { in live_hold_reset()
629 err = -EBUSY; in live_hold_reset()
632 tasklet_disable(&engine->execlists.tasklet); in live_hold_reset()
634 engine->execlists.tasklet.func(engine->execlists.tasklet.data); in live_hold_reset()
635 GEM_BUG_ON(execlists_active(&engine->execlists) != rq); in live_hold_reset()
642 GEM_BUG_ON(rq->fence.error != -EIO); in live_hold_reset()
644 tasklet_enable(&engine->execlists.tasklet); in live_hold_reset()
646 &gt->reset.flags); in live_hold_reset()
651 engine->name); in live_hold_reset()
653 err = -EIO; in live_hold_reset()
662 engine->name); in live_hold_reset()
664 err = -ETIME; in live_hold_reset()
687 enum { GOOD = 0, BAD = -EIO } error[2]; in live_error_interrupt()
702 * only detects problems the HW knows about -- it will not warn when in live_error_interrupt()
718 for (p = phases; p->error[0] != GOOD; p++) { in live_error_interrupt()
719 struct i915_request *client[ARRAY_SIZE(phases->error)]; in live_error_interrupt()
720 u32 *cs; in live_error_interrupt() local
741 if (rq->engine->emit_init_breadcrumb) { in live_error_interrupt()
742 err = rq->engine->emit_init_breadcrumb(rq); in live_error_interrupt()
749 cs = intel_ring_begin(rq, 2); in live_error_interrupt()
750 if (IS_ERR(cs)) { in live_error_interrupt()
752 err = PTR_ERR(cs); in live_error_interrupt()
756 if (p->error[i]) { in live_error_interrupt()
757 *cs++ = 0xdeadbeef; in live_error_interrupt()
758 *cs++ = 0xdeadbeef; in live_error_interrupt()
760 *cs++ = MI_NOOP; in live_error_interrupt()
761 *cs++ = MI_NOOP; in live_error_interrupt()
771 engine->name); in live_error_interrupt()
772 err = -ETIME; in live_error_interrupt()
779 engine->name, in live_error_interrupt()
780 error_repr(p->error[i])); in live_error_interrupt()
784 engine->name, in live_error_interrupt()
785 error_repr(p->error[i])); in live_error_interrupt()
786 err = -ETIME; in live_error_interrupt()
792 if (client[i]->fence.error != p->error[i]) { in live_error_interrupt()
794 engine->name, in live_error_interrupt()
795 error_repr(p->error[i]), in live_error_interrupt()
797 client[i]->fence.error); in live_error_interrupt()
798 err = -EINVAL; in live_error_interrupt()
809 engine->name, p - phases, in live_error_interrupt()
810 p->error[0], p->error[1]); in live_error_interrupt()
828 u32 *cs; in emit_semaphore_chain() local
830 cs = intel_ring_begin(rq, 10); in emit_semaphore_chain()
831 if (IS_ERR(cs)) in emit_semaphore_chain()
832 return PTR_ERR(cs); in emit_semaphore_chain()
834 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; in emit_semaphore_chain()
836 *cs++ = MI_SEMAPHORE_WAIT | in emit_semaphore_chain()
840 *cs++ = 0; in emit_semaphore_chain()
841 *cs++ = i915_ggtt_offset(vma) + 4 * idx; in emit_semaphore_chain()
842 *cs++ = 0; in emit_semaphore_chain()
845 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in emit_semaphore_chain()
846 *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); in emit_semaphore_chain()
847 *cs++ = 0; in emit_semaphore_chain()
848 *cs++ = 1; in emit_semaphore_chain()
850 *cs++ = MI_NOOP; in emit_semaphore_chain()
851 *cs++ = MI_NOOP; in emit_semaphore_chain()
852 *cs++ = MI_NOOP; in emit_semaphore_chain()
853 *cs++ = MI_NOOP; in emit_semaphore_chain()
856 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; in emit_semaphore_chain()
858 intel_ring_advance(rq, cs); in emit_semaphore_chain()
878 if (rq->engine->emit_init_breadcrumb) in semaphore_queue()
879 err = rq->engine->emit_init_breadcrumb(rq); in semaphore_queue()
902 u32 *cs; in release_queue() local
908 cs = intel_ring_begin(rq, 4); in release_queue()
909 if (IS_ERR(cs)) { in release_queue()
911 return PTR_ERR(cs); in release_queue()
914 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in release_queue()
915 *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); in release_queue()
916 *cs++ = 0; in release_queue()
917 *cs++ = 1; in release_queue()
919 intel_ring_advance(rq, cs); in release_queue()
925 engine->schedule(rq, &attr); in release_queue()
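Taken together, emit_semaphore_chain() and release_queue() implement a cascade over an array of dword semaphores (slot semantics inferred from the offsets visible above):

    /* semaphore[i] is the dword at vma + 4 * i.
     *   request idx:             MI_SEMAPHORE_WAIT on semaphore[idx],
     *                            then store 1 to semaphore[idx - 1];
     *   release_queue(..., idx): store 1 to semaphore[idx - 1] from a
     *                            maximum-priority request.
     * Releasing the highest slot therefore unwinds the whole chain one
     * request at a time, in descending index order.
     */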
947 for_each_engine(engine, outer->gt, id) { in slice_semaphore_queue()
966 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) { in slice_semaphore_queue()
970 intel_gt_set_wedged(outer->gt); in slice_semaphore_queue()
971 err = -EIO; in slice_semaphore_queue()
1000 obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); in live_timeslice_preempt()
1004 vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); in live_timeslice_preempt()
1036 if (igt_flush_test(gt->i915)) { in live_timeslice_preempt()
1037 err = -EIO; in live_timeslice_preempt()
1057 i915_ggtt_offset(ce->engine->status_page.vma) + in create_rewinder()
1060 u32 *cs; in create_rewinder() local
1068 err = i915_request_await_dma_fence(rq, &wait->fence); in create_rewinder()
1073 cs = intel_ring_begin(rq, 14); in create_rewinder()
1074 if (IS_ERR(cs)) { in create_rewinder()
1075 err = PTR_ERR(cs); in create_rewinder()
1079 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; in create_rewinder()
1080 *cs++ = MI_NOOP; in create_rewinder()
1082 *cs++ = MI_SEMAPHORE_WAIT | in create_rewinder()
1086 *cs++ = idx; in create_rewinder()
1087 *cs++ = offset; in create_rewinder()
1088 *cs++ = 0; in create_rewinder()
1090 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; in create_rewinder()
1091 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base)); in create_rewinder()
1092 *cs++ = offset + idx * sizeof(u32); in create_rewinder()
1093 *cs++ = 0; in create_rewinder()
1095 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in create_rewinder()
1096 *cs++ = offset; in create_rewinder()
1097 *cs++ = 0; in create_rewinder()
1098 *cs++ = idx + 1; in create_rewinder()
1100 intel_ring_advance(rq, cs); in create_rewinder()
1102 rq->sched.attr.priority = I915_PRIORITY_MASK; in create_rewinder()
1133 enum { X = 1, Z, Y }; in live_timeslice_rewind() enumerator
1144 * A:rq1 -- semaphore wait, timestamp X in live_timeslice_rewind()
1145 * A:rq2 -- write timestamp Y in live_timeslice_rewind()
1147 * B:rq1 [await A:rq1] -- write timestamp Z in live_timeslice_rewind()
1155 timeslice = xchg(&engine->props.timeslice_duration_ms, 1); in live_timeslice_rewind()
1157 slot = memset32(engine->status_page.addr + 1000, 0, 4); in live_timeslice_rewind()
1165 rq[A1] = create_rewinder(ce, NULL, slot, X); in live_timeslice_rewind()
1179 engine->name); in live_timeslice_rewind()
1197 engine->name); in live_timeslice_rewind()
1205 del_timer(&engine->execlists.timer); in live_timeslice_rewind()
1206 tasklet_hi_schedule(&engine->execlists.tasklet); in live_timeslice_rewind()
1209 /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */ in live_timeslice_rewind()
1227 engine->name, i - 1); in live_timeslice_rewind()
1228 err = -ETIME; in live_timeslice_rewind()
1232 pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]); in live_timeslice_rewind()
1236 if (slot[Z] - slot[X] >= slot[Y] - slot[X]) { in live_timeslice_rewind()
1238 engine->name, in live_timeslice_rewind()
1239 slot[Z] - slot[X], in live_timeslice_rewind()
1240 slot[Y] - slot[X]); in live_timeslice_rewind()
1241 err = -EINVAL; in live_timeslice_rewind()
1245 memset32(&slot[0], -1, 4); in live_timeslice_rewind()
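The check at line 1236 subtracts u32 timestamps, so it stays correct across counter wraparound; a worked example:

    /* X = 0xfffffff0, Z = 0x00000010, Y = 0x00000030:
     *   slot[Z] - slot[X] == 0x20 and slot[Y] - slot[X] == 0x40 (mod 2^32),
     * so Z is still seen to land before Y although both raw values wrapped.
     */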
1248 engine->props.timeslice_duration_ms = timeslice; in live_timeslice_rewind()
1252 if (igt_flush_test(gt->i915)) in live_timeslice_rewind()
1253 err = -EIO; in live_timeslice_rewind()
1308 obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); in live_timeslice_queue()
1312 vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); in live_timeslice_queue()
1350 engine->schedule(rq, &attr); in live_timeslice_queue()
1354 engine->name); in live_timeslice_queue()
1368 engine->name); in live_timeslice_queue()
1373 GEM_BUG_ON(execlists_active(&engine->execlists) != rq); in live_timeslice_queue()
1384 } while (READ_ONCE(engine->execlists.pending[0])); in live_timeslice_queue()
1389 drm_info_printer(gt->i915->drm.dev); in live_timeslice_queue()
1392 engine->name); in live_timeslice_queue()
1394 "%s\n", engine->name); in live_timeslice_queue()
1397 err = -EIO; in live_timeslice_queue()
1432 return -ENOMEM; in live_timeslice_nopreempt()
1449 timeslice = xchg(&engine->props.timeslice_duration_ms, 1); in live_timeslice_nopreempt()
1465 err = -ETIME; in live_timeslice_nopreempt()
1469 set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); in live_timeslice_nopreempt()
1487 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in live_timeslice_nopreempt()
1497 err = -ETIME; in live_timeslice_nopreempt()
1507 pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n", in live_timeslice_nopreempt()
1508 engine->name); in live_timeslice_nopreempt()
1509 err = -EINVAL; in live_timeslice_nopreempt()
1516 xchg(&engine->props.timeslice_duration_ms, timeslice); in live_timeslice_nopreempt()
1521 if (igt_flush_test(gt->i915)) { in live_timeslice_nopreempt()
1522 err = -EIO; in live_timeslice_nopreempt()
1539 int err = -ENOMEM; in live_busywait_preempt()
1547 ctx_hi = kernel_context(gt->i915); in live_busywait_preempt()
1549 return -ENOMEM; in live_busywait_preempt()
1550 ctx_hi->sched.priority = in live_busywait_preempt()
1553 ctx_lo = kernel_context(gt->i915); in live_busywait_preempt()
1556 ctx_lo->sched.priority = in live_busywait_preempt()
1559 obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); in live_busywait_preempt()
1571 vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); in live_busywait_preempt()
1588 u32 *cs; in live_busywait_preempt() local
1596 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { in live_busywait_preempt()
1597 err = -EIO; in live_busywait_preempt()
1616 cs = intel_ring_begin(lo, 8); in live_busywait_preempt()
1617 if (IS_ERR(cs)) { in live_busywait_preempt()
1618 err = PTR_ERR(cs); in live_busywait_preempt()
1623 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in live_busywait_preempt()
1624 *cs++ = i915_ggtt_offset(vma); in live_busywait_preempt()
1625 *cs++ = 0; in live_busywait_preempt()
1626 *cs++ = 1; in live_busywait_preempt()
1630 *cs++ = MI_SEMAPHORE_WAIT | in live_busywait_preempt()
1634 *cs++ = 0; in live_busywait_preempt()
1635 *cs++ = i915_ggtt_offset(vma); in live_busywait_preempt()
1636 *cs++ = 0; in live_busywait_preempt()
1638 intel_ring_advance(lo, cs); in live_busywait_preempt()
1645 err = -ETIMEDOUT; in live_busywait_preempt()
1650 if (i915_request_wait(lo, 0, 1) != -ETIME) { in live_busywait_preempt()
1653 engine->name); in live_busywait_preempt()
1654 err = -EIO; in live_busywait_preempt()
1665 cs = intel_ring_begin(hi, 4); in live_busywait_preempt()
1666 if (IS_ERR(cs)) { in live_busywait_preempt()
1667 err = PTR_ERR(cs); in live_busywait_preempt()
1673 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in live_busywait_preempt()
1674 *cs++ = i915_ggtt_offset(vma); in live_busywait_preempt()
1675 *cs++ = 0; in live_busywait_preempt()
1676 *cs++ = 0; in live_busywait_preempt()
1678 intel_ring_advance(hi, cs); in live_busywait_preempt()
1682 struct drm_printer p = drm_info_printer(gt->i915->drm.dev); in live_busywait_preempt()
1685 engine->name); in live_busywait_preempt()
1687 intel_engine_dump(engine, &p, "%s\n", engine->name); in live_busywait_preempt()
1692 err = -EIO; in live_busywait_preempt()
1699 err = -EIO; in live_busywait_preempt()
1727 ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); in spinner_create_request()
1743 int err = -ENOMEM; in live_preempt()
1745 if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) in live_preempt()
1748 if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) in live_preempt()
1752 return -ENOMEM; in live_preempt()
1757 ctx_hi = kernel_context(gt->i915); in live_preempt()
1760 ctx_hi->sched.priority = in live_preempt()
1763 ctx_lo = kernel_context(gt->i915); in live_preempt()
1766 ctx_lo->sched.priority = in live_preempt()
1776 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { in live_preempt()
1777 err = -EIO; in live_preempt()
1793 err = -EIO; in live_preempt()
1810 err = -EIO; in live_preempt()
1818 err = -EIO; in live_preempt()
1843 int err = -ENOMEM; in live_late_preempt()
1845 if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) in live_late_preempt()
1849 return -ENOMEM; in live_late_preempt()
1854 ctx_hi = kernel_context(gt->i915); in live_late_preempt()
1858 ctx_lo = kernel_context(gt->i915); in live_late_preempt()
1863 ctx_lo->sched.priority = I915_USER_PRIORITY(1); in live_late_preempt()
1872 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { in live_late_preempt()
1873 err = -EIO; in live_late_preempt()
1905 engine->schedule(rq, &attr); in live_late_preempt()
1917 err = -EIO; in live_late_preempt()
1937 err = -EIO; in live_late_preempt()
1948 c->ctx = kernel_context(gt->i915); in preempt_client_init()
1949 if (!c->ctx) in preempt_client_init()
1950 return -ENOMEM; in preempt_client_init()
1952 if (igt_spinner_init(&c->spin, gt)) in preempt_client_init()
1958 kernel_context_close(c->ctx); in preempt_client_init()
1959 return -ENOMEM; in preempt_client_init()
1964 igt_spinner_fini(&c->spin); in preempt_client_fini()
1965 kernel_context_close(c->ctx); in preempt_client_fini()
1974 int err = -ENOMEM; in live_nopreempt()
1981 if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) in live_nopreempt()
1985 return -ENOMEM; in live_nopreempt()
1988 b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); in live_nopreempt()
1996 engine->execlists.preempt_hang.count = 0; in live_nopreempt()
2007 __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags); in live_nopreempt()
2043 if (engine->execlists.preempt_hang.count) { in live_nopreempt()
2044 pr_err("Preemption recorded x%d; should have been suppressed!\n", in live_nopreempt()
2045 engine->execlists.preempt_hang.count); in live_nopreempt()
2046 err = -EINVAL; in live_nopreempt()
2050 if (igt_flush_test(gt->i915)) in live_nopreempt()
2065 err = -EIO; in live_nopreempt()
2081 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); in __cancel_active0()
2082 if (igt_live_test_begin(&t, arg->engine->i915, in __cancel_active0()
2083 __func__, arg->engine->name)) in __cancel_active0()
2084 return -EIO; in __cancel_active0()
2086 rq = spinner_create_request(&arg->a.spin, in __cancel_active0()
2087 arg->a.ctx, arg->engine, in __cancel_active0()
2092 clear_bit(CONTEXT_BANNED, &rq->context->flags); in __cancel_active0()
2095 if (!igt_wait_for_spinner(&arg->a.spin, rq)) { in __cancel_active0()
2096 err = -EIO; in __cancel_active0()
2100 intel_context_set_banned(rq->context); in __cancel_active0()
2101 err = intel_engine_pulse(arg->engine); in __cancel_active0()
2105 err = wait_for_reset(arg->engine, rq, HZ / 2); in __cancel_active0()
2114 err = -EIO; in __cancel_active0()
2125 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); in __cancel_active1()
2126 if (igt_live_test_begin(&t, arg->engine->i915, in __cancel_active1()
2127 __func__, arg->engine->name)) in __cancel_active1()
2128 return -EIO; in __cancel_active1()
2130 rq[0] = spinner_create_request(&arg->a.spin, in __cancel_active1()
2131 arg->a.ctx, arg->engine, in __cancel_active1()
2136 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); in __cancel_active1()
2139 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { in __cancel_active1()
2140 err = -EIO; in __cancel_active1()
2144 rq[1] = spinner_create_request(&arg->b.spin, in __cancel_active1()
2145 arg->b.ctx, arg->engine, in __cancel_active1()
2152 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); in __cancel_active1()
2154 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); in __cancel_active1()
2159 intel_context_set_banned(rq[1]->context); in __cancel_active1()
2160 err = intel_engine_pulse(arg->engine); in __cancel_active1()
2164 igt_spinner_end(&arg->a.spin); in __cancel_active1()
2165 err = wait_for_reset(arg->engine, rq[1], HZ / 2); in __cancel_active1()
2169 if (rq[0]->fence.error != 0) { in __cancel_active1()
2171 err = -EINVAL; in __cancel_active1()
2175 if (rq[1]->fence.error != -EIO) { in __cancel_active1()
2176 pr_err("Cancelled inflight1 request did not report -EIO\n"); in __cancel_active1()
2177 err = -EINVAL; in __cancel_active1()
2185 err = -EIO; in __cancel_active1()
2196 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); in __cancel_queued()
2197 if (igt_live_test_begin(&t, arg->engine->i915, in __cancel_queued()
2198 __func__, arg->engine->name)) in __cancel_queued()
2199 return -EIO; in __cancel_queued()
2201 rq[0] = spinner_create_request(&arg->a.spin, in __cancel_queued()
2202 arg->a.ctx, arg->engine, in __cancel_queued()
2207 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); in __cancel_queued()
2210 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { in __cancel_queued()
2211 err = -EIO; in __cancel_queued()
2215 rq[1] = igt_request_alloc(arg->b.ctx, arg->engine); in __cancel_queued()
2221 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); in __cancel_queued()
2223 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); in __cancel_queued()
2228 rq[2] = spinner_create_request(&arg->b.spin, in __cancel_queued()
2229 arg->a.ctx, arg->engine, in __cancel_queued()
2237 err = i915_request_await_dma_fence(rq[2], &rq[1]->fence); in __cancel_queued()
2242 intel_context_set_banned(rq[2]->context); in __cancel_queued()
2243 err = intel_engine_pulse(arg->engine); in __cancel_queued()
2247 err = wait_for_reset(arg->engine, rq[2], HZ / 2); in __cancel_queued()
2251 if (rq[0]->fence.error != -EIO) { in __cancel_queued()
2252 pr_err("Cancelled inflight0 request did not report -EIO\n"); in __cancel_queued()
2253 err = -EINVAL; in __cancel_queued()
2257 if (rq[1]->fence.error != 0) { in __cancel_queued()
2259 err = -EINVAL; in __cancel_queued()
2263 if (rq[2]->fence.error != -EIO) { in __cancel_queued()
2264 pr_err("Cancelled queued request did not report -EIO\n"); in __cancel_queued()
2265 err = -EINVAL; in __cancel_queued()
2274 err = -EIO; in __cancel_queued()
2283 /* Preempt cancel non-preemptible spinner in ELSP0 */ in __cancel_hostile()
2287 if (!intel_has_reset_engine(arg->engine->gt)) in __cancel_hostile()
2290 GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); in __cancel_hostile()
2291 rq = spinner_create_request(&arg->a.spin, in __cancel_hostile()
2292 arg->a.ctx, arg->engine, in __cancel_hostile()
2297 clear_bit(CONTEXT_BANNED, &rq->context->flags); in __cancel_hostile()
2300 if (!igt_wait_for_spinner(&arg->a.spin, rq)) { in __cancel_hostile()
2301 err = -EIO; in __cancel_hostile()
2305 intel_context_set_banned(rq->context); in __cancel_hostile()
2306 err = intel_engine_pulse(arg->engine); /* force reset */ in __cancel_hostile()
2310 err = wait_for_reset(arg->engine, rq, HZ / 2); in __cancel_hostile()
2318 if (igt_flush_test(arg->engine->i915)) in __cancel_hostile()
2319 err = -EIO; in __cancel_hostile()
2328 int err = -ENOMEM; in live_preempt_cancel()
2335 if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) in live_preempt_cancel()
2339 return -ENOMEM; in live_preempt_cancel()
2388 int err = -ENOMEM; in live_suppress_self_preempt()
2392 * the current execution order, the preempt-to-idle injection is in live_suppress_self_preempt()
2393 * skipped and that we do not accidentally apply it after the CS in live_suppress_self_preempt()
2397 if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) in live_suppress_self_preempt()
2400 if (intel_uc_uses_guc_submission(&gt->uc)) in live_suppress_self_preempt()
2403 if (intel_vgpu_active(gt->i915)) in live_suppress_self_preempt()
2407 return -ENOMEM; in live_suppress_self_preempt()
2418 if (igt_flush_test(gt->i915)) in live_suppress_self_preempt()
2422 engine->execlists.preempt_hang.count = 0; in live_suppress_self_preempt()
2441 mod_timer(&engine->execlists.timer, jiffies + HZ); in live_suppress_self_preempt()
2454 engine->schedule(rq_a, &attr); in live_suppress_self_preempt()
2468 if (engine->execlists.preempt_hang.count) { in live_suppress_self_preempt()
2469 pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n", in live_suppress_self_preempt()
2470 engine->name, in live_suppress_self_preempt()
2471 engine->execlists.preempt_hang.count, in live_suppress_self_preempt()
2474 err = -EINVAL; in live_suppress_self_preempt()
2479 if (igt_flush_test(gt->i915)) in live_suppress_self_preempt()
2494 err = -EIO; in live_suppress_self_preempt()
2504 int err = -ENOMEM; in live_chain_preempt()
2512 if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) in live_chain_preempt()
2516 return -ENOMEM; in live_chain_preempt()
2541 ring_size = rq->wa_tail - rq->head; in live_chain_preempt()
2543 ring_size += rq->ring->size; in live_chain_preempt()
2544 ring_size = rq->ring->size / ring_size; in live_chain_preempt()
2546 __func__, engine->name, ring_size); in live_chain_preempt()
2550 pr_err("Timed out waiting to flush %s\n", engine->name); in live_chain_preempt()
2556 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { in live_chain_preempt()
2557 err = -EIO; in live_chain_preempt()
2591 engine->schedule(rq, &attr); in live_chain_preempt()
2596 drm_info_printer(gt->i915->drm.dev); in live_chain_preempt()
2601 "%s\n", engine->name); in live_chain_preempt()
2617 drm_info_printer(gt->i915->drm.dev); in live_chain_preempt()
2622 "%s\n", engine->name); in live_chain_preempt()
2631 err = -EIO; in live_chain_preempt()
2647 err = -EIO; in live_chain_preempt()
2658 u32 *cs; in create_gang() local
2665 obj = i915_gem_object_create_internal(engine->i915, 4096); in create_gang()
2671 vma = i915_vma_instance(obj, ce->vm, NULL); in create_gang()
2681 cs = i915_gem_object_pin_map(obj, I915_MAP_WC); in create_gang()
2682 if (IS_ERR(cs)) in create_gang()
2686 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; in create_gang()
2688 *cs++ = MI_SEMAPHORE_WAIT | in create_gang()
2691 *cs++ = 0; in create_gang()
2692 *cs++ = lower_32_bits(vma->node.start); in create_gang()
2693 *cs++ = upper_32_bits(vma->node.start); in create_gang()
2696 u64 offset = (*prev)->batch->node.start; in create_gang()
2699 *cs++ = MI_STORE_DWORD_IMM_GEN4; in create_gang()
2700 *cs++ = lower_32_bits(offset); in create_gang()
2701 *cs++ = upper_32_bits(offset); in create_gang()
2702 *cs++ = 0; in create_gang()
2705 *cs++ = MI_BATCH_BUFFER_END; in create_gang()
2713 rq->batch = i915_vma_get(vma); in create_gang()
2717 err = i915_request_await_object(rq, vma->obj, false); in create_gang()
2721 err = rq->engine->emit_bb_start(rq, in create_gang()
2722 vma->node.start, in create_gang()
2732 rq->mock.link.next = &(*prev)->mock.link; in create_gang()
2737 i915_vma_put(rq->batch); in create_gang()
2756 if (igt_live_test_begin(&t, engine->i915, __func__, engine->name)) in __live_preempt_ring()
2757 return -EIO; in __live_preempt_ring()
2768 tmp->ring = __intel_context_ring_size(ring_sz); in __live_preempt_ring()
2776 memset32(tmp->ring->vaddr, in __live_preempt_ring()
2778 tmp->ring->vma->size / sizeof(u32)); in __live_preempt_ring()
2790 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in __live_preempt_ring()
2794 intel_gt_set_wedged(engine->gt); in __live_preempt_ring()
2796 err = -ETIME; in __live_preempt_ring()
2802 while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) { in __live_preempt_ring()
2817 pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n", in __live_preempt_ring()
2818 engine->name, queue_sz, n, in __live_preempt_ring()
2819 ce[0]->ring->size, in __live_preempt_ring()
2820 ce[0]->ring->tail, in __live_preempt_ring()
2821 ce[0]->ring->emit, in __live_preempt_ring()
2822 rq->tail); in __live_preempt_ring()
2832 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in __live_preempt_ring()
2840 engine->name); in __live_preempt_ring()
2841 err = -ETIME; in __live_preempt_ring()
2844 pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n", in __live_preempt_ring()
2845 engine->name, in __live_preempt_ring()
2846 ce[0]->ring->tail, ce[0]->ring->emit, in __live_preempt_ring()
2847 ce[1]->ring->tail, ce[1]->ring->emit); in __live_preempt_ring()
2860 err = -EIO; in __live_preempt_ring()
2879 return -ENOMEM; in live_preempt_ring()
2914 if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) in live_preempt_gang()
2933 u32 *cs; in live_preempt_gang() local
2938 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) in live_preempt_gang()
2939 return -EIO; in live_preempt_gang()
2951 engine->schedule(rq, &attr); in live_preempt_gang()
2955 engine->name, prio); in live_preempt_gang()
2963 cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC); in live_preempt_gang()
2964 if (!IS_ERR(cs)) { in live_preempt_gang()
2965 *cs = 0; in live_preempt_gang()
2966 i915_gem_object_unpin_map(rq->batch->obj); in live_preempt_gang()
2968 err = PTR_ERR(cs); in live_preempt_gang()
2977 drm_info_printer(engine->i915->drm.dev); in live_preempt_gang()
2982 "%s\n", engine->name); in live_preempt_gang()
2984 err = -ETIME; in live_preempt_gang()
2987 i915_vma_put(rq->batch); in live_preempt_gang()
2993 err = -EIO; in live_preempt_gang()
3008 u32 *cs; in create_gpr_user() local
3012 obj = i915_gem_object_create_internal(engine->i915, 4096); in create_gpr_user()
3016 vma = i915_vma_instance(obj, result->vm, NULL); in create_gpr_user()
3028 cs = i915_gem_object_pin_map(obj, I915_MAP_WC); in create_gpr_user()
3029 if (IS_ERR(cs)) { in create_gpr_user()
3031 return ERR_CAST(cs); in create_gpr_user()
3035 *cs++ = MI_LOAD_REGISTER_IMM(1); in create_gpr_user()
3036 *cs++ = CS_GPR(engine, 0); in create_gpr_user()
3037 *cs++ = 1; in create_gpr_user()
3049 *cs++ = MI_MATH(4); in create_gpr_user()
3050 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i)); in create_gpr_user()
3051 *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0)); in create_gpr_user()
3052 *cs++ = MI_MATH_ADD; in create_gpr_user()
3053 *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU); in create_gpr_user()
3055 addr = result->node.start + offset + i * sizeof(*cs); in create_gpr_user()
3056 *cs++ = MI_STORE_REGISTER_MEM_GEN8; in create_gpr_user()
3057 *cs++ = CS_GPR(engine, 2 * i); in create_gpr_user()
3058 *cs++ = lower_32_bits(addr); in create_gpr_user()
3059 *cs++ = upper_32_bits(addr); in create_gpr_user()
3061 *cs++ = MI_SEMAPHORE_WAIT | in create_gpr_user()
3064 *cs++ = i; in create_gpr_user()
3065 *cs++ = lower_32_bits(result->node.start); in create_gpr_user()
3066 *cs++ = upper_32_bits(result->node.start); in create_gpr_user()
3069 *cs++ = MI_BATCH_BUFFER_END; in create_gpr_user()
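What the loop above programs per iteration i, with the roles read off the visible opcodes (the elided MI_SEMAPHORE_WAIT mode flags are not reconstructed here):

    /* MI_MATH(4): SRCA <- GPR[i], SRCB <- GPR[0] (loaded with 1 above),
     *             ACCU <- SRCA + SRCB, GPR[i] <- ACCU, i.e. GPR[i] += 1.
     * MI_STORE_REGISTER_MEM: spill the low dword of GPR[i], i.e.
     *             CS_GPR(engine, 2 * i), to result + offset + 4 * i.
     * MI_SEMAPHORE_WAIT: stall on the dword at result->node.start,
     *             compared against the value i.
     */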
3082 obj = i915_gem_object_create_internal(gt->i915, sz); in create_global()
3086 vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); in create_global()
3115 vma = i915_vma_instance(global->obj, ce->vm, NULL); in create_gpr_client()
3138 err = i915_request_await_object(rq, vma->obj, false); in create_gpr_client()
3145 err = i915_request_await_object(rq, batch->obj, false); in create_gpr_client()
3149 err = rq->engine->emit_bb_start(rq, in create_gpr_client()
3150 batch->node.start, in create_gpr_client()
3177 u32 *cs; in preempt_user() local
3183 cs = intel_ring_begin(rq, 4); in preempt_user()
3184 if (IS_ERR(cs)) { in preempt_user()
3186 return PTR_ERR(cs); in preempt_user()
3189 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in preempt_user()
3190 *cs++ = i915_ggtt_offset(global); in preempt_user()
3191 *cs++ = 0; in preempt_user()
3192 *cs++ = id; in preempt_user()
3194 intel_ring_advance(rq, cs); in preempt_user()
3199 engine->schedule(rq, &attr); in preempt_user()
3202 err = -ETIME; in preempt_user()
3217 if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) in live_preempt_user()
3238 result = i915_gem_object_pin_map(global->obj, I915_MAP_WC); in live_preempt_user()
3252 if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS) in live_preempt_user()
3253 continue; /* we need per-context GPR */ in live_preempt_user()
3255 if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { in live_preempt_user()
3256 err = -EIO; in live_preempt_user()
3282 engine->name); in live_preempt_user()
3283 err = -EIO; in live_preempt_user()
3291 err = -ETIME; in live_preempt_user()
3298 engine->name, in live_preempt_user()
3300 err = -EINVAL; in live_preempt_user()
3315 smp_store_mb(result[0], -1); in live_preempt_user()
3317 err = -EIO; in live_preempt_user()
3333 int err = -ENOMEM; in live_preempt_timeout()
3342 if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) in live_preempt_timeout()
3349 return -ENOMEM; in live_preempt_timeout()
3351 ctx_hi = kernel_context(gt->i915); in live_preempt_timeout()
3354 ctx_hi->sched.priority = in live_preempt_timeout()
3357 ctx_lo = kernel_context(gt->i915); in live_preempt_timeout()
3360 ctx_lo->sched.priority = in live_preempt_timeout()
3380 err = -EIO; in live_preempt_timeout()
3391 /* Flush the previous CS ack before changing timeouts */ in live_preempt_timeout()
3392 while (READ_ONCE(engine->execlists.pending[0])) in live_preempt_timeout()
3395 saved_timeout = engine->props.preempt_timeout_ms; in live_preempt_timeout()
3396 engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */ in live_preempt_timeout()
3402 engine->props.preempt_timeout_ms = saved_timeout; in live_preempt_timeout()
3407 err = -ETIME; in live_preempt_timeout()
3427 return i915_prandom_u32_max_state(max - min, rnd) + min; in random_range()
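A usage note for the helper above: i915_prandom_u32_max_state(n, rnd) draws uniformly from [0, n), so the sum is uniform over [min, max).

    /* e.g. random_range(rnd, 1, 4) returns 1, 2 or 3, never 4 */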
3447 return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext, in smoke_context()
3448 &smoke->prng)]; in smoke_context()
3473 ctx->sched.priority = prio; in smoke_submit()
3475 rq = igt_request_alloc(ctx, smoke->engine); in smoke_submit()
3483 err = i915_request_await_object(rq, vma->obj, false); in smoke_submit()
3487 err = rq->engine->emit_bb_start(rq, in smoke_submit()
3488 vma->node.start, in smoke_submit()
3515 smoke->batch); in smoke_crescendo_thread()
3520 } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL)); in smoke_crescendo_thread()
3522 smoke->count = count; in smoke_crescendo_thread()
3536 for_each_engine(engine, smoke->gt, id) { in smoke_crescendo()
3555 for_each_engine(engine, smoke->gt, id) { in smoke_crescendo()
3570 pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", in smoke_crescendo()
3571 count, flags, smoke->gt->info.num_engines, smoke->ncontext); in smoke_crescendo()
3583 for_each_engine(smoke->engine, smoke->gt, id) { in smoke_random()
3588 ctx, random_priority(&smoke->prng), in smoke_random()
3589 flags & BATCH ? smoke->batch : NULL); in smoke_random()
3595 } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL)); in smoke_random()
3597 pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", in smoke_random()
3598 count, flags, smoke->gt->info.num_engines, smoke->ncontext); in smoke_random()
3611 int err = -ENOMEM; in live_preempt_smoke()
3612 u32 *cs; in live_preempt_smoke() local
3615 if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915)) in live_preempt_smoke()
3622 return -ENOMEM; in live_preempt_smoke()
3625 i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE); in live_preempt_smoke()
3631 cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB); in live_preempt_smoke()
3632 if (IS_ERR(cs)) { in live_preempt_smoke()
3633 err = PTR_ERR(cs); in live_preempt_smoke()
3636 for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) in live_preempt_smoke()
3637 cs[n] = MI_ARB_CHECK; in live_preempt_smoke()
3638 cs[n] = MI_BATCH_BUFFER_END; in live_preempt_smoke()
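The smoke batch built here is simply a page of preemption points:

    /* layout: MI_ARB_CHECK repeated for PAGE_SIZE / 4 - 1 dwords, then a
     * single MI_BATCH_BUFFER_END; each MI_ARB_CHECK is a point at which
     * the command streamer may honour a pending execlists preemption.
     */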
3642 if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) { in live_preempt_smoke()
3643 err = -EIO; in live_preempt_smoke()
3648 smoke.contexts[n] = kernel_context(smoke.gt->i915); in live_preempt_smoke()
3665 err = -EIO; in live_preempt_smoke()
3714 err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name); in nop_virtual_engine()
3760 __func__, ve[0]->engine->name, in nop_virtual_engine()
3761 request[nc]->fence.context, in nop_virtual_engine()
3762 request[nc]->fence.seqno); in nop_virtual_engine()
3765 __func__, ve[0]->engine->name, in nop_virtual_engine()
3766 request[nc]->fence.context, in nop_virtual_engine()
3767 request[nc]->fence.seqno); in nop_virtual_engine()
3792 nctx, ve[0]->engine->name, ktime_to_ns(times[0]), in nop_virtual_engine()
3796 if (igt_flush_test(gt->i915)) in nop_virtual_engine()
3797 err = -EIO; in nop_virtual_engine()
3817 if (!gt->engine_class[class][inst]) in __select_siblings()
3820 if (filter && !filter(gt->engine_class[class][inst])) in __select_siblings()
3823 siblings[n++] = gt->engine_class[class][inst]; in __select_siblings()
3846 if (intel_uc_uses_guc_submission(&gt->uc)) in live_virtual_engine()
3853 engine->name, err); in live_virtual_engine()
3905 err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name); in mask_virtual_engine()
3918 request[n]->execution_mask = siblings[nsibling - n - 1]->mask; in mask_virtual_engine()
3927 __func__, ve->engine->name, in mask_virtual_engine()
3928 request[n]->fence.context, in mask_virtual_engine()
3929 request[n]->fence.seqno); in mask_virtual_engine()
3932 __func__, ve->engine->name, in mask_virtual_engine()
3933 request[n]->fence.context, in mask_virtual_engine()
3934 request[n]->fence.seqno); in mask_virtual_engine()
3937 err = -EIO; in mask_virtual_engine()
3941 if (request[n]->engine != siblings[nsibling - n - 1]) { in mask_virtual_engine()
3943 request[n]->engine->name, in mask_virtual_engine()
3944 siblings[nsibling - n - 1]->name); in mask_virtual_engine()
3945 err = -EINVAL; in mask_virtual_engine()
3952 if (igt_flush_test(gt->i915)) in mask_virtual_engine()
3953 err = -EIO; in mask_virtual_engine()
3973 if (intel_uc_uses_guc_submission(&gt->uc)) in live_virtual_mask()
4007 return -ENOMEM; in slicein_virtual_engine()
4043 __func__, rq->engine->name); in slicein_virtual_engine()
4046 err = -EIO; in slicein_virtual_engine()
4052 if (igt_flush_test(gt->i915)) in slicein_virtual_engine()
4053 err = -EIO; in slicein_virtual_engine()
4074 return -ENOMEM; in sliceout_virtual_engine()
4112 __func__, siblings[n]->name); in sliceout_virtual_engine()
4115 err = -EIO; in sliceout_virtual_engine()
4122 if (igt_flush_test(gt->i915)) in sliceout_virtual_engine()
4123 err = -EIO; in sliceout_virtual_engine()
4135 if (intel_uc_uses_guc_submission(&gt->uc)) in live_virtual_slice()
4168 u32 *cs; in preserved_virtual_engine() local
4170 scratch = create_scratch(siblings[0]->gt); in preserved_virtual_engine()
4188 err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name); in preserved_virtual_engine()
4205 cs = intel_ring_begin(rq, 8); in preserved_virtual_engine()
4206 if (IS_ERR(cs)) { in preserved_virtual_engine()
4208 err = PTR_ERR(cs); in preserved_virtual_engine()
4212 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; in preserved_virtual_engine()
4213 *cs++ = CS_GPR(engine, n); in preserved_virtual_engine()
4214 *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32); in preserved_virtual_engine()
4215 *cs++ = 0; in preserved_virtual_engine()
4217 *cs++ = MI_LOAD_REGISTER_IMM(1); in preserved_virtual_engine()
4218 *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW); in preserved_virtual_engine()
4219 *cs++ = n + 1; in preserved_virtual_engine()
4221 *cs++ = MI_NOOP; in preserved_virtual_engine()
4222 intel_ring_advance(rq, cs); in preserved_virtual_engine()
4225 rq->execution_mask = engine->mask; in preserved_virtual_engine()
4230 err = -ETIME; in preserved_virtual_engine()
4234 cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); in preserved_virtual_engine()
4235 if (IS_ERR(cs)) { in preserved_virtual_engine()
4236 err = PTR_ERR(cs); in preserved_virtual_engine()
4241 if (cs[n] != n) { in preserved_virtual_engine()
4243 cs[n], n); in preserved_virtual_engine()
4244 err = -EINVAL; in preserved_virtual_engine()
4249 i915_gem_object_unpin_map(scratch->obj); in preserved_virtual_engine()
4253 err = -EIO; in preserved_virtual_engine()
4271 * Check that the context image retains non-privileged (user) registers in live_virtual_preserved()
4276 if (intel_uc_uses_guc_submission(&gt->uc)) in live_virtual_preserved()
4280 if (INTEL_GEN(gt->i915) < 9) in live_virtual_preserved()
4314 * across a number of engines. We use one request per-engine in bond_virtual_engine()
4332 * With the submit-fence, we have identified three possible phases in bond_virtual_engine()
4344 * it imposes no constraint, and is effectively a no-op, we do not in bond_virtual_engine()
4351 GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1); in bond_virtual_engine()
4354 return -ENOMEM; in bond_virtual_engine()
4357 rq[0] = ERR_PTR(-ENOMEM); in bond_virtual_engine()
4362 if (master->class == class) in bond_virtual_engine()
4371 memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq)); in bond_virtual_engine()
4383 err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit, in bond_virtual_engine()
4394 err = -EIO; in bond_virtual_engine()
4408 err = intel_virtual_engine_attach_bond(ve->engine, in bond_virtual_engine()
4434 &rq[0]->fence, in bond_virtual_engine()
4435 ve->engine->bond_execute); in bond_virtual_engine()
4448 rq[0]->engine->name); in bond_virtual_engine()
4449 err = -EIO; in bond_virtual_engine()
4456 err = -EIO; in bond_virtual_engine()
4460 if (rq[n + 1]->engine != siblings[n]) { in bond_virtual_engine()
4462 siblings[n]->name, in bond_virtual_engine()
4463 rq[n + 1]->engine->name, in bond_virtual_engine()
4464 rq[0]->engine->name); in bond_virtual_engine()
4465 err = -EINVAL; in bond_virtual_engine()
4472 rq[0] = ERR_PTR(-ENOMEM); in bond_virtual_engine()
4478 if (igt_flush_test(gt->i915)) in bond_virtual_engine()
4479 err = -EIO; in bond_virtual_engine()
4500 if (intel_uc_uses_guc_submission(&gt->uc)) in live_virtual_bond()
4511 for (p = phases; p->name; p++) { in live_virtual_bond()
4514 p->flags); in live_virtual_bond()
4517 __func__, p->name, class, nsibling, err); in live_virtual_bond()
4544 return -ENOMEM; in reset_virtual_engine()
4564 err = -ETIME; in reset_virtual_engine()
4568 engine = rq->engine; in reset_virtual_engine()
4569 GEM_BUG_ON(engine == ve->engine); in reset_virtual_engine()
4572 if (test_and_set_bit(I915_RESET_ENGINE + engine->id, in reset_virtual_engine()
4573 &gt->reset.flags)) { in reset_virtual_engine()
4575 err = -EBUSY; in reset_virtual_engine()
4578 tasklet_disable(&engine->execlists.tasklet); in reset_virtual_engine()
4580 engine->execlists.tasklet.func(engine->execlists.tasklet.data); in reset_virtual_engine()
4581 GEM_BUG_ON(execlists_active(&engine->execlists) != rq); in reset_virtual_engine()
4584 spin_lock_irq(&engine->active.lock); in reset_virtual_engine()
4586 spin_unlock_irq(&engine->active.lock); in reset_virtual_engine()
4587 GEM_BUG_ON(rq->engine != ve->engine); in reset_virtual_engine()
4594 GEM_BUG_ON(rq->fence.error != -EIO); in reset_virtual_engine()
4596 /* Release our grasp on the engine, letting CS flow again */ in reset_virtual_engine()
4597 tasklet_enable(&engine->execlists.tasklet); in reset_virtual_engine()
4598 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags); in reset_virtual_engine()
4604 engine->name); in reset_virtual_engine()
4606 err = -EIO; in reset_virtual_engine()
4615 engine->name); in reset_virtual_engine()
4617 err = -ETIME; in reset_virtual_engine()
4645 if (intel_uc_uses_guc_submission(&gt->uc)) in live_virtual_reset()
4703 if (intel_gt_is_wedged(&i915->gt)) in intel_execlists_live_selftests()
4706 return intel_gt_live_subtests(tests, &i915->gt); in intel_execlists_live_selftests()
4712 i915_ggtt_offset(ce->engine->status_page.vma) + in emit_semaphore_signal()
4715 u32 *cs; in emit_semaphore_signal() local
4721 cs = intel_ring_begin(rq, 4); in emit_semaphore_signal()
4722 if (IS_ERR(cs)) { in emit_semaphore_signal()
4724 return PTR_ERR(cs); in emit_semaphore_signal()
4727 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in emit_semaphore_signal()
4728 *cs++ = offset; in emit_semaphore_signal()
4729 *cs++ = 0; in emit_semaphore_signal()
4730 *cs++ = 1; in emit_semaphore_signal()
4732 intel_ring_advance(rq, cs); in emit_semaphore_signal()
4734 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in emit_semaphore_signal()
4745 rq = intel_engine_create_kernel_request(ce->engine); in context_flush()
4749 fence = i915_active_fence_get(&ce->timeline->last_request); in context_flush()
4758 err = -ETIME; in context_flush()
4780 return -ENOMEM; in live_lrc_layout()
4787 if (!engine->default_state) in live_lrc_layout()
4790 hw = shmem_pin_map(engine->default_state); in live_lrc_layout()
4798 engine->kernel_context, in live_lrc_layout()
4800 engine->kernel_context->ring, in live_lrc_layout()
4813 pr_debug("%s: skipped instruction %x at dword %d\n", in live_lrc_layout()
4814 engine->name, lri, dw); in live_lrc_layout()
4820 pr_err("%s: Expected LRI command at dword %d, found %08x\n", in live_lrc_layout()
4821 engine->name, dw, lri); in live_lrc_layout()
4822 err = -EINVAL; in live_lrc_layout()
4827 pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n", in live_lrc_layout()
4828 engine->name, dw, lri, lrc[dw]); in live_lrc_layout()
4829 err = -EINVAL; in live_lrc_layout()
4839 pr_err("%s: Different registers found at dword %d, expected %x, found %x\n", in live_lrc_layout()
4840 engine->name, dw, hw[dw], lrc[dw]); in live_lrc_layout()
4841 err = -EINVAL; in live_lrc_layout()
4850 lri -= 2; in live_lrc_layout()
4855 pr_info("%s: HW register image:\n", engine->name); in live_lrc_layout()
4858 pr_info("%s: SW register image:\n", engine->name); in live_lrc_layout()
4862 shmem_unpin_map(engine->default_state, hw); in live_lrc_layout()
4879 return -1; in find_offset()
4901 i915_mmio_reg_offset(RING_START(engine->mmio_base)), in live_lrc_fixed()
4902 CTX_RING_START - 1, in live_lrc_fixed()
4906 i915_mmio_reg_offset(RING_CTL(engine->mmio_base)), in live_lrc_fixed()
4907 CTX_RING_CTL - 1, in live_lrc_fixed()
4911 i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)), in live_lrc_fixed()
4912 CTX_RING_HEAD - 1, in live_lrc_fixed()
4916 i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)), in live_lrc_fixed()
4917 CTX_RING_TAIL - 1, in live_lrc_fixed()
4921 i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)), in live_lrc_fixed()
4926 i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)), in live_lrc_fixed()
4927 CTX_BB_STATE - 1, in live_lrc_fixed()
4931 i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(engine->mmio_base)), in live_lrc_fixed()
4936 i915_mmio_reg_offset(RING_INDIRECT_CTX(engine->mmio_base)), in live_lrc_fixed()
4941 i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(engine->mmio_base)), in live_lrc_fixed()
4946 i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)), in live_lrc_fixed()
4947 CTX_TIMESTAMP - 1, in live_lrc_fixed()
4951 i915_mmio_reg_offset(GEN8_RING_CS_GPR(engine->mmio_base, 0)), in live_lrc_fixed()
4956 i915_mmio_reg_offset(RING_CMD_BUF_CCTL(engine->mmio_base)), in live_lrc_fixed()
4964 if (!engine->default_state) in live_lrc_fixed()
4967 hw = shmem_pin_map(engine->default_state); in live_lrc_fixed()
4974 for (t = tbl; t->name; t++) { in live_lrc_fixed()
4975 int dw = find_offset(hw, t->reg); in live_lrc_fixed()
4977 if (dw != t->offset) { in live_lrc_fixed()
4978 pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n", in live_lrc_fixed()
4979 engine->name, in live_lrc_fixed()
4980 t->name, in live_lrc_fixed()
4981 t->reg, in live_lrc_fixed()
4983 t->offset); in live_lrc_fixed()
4984 err = -EINVAL; in live_lrc_fixed()
4988 shmem_unpin_map(engine->default_state, hw); in live_lrc_fixed()
5006 u32 *cs; in __live_lrc_state() local
5016 err = i915_gem_object_lock(scratch->obj, &ww); in __live_lrc_state()
5028 cs = intel_ring_begin(rq, 4 * MAX_IDX); in __live_lrc_state()
5029 if (IS_ERR(cs)) { in __live_lrc_state()
5030 err = PTR_ERR(cs); in __live_lrc_state()
5035 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; in __live_lrc_state()
5036 *cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base)); in __live_lrc_state()
5037 *cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32); in __live_lrc_state()
5038 *cs++ = 0; in __live_lrc_state()
5040 expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma); in __live_lrc_state()
5042 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; in __live_lrc_state()
5043 *cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)); in __live_lrc_state()
5044 *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32); in __live_lrc_state()
5045 *cs++ = 0; in __live_lrc_state()
5047 err = i915_request_await_object(rq, scratch->obj, true); in __live_lrc_state()
5057 expected[RING_TAIL_IDX] = ce->ring->tail; in __live_lrc_state()
5060 err = -ETIME; in __live_lrc_state()
5064 cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); in __live_lrc_state()
5065 if (IS_ERR(cs)) { in __live_lrc_state()
5066 err = PTR_ERR(cs); in __live_lrc_state()
5071 if (cs[n] != expected[n]) { in __live_lrc_state()
5072 pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n", in __live_lrc_state()
5073 engine->name, n, cs[n], expected[n]); in __live_lrc_state()
5074 err = -EINVAL; in __live_lrc_state()
5079 i915_gem_object_unpin_map(scratch->obj); in __live_lrc_state()
5086 if (err == -EDEADLK) { in __live_lrc_state()
5119 if (igt_flush_test(gt->i915)) in live_lrc_state()
5120 err = -EIO; in live_lrc_state()
5129 u32 *cs; in gpr_make_dirty() local
5136 cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2); in gpr_make_dirty()
5137 if (IS_ERR(cs)) { in gpr_make_dirty()
5139 return PTR_ERR(cs); in gpr_make_dirty()
5142 *cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW); in gpr_make_dirty()
5144 *cs++ = CS_GPR(ce->engine, n); in gpr_make_dirty()
5145 *cs++ = STACK_MAGIC; in gpr_make_dirty()
5147 *cs++ = MI_NOOP; in gpr_make_dirty()
5149 intel_ring_advance(rq, cs); in gpr_make_dirty()
5151 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in gpr_make_dirty()
5161 i915_ggtt_offset(ce->engine->status_page.vma) + in __gpr_read()
5164 u32 *cs; in __gpr_read() local
5172 cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW); in __gpr_read()
5173 if (IS_ERR(cs)) { in __gpr_read()
5175 return ERR_CAST(cs); in __gpr_read()
5178 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; in __gpr_read()
5179 *cs++ = MI_NOOP; in __gpr_read()
5181 *cs++ = MI_SEMAPHORE_WAIT | in __gpr_read()
5185 *cs++ = 0; in __gpr_read()
5186 *cs++ = offset; in __gpr_read()
5187 *cs++ = 0; in __gpr_read()
5190 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; in __gpr_read()
5191 *cs++ = CS_GPR(ce->engine, n); in __gpr_read()
5192 *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32); in __gpr_read()
5193 *cs++ = 0; in __gpr_read()
5197 err = i915_request_await_object(rq, scratch->obj, true); in __gpr_read()
5216 u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4); in __live_lrc_gpr()
5219 u32 *cs; in __live_lrc_gpr() local
5223 if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS) in __live_lrc_gpr()
5226 err = gpr_make_dirty(engine->kernel_context); in __live_lrc_gpr()
5245 err = gpr_make_dirty(engine->kernel_context); in __live_lrc_gpr()
5249 err = emit_semaphore_signal(engine->kernel_context, slot); in __live_lrc_gpr()
5258 err = -ETIME; in __live_lrc_gpr()
5262 cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); in __live_lrc_gpr()
5263 if (IS_ERR(cs)) { in __live_lrc_gpr()
5264 err = PTR_ERR(cs); in __live_lrc_gpr()
5269 if (cs[n]) { in __live_lrc_gpr()
5270 pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n", in __live_lrc_gpr()
5271 engine->name, in __live_lrc_gpr()
5273 cs[n]); in __live_lrc_gpr()
5274 err = -EINVAL; in __live_lrc_gpr()
5279 i915_gem_object_unpin_map(scratch->obj); in __live_lrc_gpr()
5282 memset32(&slot[0], -1, 4); in __live_lrc_gpr()
5320 if (igt_flush_test(gt->i915)) in live_lrc_gpr()
5321 err = -EIO; in live_lrc_gpr()
5334 i915_ggtt_offset(ce->engine->status_page.vma) + in create_timestamp()
5337 u32 *cs; in create_timestamp() local
5344 cs = intel_ring_begin(rq, 10); in create_timestamp()
5345 if (IS_ERR(cs)) { in create_timestamp()
5346 err = PTR_ERR(cs); in create_timestamp()
5350 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; in create_timestamp()
5351 *cs++ = MI_NOOP; in create_timestamp()
5353 *cs++ = MI_SEMAPHORE_WAIT | in create_timestamp()
5357 *cs++ = 0; in create_timestamp()
5358 *cs++ = offset; in create_timestamp()
5359 *cs++ = 0; in create_timestamp()
5361 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; in create_timestamp()
5362 *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base)); in create_timestamp()
5363 *cs++ = offset + idx * sizeof(u32); in create_timestamp()
5364 *cs++ = 0; in create_timestamp()
5366 intel_ring_advance(rq, cs); in create_timestamp()
5368 rq->sched.attr.priority = I915_PRIORITY_MASK; in create_timestamp()
5389 return (s32)(end - start) > 0; in timestamp_advanced()
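The s32 cast makes the advancement test safe across timestamp wraparound; worked examples:

    /* start = 0xfffffff0, end = 0x00000010:
     *   end - start == 0x20, (s32)0x20 > 0    -> advanced (wrapped).
     * start = 0x00000010, end = 0x00000000:
     *   end - start == 0xfffffff0, (s32) < 0  -> not advanced.
     */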
5394 u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4); in __lrc_timestamp()
5399 arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison; in __lrc_timestamp()
5400 rq = create_timestamp(arg->ce[0], slot, 1); in __lrc_timestamp()
5404 err = wait_for_submit(rq->engine, rq, HZ / 2); in __lrc_timestamp()
5409 arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef; in __lrc_timestamp()
5410 err = emit_semaphore_signal(arg->ce[1], slot); in __lrc_timestamp()
5419 err = context_flush(arg->ce[0], HZ / 2); in __lrc_timestamp()
5423 if (!timestamp_advanced(arg->poison, slot[1])) { in __lrc_timestamp()
5424 pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n", in __lrc_timestamp()
5425 arg->engine->name, preempt ? "preempt" : "simple", in __lrc_timestamp()
5426 arg->poison, slot[1]); in __lrc_timestamp()
5427 err = -EINVAL; in __lrc_timestamp()
5430 timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]); in __lrc_timestamp()
5432 pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n", in __lrc_timestamp()
5433 arg->engine->name, preempt ? "preempt" : "simple", in __lrc_timestamp()
5435 err = -EINVAL; in __lrc_timestamp()
5439 memset32(slot, -1, 4); in __lrc_timestamp()
5510 if (igt_flush_test(gt->i915)) in live_lrc_timestamp()
5511 err = -EIO; in live_lrc_timestamp()
5526 obj = i915_gem_object_create_internal(vm->i915, size); in create_user_vma()
5549 u32 dw, x, *cs, *hw; in store_context() local
5552 batch = create_user_vma(ce->vm, SZ_64K); in store_context()
5556 cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); in store_context()
5557 if (IS_ERR(cs)) { in store_context()
5559 return ERR_CAST(cs); in store_context()
5562 defaults = shmem_pin_map(ce->engine->default_state); in store_context()
5564 i915_gem_object_unpin_map(batch->obj); in store_context()
5566 return ERR_PTR(-ENOMEM); in store_context()
5569 x = 0; in store_context()
5588 while (len--) { in store_context()
5589 *cs++ = MI_STORE_REGISTER_MEM_GEN8; in store_context()
5590 *cs++ = hw[dw]; in store_context()
5591 *cs++ = lower_32_bits(scratch->node.start + x); in store_context()
5592 *cs++ = upper_32_bits(scratch->node.start + x); in store_context()
5595 x += 4; in store_context()
5600 *cs++ = MI_BATCH_BUFFER_END; in store_context()
5602 shmem_unpin_map(ce->engine->default_state, defaults); in store_context()
5604 i915_gem_object_flush_map(batch->obj); in store_context()
5605 i915_gem_object_unpin_map(batch->obj); in store_context()
5617 err = i915_request_await_object(rq, vma->obj, flags); in move_to_active()
5633 u32 *cs; in record_registers() local
5666 cs = intel_ring_begin(rq, 14); in record_registers()
5667 if (IS_ERR(cs)) { in record_registers()
5668 err = PTR_ERR(cs); in record_registers()
5672 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; in record_registers()
5673 *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8); in record_registers()
5674 *cs++ = lower_32_bits(b_before->node.start); in record_registers()
5675 *cs++ = upper_32_bits(b_before->node.start); in record_registers()
5677 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; in record_registers()
5678 *cs++ = MI_SEMAPHORE_WAIT | in record_registers()
5682 *cs++ = 0; in record_registers()
5683 *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) + in record_registers()
5685 *cs++ = 0; in record_registers()
5686 *cs++ = MI_NOOP; in record_registers()
5688 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; in record_registers()
5689 *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8); in record_registers()
5690 *cs++ = lower_32_bits(b_after->node.start); in record_registers()
5691 *cs++ = upper_32_bits(b_after->node.start); in record_registers()
5693 intel_ring_advance(rq, cs); in record_registers()
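/*
 * The 14 dwords above bracket the two capture batches with arbitration
 * control: MI_ARB_DISABLE keeps each capture atomic, while arbitration
 * is re-enabled only across the MI_SEMAPHORE_WAIT so the poisoning
 * context can run in exactly that window; the "after" capture then
 * records whatever state the poisoning leaked into this context.
 */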
5713 u32 dw, *cs, *hw; in load_context() local
5716 batch = create_user_vma(ce->vm, SZ_64K); in load_context()
5720 cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); in load_context()
5721 if (IS_ERR(cs)) { in load_context()
5723 return ERR_CAST(cs); in load_context()
5726 defaults = shmem_pin_map(ce->engine->default_state); in load_context()
5728 i915_gem_object_unpin_map(batch->obj); in load_context()
5730 return ERR_PTR(-ENOMEM); in load_context()
5751 *cs++ = MI_LOAD_REGISTER_IMM(len); in load_context()
5752 while (len--) { in load_context()
5753 *cs++ = hw[dw]; in load_context()
5754 *cs++ = poison; in load_context()
5760 *cs++ = MI_BATCH_BUFFER_END; in load_context()
5762 shmem_unpin_map(ce->engine->default_state, defaults); in load_context()
5764 i915_gem_object_flush_map(batch->obj); in load_context()
5765 i915_gem_object_unpin_map(batch->obj); in load_context()
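/*
 * MI_LOAD_REGISTER_IMM(n) is followed by n (offset, value) pairs, so
 * the loop above rewrites every register listed in the default context
 * image with the same poison value, all from an unprivileged batch.
 */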
5774 u32 *cs; in poison_registers() local
5791 cs = intel_ring_begin(rq, 8); in poison_registers()
5792 if (IS_ERR(cs)) { in poison_registers()
5793 err = PTR_ERR(cs); in poison_registers()
5797 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; in poison_registers()
5798 *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8); in poison_registers()
5799 *cs++ = lower_32_bits(batch->node.start); in poison_registers()
5800 *cs++ = upper_32_bits(batch->node.start); in poison_registers()
5802 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; in poison_registers()
5803 *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) + in poison_registers()
5805 *cs++ = 0; in poison_registers()
5806 *cs++ = 1; in poison_registers()
5808 intel_ring_advance(rq, cs); in poison_registers()
5810 rq->sched.attr.priority = I915_PRIORITY_BARRIER; in poison_registers()
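/*
 * As set up here, once the poisoning batch returns, the
 * MI_STORE_DWORD_IMM above writes 1 to the shared semaphore to release
 * the recording context, and the barrier priority asks the scheduler
 * to run this request ahead of everything else on the engine.
 */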
5829 u32 x, dw, *hw, *lrc; in compare_isolation() local
5834 A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC); in compare_isolation()
5838 A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC); in compare_isolation()
5844 B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC); in compare_isolation()
5850 B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC); in compare_isolation()
5856 lrc = i915_gem_object_pin_map(ce->state->obj, in compare_isolation()
5857 i915_coherent_map_type(engine->i915)); in compare_isolation()
5864 defaults = shmem_pin_map(ce->engine->default_state); in compare_isolation()
5866 err = -ENOMEM; in compare_isolation()
5870 x = 0; in compare_isolation()
5889 while (len--) { in compare_isolation()
5890 if (!is_moving(A[0][x], A[1][x]) && in compare_isolation()
5891 (A[0][x] != B[0][x] || A[1][x] != B[1][x])) { in compare_isolation()
5898 …err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison … in compare_isolation()
5899 engine->name, dw, in compare_isolation()
5901 A[0][x], B[0][x], B[1][x], in compare_isolation()
5903 err = -EINVAL; in compare_isolation()
5907 x++; in compare_isolation()
5912 shmem_unpin_map(ce->engine->default_state, defaults); in compare_isolation()
5914 i915_gem_object_unpin_map(ce->state->obj); in compare_isolation()
5916 i915_gem_object_unpin_map(result[1]->obj); in compare_isolation()
5918 i915_gem_object_unpin_map(result[0]->obj); in compare_isolation()
5920 i915_gem_object_unpin_map(ref[1]->obj); in compare_isolation()
5922 i915_gem_object_unpin_map(ref[0]->obj); in compare_isolation()
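/*
 * A register only counts as a leak if it is stable across the two
 * untouched reference captures and yet differs in a result capture;
 * self-advancing registers such as timestamps are filtered out by
 * is_moving(), which reduces to an inequality test (a sketch consistent
 * with its use above):
 */
static bool is_moving(u32 a, u32 b)
{
	return a != b;
}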
5928 u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1); in __lrc_isolation()
5944 ref[0] = create_user_vma(A->vm, SZ_64K); in __lrc_isolation()
5950 ref[1] = create_user_vma(A->vm, SZ_64K); in __lrc_isolation()
5967 err = -ETIME; in __lrc_isolation()
5972 result[0] = create_user_vma(A->vm, SZ_64K); in __lrc_isolation()
5978 result[1] = create_user_vma(A->vm, SZ_64K); in __lrc_isolation()
5992 WRITE_ONCE(*sema, -1); in __lrc_isolation()
5999 err = -ETIME; in __lrc_isolation()
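/*
 * Putting the fragments together: context A's registers are recorded
 * into ref[], context B then poisons the live register state from its
 * own unprivileged batch while A waits on the semaphore, A's registers
 * are recorded again into result[], and the two sets are compared;
 * writing -1 to the semaphore unblocks the recorder on failure paths.
 */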
6023 if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9) in skip_isolation()
6026 if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11) in skip_isolation()
6047 * Our goal is to try to verify that per-context state cannot be in live_lrc_isolation()
6048 * tampered with by another non-privileged client. in live_lrc_isolation()
6075 if (igt_flush_test(gt->i915)) { in live_lrc_isolation()
6076 err = -EIO; in live_lrc_isolation()
6097 err = -ETIME; in indirect_ctx_submit_req()
6108 emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs) in emit_indirect_ctx_bb_canary() argument
6110 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | in emit_indirect_ctx_bb_canary()
6113 *cs++ = i915_mmio_reg_offset(RING_START(0)); in emit_indirect_ctx_bb_canary()
6114 *cs++ = i915_ggtt_offset(ce->state) + in emit_indirect_ctx_bb_canary()
6117 *cs++ = 0; in emit_indirect_ctx_bb_canary()
6119 return cs; in emit_indirect_ctx_bb_canary()
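/*
 * The canary batch runs as the indirect context BB during restore: it
 * stores the live RING_START register into a spare dword of this
 * context's own image. If the BB really executed in this context, the
 * stored canary must equal the CTX_RING_START value in the saved
 * register state, which is what check_ring_start() verifies below.
 */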
6125 u32 *cs = context_indirect_bb(ce); in indirect_ctx_bb_setup() local
6127 cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d; in indirect_ctx_bb_setup()
6129 setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary); in indirect_ctx_bb_setup()
6134 const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) - in check_ring_start()
6137 if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START]) in check_ring_start()
6140 pr_err("ring start mismatch: canary 0x%08x vs state 0x%08x\n", in check_ring_start()
6142 ce->lrc_reg_state[CTX_RING_START]); in check_ring_start()
6156 return -EINVAL; in indirect_ctx_bb_check()
6183 if (!a->wa_bb_page) { in __live_lrc_indirect_ctx_bb()
6184 GEM_BUG_ON(b->wa_bb_page); in __live_lrc_indirect_ctx_bb()
6185 GEM_BUG_ON(INTEL_GEN(engine->i915) == 12); in __live_lrc_indirect_ctx_bb()
6229 if (igt_flush_test(gt->i915)) in live_lrc_indirect_ctx_bb()
6230 err = -EIO; in live_lrc_indirect_ctx_bb()
6242 const unsigned int bit = I915_RESET_ENGINE + engine->id; in garbage_reset()
6243 unsigned long *lock = &engine->gt->reset.flags; in garbage_reset()
6248 tasklet_disable(&engine->execlists.tasklet); in garbage_reset()
6250 if (!rq->fence.error) in garbage_reset()
6253 tasklet_enable(&engine->execlists.tasklet); in garbage_reset()
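/*
 * Holding the engine's reset bit with the execlists tasklet disabled
 * lets the test reset the engine while the scrambled context is still
 * in flight, without the submission backend processing events and
 * tidying up first.
 */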
6268 ce->lrc_reg_state, in garbage()
6269 ce->engine->context_size - in garbage()
6305 err = -ETIME; in __lrc_garbage()
6313 if (!hang->fence.error) { in __lrc_garbage()
6316 engine->name); in __lrc_garbage()
6317 err = -EINVAL; in __lrc_garbage()
6323 engine->name); in __lrc_garbage()
6325 err = -EIO; in __lrc_garbage()
6353 if (!intel_has_reset_engine(engine->gt)) in live_lrc_garbage()
6364 if (igt_flush_test(gt->i915)) in live_lrc_garbage()
6365 err = -EIO; in live_lrc_garbage()
6384 ce->runtime.num_underflow = 0; in __live_pphwsp_runtime()
6385 ce->runtime.max_underflow = 0; in __live_pphwsp_runtime()
6397 if (--loop == 0) in __live_pphwsp_runtime()
6411 pr_err("%s: request not completed!\n", engine->name); in __live_pphwsp_runtime()
6415 igt_flush_test(engine->i915); in __live_pphwsp_runtime()
6418 engine->name, in __live_pphwsp_runtime()
6423 if (ce->runtime.num_underflow) { in __live_pphwsp_runtime()
6425 engine->name, in __live_pphwsp_runtime()
6426 ce->runtime.num_underflow, in __live_pphwsp_runtime()
6427 ce->runtime.max_underflow); in __live_pphwsp_runtime()
6429 err = -EOVERFLOW; in __live_pphwsp_runtime()
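/*
 * The PPHWSP runtime is accumulated from CTX_TIMESTAMP deltas sampled
 * at each context save; an underflow means a delta came back negative,
 * i.e. the saved timestamp went backwards, so any underflow recorded
 * while cycling the context here fails the test.
 */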
6457 if (igt_flush_test(gt->i915)) in live_pphwsp_runtime()
6458 err = -EIO; in live_pphwsp_runtime()
6480 return intel_gt_live_subtests(tests, &i915->gt); in intel_lrc_live_selftests()