Lines Matching +full:gen +full:- +full:2

2  * SPDX-License-Identifier: GPL-2.0
21 return *a - *b; in cmp_u32()
29 atomic_inc(&gt->rps.num_waiters); in perf_begin()
30 schedule_work(&gt->rps.work); in perf_begin()
31 flush_work(&gt->rps.work); in perf_begin()
36 atomic_dec(&gt->rps.num_waiters); in perf_end()
39 return igt_flush_test(gt->i915); in perf_end()
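The matches above appear to come from the i915 engine selftests (the perf_* and create_*_batch helpers live in drivers/gpu/drm/i915/gt/selftest_engine_cs.c). The perf_begin()/perf_end() lines pin the GPU clock for the duration of a measurement by taking a fake waitboost reference on the RPS state. A minimal sketch of how the pair fits together; the intel_gt_pm_get()/intel_gt_pm_put() bracketing is assumed from the usual selftest pattern rather than shown in the matched lines:

static void perf_begin(struct intel_gt *gt)
{
	intel_gt_pm_get(gt);		/* assumed: keep the GT awake for the test */

	/* Boost gpufreq to max [waitboost] and keep it fixed */
	atomic_inc(&gt->rps.num_waiters);
	schedule_work(&gt->rps.work);
	flush_work(&gt->rps.work);
}

static int perf_end(struct intel_gt *gt)
{
	atomic_dec(&gt->rps.num_waiters);	/* drop the fake waitboost */
	intel_gt_pm_put(gt);			/* assumed, pairs with perf_begin() */

	return igt_flush_test(gt->i915);	/* flush and report any stuck work */
}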
52 if (INTEL_GEN(rq->engine->i915) >= 8) in write_timestamp()
55 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base)); in write_timestamp()
56 *cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32); in write_timestamp()
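write_timestamp() emits a store of the engine's CS timestamp register into a chosen slot of the request's hardware status page, so the CPU can read back cycle deltas once the request completes. A sketch built around the three matched lines, assuming the usual MI_STORE_REGISTER_MEM | MI_USE_GGTT encoding (the opcode increment on gen8+ accounts for the extra address dword):

static int write_timestamp(struct i915_request *rq, int slot)
{
	u32 cmd;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Store RING_TIMESTAMP into HWSP slot 'slot' via its GGTT address */
	cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
	if (INTEL_GEN(rq->engine->i915) >= 8)
		cmd++;	/* gen8+ takes one extra dword for the 64b address */
	*cs++ = cmd;
	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
	*cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32);
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	return 0;
}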
71 obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE); in create_empty_batch()
85 vma = i915_vma_instance(obj, ce->vm, NULL); in create_empty_batch()
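create_empty_batch() builds the smallest possible batch, a single MI_BATCH_BUFFER_END, to act as the measurement baseline. A sketch of the construction around the two matched lines, assuming the pin_map/flush_map pattern these selftests normally use (error unwinding abbreviated):

static struct i915_vma *create_empty_batch(struct intel_context *ce)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_put;
	}

	cs[0] = MI_BATCH_BUFFER_END;	/* empty batch: terminate immediately */
	i915_gem_object_flush_map(obj);

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_unpin;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err_unpin;

	i915_gem_object_unpin_map(obj);
	return vma;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}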
111 sum = mul_u32_u32(a[2], 2); in trifilter()
115 return sum >> 2; in trifilter()
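trifilter() collapses the COUNT (5) raw samples into a single value: sort, discard the two outliers, and take a 1:2:1 weighted average centred on the median. A sketch, assuming COUNT == 5 and the cmp_u32() comparator matched above:

static u32 trifilter(u32 *a)
{
	u64 sum;

	/* Sort so that a[2] is the median and a[0]/a[4] are the outliers */
	sort(a, COUNT, sizeof(*a), cmp_u32, NULL);

	/* Weighted average of the middle three: (a[1] + 2*a[2] + a[3]) / 4 */
	sum = mul_u32_u32(a[2], 2);
	sum += a[1];
	sum += a[3];

	return sum >> 2;
}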
125 if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */ in perf_mi_bb_start()
130 struct intel_context *ce = engine->kernel_context; in perf_mi_bb_start()
160 err = write_timestamp(rq, 2); in perf_mi_bb_start()
164 err = rq->engine->emit_bb_start(rq, in perf_mi_bb_start()
165 batch->node.start, 8, in perf_mi_bb_start()
179 err = -EIO; in perf_mi_bb_start()
184 cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2]; in perf_mi_bb_start()
192 engine->name, trifilter(cycles)); in perf_mi_bb_start()
195 err = -EIO; in perf_mi_bb_start()
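perf_mi_bb_start() brackets an MI_BB_START of the empty batch with two timestamp writes (slots 2 and 3), waits for the request, and reads the delta straight out of the HWSP. A condensed sketch of the per-engine measurement loop around the matched lines; request creation via i915_request_create(), the engine-pm bracketing, and most error unwinding are assumed or trimmed:

	/* inside the for_each_engine() loop; cycles[COUNT], err, batch set up above */
	for (i = 0; i < ARRAY_SIZE(cycles); i++) {
		struct i915_request *rq;

		rq = i915_request_create(ce);	/* ce == engine->kernel_context */
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = write_timestamp(rq, 2);			/* t2: before BB_START */
		if (err == 0)
			err = rq->engine->emit_bb_start(rq,
							batch->node.start, 8,
							0);
		if (err == 0)
			err = write_timestamp(rq, 3);		/* t3: after the batch */

		i915_request_get(rq);
		i915_request_add(rq);
		if (i915_request_wait(rq, 0, HZ / 5) < 0)
			err = -EIO;				/* request never completed */
		i915_request_put(rq);
		if (err)
			break;

		/* both timestamps landed in the HWSP; the delta is the BB_START cost */
		cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
	}

	/* report the trifiltered result (exact message text approximate) */
	pr_info("%s: MI_BB_START cycles: %u\n",
		engine->name, trifilter(cycles));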
207 obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K); in create_nop_batch()
218 cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END; in create_nop_batch()
222 vma = i915_vma_instance(obj, ce->vm, NULL); in create_nop_batch()
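create_nop_batch() follows the same construction as create_empty_batch() above, except that it fills a 64K object with MI_NOOP (encoding 0) and puts the terminator in the very last dword, giving the command streamer a deliberately long batch to parse. A sketch of just the part that differs:

	cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto err_put;
	}

	/* 64K of MI_NOOP (all zeroes), terminated in the last dword */
	memset(cs, 0, SZ_64K);
	cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);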
249 if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */ in perf_mi_noop()
254 struct intel_context *ce = engine->kernel_context; in perf_mi_noop()
300 err = write_timestamp(rq, 2); in perf_mi_noop()
304 err = rq->engine->emit_bb_start(rq, in perf_mi_noop()
305 base->node.start, 8, in perf_mi_noop()
314 err = rq->engine->emit_bb_start(rq, in perf_mi_noop()
315 nop->node.start, in perf_mi_noop()
316 nop->node.size, in perf_mi_noop()
330 err = -EIO; in perf_mi_noop()
336 (rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) - in perf_mi_noop()
337 (rq->hwsp_seqno[3] - rq->hwsp_seqno[2]); in perf_mi_noop()
346 engine->name, trifilter(cycles)); in perf_mi_noop()
349 err = -EIO; in perf_mi_noop()
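perf_mi_noop() runs two batches back to back in a single request, timestamping at slots 2, 3 and 4: the 8-byte empty batch as a baseline, then the 64K NOP batch. Subtracting the baseline interval from the NOP interval isolates the cost of chewing through the NOPs themselves. A condensed sketch of the body of the measurement loop (request plumbing, waiting, and error paths as in the perf_mi_bb_start() sketch above):

		err = write_timestamp(rq, 2);			/* t2 */
		if (err == 0)					/* baseline: empty batch */
			err = rq->engine->emit_bb_start(rq,
							base->node.start, 8,
							0);
		if (err == 0)
			err = write_timestamp(rq, 3);		/* t3 */
		if (err == 0)					/* 64K of MI_NOOP */
			err = rq->engine->emit_bb_start(rq,
							nop->node.start,
							nop->node.size,
							0);
		if (err == 0)
			err = write_timestamp(rq, 4);		/* t4 */

		/* (t4 - t3) = BB_START + NOPs; (t3 - t2) = BB_START alone */
		cycles[i] =
			(rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
			(rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);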
361 if (intel_gt_is_wedged(&i915->gt)) in intel_engine_cs_perf_selftests()
364 return intel_gt_live_subtests(tests, &i915->gt); in intel_engine_cs_perf_selftests()
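The perf tests are registered through the standard live-selftest entry point; the wedged check simply skips the run if the GPU is already dead. A sketch, assuming the usual SUBTEST() table:

int intel_engine_cs_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_mi_bb_start),
		SUBTEST(perf_mi_noop),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;	/* nothing to measure on a dead GPU */

	return intel_gt_live_subtests(tests, &i915->gt);
}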
376 u8 gen = info->mmio_bases[j].gen; in intel_mmio_bases_check() local
377 u32 base = info->mmio_bases[j].base; in intel_mmio_bases_check()
379 if (gen >= prev) { in intel_mmio_bases_check()
380 pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n", in intel_mmio_bases_check()
382 intel_engine_class_repr(info->class), in intel_mmio_bases_check()
383 info->class, info->instance, in intel_mmio_bases_check()
384 prev, gen); in intel_mmio_bases_check()
385 return -EINVAL; in intel_mmio_bases_check()
388 if (gen == 0) in intel_mmio_bases_check()
392 pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n", in intel_mmio_bases_check()
394 intel_engine_class_repr(info->class), in intel_mmio_bases_check()
395 info->class, info->instance, in intel_mmio_bases_check()
396 base, gen, j); in intel_mmio_bases_check()
397 return -EINVAL; in intel_mmio_bases_check()
400 prev = gen; in intel_mmio_bases_check()
403 pr_debug("%s: min gen supported for %s%d is %d\n", in intel_mmio_bases_check()
405 intel_engine_class_repr(info->class), in intel_mmio_bases_check()
406 info->instance, in intel_mmio_bases_check()
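intel_mmio_bases_check() validates the static engine_info tables: each engine's mmio_bases[] array must be sorted by strictly descending gen, terminated by a gen == 0 sentinel, and every populated entry must carry a non-zero base. A sketch of the inner checking loop the matched lines belong to; the outer walk over the intel_engines[] table and the MAX_MMIO_BASES bound are assumed:

	u8 prev = U8_MAX;

	for (j = 0; j < MAX_MMIO_BASES; j++) {
		u8 gen = info->mmio_bases[j].gen;
		u32 base = info->mmio_bases[j].base;

		/* entries must appear in strictly descending gen order */
		if (gen >= prev) {
			pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n",
			       __func__,
			       intel_engine_class_repr(info->class),
			       info->class, info->instance,
			       prev, gen);
			return -EINVAL;
		}

		if (gen == 0)	/* sentinel: end of this engine's table */
			break;

		if (!base) {	/* populated entries need a valid mmio base */
			pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n",
			       __func__,
			       intel_engine_class_repr(info->class),
			       info->class, info->instance,
			       base, gen, j);
			return -EINVAL;
		}

		prev = gen;
	}

	/* prev now holds the lowest (oldest) gen with a dedicated base */
	pr_debug("%s: min gen supported for %s%d is %d\n",
		 __func__,
		 intel_engine_class_repr(info->class),
		 info->instance,
		 prev);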