Lines matching "+full:cs +full:- +full:x"

Code-search hits over the i915 engine-CS selftests (evidently drivers/gpu/drm/i915/gt/selftest_engine_cs.c). Only the matching source lines were captured; elided lines between hits are marked with "...", and hits are grouped below under their containing function.

From the file header:

         * SPDX-License-Identifier: GPL-2.0

In cmp_u32(), the u32 comparator used when sorting the sampled cycle counts (the subtraction result is truncated to int, which is harmless for the small deltas measured here):

        return *a - *b;

In perf_begin(), which pins the GPU at maximum frequency before sampling: taking a fake waiter reference makes the RPS worker apply waitboost, and the schedule/flush pair ensures the boost is in effect before any measurement starts:

        atomic_inc(&gt->rps.num_waiters);
        schedule_work(&gt->rps.work);
        flush_work(&gt->rps.work);

In perf_end(), which drops the waiter reference and flushes any outstanding test requests:

        atomic_dec(&gt->rps.num_waiters);
        ...
        return igt_flush_test(gt->i915);

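The two helpers bracket each measurement loop so every sample runs at the same pinned frequency. A sketch of the intended usage, inferred from the hits that follow (the exact for_each_engine() loop shape is an assumption, not part of the capture):

        perf_begin(gt);
        for_each_engine(engine, gt, id) {
                /* build a request, emit timestamps around a batch,
                 * and collect COUNT samples into cycles[] ...
                 */
        }
        if (perf_end(gt))
                err = -EIO;
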
In write_timestamp(), which emits a store of the engine's CS timestamp register into a per-request HWSP slot:

        u32 *cs;
        ...
        cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
        ...
        if (INTEL_GEN(rq->engine->i915) >= 8)
                ...
        *cs++ = cmd;
        *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
        *cs++ = i915_request_timeline(rq)->hwsp_offset + slot * sizeof(u32);
        *cs++ = 0;
        ...
        intel_ring_advance(rq, cs);

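The construction of cmd falls between the hits. From a reading of the surrounding kernel source (a recollection, not part of the capture), it is a store-register-to-memory command whose DWord count grows by one on gen8+, where the store takes a 64-bit address and the trailing 0 above supplies the upper address DWord (pre-gen8 it is mere padding):

        cmd = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
        if (INTEL_GEN(rq->engine->i915) >= 8)
                cmd++; /* one extra DWord for the 64-bit address */
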
In create_empty_batch(), which builds a one-page batch containing only MI_BATCH_BUFFER_END, the do-nothing baseline:

        u32 *cs;
        ...
        obj = i915_gem_object_create_internal(ce->engine->i915, PAGE_SIZE);
        ...
        cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                ...
        }
        ...
        cs[0] = MI_BATCH_BUFFER_END;
        ...
        vma = i915_vma_instance(obj, ce->vm, NULL);

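A sketch of how a helper like this typically finishes in i915, assuming the common flush/unpin/pin pattern (not verbatim from the capture):

        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);

        vma = i915_vma_instance(obj, ce->vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_put;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                goto err_put;

        return vma;

err_put:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
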
In perf_mi_bb_start(), which measures the cost of MI_BATCH_BUFFER_START by storing CS timestamps into HWSP slots 2 and 3 around the empty batch:

        if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
                ...

        struct intel_context *ce = engine->kernel_context;
        ...
        err = rq->engine->emit_bb_start(rq,
                                        batch->node.start, 8,
                                        ...);
        ...
        err = -EIO;
        ...
        cycles[i] = rq->hwsp_seqno[3] - rq->hwsp_seqno[2];
        ...
                engine->name, trifilter(cycles));
        ...
        err = -EIO;

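Each engine is sampled repeatedly and the runs are condensed by trifilter(), which is called above but not among the hits. Below is a userspace re-creation of what such a filter does: sort the samples, drop the extremes, and take a weighted median. The kernel version sorts with sort() and the cmp_u32 comparator shown earlier; COUNT and the exact weights here are my recollection, so treat this as illustrative:

#include <stdint.h>
#include <stdlib.h>

#define COUNT 5 /* samples per engine (assumed) */

static int cmp_u32(const void *A, const void *B)
{
        const uint32_t *a = A, *b = B;

        return (*a > *b) - (*a < *b); /* overflow-safe userspace variant */
}

/* Weighted median of the middle three of COUNT sorted samples:
 * (a[1] + 2*a[2] + a[3]) / 4, discarding the two extremes.
 */
static uint32_t trifilter(uint32_t *a)
{
        uint64_t sum;

        qsort(a, COUNT, sizeof(*a), cmp_u32);

        sum = 2ull * a[2];
        sum += a[1];
        sum += a[3];

        return (uint32_t)(sum >> 2);
}
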
In create_nop_batch(), which builds a 64KiB batch: zero-filling yields 16383 MI_NOOPs (MI_NOOP encodes as 0), and the final DWord is overwritten with MI_BATCH_BUFFER_END. It presumably completes the same way as create_empty_batch() above:

        u32 *cs;
        ...
        obj = i915_gem_object_create_internal(ce->engine->i915, SZ_64K);
        ...
        cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                ...
        }
        ...
        memset(cs, 0, SZ_64K);
        cs[SZ_64K / sizeof(*cs) - 1] = MI_BATCH_BUFFER_END;
        ...
        vma = i915_vma_instance(obj, ce->vm, NULL);

In perf_mi_noop(), which measures MI_NOOP throughput by timing the 64KiB NOP batch against the empty baseline batch (the cycles[i] assignment target is inferred from the matching line in perf_mi_bb_start()):

        if (INTEL_GEN(gt->i915) < 7) /* for per-engine CS_TIMESTAMP */
                ...

        struct intel_context *ce = engine->kernel_context;
        ...
        err = rq->engine->emit_bb_start(rq,
                                        base->node.start, 8,
                                        ...);
        ...
        err = rq->engine->emit_bb_start(rq,
                                        nop->node.start,
                                        nop->node.size,
                                        ...);
        ...
        err = -EIO;
        ...
        cycles[i] =
                (rq->hwsp_seqno[4] - rq->hwsp_seqno[3]) -
                (rq->hwsp_seqno[3] - rq->hwsp_seqno[2]);
        ...
                engine->name, trifilter(cycles));
        ...
        err = -EIO;

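Putting the hits together, the request writes timestamps into HWSP slots 2, 3 and 4, bracketing the two batches, and subtracting the baseline delta cancels the shared MI_BATCH_BUFFER_START and bookkeeping overhead. An annotated outline (ordering inferred from the slot indices above; not verbatim code):

        write_timestamp(rq, 2);         /* t2 */
        /* emit_bb_start: empty baseline batch (8 bytes) */
        write_timestamp(rq, 3);         /* t3 */
        /* emit_bb_start: 64KiB of MI_NOOP */
        write_timestamp(rq, 4);         /* t4 */

        /* cycles = (t4 - t3) - (t3 - t2)
         *        = (BB_START + 16383 NOOPs) - (BB_START + empty)
         *       ~= cost of the MI_NOOPs alone, in CS timestamp ticks
         */
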
In intel_engine_cs_perf_selftests(), the entry point, which skips entirely when the GT is already wedged (a wedged GPU cannot execute the test batches):

        if (intel_gt_is_wedged(&i915->gt))
                ...
        return intel_gt_live_subtests(tests, &i915->gt);

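The tests[] table handed to intel_gt_live_subtests() is not among the hits; given the two perf functions above, it is presumably the usual i915 subtest array, along the lines of:

        static const struct i915_subtest tests[] = {
                SUBTEST(perf_mi_bb_start),
                SUBTEST(perf_mi_noop),
        };
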
In intel_mmio_bases_check(), which validates each engine's mmio_bases[] table: entries must be ordered by strictly descending gen, the table is zero-terminated, and every live entry needs a non-zero base:

        u8 gen = info->mmio_bases[j].gen;
        u32 base = info->mmio_bases[j].base;
        ...
        pr_err("%s(%s, class:%d, instance:%d): mmio base for gen %x is before the one for gen %x\n",
               ...
               intel_engine_class_repr(info->class),
               info->class, info->instance,
               ...);
        return -EINVAL;
        ...
        pr_err("%s(%s, class:%d, instance:%d): invalid mmio base (%x) for gen %x at entry %u\n",
               ...
               intel_engine_class_repr(info->class),
               info->class, info->instance,
               ...);
        return -EINVAL;
        ...
        /* final hits: arguments of a per-engine summary debug print */
                intel_engine_class_repr(info->class),
                info->instance,

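The validation logic is small enough to re-create standalone. Below is a userspace sketch of the same check over a mock table; the struct layout, the MAX_MMIO_BASES bound and the sample values are assumptions for illustration, not taken from the capture:

#include <stdio.h>
#include <stdint.h>

#define MAX_MMIO_BASES 3 /* assumed table bound */

struct engine_mmio_base {
        uint32_t base; /* engine MMIO register base */
        uint8_t gen;   /* last gen this base applies to; 0 terminates */
};

/* Hypothetical table, ordered newest-gen first as the check requires. */
static const struct engine_mmio_base mmio_bases[MAX_MMIO_BASES] = {
        { .gen = 11, .base = 0x1c0000 },
        { .gen = 6,  .base = 0x12000 },
        { /* zero terminator */ },
};

static int check_mmio_bases(const struct engine_mmio_base *b)
{
        uint8_t prev = UINT8_MAX;

        for (int j = 0; j < MAX_MMIO_BASES; j++) {
                uint8_t gen = b[j].gen;
                uint32_t base = b[j].base;

                if (gen >= prev) { /* must be strictly descending */
                        fprintf(stderr, "entry %d out of order (gen %x after gen %x)\n",
                                j, gen, prev);
                        return -1;
                }

                if (gen == 0) /* zero gen terminates the table */
                        break;

                if (!base) { /* every live entry needs a base */
                        fprintf(stderr, "entry %d: missing mmio base for gen %x\n",
                                j, gen);
                        return -1;
                }

                prev = gen;
        }

        return 0;
}

int main(void)
{
        return check_mmio_bases(mmio_bases) ? 1 : 0;
}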