Lines matching full:stream (excerpts from the i915 perf implementation, drivers/gpu/drm/i915/i915_perf.c)
36 * descriptor representing a stream of GPU metrics which can then be read() as
37 * a stream of sample records.
56 * i915 perf file descriptors represent a "stream" instead of an "event"; where
57 * a perf event primarily corresponds to a single 64bit value, while a stream
61 * of related counters. Samples for an i915 perf stream capturing OA metrics
64 * selected by the user opening the stream. Perf has support for grouping
68 * i915 perf stream configurations are provided as an array of u64 (key,value)
343 * struct perf_open_properties - for validated properties given to open a stream
362 * to open a stream of metrics the configuration is built up in the structure
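
The comments quoted above describe opening a stream with an array of u64 (key, value) property pairs. As a hedged illustration, not code from this file, a minimal userspace open call might look like the sketch below; the property and flag names follow include/uapi/drm/i915_drm.h, while the metrics set ID, OA format and timer exponent are placeholders.

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <drm/i915_drm.h>   /* or <i915_drm.h> via the libdrm include path */

        static int open_oa_stream(int drm_fd, uint64_t metrics_set_id)
        {
                uint64_t properties[] = {
                        DRM_I915_PERF_PROP_SAMPLE_OA,      1,
                        DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id, /* e.g. from sysfs metrics/<uuid>/id */
                        DRM_I915_PERF_PROP_OA_FORMAT,      I915_OA_FORMAT_A32u40_A4u32_B8_C8, /* placeholder format */
                        DRM_I915_PERF_PROP_OA_EXPONENT,    16, /* placeholder periodic sampling exponent */
                };
                struct drm_i915_perf_open_param param = {
                        .flags = I915_PERF_FLAG_FD_CLOEXEC | I915_PERF_FLAG_FD_NONBLOCK,
                        .num_properties = sizeof(properties) / (2 * sizeof(uint64_t)),
                        .properties_ptr = (uintptr_t)properties,
                };

                /* On success the ioctl returns a new stream fd that can be read()/poll()ed. */
                return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
        }
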
431 struct i915_perf_regs *__oa_regs(struct i915_perf_stream *stream) in __oa_regs() argument
433 return &stream->engine->oa_group->regs; in __oa_regs()
436 static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream) in gen12_oa_hw_tail_read() argument
438 struct intel_uncore *uncore = stream->uncore; in gen12_oa_hw_tail_read()
440 return intel_uncore_read(uncore, __oa_regs(stream)->oa_tail_ptr) & in gen12_oa_hw_tail_read()
444 static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream) in gen8_oa_hw_tail_read() argument
446 struct intel_uncore *uncore = stream->uncore; in gen8_oa_hw_tail_read()
451 static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream) in gen7_oa_hw_tail_read() argument
453 struct intel_uncore *uncore = stream->uncore; in gen7_oa_hw_tail_read()
462 static u64 oa_report_id(struct i915_perf_stream *stream, void *report) in oa_report_id() argument
464 return oa_report_header_64bit(stream) ? *(u64 *)report : *(u32 *)report; in oa_report_id()
467 static u64 oa_report_reason(struct i915_perf_stream *stream, void *report) in oa_report_reason() argument
469 return (oa_report_id(stream, report) >> OAREPORT_REASON_SHIFT) & in oa_report_reason()
470 (GRAPHICS_VER(stream->perf->i915) == 12 ? in oa_report_reason()
475 static void oa_report_id_clear(struct i915_perf_stream *stream, u32 *report) in oa_report_id_clear() argument
477 if (oa_report_header_64bit(stream)) in oa_report_id_clear()
483 static bool oa_report_ctx_invalid(struct i915_perf_stream *stream, void *report) in oa_report_ctx_invalid() argument
485 return !(oa_report_id(stream, report) & in oa_report_ctx_invalid()
486 stream->perf->gen8_valid_ctx_bit); in oa_report_ctx_invalid()
489 static u64 oa_timestamp(struct i915_perf_stream *stream, void *report) in oa_timestamp() argument
491 return oa_report_header_64bit(stream) ? in oa_timestamp()
496 static void oa_timestamp_clear(struct i915_perf_stream *stream, u32 *report) in oa_timestamp_clear() argument
498 if (oa_report_header_64bit(stream)) in oa_timestamp_clear()
504 static u32 oa_context_id(struct i915_perf_stream *stream, u32 *report) in oa_context_id() argument
506 u32 ctx_id = oa_report_header_64bit(stream) ? report[4] : report[2]; in oa_context_id()
508 return ctx_id & stream->specific_ctx_id_mask; in oa_context_id()
511 static void oa_context_id_squash(struct i915_perf_stream *stream, u32 *report) in oa_context_id_squash() argument
513 if (oa_report_header_64bit(stream)) in oa_context_id_squash()
521 * @stream: i915 stream instance
537 * only called while the stream is enabled, while the global OA configuration
542 static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream) in oa_buffer_check_unlocked() argument
544 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); in oa_buffer_check_unlocked()
545 int report_size = stream->oa_buffer.format->size; in oa_buffer_check_unlocked()
555 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in oa_buffer_check_unlocked()
557 hw_tail = stream->perf->ops.oa_hw_tail_read(stream); in oa_buffer_check_unlocked()
564 partial_report_size = OA_TAKEN(hw_tail, stream->oa_buffer.tail); in oa_buffer_check_unlocked()
572 /* Walk the stream backward until we find a report with report in oa_buffer_check_unlocked()
583 while (OA_TAKEN(tail, stream->oa_buffer.tail) >= report_size) { in oa_buffer_check_unlocked()
584 void *report = stream->oa_buffer.vaddr + tail; in oa_buffer_check_unlocked()
586 if (oa_report_id(stream, report) || in oa_buffer_check_unlocked()
587 oa_timestamp(stream, report)) in oa_buffer_check_unlocked()
594 __ratelimit(&stream->perf->tail_pointer_race)) in oa_buffer_check_unlocked()
595 drm_notice(&stream->uncore->i915->drm, in oa_buffer_check_unlocked()
597 stream->oa_buffer.head, tail, hw_tail); in oa_buffer_check_unlocked()
599 stream->oa_buffer.tail = tail; in oa_buffer_check_unlocked()
601 pollin = OA_TAKEN(stream->oa_buffer.tail, in oa_buffer_check_unlocked()
602 stream->oa_buffer.head) >= report_size; in oa_buffer_check_unlocked()
604 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in oa_buffer_check_unlocked()
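
oa_buffer_check_unlocked() above compares head and tail with OA_TAKEN(), a masked circular-buffer distance. A self-contained sketch of that arithmetic (assuming a power-of-two buffer size, which the masking requires; the names below are illustrative, not the driver's):

        #include <stdint.h>

        /* Assumed power-of-two OA buffer size; the real size is chosen by the driver. */
        #define OA_BUF_SIZE (16u << 20)

        /* Bytes available between head and tail, valid across tail wrap-around. */
        static inline uint32_t oa_taken(uint32_t tail, uint32_t head)
        {
                return (tail - head) & (OA_BUF_SIZE - 1);
        }

        /* As in the pollin computation above: data is ready once at least one
         * whole report fits between head and tail. */
        static inline int oa_pollin(uint32_t tail, uint32_t head, uint32_t report_size)
        {
                return oa_taken(tail, head) >= report_size;
        }
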
611 * @stream: An i915-perf stream opened for OA metrics
624 static int append_oa_status(struct i915_perf_stream *stream, in append_oa_status() argument
645 * @stream: An i915-perf stream opened for OA metrics
652 * properties when opening a stream, tracked as `stream->sample_flags`. This
660 static int append_oa_sample(struct i915_perf_stream *stream, in append_oa_sample() argument
666 int report_size = stream->oa_buffer.format->size; in append_oa_sample()
673 header.size = stream->sample_size; in append_oa_sample()
683 oa_buf_end = stream->oa_buffer.vaddr + OA_BUFFER_SIZE; in append_oa_sample()
691 if (copy_to_user(buf, stream->oa_buffer.vaddr, in append_oa_sample()
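
append_oa_status() and append_oa_sample() above frame everything copied to userspace with a struct drm_i915_perf_record_header. A hedged sketch of how a consumer might walk those records after read(); the struct and record-type names follow include/uapi/drm/i915_drm.h, and the buffer size is arbitrary:

        #include <stdint.h>
        #include <unistd.h>
        #include <drm/i915_drm.h>

        static void drain_stream(int stream_fd)
        {
                uint8_t buf[256 * 1024];
                ssize_t len = read(stream_fd, buf, sizeof(buf));
                size_t off = 0;

                if (len <= 0)
                        return;

                while (off + sizeof(struct drm_i915_perf_record_header) <= (size_t)len) {
                        const struct drm_i915_perf_record_header *hdr =
                                (const struct drm_i915_perf_record_header *)(buf + off);

                        if (hdr->size < sizeof(*hdr))
                                break;  /* malformed record, stop */

                        if (hdr->type == DRM_I915_PERF_RECORD_SAMPLE) {
                                /* raw OA report payload follows the header */
                        } else if (hdr->type == DRM_I915_PERF_RECORD_OA_REPORT_LOST ||
                                   hdr->type == DRM_I915_PERF_RECORD_OA_BUFFER_LOST) {
                                /* status record (see append_oa_status() above), header only */
                        }

                        off += hdr->size;
                }
        }
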
706 * @stream: An i915-perf stream opened for OA metrics
724 static int gen8_append_oa_reports(struct i915_perf_stream *stream, in gen8_append_oa_reports() argument
729 struct intel_uncore *uncore = stream->uncore; in gen8_append_oa_reports()
730 int report_size = stream->oa_buffer.format->size; in gen8_append_oa_reports()
731 u8 *oa_buf_base = stream->oa_buffer.vaddr; in gen8_append_oa_reports()
732 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); in gen8_append_oa_reports()
739 if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled)) in gen8_append_oa_reports()
742 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in gen8_append_oa_reports()
744 head = stream->oa_buffer.head; in gen8_append_oa_reports()
745 tail = stream->oa_buffer.tail; in gen8_append_oa_reports()
747 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in gen8_append_oa_reports()
776 reason = oa_report_reason(stream, report); in gen8_append_oa_reports()
777 ctx_id = oa_context_id(stream, report32); in gen8_append_oa_reports()
819 if (oa_report_ctx_invalid(stream, report) && in gen8_append_oa_reports()
820 GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) { in gen8_append_oa_reports()
822 oa_context_id_squash(stream, report32); in gen8_append_oa_reports()
856 if (!stream->ctx || in gen8_append_oa_reports()
857 stream->specific_ctx_id == ctx_id || in gen8_append_oa_reports()
858 stream->oa_buffer.last_ctx_id == stream->specific_ctx_id || in gen8_append_oa_reports()
865 if (stream->ctx && in gen8_append_oa_reports()
866 stream->specific_ctx_id != ctx_id) { in gen8_append_oa_reports()
867 oa_context_id_squash(stream, report32); in gen8_append_oa_reports()
870 ret = append_oa_sample(stream, buf, count, offset, in gen8_append_oa_reports()
875 stream->oa_buffer.last_ctx_id = ctx_id; in gen8_append_oa_reports()
883 oa_report_id_clear(stream, report32); in gen8_append_oa_reports()
884 oa_timestamp_clear(stream, report32); in gen8_append_oa_reports()
886 u8 *oa_buf_end = stream->oa_buffer.vaddr + in gen8_append_oa_reports()
903 oaheadptr = GRAPHICS_VER(stream->perf->i915) == 12 ? in gen8_append_oa_reports()
904 __oa_regs(stream)->oa_head_ptr : in gen8_append_oa_reports()
907 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in gen8_append_oa_reports()
915 stream->oa_buffer.head = head; in gen8_append_oa_reports()
917 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in gen8_append_oa_reports()
925 * @stream: An i915-perf stream opened for OA metrics
943 static int gen8_oa_read(struct i915_perf_stream *stream, in gen8_oa_read() argument
948 struct intel_uncore *uncore = stream->uncore; in gen8_oa_read()
953 if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr)) in gen8_oa_read()
956 oastatus_reg = GRAPHICS_VER(stream->perf->i915) == 12 ? in gen8_oa_read()
957 __oa_regs(stream)->oa_status : in gen8_oa_read()
977 ret = append_oa_status(stream, buf, count, offset, in gen8_oa_read()
982 drm_dbg(&stream->perf->i915->drm, in gen8_oa_read()
984 stream->period_exponent); in gen8_oa_read()
986 stream->perf->ops.oa_disable(stream); in gen8_oa_read()
987 stream->perf->ops.oa_enable(stream); in gen8_oa_read()
997 ret = append_oa_status(stream, buf, count, offset, in gen8_oa_read()
1010 return gen8_append_oa_reports(stream, buf, count, offset); in gen8_oa_read()
1016 * @stream: An i915-perf stream opened for OA metrics
1034 static int gen7_append_oa_reports(struct i915_perf_stream *stream, in gen7_append_oa_reports() argument
1039 struct intel_uncore *uncore = stream->uncore; in gen7_append_oa_reports()
1040 int report_size = stream->oa_buffer.format->size; in gen7_append_oa_reports()
1041 u8 *oa_buf_base = stream->oa_buffer.vaddr; in gen7_append_oa_reports()
1042 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); in gen7_append_oa_reports()
1049 if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled)) in gen7_append_oa_reports()
1052 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in gen7_append_oa_reports()
1054 head = stream->oa_buffer.head; in gen7_append_oa_reports()
1055 tail = stream->oa_buffer.tail; in gen7_append_oa_reports()
1057 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in gen7_append_oa_reports()
1101 if (__ratelimit(&stream->perf->spurious_report_rs)) in gen7_append_oa_reports()
1107 ret = append_oa_sample(stream, buf, count, offset, report); in gen7_append_oa_reports()
1119 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in gen7_append_oa_reports()
1124 stream->oa_buffer.head = head; in gen7_append_oa_reports()
1126 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in gen7_append_oa_reports()
1134 * @stream: An i915-perf stream opened for OA metrics
1148 static int gen7_oa_read(struct i915_perf_stream *stream, in gen7_oa_read() argument
1153 struct intel_uncore *uncore = stream->uncore; in gen7_oa_read()
1157 if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr)) in gen7_oa_read()
1167 oastatus1 &= ~stream->perf->gen7_latched_oastatus1; in gen7_oa_read()
1190 ret = append_oa_status(stream, buf, count, offset, in gen7_oa_read()
1195 drm_dbg(&stream->perf->i915->drm, in gen7_oa_read()
1197 stream->period_exponent); in gen7_oa_read()
1199 stream->perf->ops.oa_disable(stream); in gen7_oa_read()
1200 stream->perf->ops.oa_enable(stream); in gen7_oa_read()
1206 ret = append_oa_status(stream, buf, count, offset, in gen7_oa_read()
1210 stream->perf->gen7_latched_oastatus1 |= in gen7_oa_read()
1214 return gen7_append_oa_reports(stream, buf, count, offset); in gen7_oa_read()
1219 * @stream: An i915-perf stream opened for OA metrics
1221 * Called when userspace tries to read() from a blocking stream FD opened
1231 static int i915_oa_wait_unlocked(struct i915_perf_stream *stream) in i915_oa_wait_unlocked() argument
1234 if (!stream->periodic) in i915_oa_wait_unlocked()
1237 return wait_event_interruptible(stream->poll_wq, in i915_oa_wait_unlocked()
1238 oa_buffer_check_unlocked(stream)); in i915_oa_wait_unlocked()
1242 * i915_oa_poll_wait - call poll_wait() for an OA stream poll()
1243 * @stream: An i915-perf stream opened for OA metrics
1244 * @file: An i915 perf stream file
1247 * For handling userspace polling on an i915 perf stream opened for OA metrics,
1251 static void i915_oa_poll_wait(struct i915_perf_stream *stream, in i915_oa_poll_wait() argument
1255 poll_wait(file, &stream->poll_wq, wait); in i915_oa_poll_wait()
1260 * @stream: An i915-perf stream opened for OA metrics
1270 static int i915_oa_read(struct i915_perf_stream *stream, in i915_oa_read() argument
1275 return stream->perf->ops.read(stream, buf, count, offset); in i915_oa_read()
1278 static struct intel_context *oa_pin_context(struct i915_perf_stream *stream) in oa_pin_context() argument
1281 struct i915_gem_context *ctx = stream->ctx; in oa_pin_context()
1287 if (ce->engine != stream->engine) /* first match! */ in oa_pin_context()
1315 stream->pinned_ctx = ce; in oa_pin_context()
1316 return stream->pinned_ctx; in oa_pin_context()
1410 static int gen12_get_render_context_id(struct i915_perf_stream *stream) in gen12_get_render_context_id() argument
1415 if (intel_engine_uses_guc(stream->engine)) { in gen12_get_render_context_id()
1416 ret = gen12_guc_sw_ctx_id(stream->pinned_ctx, &ctx_id); in gen12_get_render_context_id()
1422 } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) { in gen12_get_render_context_id()
1435 stream->specific_ctx_id = ctx_id & mask; in gen12_get_render_context_id()
1436 stream->specific_ctx_id_mask = mask; in gen12_get_render_context_id()
1514 * @stream: An i915-perf stream opened for OA metrics
1517 * lifetime of the stream. This ensures that we don't have to worry about
1522 static int oa_get_render_ctx_id(struct i915_perf_stream *stream) in oa_get_render_ctx_id() argument
1527 ce = oa_pin_context(stream); in oa_get_render_ctx_id()
1531 if (engine_supports_mi_query(stream->engine) && in oa_get_render_ctx_id()
1532 HAS_LOGICAL_RING_CONTEXTS(stream->perf->i915)) { in oa_get_render_ctx_id()
1540 drm_err(&stream->perf->i915->drm, in oa_get_render_ctx_id()
1542 stream->engine->name); in oa_get_render_ctx_id()
1553 stream->specific_ctx_id = i915_ggtt_offset(ce->state); in oa_get_render_ctx_id()
1554 stream->specific_ctx_id_mask = 0; in oa_get_render_ctx_id()
1571 stream->specific_ctx_id = ce->lrc.lrca >> 12; in oa_get_render_ctx_id()
1577 stream->specific_ctx_id_mask = in oa_get_render_ctx_id()
1580 stream->specific_ctx_id_mask = in oa_get_render_ctx_id()
1582 stream->specific_ctx_id = stream->specific_ctx_id_mask; in oa_get_render_ctx_id()
1588 ret = gen12_get_render_context_id(stream); in oa_get_render_ctx_id()
1595 ce->tag = stream->specific_ctx_id; in oa_get_render_ctx_id()
1597 drm_dbg(&stream->perf->i915->drm, in oa_get_render_ctx_id()
1599 stream->specific_ctx_id, in oa_get_render_ctx_id()
1600 stream->specific_ctx_id_mask); in oa_get_render_ctx_id()
1607 * @stream: An i915-perf stream opened for OA metrics
1610 * for the lifetime of the stream, then that can be undone here.
1612 static void oa_put_render_ctx_id(struct i915_perf_stream *stream) in oa_put_render_ctx_id() argument
1616 ce = fetch_and_zero(&stream->pinned_ctx); in oa_put_render_ctx_id()
1622 stream->specific_ctx_id = INVALID_CTX_ID; in oa_put_render_ctx_id()
1623 stream->specific_ctx_id_mask = 0; in oa_put_render_ctx_id()
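
The specific_ctx_id tracking above only comes into play when the stream was opened against a single GEM context. As a hedged illustration, userspace opts into that filtering by adding a DRM_I915_PERF_PROP_CTX_HANDLE pair to the property array from the earlier open sketch:

        #include <stdint.h>
        #include <drm/i915_drm.h>

        /* Append a context filter to a u64 (key, value) property array;
         * ctx_handle is a GEM context handle, e.g. the id returned by
         * DRM_IOCTL_I915_GEM_CONTEXT_CREATE. Returns the new pair count. */
        static uint32_t add_ctx_filter(uint64_t *props, uint32_t n_pairs, uint32_t ctx_handle)
        {
                props[2 * n_pairs]     = DRM_I915_PERF_PROP_CTX_HANDLE;
                props[2 * n_pairs + 1] = ctx_handle;
                return n_pairs + 1;
        }
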
1627 free_oa_buffer(struct i915_perf_stream *stream) in free_oa_buffer() argument
1629 i915_vma_unpin_and_release(&stream->oa_buffer.vma, in free_oa_buffer()
1632 stream->oa_buffer.vaddr = NULL; in free_oa_buffer()
1636 free_oa_configs(struct i915_perf_stream *stream) in free_oa_configs() argument
1640 i915_oa_config_put(stream->oa_config); in free_oa_configs()
1641 llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node) in free_oa_configs()
1646 free_noa_wait(struct i915_perf_stream *stream) in free_noa_wait() argument
1648 i915_vma_unpin_and_release(&stream->noa_wait, 0); in free_noa_wait()
1661 static void i915_oa_stream_destroy(struct i915_perf_stream *stream) in i915_oa_stream_destroy() argument
1663 struct i915_perf *perf = stream->perf; in i915_oa_stream_destroy()
1664 struct intel_gt *gt = stream->engine->gt; in i915_oa_stream_destroy()
1665 struct i915_perf_group *g = stream->engine->oa_group; in i915_oa_stream_destroy()
1667 if (WARN_ON(stream != g->exclusive_stream)) in i915_oa_stream_destroy()
1677 perf->ops.disable_metric_set(stream); in i915_oa_stream_destroy()
1679 free_oa_buffer(stream); in i915_oa_stream_destroy()
1681 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); in i915_oa_stream_destroy()
1682 intel_engine_pm_put(stream->engine); in i915_oa_stream_destroy()
1684 if (stream->ctx) in i915_oa_stream_destroy()
1685 oa_put_render_ctx_id(stream); in i915_oa_stream_destroy()
1687 free_oa_configs(stream); in i915_oa_stream_destroy()
1688 free_noa_wait(stream); in i915_oa_stream_destroy()
1696 static void gen7_init_oa_buffer(struct i915_perf_stream *stream) in gen7_init_oa_buffer() argument
1698 struct intel_uncore *uncore = stream->uncore; in gen7_init_oa_buffer()
1699 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); in gen7_init_oa_buffer()
1702 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in gen7_init_oa_buffer()
1709 stream->oa_buffer.head = 0; in gen7_init_oa_buffer()
1717 stream->oa_buffer.tail = 0; in gen7_init_oa_buffer()
1719 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in gen7_init_oa_buffer()
1725 stream->perf->gen7_latched_oastatus1 = 0; in gen7_init_oa_buffer()
1730 * when re-enabling a stream or in error/reset paths. in gen7_init_oa_buffer()
1738 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); in gen7_init_oa_buffer()
1741 static void gen8_init_oa_buffer(struct i915_perf_stream *stream) in gen8_init_oa_buffer() argument
1743 struct intel_uncore *uncore = stream->uncore; in gen8_init_oa_buffer()
1744 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); in gen8_init_oa_buffer()
1747 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in gen8_init_oa_buffer()
1751 stream->oa_buffer.head = 0; in gen8_init_oa_buffer()
1768 stream->oa_buffer.tail = 0; in gen8_init_oa_buffer()
1775 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; in gen8_init_oa_buffer()
1777 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in gen8_init_oa_buffer()
1783 * when re-enabling a stream or in error/reset paths. in gen8_init_oa_buffer()
1791 memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); in gen8_init_oa_buffer()
1794 static void gen12_init_oa_buffer(struct i915_perf_stream *stream) in gen12_init_oa_buffer() argument
1796 struct intel_uncore *uncore = stream->uncore; in gen12_init_oa_buffer()
1797 u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); in gen12_init_oa_buffer()
1800 spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); in gen12_init_oa_buffer()
1802 intel_uncore_write(uncore, __oa_regs(stream)->oa_status, 0); in gen12_init_oa_buffer()
1803 intel_uncore_write(uncore, __oa_regs(stream)->oa_head_ptr, in gen12_init_oa_buffer()
1805 stream->oa_buffer.head = 0; in gen12_init_oa_buffer()
1815 intel_uncore_write(uncore, __oa_regs(stream)->oa_buffer, gtt_offset | in gen12_init_oa_buffer()
1817 intel_uncore_write(uncore, __oa_regs(stream)->oa_tail_ptr, in gen12_init_oa_buffer()
1821 stream->oa_buffer.tail = 0; in gen12_init_oa_buffer()
1828 stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; in gen12_init_oa_buffer()
1830 spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); in gen12_init_oa_buffer()
1836 * when re-enabling a stream or in error/reset paths. in gen12_init_oa_buffer()
1844 memset(stream->oa_buffer.vaddr, 0, in gen12_init_oa_buffer()
1845 stream->oa_buffer.vma->size); in gen12_init_oa_buffer()
1848 static int alloc_oa_buffer(struct i915_perf_stream *stream) in alloc_oa_buffer() argument
1850 struct drm_i915_private *i915 = stream->perf->i915; in alloc_oa_buffer()
1851 struct intel_gt *gt = stream->engine->gt; in alloc_oa_buffer()
1856 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma)) in alloc_oa_buffer()
1862 bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE); in alloc_oa_buffer()
1887 stream->oa_buffer.vma = vma; in alloc_oa_buffer()
1889 stream->oa_buffer.vaddr = in alloc_oa_buffer()
1891 if (IS_ERR(stream->oa_buffer.vaddr)) { in alloc_oa_buffer()
1892 ret = PTR_ERR(stream->oa_buffer.vaddr); in alloc_oa_buffer()
1904 stream->oa_buffer.vaddr = NULL; in alloc_oa_buffer()
1905 stream->oa_buffer.vma = NULL; in alloc_oa_buffer()
1910 static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs, in save_restore_register() argument
1919 if (GRAPHICS_VER(stream->perf->i915) >= 8) in save_restore_register()
1925 *cs++ = i915_ggtt_offset(stream->noa_wait) + offset + 4 * d; in save_restore_register()
1932 static int alloc_noa_wait(struct i915_perf_stream *stream) in alloc_noa_wait() argument
1934 struct drm_i915_private *i915 = stream->perf->i915; in alloc_noa_wait()
1935 struct intel_gt *gt = stream->engine->gt; in alloc_noa_wait()
1939 intel_gt_ns_to_clock_interval(to_gt(stream->perf->i915), in alloc_noa_wait()
1940 atomic64_read(&stream->perf->noa_programming_delay)); in alloc_noa_wait()
1941 const u32 base = stream->engine->mmio_base; in alloc_noa_wait()
1980 * needs to be fixed during the lifetime of the i915/perf stream. in alloc_noa_wait()
1998 stream->noa_wait = vma; in alloc_noa_wait()
2006 stream, cs, true /* save */, CS_GPR(i), in alloc_noa_wait()
2009 stream, cs, true /* save */, mi_predicate_result, in alloc_noa_wait()
2125 stream, cs, false /* restore */, CS_GPR(i), in alloc_noa_wait()
2128 stream, cs, false /* restore */, mi_predicate_result, in alloc_noa_wait()
2189 alloc_oa_config_buffer(struct i915_perf_stream *stream, in alloc_oa_config_buffer() argument
2209 obj = i915_gem_object_create_shmem(stream->perf->i915, config_length); in alloc_oa_config_buffer()
2238 *cs++ = (GRAPHICS_VER(stream->perf->i915) < 8 ? in alloc_oa_config_buffer()
2241 *cs++ = i915_ggtt_offset(stream->noa_wait); in alloc_oa_config_buffer()
2248 &stream->engine->gt->ggtt->vm, in alloc_oa_config_buffer()
2256 llist_add(&oa_bo->node, &stream->oa_config_bos); in alloc_oa_config_buffer()
2277 get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config) in get_oa_vma() argument
2283 * to the stream. in get_oa_vma()
2285 llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) { in get_oa_vma()
2293 oa_bo = alloc_oa_config_buffer(stream, oa_config); in get_oa_vma()
2302 emit_oa_config(struct i915_perf_stream *stream, in emit_oa_config() argument
2312 vma = get_oa_vma(stream, oa_config); in emit_oa_config()
2372 static struct intel_context *oa_context(struct i915_perf_stream *stream) in oa_context() argument
2374 return stream->pinned_ctx ?: stream->engine->kernel_context; in oa_context()
2378 hsw_enable_metric_set(struct i915_perf_stream *stream, in hsw_enable_metric_set() argument
2381 struct intel_uncore *uncore = stream->uncore; in hsw_enable_metric_set()
2398 return emit_oa_config(stream, in hsw_enable_metric_set()
2399 stream->oa_config, oa_context(stream), in hsw_enable_metric_set()
2403 static void hsw_disable_metric_set(struct i915_perf_stream *stream) in hsw_disable_metric_set() argument
2405 struct intel_uncore *uncore = stream->uncore; in hsw_disable_metric_set()
2445 const struct i915_perf_stream *stream) in gen8_update_reg_state_unlocked() argument
2447 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset; in gen8_update_reg_state_unlocked()
2448 u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; in gen8_update_reg_state_unlocked()
2463 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | in gen8_update_reg_state_unlocked()
2464 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | in gen8_update_reg_state_unlocked()
2469 oa_config_flex_reg(stream->oa_config, flex_regs[i]); in gen8_update_reg_state_unlocked()
2576 static int gen8_configure_context(struct i915_perf_stream *stream, in gen8_configure_context() argument
2606 static int gen12_configure_oar_context(struct i915_perf_stream *stream, in gen12_configure_oar_context() argument
2610 struct intel_context *ce = stream->pinned_ctx; in gen12_configure_oar_context()
2611 u32 format = stream->oa_buffer.format->format; in gen12_configure_oar_context()
2612 u32 offset = stream->perf->ctx_oactxctrl_offset; in gen12_configure_oar_context()
2657 * Manages updating the per-context aspects of the OA stream
2682 oa_configure_all_contexts(struct i915_perf_stream *stream, in oa_configure_all_contexts() argument
2687 struct drm_i915_private *i915 = stream->perf->i915; in oa_configure_all_contexts()
2689 struct intel_gt *gt = stream->engine->gt; in oa_configure_all_contexts()
2718 err = gen8_configure_context(stream, ctx, regs, num_regs); in oa_configure_all_contexts()
2752 gen12_configure_all_contexts(struct i915_perf_stream *stream, in gen12_configure_all_contexts() argument
2763 if (stream->engine->class != RENDER_CLASS) in gen12_configure_all_contexts()
2766 return oa_configure_all_contexts(stream, in gen12_configure_all_contexts()
2772 lrc_configure_all_contexts(struct i915_perf_stream *stream, in lrc_configure_all_contexts() argument
2776 u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset; in lrc_configure_all_contexts()
2778 const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; in lrc_configure_all_contexts()
2801 (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | in lrc_configure_all_contexts()
2802 (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | in lrc_configure_all_contexts()
2808 return oa_configure_all_contexts(stream, in lrc_configure_all_contexts()
2814 gen8_enable_metric_set(struct i915_perf_stream *stream, in gen8_enable_metric_set() argument
2817 struct intel_uncore *uncore = stream->uncore; in gen8_enable_metric_set()
2818 struct i915_oa_config *oa_config = stream->oa_config; in gen8_enable_metric_set()
2844 if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) { in gen8_enable_metric_set()
2855 ret = lrc_configure_all_contexts(stream, oa_config, active); in gen8_enable_metric_set()
2859 return emit_oa_config(stream, in gen8_enable_metric_set()
2860 stream->oa_config, oa_context(stream), in gen8_enable_metric_set()
2864 static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream) in oag_report_ctx_switches() argument
2867 (stream->sample_flags & SAMPLE_OA_REPORT) ? in oag_report_ctx_switches()
2872 gen12_enable_metric_set(struct i915_perf_stream *stream, in gen12_enable_metric_set() argument
2875 struct drm_i915_private *i915 = stream->perf->i915; in gen12_enable_metric_set()
2876 struct intel_uncore *uncore = stream->uncore; in gen12_enable_metric_set()
2877 struct i915_oa_config *oa_config = stream->oa_config; in gen12_enable_metric_set()
2878 bool periodic = stream->periodic; in gen12_enable_metric_set()
2879 u32 period_exponent = stream->period_exponent; in gen12_enable_metric_set()
2895 intel_uncore_write(uncore, __oa_regs(stream)->oa_debug, in gen12_enable_metric_set()
2903 oag_report_ctx_switches(stream)); in gen12_enable_metric_set()
2905 intel_uncore_write(uncore, __oa_regs(stream)->oa_ctx_ctrl, periodic ? in gen12_enable_metric_set()
2926 ret = gen12_configure_all_contexts(stream, oa_config, active); in gen12_enable_metric_set()
2935 if (stream->ctx) { in gen12_enable_metric_set()
2936 ret = gen12_configure_oar_context(stream, active); in gen12_enable_metric_set()
2941 return emit_oa_config(stream, in gen12_enable_metric_set()
2942 stream->oa_config, oa_context(stream), in gen12_enable_metric_set()
2946 static void gen8_disable_metric_set(struct i915_perf_stream *stream) in gen8_disable_metric_set() argument
2948 struct intel_uncore *uncore = stream->uncore; in gen8_disable_metric_set()
2951 lrc_configure_all_contexts(stream, NULL, NULL); in gen8_disable_metric_set()
2956 static void gen11_disable_metric_set(struct i915_perf_stream *stream) in gen11_disable_metric_set() argument
2958 struct intel_uncore *uncore = stream->uncore; in gen11_disable_metric_set()
2961 lrc_configure_all_contexts(stream, NULL, NULL); in gen11_disable_metric_set()
2967 static void gen12_disable_metric_set(struct i915_perf_stream *stream) in gen12_disable_metric_set() argument
2969 struct intel_uncore *uncore = stream->uncore; in gen12_disable_metric_set()
2970 struct drm_i915_private *i915 = stream->perf->i915; in gen12_disable_metric_set()
2985 gen12_configure_all_contexts(stream, NULL, NULL); in gen12_disable_metric_set()
2988 if (stream->ctx) in gen12_disable_metric_set()
2989 gen12_configure_oar_context(stream, NULL); in gen12_disable_metric_set()
3001 static void gen7_oa_enable(struct i915_perf_stream *stream) in gen7_oa_enable() argument
3003 struct intel_uncore *uncore = stream->uncore; in gen7_oa_enable()
3004 struct i915_gem_context *ctx = stream->ctx; in gen7_oa_enable()
3005 u32 ctx_id = stream->specific_ctx_id; in gen7_oa_enable()
3006 bool periodic = stream->periodic; in gen7_oa_enable()
3007 u32 period_exponent = stream->period_exponent; in gen7_oa_enable()
3008 u32 report_format = stream->oa_buffer.format->format; in gen7_oa_enable()
3019 gen7_init_oa_buffer(stream); in gen7_oa_enable()
3031 static void gen8_oa_enable(struct i915_perf_stream *stream) in gen8_oa_enable() argument
3033 struct intel_uncore *uncore = stream->uncore; in gen8_oa_enable()
3034 u32 report_format = stream->oa_buffer.format->format; in gen8_oa_enable()
3045 gen8_init_oa_buffer(stream); in gen8_oa_enable()
3057 static void gen12_oa_enable(struct i915_perf_stream *stream) in gen12_oa_enable() argument
3066 if (!(stream->sample_flags & SAMPLE_OA_REPORT)) in gen12_oa_enable()
3069 gen12_init_oa_buffer(stream); in gen12_oa_enable()
3071 regs = __oa_regs(stream); in gen12_oa_enable()
3072 val = (stream->oa_buffer.format->format << regs->oa_ctrl_counter_format_shift) | in gen12_oa_enable()
3075 intel_uncore_write(stream->uncore, regs->oa_ctrl, val); in gen12_oa_enable()
3079 * i915_oa_stream_enable - handle `I915_PERF_IOCTL_ENABLE` for OA stream
3080 * @stream: An i915 perf stream opened for OA metrics
3083 * when opening the stream. This also starts a hrtimer that will periodically
3087 static void i915_oa_stream_enable(struct i915_perf_stream *stream) in i915_oa_stream_enable() argument
3089 stream->pollin = false; in i915_oa_stream_enable()
3091 stream->perf->ops.oa_enable(stream); in i915_oa_stream_enable()
3093 if (stream->sample_flags & SAMPLE_OA_REPORT) in i915_oa_stream_enable()
3094 hrtimer_start(&stream->poll_check_timer, in i915_oa_stream_enable()
3095 ns_to_ktime(stream->poll_oa_period), in i915_oa_stream_enable()
3099 static void gen7_oa_disable(struct i915_perf_stream *stream) in gen7_oa_disable() argument
3101 struct intel_uncore *uncore = stream->uncore; in gen7_oa_disable()
3107 drm_err(&stream->perf->i915->drm, in gen7_oa_disable()
3111 static void gen8_oa_disable(struct i915_perf_stream *stream) in gen8_oa_disable() argument
3113 struct intel_uncore *uncore = stream->uncore; in gen8_oa_disable()
3119 drm_err(&stream->perf->i915->drm, in gen8_oa_disable()
3123 static void gen12_oa_disable(struct i915_perf_stream *stream) in gen12_oa_disable() argument
3125 struct intel_uncore *uncore = stream->uncore; in gen12_oa_disable()
3127 intel_uncore_write(uncore, __oa_regs(stream)->oa_ctrl, 0); in gen12_oa_disable()
3129 __oa_regs(stream)->oa_ctrl, in gen12_oa_disable()
3132 drm_err(&stream->perf->i915->drm, in gen12_oa_disable()
3140 drm_err(&stream->perf->i915->drm, in gen12_oa_disable()
3145 * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream
3146 * @stream: An i915 perf stream opened for OA metrics
3152 static void i915_oa_stream_disable(struct i915_perf_stream *stream) in i915_oa_stream_disable() argument
3154 stream->perf->ops.oa_disable(stream); in i915_oa_stream_disable()
3156 if (stream->sample_flags & SAMPLE_OA_REPORT) in i915_oa_stream_disable()
3157 hrtimer_cancel(&stream->poll_check_timer); in i915_oa_stream_disable()
3169 static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream) in i915_perf_stream_enable_sync() argument
3178 err = stream->perf->ops.enable_metric_set(stream, active); in i915_perf_stream_enable_sync()
3245 * i915_oa_stream_init - validate combined props for OA stream and init
3246 * @stream: An i915 perf stream
3248 * @props: The property state that configures stream (individually validated)
3253 * At this point it has been determined that userspace wants a stream of
3262 static int i915_oa_stream_init(struct i915_perf_stream *stream, in i915_oa_stream_init() argument
3266 struct drm_i915_private *i915 = stream->perf->i915; in i915_oa_stream_init()
3267 struct i915_perf *perf = stream->perf; in i915_oa_stream_init()
3272 drm_dbg(&stream->perf->i915->drm, in i915_oa_stream_init()
3284 drm_dbg(&stream->perf->i915->drm, in i915_oa_stream_init()
3290 (GRAPHICS_VER(perf->i915) < 12 || !stream->ctx)) { in i915_oa_stream_init()
3291 drm_dbg(&stream->perf->i915->drm, in i915_oa_stream_init()
3297 drm_dbg(&stream->perf->i915->drm, in i915_oa_stream_init()
3308 drm_dbg(&stream->perf->i915->drm, in i915_oa_stream_init()
3314 drm_dbg(&stream->perf->i915->drm, in i915_oa_stream_init()
3319 stream->engine = props->engine; in i915_oa_stream_init()
3320 stream->uncore = stream->engine->gt->uncore; in i915_oa_stream_init()
3322 stream->sample_size = sizeof(struct drm_i915_perf_record_header); in i915_oa_stream_init()
3324 stream->oa_buffer.format = &perf->oa_formats[props->oa_format]; in i915_oa_stream_init()
3325 if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format->size == 0)) in i915_oa_stream_init()
3328 stream->sample_flags = props->sample_flags; in i915_oa_stream_init()
3329 stream->sample_size += stream->oa_buffer.format->size; in i915_oa_stream_init()
3331 stream->hold_preemption = props->hold_preemption; in i915_oa_stream_init()
3333 stream->periodic = props->oa_periodic; in i915_oa_stream_init()
3334 if (stream->periodic) in i915_oa_stream_init()
3335 stream->period_exponent = props->oa_period_exponent; in i915_oa_stream_init()
3337 if (stream->ctx) { in i915_oa_stream_init()
3338 ret = oa_get_render_ctx_id(stream); in i915_oa_stream_init()
3340 drm_dbg(&stream->perf->i915->drm, in i915_oa_stream_init()
3346 ret = alloc_noa_wait(stream); in i915_oa_stream_init()
3348 drm_dbg(&stream->perf->i915->drm, in i915_oa_stream_init()
3353 stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set); in i915_oa_stream_init()
3354 if (!stream->oa_config) { in i915_oa_stream_init()
3355 drm_dbg(&stream->perf->i915->drm, in i915_oa_stream_init()
3373 intel_engine_pm_get(stream->engine); in i915_oa_stream_init()
3374 intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL); in i915_oa_stream_init()
3376 ret = alloc_oa_buffer(stream); in i915_oa_stream_init()
3380 stream->ops = &i915_oa_stream_ops; in i915_oa_stream_init()
3382 stream->engine->gt->perf.sseu = props->sseu; in i915_oa_stream_init()
3383 WRITE_ONCE(g->exclusive_stream, stream); in i915_oa_stream_init()
3385 ret = i915_perf_stream_enable_sync(stream); in i915_oa_stream_init()
3387 drm_dbg(&stream->perf->i915->drm, in i915_oa_stream_init()
3392 drm_dbg(&stream->perf->i915->drm, in i915_oa_stream_init()
3393 "opening stream oa config uuid=%s\n", in i915_oa_stream_init()
3394 stream->oa_config->uuid); in i915_oa_stream_init()
3396 hrtimer_init(&stream->poll_check_timer, in i915_oa_stream_init()
3398 stream->poll_check_timer.function = oa_poll_check_timer_cb; in i915_oa_stream_init()
3399 init_waitqueue_head(&stream->poll_wq); in i915_oa_stream_init()
3400 spin_lock_init(&stream->oa_buffer.ptr_lock); in i915_oa_stream_init()
3401 mutex_init(&stream->lock); in i915_oa_stream_init()
3407 perf->ops.disable_metric_set(stream); in i915_oa_stream_init()
3409 free_oa_buffer(stream); in i915_oa_stream_init()
3412 intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); in i915_oa_stream_init()
3413 intel_engine_pm_put(stream->engine); in i915_oa_stream_init()
3415 free_oa_configs(stream); in i915_oa_stream_init()
3418 free_noa_wait(stream); in i915_oa_stream_init()
3421 if (stream->ctx) in i915_oa_stream_init()
3422 oa_put_render_ctx_id(stream); in i915_oa_stream_init()
3430 struct i915_perf_stream *stream; in i915_oa_init_reg_state() local
3436 stream = READ_ONCE(engine->oa_group->exclusive_stream); in i915_oa_init_reg_state()
3437 if (stream && GRAPHICS_VER(stream->perf->i915) < 12) in i915_oa_init_reg_state()
3438 gen8_update_reg_state_unlocked(ce, stream); in i915_oa_init_reg_state()
3442 * i915_perf_read - handles read() FOP for i915 perf stream FDs
3443 * @file: An i915 perf stream file
3448 * The entry point for handling a read() on a stream file descriptor from
3450 * &i915_perf_stream_ops->read but to save having stream implementations (of
3453 * We can also consistently treat trying to read from a disabled stream
3454 * as an IO error so implementations can assume the stream is enabled
3464 struct i915_perf_stream *stream = file->private_data; in i915_perf_read() local
3469 * disabled stream as an error. In particular it might otherwise lead in i915_perf_read()
3472 if (!stream->enabled || !(stream->sample_flags & SAMPLE_OA_REPORT)) in i915_perf_read()
3477 * stream->ops->wait_unlocked. in i915_perf_read()
3484 ret = stream->ops->wait_unlocked(stream); in i915_perf_read()
3488 mutex_lock(&stream->lock); in i915_perf_read()
3489 ret = stream->ops->read(stream, buf, count, &offset); in i915_perf_read()
3490 mutex_unlock(&stream->lock); in i915_perf_read()
3493 mutex_lock(&stream->lock); in i915_perf_read()
3494 ret = stream->ops->read(stream, buf, count, &offset); in i915_perf_read()
3495 mutex_unlock(&stream->lock); in i915_perf_read()
3510 stream->pollin = false; in i915_perf_read()
3518 struct i915_perf_stream *stream = in oa_poll_check_timer_cb() local
3519 container_of(hrtimer, typeof(*stream), poll_check_timer); in oa_poll_check_timer_cb()
3521 if (oa_buffer_check_unlocked(stream)) { in oa_poll_check_timer_cb()
3522 stream->pollin = true; in oa_poll_check_timer_cb()
3523 wake_up(&stream->poll_wq); in oa_poll_check_timer_cb()
3527 ns_to_ktime(stream->poll_oa_period)); in oa_poll_check_timer_cb()
3533 * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream
3534 * @stream: An i915 perf stream
3535 * @file: An i915 perf stream file
3538 * For handling userspace polling on an i915 perf stream, this calls through to
3540 * will be woken for new stream data.
3544 static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream, in i915_perf_poll_locked() argument
3550 stream->ops->poll_wait(stream, file, wait); in i915_perf_poll_locked()
3558 if (stream->pollin) in i915_perf_poll_locked()
3565 * i915_perf_poll - call poll_wait() with a suitable wait queue for stream
3566 * @file: An i915 perf stream file
3569 * For handling userspace polling on an i915 perf stream, this ensures
3570 * poll_wait() gets called with a wait queue that will be woken for new stream
3579 struct i915_perf_stream *stream = file->private_data; in i915_perf_poll() local
3582 mutex_lock(&stream->lock); in i915_perf_poll()
3583 ret = i915_perf_poll_locked(stream, file, wait); in i915_perf_poll()
3584 mutex_unlock(&stream->lock); in i915_perf_poll()
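
i915_perf_poll() above plugs the stream's poll_wq into poll_wait(), and the hrtimer callback sets pollin when new reports land. A hedged userspace counterpart for a stream fd opened with I915_PERF_FLAG_FD_NONBLOCK:

        #include <poll.h>
        #include <unistd.h>

        /* Block until the stream fd signals readable data, then hand it to a
         * record parser such as the drain_stream() sketch earlier. */
        static void wait_and_drain(int stream_fd)
        {
                struct pollfd pfd = { .fd = stream_fd, .events = POLLIN };

                if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
                        /* read() and walk drm_i915_perf_record_header records here */
                }
        }
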
3591 * @stream: A disabled i915 perf stream
3593 * [Re]enables the associated capture of data for this stream.
3595 * If a stream was previously enabled then there's currently no intention
3599 static void i915_perf_enable_locked(struct i915_perf_stream *stream) in i915_perf_enable_locked() argument
3601 if (stream->enabled) in i915_perf_enable_locked()
3604 /* Allow stream->ops->enable() to refer to this */ in i915_perf_enable_locked()
3605 stream->enabled = true; in i915_perf_enable_locked()
3607 if (stream->ops->enable) in i915_perf_enable_locked()
3608 stream->ops->enable(stream); in i915_perf_enable_locked()
3610 if (stream->hold_preemption) in i915_perf_enable_locked()
3611 intel_context_set_nopreempt(stream->pinned_ctx); in i915_perf_enable_locked()
3616 * @stream: An enabled i915 perf stream
3618 * Disables the associated capture of data for this stream.
3620 * The intention is that disabling an re-enabling a stream will ideally be
3621 * cheaper than destroying and re-opening a stream with the same configuration,
3623 * must be retained between disabling and re-enabling a stream.
3625 * Note: while a stream is disabled it's considered an error for userspace
3626 * to attempt to read from the stream (-EIO).
3628 static void i915_perf_disable_locked(struct i915_perf_stream *stream) in i915_perf_disable_locked() argument
3630 if (!stream->enabled) in i915_perf_disable_locked()
3633 /* Allow stream->ops->disable() to refer to this */ in i915_perf_disable_locked()
3634 stream->enabled = false; in i915_perf_disable_locked()
3636 if (stream->hold_preemption) in i915_perf_disable_locked()
3637 intel_context_clear_nopreempt(stream->pinned_ctx); in i915_perf_disable_locked()
3639 if (stream->ops->disable) in i915_perf_disable_locked()
3640 stream->ops->disable(stream); in i915_perf_disable_locked()
3643 static long i915_perf_config_locked(struct i915_perf_stream *stream, in i915_perf_config_locked() argument
3647 long ret = stream->oa_config->id; in i915_perf_config_locked()
3649 config = i915_perf_get_oa_config(stream->perf, metrics_set); in i915_perf_config_locked()
3653 if (config != stream->oa_config) { in i915_perf_config_locked()
3665 err = emit_oa_config(stream, config, oa_context(stream), NULL); in i915_perf_config_locked()
3667 config = xchg(&stream->oa_config, config); in i915_perf_config_locked()
3678 * i915_perf_ioctl_locked - support ioctl() usage with i915 perf stream FDs
3679 * @stream: An i915 perf stream
3686 static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, in i915_perf_ioctl_locked() argument
3692 i915_perf_enable_locked(stream); in i915_perf_ioctl_locked()
3695 i915_perf_disable_locked(stream); in i915_perf_ioctl_locked()
3698 return i915_perf_config_locked(stream, arg); in i915_perf_ioctl_locked()
3705 * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs
3706 * @file: An i915 perf stream file
3719 struct i915_perf_stream *stream = file->private_data; in i915_perf_ioctl() local
3722 mutex_lock(&stream->lock); in i915_perf_ioctl()
3723 ret = i915_perf_ioctl_locked(stream, cmd, arg); in i915_perf_ioctl()
3724 mutex_unlock(&stream->lock); in i915_perf_ioctl()
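
i915_perf_ioctl() above dispatches the three stream ioctls. A hedged userspace sketch, assuming the I915_PERF_IOCTL_* numbers from include/uapi/drm/i915_drm.h; note that the CONFIG argument is the metrics set ID itself rather than a pointer, as i915_perf_config_locked() above suggests:

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <drm/i915_drm.h>

        /* Start capture, switch the active metrics set in place, then stop. */
        static int reconfigure_stream(int stream_fd, uint64_t new_metrics_set)
        {
                if (ioctl(stream_fd, I915_PERF_IOCTL_ENABLE, 0) < 0)
                        return -1;

                if (ioctl(stream_fd, I915_PERF_IOCTL_CONFIG, new_metrics_set) < 0)
                        return -1;

                return ioctl(stream_fd, I915_PERF_IOCTL_DISABLE, 0);
        }
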
3730 * i915_perf_destroy_locked - destroy an i915 perf stream
3731 * @stream: An i915 perf stream
3733 * Frees all resources associated with the given i915 perf @stream, disabling
3739 static void i915_perf_destroy_locked(struct i915_perf_stream *stream) in i915_perf_destroy_locked() argument
3741 if (stream->enabled) in i915_perf_destroy_locked()
3742 i915_perf_disable_locked(stream); in i915_perf_destroy_locked()
3744 if (stream->ops->destroy) in i915_perf_destroy_locked()
3745 stream->ops->destroy(stream); in i915_perf_destroy_locked()
3747 if (stream->ctx) in i915_perf_destroy_locked()
3748 i915_gem_context_put(stream->ctx); in i915_perf_destroy_locked()
3750 kfree(stream); in i915_perf_destroy_locked()
3754 * i915_perf_release - handles userspace close() of a stream file
3756 * @file: An i915 perf stream file
3758 * Cleans up any resources associated with an open i915 perf stream file.
3766 struct i915_perf_stream *stream = file->private_data; in i915_perf_release() local
3767 struct i915_perf *perf = stream->perf; in i915_perf_release()
3768 struct intel_gt *gt = stream->engine->gt; in i915_perf_release()
3772 * other user of stream->lock. Use the perf lock to destroy the stream in i915_perf_release()
3776 i915_perf_destroy_locked(stream); in i915_perf_release()
3779 /* Release the reference the perf stream kept on the driver. */ in i915_perf_release()
3801 * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD
3809 * Implements further stream config validation and stream initialization on
3818 * config validation and stream initialization details will be handled by
3820 * will be relevant to all stream types / backends.
3831 struct i915_perf_stream *stream = NULL; in i915_perf_open_ioctl_locked() local
3844 "Failed to look up context with ID %u for opening perf stream\n", in i915_perf_open_ioctl_locked()
3867 * doesn't request global stream access (i.e. query based sampling in i915_perf_open_ioctl_locked()
3902 "Insufficient privileges to open i915 perf stream\n"); in i915_perf_open_ioctl_locked()
3907 stream = kzalloc(sizeof(*stream), GFP_KERNEL); in i915_perf_open_ioctl_locked()
3908 if (!stream) { in i915_perf_open_ioctl_locked()
3913 stream->perf = perf; in i915_perf_open_ioctl_locked()
3914 stream->ctx = specific_ctx; in i915_perf_open_ioctl_locked()
3915 stream->poll_oa_period = props->poll_oa_period; in i915_perf_open_ioctl_locked()
3917 ret = i915_oa_stream_init(stream, param, props); in i915_perf_open_ioctl_locked()
3921 /* we avoid simply assigning stream->sample_flags = props->sample_flags in i915_perf_open_ioctl_locked()
3925 if (WARN_ON(stream->sample_flags != props->sample_flags)) { in i915_perf_open_ioctl_locked()
3935 stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags); in i915_perf_open_ioctl_locked()
3942 i915_perf_enable_locked(stream); in i915_perf_open_ioctl_locked()
3952 if (stream->ops->destroy) in i915_perf_open_ioctl_locked()
3953 stream->ops->destroy(stream); in i915_perf_open_ioctl_locked()
3955 kfree(stream); in i915_perf_open_ioctl_locked()
3984 * read_properties_unlocked - validate + copy userspace stream open properties
3988 * @props: The stream configuration built up while validating properties
3992 * properties necessary for a particular kind of stream have been set.
4226 * i915_perf_open_ioctl - DRM ioctl() for userspace to open a stream FD
4231 * Validates the stream open parameters given by userspace including flags
4234 * Very little is assumed up front about the nature of the stream being
4236 * i915-perf stream is expected to be a suitable interface for other forms of
4246 * Return: A newly opened i915 Perf stream file descriptor or negative
4293 * used to open an i915-perf stream.
4787 * and their content will be freed when the stream using the config is closed.
5180 * stream instead of waiting until driver _fini which no one in i915_perf_init()
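
The excerpt from source line 4787 refers to OA configs registered by userspace. A hedged sketch of that registration path, assuming struct drm_i915_perf_oa_config and DRM_IOCTL_I915_PERF_ADD_CONFIG as defined in include/uapi/drm/i915_drm.h; the UUID and register lists are placeholders, and each register list is assumed to be (address, value) u32 pairs:

        #include <stdint.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <drm/i915_drm.h>

        /* Register a user OA config; on success the ioctl returns the metrics
         * set ID accepted by DRM_I915_PERF_PROP_OA_METRICS_SET and
         * I915_PERF_IOCTL_CONFIG. */
        static long add_oa_config(int drm_fd,
                                  const uint32_t *mux_regs, uint32_t n_mux,
                                  const uint32_t *bool_regs, uint32_t n_bool,
                                  const uint32_t *flex_regs, uint32_t n_flex)
        {
                struct drm_i915_perf_oa_config cfg = {
                        .n_mux_regs = n_mux,
                        .mux_regs_ptr = (uintptr_t)mux_regs,
                        .n_boolean_regs = n_bool,
                        .boolean_regs_ptr = (uintptr_t)bool_regs,
                        .n_flex_regs = n_flex,
                        .flex_regs_ptr = (uintptr_t)flex_regs,
                };

                /* Placeholder UUID; the field is a fixed 36-character string. */
                memcpy(cfg.uuid, "01234567-0123-0123-0123-0123456789ab", sizeof(cfg.uuid));

                return ioctl(drm_fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &cfg);
        }
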