/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT__
#define __INTEL_GT__

#include "intel_engine_types.h"
#include "intel_gt_types.h"
#include "intel_reset.h"

struct drm_i915_private;
struct drm_printer;

/*
 * Check that the GT is a graphics GT and has an IP version within the
 * specified range (inclusive).
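 *
 * A usage sketch; the IP version bounds shown are illustrative only:
 *
 *	IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))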
 */
#define IS_GFX_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt)->type != GT_MEDIA && \
	 GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
	 GRAPHICS_VER_FULL((gt)->i915) <= (until)))

/*
 * Check that the GT is a media GT and has an IP version within the
 * specified range (inclusive).
 *
 * Only usable on platforms with a standalone media design (i.e., IP version 13
 * and higher).
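 *
 * A usage sketch; the IP version bounds shown are illustrative only:
 *
 *	IS_MEDIA_GT_IP_RANGE(gt, IP_VER(13, 0), IP_VER(13, 0))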
 */
#define IS_MEDIA_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(13, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt) && (gt)->type == GT_MEDIA && \
	 MEDIA_VER_FULL((gt)->i915) >= (from) && \
	 MEDIA_VER_FULL((gt)->i915) <= (until)))

/*
 * Check that the GT is a graphics GT with a specific IP version and has
 * a stepping in the range [from, until). The lower stepping bound is
 * inclusive, the upper bound is exclusive. The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary. E.g.,
 *
 *	IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0)
 *	IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B1, STEP_FOREVER)
 *
 * "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
 * stepping bound for the specified IP version.
 */
#define IS_GFX_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_GFX_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_GRAPHICS_STEP((gt)->i915, (from), (until))))

/*
 * Check that the GT is a media GT with a specific IP version and has
 * a stepping in the range [from, until). The lower stepping bound is
 * inclusive, the upper bound is exclusive. The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary. "STEP_FOREVER" can be passed as "until" for
 * workarounds that have no upper stepping bound for the specified IP version.
 *
 * This macro may only be used to match on platforms that have a standalone
 * media design (i.e., media version 13 or higher).
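 *
 * A usage sketch; the IP version and stepping values shown are illustrative
 * only:
 *
 *	IS_MEDIA_GT_IP_STEP(gt, IP_VER(13, 0), STEP_A0, STEP_FOREVER)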
 */
#define IS_MEDIA_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_MEDIA_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_MEDIA_STEP((gt)->i915, (from), (until))))

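/*
 * Emit a GEM trace message prefixed with the name of the underlying device,
 * so that messages from different GTs can be told apart on multi-tile
 * systems. A usage sketch (the format string and arguments are illustrative
 * only):
 *
 *	GT_TRACE(gt, "engine mask %08x\n", gt->info.engine_mask);
 */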
#define GT_TRACE(gt, fmt, ...) do { \
	const struct intel_gt *gt__ __maybe_unused = (gt); \
	GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
		  ##__VA_ARGS__); \
} while (0)

#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
	IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 55), IP_VER(12, 71)) && \
	engine->class == COPY_ENGINE_CLASS && engine->instance == 0)

static inline bool gt_is_root(struct intel_gt *gt)
{
	return !gt->info.id;
}

bool intel_gt_needs_wa_22016122933(struct intel_gt *gt);

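/*
 * The uC controllers (GuC, HuC, GSC) are embedded within struct intel_gt,
 * so container_of() recovers the enclosing GT, and from there the i915
 * device, without needing a back-pointer.
 */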
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
	return container_of(uc, struct intel_gt, uc);
}

static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
{
	return container_of(guc, struct intel_gt, uc.guc);
}

static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
{
	return container_of(huc, struct intel_gt, uc.huc);
}

static inline struct intel_gt *gsc_uc_to_gt(struct intel_gsc_uc *gsc_uc)
{
	return container_of(gsc_uc, struct intel_gt, uc.gsc);
}

static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
{
	return container_of(gsc, struct intel_gt, gsc);
}

static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
	return guc_to_gt(guc)->i915;
}

void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
int intel_gt_init(struct intel_gt *gt);
void intel_gt_driver_register(struct intel_gt *gt);

void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
void intel_gt_driver_late_release_all(struct drm_i915_private *i915);

int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);

void intel_gt_check_and_clear_faults(struct intel_gt *gt);
i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt);
void intel_gt_clear_error_registers(struct intel_gt *gt,
				    intel_engine_mask_t engine_mask);

void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
void intel_gt_chipset_flush(struct intel_gt *gt);

static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
					  enum intel_gt_scratch_field field)
{
	return i915_ggtt_offset(gt->scratch) + field;
}

static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
{
	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
	       test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}

static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
	GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
		   !test_bit(I915_WEDGED, &gt->reset.flags));

	return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
}

int intel_gt_probe_all(struct drm_i915_private *i915);
int intel_gt_tiles_init(struct drm_i915_private *i915);
void intel_gt_release_all(struct drm_i915_private *i915);

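/*
 * Iterate over all initialised GTs of an i915 device. A usage sketch
 * (variable names are illustrative only):
 *
 *	struct intel_gt *gt;
 *	unsigned int i;
 *
 *	for_each_gt(gt, i915, i)
 *		intel_gt_chipset_flush(gt);
 */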
#define for_each_gt(gt__, i915__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_MAX_GT; \
	     (id__)++) \
		for_each_if(((gt__) = (i915__)->gt[(id__)]))

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, gt__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (gt__)->engine[(id__)])
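
/*
 * A for_each_engine() usage sketch (variable names are illustrative only):
 *
 *	struct intel_engine_cs *engine;
 *	enum intel_engine_id id;
 *
 *	for_each_engine(engine, gt, id)
 *		pr_info("%s\n", engine->name);
 */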

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)
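
/*
 * A for_each_engine_masked() usage sketch; the ALL_ENGINES mask and the
 * temporary are illustrative only:
 *
 *	struct intel_engine_cs *engine;
 *	intel_engine_mask_t tmp;
 *
 *	for_each_engine_masked(engine, gt, ALL_ENGINES, tmp)
 *		pr_info("%s\n", engine->name);
 */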

void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p);

void intel_gt_watchdog_work(struct work_struct *work);

enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
					      struct drm_i915_gem_object *obj,
					      bool always_coherent);

void intel_gt_bind_context_set_ready(struct intel_gt *gt);
void intel_gt_bind_context_set_unready(struct intel_gt *gt);
bool intel_gt_is_bind_context_ready(struct intel_gt *gt);
#endif /* __INTEL_GT__ */