// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */
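
/*
 * IPS support for Haswell and Broadwell: IPS only exists on ULT parts and
 * is tied to pipe A. On Broadwell it is toggled through the pcode
 * DISPLAY_IPS_CONTROL mailbox, on Haswell directly through the IPS_CTL
 * register.
 */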

#include <linux/debugfs.h>

#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_pcode.h"

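/*
 * Enable IPS according to the given crtc state. Must be called after at
 * least one non-cursor plane has been enabled and a vblank has passed
 * (post_plane_update). On Broadwell the enable goes through the pcode
 * mailbox and IPS_CTL cannot be trusted afterwards; on Haswell IPS_CTL is
 * written directly and we wait for the enable bit to latch.
 */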
static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	u32 val;

	if (!crtc_state->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank.
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */
	drm_WARN_ON(display->drm,
		    !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));

	val = IPS_ENABLE;

	if (display->ips.false_color)
		val |= IPS_FALSE_COLOR;

	if (display->platform.broadwell) {
		drm_WARN_ON(display->drm,
			    snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
					    val | IPS_PCODE_CONTROL));
		/*
		 * Quoting Art Runyan: "it's not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		intel_de_write(display, IPS_CTL, val);
		/*
		 * The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read.
		 */
		if (intel_de_wait_for_set(display, IPS_CTL, IPS_ENABLE, 50))
			drm_err(display->drm,
				"Timed out waiting for IPS enable\n");
	}
}

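/*
 * Disable IPS for the given crtc state. Returns true when the caller still
 * needs to wait for a vblank before the planes can be disabled.
 */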
bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	bool need_vblank_wait = false;

	if (!crtc_state->ips_enabled)
		return need_vblank_wait;

	if (display->platform.broadwell) {
		drm_WARN_ON(display->drm,
			    snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec-specified
		 * 42ms timeout value leads to occasional timeouts, so use
		 * 100ms instead.
		 */
		if (intel_de_wait_for_clear(display, IPS_CTL, IPS_ENABLE, 100))
			drm_err(display->drm,
				"Timed out waiting for IPS disable\n");
	} else {
		intel_de_write(display, IPS_CTL, 0);
		intel_de_posting_read(display, IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	need_vblank_wait = true;

	return need_vblank_wait;
}

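/*
 * IPS has to be turned off before the plane update when a full modeset is
 * needed, when a Haswell split gamma LUT update is pending, or when the new
 * state no longer has IPS enabled.
 */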
static bool hsw_ips_need_disable(struct intel_atomic_state *state,
				 struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!old_crtc_state->ips_enabled)
		return false;

	if (intel_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Disable IPS before we program the LUT.
	 */
	if (display->platform.haswell &&
	    intel_crtc_needs_color_update(new_crtc_state) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
		return true;

	return !new_crtc_state->ips_enabled;
}

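/*
 * Disable IPS ahead of the plane update if needed. Returns true if the
 * caller must wait for a vblank before the planes are updated.
 */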
bool hsw_ips_pre_update(struct intel_atomic_state *state,
			struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);

	if (!hsw_ips_need_disable(state, crtc))
		return false;

	return hsw_ips_disable(old_crtc_state);
}

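/*
 * IPS has to be (re-)enabled after the plane update following a modeset,
 * following a Haswell split gamma LUT update, on the first fastset of an
 * inherited state (Broadwell IPS state can't be read out), or when IPS was
 * not already enabled in the old state.
 */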
static bool hsw_ips_need_enable(struct intel_atomic_state *state,
				struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!new_crtc_state->ips_enabled)
		return false;

	if (intel_crtc_needs_modeset(new_crtc_state))
		return true;

	/*
	 * Workaround: Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 *
	 * Re-enable IPS after the LUT has been programmed.
	 */
	if (display->platform.haswell &&
	    intel_crtc_needs_color_update(new_crtc_state) &&
	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
		return true;

	/*
	 * We can't read out IPS on Broadwell, so assume the worst and
	 * forcibly enable IPS on the first fastset.
	 */
	if (intel_crtc_needs_fastset(new_crtc_state) && old_crtc_state->inherited)
		return true;

	return !old_crtc_state->ips_enabled;
}

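/* Re-enable IPS after the plane update when the new state calls for it. */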
void hsw_ips_post_update(struct intel_atomic_state *state,
			 struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	if (!hsw_ips_need_enable(state, crtc))
		return;

	hsw_ips_enable(new_crtc_state);
}

/* IPS only exists on ULT machines and is tied to pipe A. */
bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);

	return HAS_IPS(display) && crtc->pipe == PIPE_A;
}

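/*
 * A crtc state can use IPS only on pipe A of an IPS-capable part, with IPS
 * allowed by the enable_ips parameter, at most 24 bpp, and (on Broadwell)
 * a pixel rate below 95% of the maximum cdclk.
 */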
static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	/* IPS only exists on ULT machines and is tied to pipe A. */
	if (!hsw_crtc_supports_ips(crtc))
		return false;

	if (!display->params.enable_ips)
		return false;

	if (crtc_state->pipe_bpp > 24)
		return false;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 * would be a net power win.
	 */
	if (display->platform.broadwell &&
	    crtc_state->pixel_rate > display->cdclk.max_cdclk_freq * 95 / 100)
		return false;

	return true;
}

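/*
 * Minimum cdclk required to keep IPS usable for this crtc state. Only
 * relevant on Broadwell, where the pixel rate must stay below 95% of cdclk;
 * returns 0 when IPS is not a factor.
 */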
int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);

	if (!display->platform.broadwell)
		return 0;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
}

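/*
 * Atomic check: decide whether IPS can be enabled for the crtc. Requires an
 * IPS-capable state, no pipe CRC collection, at least one non-cursor plane
 * and, on Broadwell, enough cdclk headroom (pixel rate below 95% of cdclk).
 */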
int hsw_ips_compute_config(struct intel_atomic_state *state,
			   struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);

	crtc_state->ips_enabled = false;

	if (!hsw_crtc_state_ips_capable(crtc_state))
		return 0;

	/*
	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
	 * enabled and disabled dynamically based on package C states,
	 * user space can't make reliable use of the CRCs, so let's just
	 * completely disable it.
	 */
	if (crtc_state->crc_enabled)
		return 0;

	/* IPS should be fine as long as at least one plane is enabled. */
	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
		return 0;

	if (display->platform.broadwell) {
		const struct intel_cdclk_state *cdclk_state;

		cdclk_state = intel_atomic_get_cdclk_state(state);
		if (IS_ERR(cdclk_state))
			return PTR_ERR(cdclk_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
			return 0;
	}

	crtc_state->ips_enabled = true;

	return 0;
}

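/*
 * State readout: IPS_CTL can only be read on Haswell; on Broadwell assume
 * IPS is enabled so the first commit brings it to a defined state.
 */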
void hsw_ips_get_config(struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	if (!hsw_crtc_supports_ips(crtc))
		return;

	if (display->platform.haswell) {
		crtc_state->ips_enabled = intel_de_read(display, IPS_CTL) & IPS_ENABLE;
	} else {
		/*
		 * We cannot read out the IPS state on Broadwell, so set it to
		 * true so we can set it to a defined state on the first
		 * commit.
		 */
		crtc_state->ips_enabled = true;
	}
}

static int hsw_ips_debugfs_false_color_get(void *data, u64 *val)
{
	struct intel_crtc *crtc = data;
	struct intel_display *display = to_intel_display(crtc);

	*val = display->ips.false_color;

	return 0;
}

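/*
 * debugfs: update the IPS false color setting and re-apply IPS on the crtc
 * if it is active and has no commit still in flight.
 */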
static int hsw_ips_debugfs_false_color_set(void *data, u64 val)
{
	struct intel_crtc *crtc = data;
	struct intel_display *display = to_intel_display(crtc);
	struct intel_crtc_state *crtc_state;
	int ret;

	ret = drm_modeset_lock(&crtc->base.mutex, NULL);
	if (ret)
		return ret;

	display->ips.false_color = val;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	if (!crtc_state->hw.active)
		goto unlock;

	if (crtc_state->uapi.commit &&
	    !try_wait_for_completion(&crtc_state->uapi.commit->hw_done))
		goto unlock;

	hsw_ips_enable(crtc_state);

unlock:
	drm_modeset_unlock(&crtc->base.mutex);

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(hsw_ips_debugfs_false_color_fops,
			 hsw_ips_debugfs_false_color_get,
			 hsw_ips_debugfs_false_color_set,
			 "%llu\n");

static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
{
	struct intel_crtc *crtc = m->private;
	struct intel_display *display = to_intel_display(crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   str_yes_no(display->params.enable_ips));

	if (DISPLAY_VER(display) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (intel_de_read(display, IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hsw_ips_debugfs_status);

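/*
 * Add the per-crtc i915_ips_false_color and i915_ips_status debugfs files
 * on crtcs that support IPS.
 */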
void hsw_ips_crtc_debugfs_add(struct intel_crtc *crtc)
{
	if (!hsw_crtc_supports_ips(crtc))
		return;

	debugfs_create_file("i915_ips_false_color", 0644, crtc->base.debugfs_entry,
			    crtc, &hsw_ips_debugfs_false_color_fops);

	debugfs_create_file("i915_ips_status", 0444, crtc->base.debugfs_entry,
			    crtc, &hsw_ips_debugfs_status_fops);
}