// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
#include "intel_pps.h"
#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_uncore_trace.h"

#define AUX_CH_NAME_BUFSIZE	6

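/*
 * Format a printable name for the given AUX CH into the caller provided
 * buffer: a single letter ("A", "B", ...) or "USBC1".."USBC6" on TGL+.
 */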
static const char *aux_ch_name(struct intel_display *display,
			       char *buf, int size, enum aux_ch aux_ch)
{
	if (DISPLAY_VER(display) >= 13 && aux_ch >= AUX_CH_D_XELPD)
		snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D);
	else if (DISPLAY_VER(display) >= 12 && aux_ch >= AUX_CH_USBC1)
		snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1);
	else
		snprintf(buf, size, "%c", 'A' + aux_ch);

	return buf;
}

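/* Pack up to 4 bytes, MSB first, into a u32 for the AUX CH data registers. */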
u32 intel_dp_aux_pack(const u8 *src, int src_bytes)
{
	int i;
	u32 v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((u32)src[i]) << ((3 - i) * 8);
	return v;
}

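/* Unpack a u32 read from an AUX CH data register into up to 4 bytes, MSB first. */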
static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}

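/*
 * Poll the AUX CH control register until SEND_BUSY clears (10 ms max) and
 * return the final status; a timeout is logged but not treated as fatal here.
 */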
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	int ret;

	ret = intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY,
				   0,
				   2, timeout_ms, &status);

	if (ret == -ETIMEDOUT)
		drm_err(display->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);

	return status;
}

static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);

	if (index)
		return 0;

	/*
	 * The clock divider is based on the hrawclk, and we would like the AUX
	 * clock to run at 2MHz. So, take the hrawclk value, divide by 2000 and
	 * use that.
	 */
	return DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq, 2000);
}

static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 freq;

	if (index)
		return 0;

	/*
	 * The clock divider is based on the cdclk or PCH rawclk, and we would
	 * like the AUX clock to run at 2MHz. So, take the cdclk or PCH rawclk
	 * value, divide by 2000 and use that.
	 */
	if (dig_port->aux_ch == AUX_CH_A)
		freq = display->cdclk.hw.cdclk;
	else
		freq = DISPLAY_RUNTIME_INFO(display)->rawclk_freq;
	return DIV_ROUND_CLOSEST(freq, 2000);
}

static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (the hardware
	 * derives the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

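/* Total number of SYNC pulses to program: precharge pulses plus the preamble. */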
static int intel_dp_aux_sync_len(void)
{
	int precharge = 16; /* 10-16 */
	int preamble = 16;

	return precharge + preamble;
}

int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp)
{
	int precharge = 10; /* 10-16 */
	int preamble = 8;

	/*
	 * We faced some glitches on the Dell Precision 5490 MTL laptop with the
	 * panel "Manufacturer: AUO, Model: 63898" when using the HW default of
	 * 18. Using 20 fixes these problems with that panel, and is still
	 * within the range mentioned in the eDP specification. Since increasing
	 * the Fast Wake sync length causes problems with other panels, only
	 * increase the length as a quirk for this specific laptop.
	 */
	if (intel_has_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN))
		precharge += 2;

	return precharge + preamble;
}

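/*
 * With a sync length of 16 + 16 = 32 pulses, a minimum precharge of 10 and a
 * preamble of 16, this programs (32 - 10 - 16) / 2 = 3 extra 2us units of
 * precharge.
 */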
static int g4x_dp_aux_precharge_len(void)
{
	int precharge_min = 10;
	int preamble = 16;

	/* HW wants the length of the extra precharge in 2us units */
	return (intel_dp_aux_sync_len() -
		precharge_min - preamble) / 2;
}

static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 aux_clock_divider)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	u32 timeout;

	/* Max timeout value on G4x-BDW: 1.6ms */
	if (IS_BROADWELL(i915))
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       DP_AUX_CH_CTL_INTERRUPT |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	       DP_AUX_CH_CTL_PRECHARGE_2US(g4x_dp_aux_precharge_len()) |
	       DP_AUX_CH_CTL_BIT_CLOCK_2X(aux_clock_divider);
}

static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				int send_bytes,
				u32 unused)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 ret;

	/*
	 * Max timeout values:
	 * SKL-GLK: 1.6ms
	 * ICL+: 4ms
	 */
	ret = DP_AUX_CH_CTL_SEND_BUSY |
	      DP_AUX_CH_CTL_DONE |
	      DP_AUX_CH_CTL_INTERRUPT |
	      DP_AUX_CH_CTL_TIME_OUT_ERROR |
	      DP_AUX_CH_CTL_TIME_OUT_MAX |
	      DP_AUX_CH_CTL_RECEIVE_ERROR |
	      DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
	      DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len(intel_dp)) |
	      DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());

	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		ret |= DP_AUX_CH_CTL_TBT_IO;

	/*
	 * Power request bit is already set during aux power well enable.
	 * Preserve the bit across aux transactions.
	 */
	if (DISPLAY_VER(display) >= 14)
		ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;

	return ret;
}

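/*
 * Low level AUX transfer: load up to 20 bytes from @send into the data
 * registers, kick off the transaction, retry on errors per the DP spec, and
 * unpack up to @recv_size bytes into @recv. Returns the number of bytes
 * received or a negative error code.
 */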
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	intel_digital_port_lock(encoder);
	/*
	 * Abort transfers on a disconnected port as required by
	 * DP 1.4a link CTS 4.2.1.5, also avoiding the long AUX
	 * timeouts that would otherwise happen.
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    !intel_digital_port_connected_locked(&dig_port->base)) {
		ret = -ENXIO;
		goto out_unlock;
	}

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(display, aux_domain);
	pps_wakeref = intel_pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to the upper
	 * layers to turn it off. But for e.g. i2c-dev access we need to turn
	 * it on/off ourselves.
	 */
	vdd = intel_pps_vdd_on_unlocked(intel_dp);

	/*
	 * DP AUX is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency to prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);

	intel_pps_check_power_unlocked(intel_dp);

	/*
	 * FIXME PSR should be disabled here to prevent it from using the same
	 * AUX CH simultaneously
	 */

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_de_read_notrace(display, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_de_read(display, ch_ctl);

		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(display->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(display->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_de_write(display, ch_data[i >> 2],
					       intel_dp_aux_pack(send + i,
								 send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_de_write(display, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_de_write(display, ch_ctl,
				       status | DP_AUX_CH_CTL_DONE |
				       DP_AUX_CH_CTL_TIME_OUT_ERROR |
				       DP_AUX_CH_CTL_RECEIVE_ERROR);

			/*
			 * DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(display->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/*
	 * Check for timeout or receive error. Timeouts occur when the sink is
	 * not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(display->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/*
	 * Timeouts occur when the device isn't connected, so they're "normal"
	 * -- don't fill the kernel log with these
	 */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(display->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = REG_FIELD_GET(DP_AUX_CH_CTL_MESSAGE_SIZE_MASK, status);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so return -EBUSY so that the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(display->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_aux_unpack(intel_de_read(display, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		intel_pps_vdd_off_unlocked(intel_dp, false);

	intel_pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(display, aux_domain, aux_wakeref);
out_unlock:
	intel_digital_port_unlock(encoder);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)

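/*
 * Build the 4 byte AUX request header: request type plus address bits 19:16,
 * address bits 15:8, address bits 7:0, and the transfer length minus one.
 */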
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}

static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
{
	/*
	 * If we're trying to send the HDCP Aksv, we need to set the Aksv
	 * select bit to inform the hardware to send the Aksv after our header
	 * since we can't access that data from software.
	 */
	if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
	    msg->address == DP_AUX_HDCP_AKSV)
		return DP_AUX_CH_CTL_AUX_AKSV_SELECT;

	return 0;
}

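/*
 * drm_dp_aux .transfer() hook: build the header, append any payload, hand the
 * message to intel_dp_aux_xfer() and translate the raw reply into msg->reply
 * and the returned payload size.
 */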
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct intel_display *display = to_intel_display(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(display->drm, txsize > 20))
			return -E2BIG;

		drm_WARN_ON(display->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(display->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

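/*
 * Per-platform AUX CH control/data register lookup helpers. An unexpected
 * channel is flagged with MISSING_CASE() and falls back to a sane default.
 */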
static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return VLV_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return VLV_DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_CTL(aux_ch);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
		return DP_AUX_CH_DATA(aux_ch, index);
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
		return PCH_DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_D:
	case AUX_CH_E:
	case AUX_CH_F:
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_CTL(aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_CTL(AUX_CH_A);
	}
}

static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_C:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
	case AUX_CH_USBC5: /* aka AUX_CH_D_XELPD */
	case AUX_CH_USBC6: /* aka AUX_CH_E_XELPD */
		return DP_AUX_CH_DATA(aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return DP_AUX_CH_DATA(AUX_CH_A, index);
	}
}

static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_CTL(display, aux_ch);
	default:
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_CTL(display, AUX_CH_A);
	}
}

static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum aux_ch aux_ch = dig_port->aux_ch;

	switch (aux_ch) {
	case AUX_CH_A:
	case AUX_CH_B:
	case AUX_CH_USBC1:
	case AUX_CH_USBC2:
	case AUX_CH_USBC3:
	case AUX_CH_USBC4:
		return XELPDP_DP_AUX_CH_DATA(display, aux_ch, index);
	default:
		MISSING_CASE(aux_ch);
		return XELPDP_DP_AUX_CH_DATA(display, AUX_CH_A, index);
	}
}

void intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
		cpu_latency_qos_remove_request(&intel_dp->pm_qos);

	kfree(intel_dp->aux.name);
}

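/*
 * Select the per-platform register and vfunc hooks, initialize the drm_dp_aux
 * instance and set up the PM QoS request used during transfers.
 */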
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum aux_ch aux_ch = dig_port->aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	if (DISPLAY_VER(display) >= 14) {
		intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 12) {
		intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
	} else if (DISPLAY_VER(display) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(i915)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(i915))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	if (DISPLAY_VER(display) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	intel_dp->aux.drm_dev = display->drm;
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
				       aux_ch_name(display, buf, sizeof(buf), aux_ch),
				       encoder->base.name);

	intel_dp->aux.transfer = intel_dp_aux_transfer;
	cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}

static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);

	/* SKL has DDI E but no AUX E */
	if (DISPLAY_VER(display) == 9 && encoder->port == PORT_E)
		return AUX_CH_A;

	return (enum aux_ch)encoder->port;
}

static struct intel_encoder *
get_encoder_by_aux_ch(struct intel_encoder *encoder,
		      enum aux_ch aux_ch)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *other;

	for_each_intel_encoder(display->drm, other) {
		if (other == encoder)
			continue;

		if (!intel_encoder_is_dig_port(other))
			continue;

		if (enc_to_dig_port(other)->aux_ch == aux_ch)
			return other;
	}

	return NULL;
}

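/*
 * Pick the AUX CH for this encoder: prefer the VBT provided channel, fall
 * back to the platform default, and give up if the channel is already
 * claimed by another encoder.
 */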
enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
{
	struct intel_display *display = to_intel_display(encoder);
	struct intel_encoder *other;
	const char *source;
	enum aux_ch aux_ch;
	char buf[AUX_CH_NAME_BUFSIZE];

	aux_ch = intel_bios_dp_aux_ch(encoder->devdata);
	source = "VBT";

	if (aux_ch == AUX_CH_NONE) {
		aux_ch = default_aux_ch(encoder);
		source = "platform default";
	}

	if (aux_ch == AUX_CH_NONE)
		return AUX_CH_NONE;

	/* FIXME validate aux_ch against platform caps */

	other = get_encoder_by_aux_ch(encoder, aux_ch);
	if (other) {
		drm_dbg_kms(display->drm,
			    "[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n",
			    encoder->base.base.id, encoder->base.name,
			    aux_ch_name(display, buf, sizeof(buf), aux_ch),
			    other->base.base.id, other->base.name);
		return AUX_CH_NONE;
	}

	drm_dbg_kms(display->drm,
		    "[ENCODER:%d:%s] Using AUX CH %s (%s)\n",
		    encoder->base.base.id, encoder->base.name,
		    aux_ch_name(display, buf, sizeof(buf), aux_ch), source);

	return aux_ch;
}

void intel_dp_aux_irq_handler(struct intel_display *display)
{
	wake_up_all(&display->gmbus.wait_queue);
}