// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence MHDP8546 DP bridge driver.
 *
 * Copyright (C) 2020 Cadence Design Systems, Inc.
 *
 * Authors: Quentin Schulz <quentin.schulz@free-electrons.com>
 *          Swapnil Jakhade <sjakhade@cadence.com>
 *          Yuti Amonkar <yamonkar@cadence.com>
 *          Tomi Valkeinen <tomi.valkeinen@ti.com>
 *          Jyri Sarha <jsarha@ti.com>
 *
 * TODO:
 *     - Implement optimized mailbox communication using mailbox interrupts
 *     - Add support for power management
 *     - Add support for features like audio, MST and fast link training
 *     - Implement request_fw_cancel to handle HW_STATE
 *     - Fix asynchronous loading of firmware implementation
 *     - Add DRM helper function for cdns_mhdp_lower_link_rate
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-dp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include <linux/unaligned.h>

#include "cdns-mhdp8546-core.h"
#include "cdns-mhdp8546-hdcp.h"
#include "cdns-mhdp8546-j721e.h"

static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	/* Enable SW event interrupts */
	if (mhdp->bridge_attached)
		writel(readl(mhdp->regs + CDNS_APB_INT_MASK) &
		       ~CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);
}

static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	writel(readl(mhdp->regs + CDNS_APB_INT_MASK) |
	       CDNS_APB_INT_MASK_SW_EVENT_INT,
	       mhdp->regs + CDNS_APB_INT_MASK);
}

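/*
 * Low-level mailbox access. Each helper polls an APB status register
 * until the mailbox can provide or accept a byte and then transfers a
 * single byte, so callers must hold mbox_mutex around a complete
 * message exchange.
 */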
static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
{
	int ret, empty;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
				 empty, !empty, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
}

static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
{
	int ret, full;

	WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));

	ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
				 full, !full, MAILBOX_RETRY_US,
				 MAILBOX_TIMEOUT_US);
	if (ret < 0)
		return ret;

	writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);

	return 0;
}

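/*
 * Receive and validate a message header from the firmware. Every
 * response starts with a 4-byte header: opcode, module ID and a
 * big-endian 16-bit payload size. On a mismatch the stale payload is
 * drained so that the mailbox stays usable for the next exchange.
 */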
static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
					 u8 module_id, u8 opcode,
					 u16 req_size)
{
	u32 mbox_size, i;
	u8 header[4];
	int ret;

	/* read the header of the message */
	for (i = 0; i < sizeof(header); i++) {
		ret = cdns_mhdp_mailbox_read(mhdp);
		if (ret < 0)
			return ret;

		header[i] = ret;
	}

	mbox_size = get_unaligned_be16(header + 2);

	if (opcode != header[0] || module_id != header[1] ||
	    req_size != mbox_size) {
		/*
		 * If the message in mailbox is not what we want, we need to
		 * clear the mailbox by reading its contents.
		 */
		for (i = 0; i < mbox_size; i++)
			if (cdns_mhdp_mailbox_read(mhdp) < 0)
				break;

		return -EINVAL;
	}

	return 0;
}

static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
				       u8 *buff, u16 buff_size)
{
	u32 i;
	int ret;

	for (i = 0; i < buff_size; i++) {
		ret = cdns_mhdp_mailbox_read(mhdp);
		if (ret < 0)
			return ret;

		buff[i] = ret;
	}

	return 0;
}

static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
				  u8 opcode, u16 size, u8 *message)
{
	u8 header[4];
	int ret, i;

	header[0] = opcode;
	header[1] = module_id;
	put_unaligned_be16(size, header + 2);

	for (i = 0; i < sizeof(header); i++) {
		ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < size; i++) {
		ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
		if (ret)
			return ret;
	}

	return 0;
}

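/*
 * Read an internal controller register through the GENERAL mailbox
 * module. The response echoes the 32-bit address ahead of the value,
 * which is verified before the value is accepted.
 */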
static
int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
{
	u8 msg[4], resp[8];
	int ret;

	put_unaligned_be32(addr, msg);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
				     GENERAL_REGISTER_READ,
				     sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
					    GENERAL_REGISTER_READ,
					    sizeof(resp));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
	if (ret)
		goto out;

	/* Returned address value should be the same as requested */
	if (memcmp(msg, resp, sizeof(msg))) {
		ret = -EINVAL;
		goto out;
	}

	*value = get_unaligned_be32(resp + 4);

out:
	mutex_unlock(&mhdp->mbox_mutex);
	if (ret) {
		dev_err(mhdp->dev, "Failed to read register\n");
		*value = 0;
	}

	return ret;
}

static
int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
{
	u8 msg[6];
	int ret;

	put_unaligned_be16(addr, msg);
	put_unaligned_be32(val, msg + 2);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_WRITE_REGISTER, sizeof(msg), msg);

	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

static
int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
			    u8 start_bit, u8 bits_no, u32 val)
{
	u8 field[8];
	int ret;

	put_unaligned_be16(addr, field);
	field[2] = start_bit;
	field[3] = bits_no;
	put_unaligned_be32(val, field + 4);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_WRITE_FIELD, sizeof(field), field);

	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

static
int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
			u32 addr, u8 *data, u16 len)
{
	u8 msg[5], reg[5];
	int ret;

	put_unaligned_be16(len, msg);
	put_unaligned_be24(addr, msg + 2);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_READ_DPCD, sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_READ_DPCD,
					    sizeof(reg) + len);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);

out:
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

static
int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
{
	u8 msg[6], reg[5];
	int ret;

	put_unaligned_be16(1, msg);
	put_unaligned_be24(addr, msg + 2);
	msg[5] = value;

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_WRITE_DPCD, sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_WRITE_DPCD, sizeof(reg));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
	if (ret)
		goto out;

	if (addr != get_unaligned_be24(reg + 2))
		ret = -EINVAL;

out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret)
		dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
	return ret;
}

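/*
 * Put the firmware into FW_ACTIVE or FW_STANDBY. The request is a raw
 * GENERAL_MAIN_CONTROL message (header plus one state byte) written
 * byte by byte; the 5-byte response is read back into the same buffer
 * but only checked for transfer errors, not for its contents.
 */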
static
int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
{
	u8 msg[5];
	int ret, i;

	msg[0] = GENERAL_MAIN_CONTROL;
	msg[1] = MB_MODULE_ID_GENERAL;
	msg[2] = 0;
	msg[3] = 1;
	msg[4] = enable ? FW_ACTIVE : FW_STANDBY;

	mutex_lock(&mhdp->mbox_mutex);

	for (i = 0; i < sizeof(msg); i++) {
		ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
		if (ret)
			goto out;
	}

	/* read the firmware state */
	ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
	if (ret)
		goto out;

	ret = 0;

out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret < 0)
		dev_err(mhdp->dev, "set firmware active failed\n");
	return ret;
}

static
int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
{
	u8 status;
	int ret;

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_HPD_STATE, 0, NULL);
	if (ret)
		goto err_get_hpd;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_HPD_STATE,
					    sizeof(status));
	if (ret)
		goto err_get_hpd;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
	if (ret)
		goto err_get_hpd;

	mutex_unlock(&mhdp->mbox_mutex);

	dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
		status ? "" : "un");

	return status;

err_get_hpd:
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}

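/*
 * Read one EDID block through the firmware. The block is addressed as
 * a segment (block / 2) and a block within the segment (block % 2),
 * and the transaction is retried up to four times until the response
 * reports the expected length and segment.
 */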
static
int cdns_mhdp_get_edid_block(void *data, u8 *edid,
			     unsigned int block, size_t length)
{
	struct cdns_mhdp_device *mhdp = data;
	u8 msg[2], reg[2], i;
	int ret;

	mutex_lock(&mhdp->mbox_mutex);

	for (i = 0; i < 4; i++) {
		msg[0] = block / 2;
		msg[1] = block % 2;

		ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
					     DPTX_GET_EDID, sizeof(msg), msg);
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
						    DPTX_GET_EDID,
						    sizeof(reg) + length);
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
		if (ret)
			continue;

		ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
		if (ret)
			continue;

		if (reg[0] == length && reg[1] == block / 2)
			break;
	}

	mutex_unlock(&mhdp->mbox_mutex);

	if (ret)
		dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
			block, ret);

	return ret;
}

static
int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
{
	u8 event = 0;
	int ret;

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_READ_EVENT, 0, NULL);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_READ_EVENT, sizeof(event));
	if (ret < 0)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret < 0)
		return ret;

	dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
		(event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
		(event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
		(event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
		(event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");

	return event;
}

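/*
 * Hand new per-lane voltage swing/pre-emphasis settings to the
 * firmware and let it wait "udelay" microseconds before sampling the
 * link status. The payload is the lane count, a big-endian 16-bit
 * delay and one byte of training data per lane; the firmware replies
 * with an ordinary DPCD read response carrying DP_LANE0_1_STATUS,
 * which is passed back in link_status.
 */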
static
int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
			unsigned int udelay, const u8 *lanes_data,
			u8 link_status[DP_LINK_STATUS_SIZE])
{
	u8 payload[7];
	u8 hdr[5]; /* For DPCD read response header */
	u32 addr;
	int ret;

	if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
		dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
		/* Return directly: the mailbox mutex is not held yet. */
		return -EINVAL;
	}

	payload[0] = nlanes;
	put_unaligned_be16(udelay, payload + 1);
	memcpy(payload + 3, lanes_data, nlanes);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_ADJUST_LT,
				     sizeof(payload), payload);
	if (ret)
		goto out;

	/* Yes, read the DPCD read command response */
	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_READ_DPCD,
					    sizeof(hdr) + DP_LINK_STATUS_SIZE);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
	if (ret)
		goto out;

	addr = get_unaligned_be24(hdr + 2);
	if (addr != DP_LANE0_1_STATUS)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
					  DP_LINK_STATUS_SIZE);

out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret)
		dev_err(mhdp->dev, "Failed to adjust Link Training.\n");

	return ret;
}

/**
 * cdns_mhdp_link_configure() - configure a DisplayPort link
 * @aux: DisplayPort AUX channel
 * @link: pointer to a structure containing the link configuration
 *
 * Returns 0 on success or a negative error code on failure.
 */
static
int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
			     struct cdns_mhdp_link *link)
{
	u8 values[2];
	int err;

	values[0] = drm_dp_link_rate_to_bw_code(link->rate);
	values[1] = link->num_lanes;

	if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
		values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
	if (err < 0)
		return err;

	return 0;
}

static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
{
	return min(mhdp->host.link_rate, mhdp->sink.link_rate);
}

static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
{
	return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
}

static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
{
	return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
}

static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
{
	/* Check if SSC is supported by both sides */
	return mhdp->host.ssc && mhdp->sink.ssc;
}

static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
{
	dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);

	if (mhdp->plugged)
		return connector_status_connected;
	else
		return connector_status_disconnected;
}

static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
{
	u32 major_num, minor_num, revision;
	u32 fw_ver, lib_ver;

	fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
	       | readl(mhdp->regs + CDNS_VER_L);

	lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
		| readl(mhdp->regs + CDNS_LIB_L_ADDR);

	if (lib_ver < 33984) {
		/*
		 * Older FW versions with major number 1 used to store the
		 * repository revision number in these registers as the FW
		 * version information; this is how they are identified.
		 */
		major_num = 1;
		minor_num = 2;
		if (fw_ver == 26098) {
			revision = 15;
		} else if (lib_ver == 0 && fw_ver == 0) {
			revision = 17;
		} else {
			dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
				fw_ver, lib_ver);
			return -ENODEV;
		}
	} else {
		/* To identify newer FW versions with major number 2 onwards. */
		major_num = fw_ver / 10000;
		minor_num = (fw_ver / 100) % 100;
		revision = (fw_ver % 10000) % 100;
	}

	dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
		revision);
	return 0;
}

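/*
 * Upload the firmware image into the controller's IMEM while the uCPU
 * is stalled, then release the stall and wait for the KEEP_ALIVE
 * counter to start ticking before switching the firmware to the active
 * state and enabling the HPD interrupts.
 */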
static int cdns_mhdp_fw_activate(const struct firmware *fw,
				 struct cdns_mhdp_device *mhdp)
{
	unsigned int reg;
	int ret;

	/* Release uCPU reset and stall it. */
	writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);

	memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);

	/* Leave debug mode, release stall */
	writel(0, mhdp->regs + CDNS_APB_CTRL);

	/*
	 * Wait for the KEEP_ALIVE "message" on the first 8 bits.
	 * Updated each sched "tick" (~2ms)
	 */
	ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
				 reg & CDNS_KEEP_ALIVE_MASK, 500,
				 CDNS_KEEP_ALIVE_TIMEOUT);
	if (ret) {
		dev_err(mhdp->dev,
			"device didn't give any life sign: reg %d\n", reg);
		return ret;
	}

	ret = cdns_mhdp_check_fw_version(mhdp);
	if (ret)
		return ret;

	/* Init events to 0 as it's not cleared by FW at boot but on read */
	readl(mhdp->regs + CDNS_SW_EVENT0);
	readl(mhdp->regs + CDNS_SW_EVENT1);
	readl(mhdp->regs + CDNS_SW_EVENT2);
	readl(mhdp->regs + CDNS_SW_EVENT3);

	/* Activate uCPU */
	ret = cdns_mhdp_set_firmware_active(mhdp, true);
	if (ret)
		return ret;

	spin_lock(&mhdp->start_lock);

	mhdp->hw_state = MHDP_HW_READY;

	/*
	 * Here we must keep the lock while enabling the interrupts, since it
	 * would otherwise be possible that the interrupt-enable code runs
	 * after the bridge is detached. A similar situation is not possible
	 * in the attach()/detach() callbacks, since the hw_state change from
	 * MHDP_HW_READY to MHDP_HW_STOPPED happens only on driver removal,
	 * when the bridge should already be detached.
	 */
	cdns_mhdp_bridge_hpd_enable(&mhdp->bridge);

	spin_unlock(&mhdp->start_lock);

	wake_up(&mhdp->fw_load_wq);
	dev_dbg(mhdp->dev, "DP FW activated\n");

	return 0;
}

static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
{
	struct cdns_mhdp_device *mhdp = context;
	bool bridge_attached;
	int ret;

	dev_dbg(mhdp->dev, "firmware callback\n");

	if (!fw || !fw->data) {
		dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
		return;
	}

	ret = cdns_mhdp_fw_activate(fw, mhdp);

	release_firmware(fw);

	if (ret)
		return;

	/*
	 * XXX how to make sure the bridge is still attached when calling
	 * drm_kms_helper_hotplug_event() after releasing the lock? We should
	 * not hold the spin lock when calling drm_kms_helper_hotplug_event(),
	 * since it may cause a deadlock: the fbdev console calls detect()
	 * from the same thread, just down the call stack started here.
	 */
	spin_lock(&mhdp->start_lock);
	bridge_attached = mhdp->bridge_attached;
	spin_unlock(&mhdp->start_lock);
	if (bridge_attached) {
		if (mhdp->connector.dev)
			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
		else
			drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
	}
}

static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
{
	int ret;

	ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
				      GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
	if (ret) {
		dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
			FW_NAME, ret);
		return ret;
	}

	return 0;
}

static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
	int ret;

	if (msg->request != DP_AUX_NATIVE_WRITE &&
	    msg->request != DP_AUX_NATIVE_READ)
		return -EOPNOTSUPP;

	if (msg->request == DP_AUX_NATIVE_WRITE) {
		const u8 *buf = msg->buffer;
		unsigned int i;

		for (i = 0; i < msg->size; ++i) {
			ret = cdns_mhdp_dpcd_write(mhdp,
						   msg->address + i, buf[i]);
			if (!ret)
				continue;

			dev_err(mhdp->dev,
				"Failed to write DPCD addr %u\n",
				msg->address + i);

			return ret;
		}
	} else {
		ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
					  msg->buffer, msg->size);
		if (ret) {
			dev_err(mhdp->dev,
				"Failed to read DPCD addr %u\n",
				msg->address);

			return ret;
		}
	}

	return msg->size;
}

static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
{
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	/* Reset PHY configuration */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
			    mhdp->sink.enhanced & mhdp->host.enhanced);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
			    CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));

	cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
	phy_cfg.dp.link_rate = mhdp->link.rate / 100;
	phy_cfg.dp.lanes = mhdp->link.num_lanes;

	memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
	memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));

	phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
	phy_cfg.dp.set_lanes = true;
	phy_cfg.dp.set_rate = true;
	phy_cfg.dp.set_voltages = true;
	ret = phy_configure(mhdp->phy, &phy_cfg);
	if (ret) {
		dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
			__func__, ret);
		return ret;
	}

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
			    CDNS_PHY_COMMON_CONFIG |
			    CDNS_PHY_TRAINING_EN |
			    CDNS_PHY_TRAINING_TYPE(1) |
			    CDNS_PHY_SCRAMBLER_BYPASS);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);

	return 0;
}

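/*
 * Convert the voltage swing and pre-emphasis requested by the sink in
 * the link status into per-lane PHY settings and DPCD
 * TRAINING_LANEx_SET bytes, clamped to the host limits and to the
 * combined level-3 maximum.
 */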
static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
				       u8 link_status[DP_LINK_STATUS_SIZE],
				       u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
				       union phy_configure_opts *phy_cfg)
{
	u8 adjust, max_pre_emph, max_volt_swing;
	u8 set_volt, set_pre;
	unsigned int i;

	max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
			   << DP_TRAIN_PRE_EMPHASIS_SHIFT;
	max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		/* Check if Voltage swing and pre-emphasis are within limits */
		adjust = drm_dp_get_adjust_request_voltage(link_status, i);
		set_volt = min(adjust, max_volt_swing);

		adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
		set_pre = min(adjust, max_pre_emph)
			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT;

		/*
		 * If the combination of voltage swing and pre-emphasis levels
		 * is not allowed, leave the pre-emphasis as-is and adjust the
		 * voltage swing.
		 */
		if (set_volt + set_pre > 3)
			set_volt = 3 - set_pre;

		phy_cfg->dp.voltage[i] = set_volt;
		lanes_data[i] = set_volt;

		if (set_volt == max_volt_swing)
			lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;

		phy_cfg->dp.pre[i] = set_pre;
		lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);

		if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
			lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
	}
}

static
void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
					  unsigned int lane, u8 volt)
{
	unsigned int s = ((lane & 1) ?
			  DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
			  DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
	unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);

	link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
	link_status[idx] |= volt << s;
}

static
void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
					       unsigned int lane, u8 pre_emphasis)
{
	unsigned int s = ((lane & 1) ?
			  DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
			  DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
	unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);

	link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
	link_status[idx] |= pre_emphasis << s;
}

static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
					  u8 link_status[DP_LINK_STATUS_SIZE])
{
	u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	unsigned int i;
	u8 volt, pre;

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		volt = drm_dp_get_adjust_request_voltage(link_status, i);
		pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
		if (volt + pre > 3)
			cdns_mhdp_set_adjust_request_voltage(link_status, i,
							     3 - pre);
		if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
			cdns_mhdp_set_adjust_request_voltage(link_status, i,
							     max_volt);
		if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
			cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
								  i, max_pre);
	}
}

static void cdns_mhdp_print_lt_status(const char *prefix,
				      struct cdns_mhdp_device *mhdp,
				      union phy_configure_opts *phy_cfg)
{
	char vs[8] = "0/0/0/0";
	char pe[8] = "0/0/0/0";
	unsigned int i;

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
		pe[i * 2] = '0' + phy_cfg->dp.pre[i];
	}

	vs[i * 2 - 1] = '\0';
	pe[i * 2 - 1] = '\0';

	dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
		prefix,
		mhdp->link.num_lanes, mhdp->link.rate / 100,
		vs, pe);
}

static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
					       u8 eq_tps,
					       unsigned int training_interval)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
	u8 link_status[DP_LINK_STATUS_SIZE];
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;
	bool r;

	dev_dbg(mhdp->dev, "Starting EQ phase\n");

	/* Enable link training TPS[eq_tps] in PHY */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
		CDNS_PHY_TRAINING_TYPE(eq_tps);
	if (eq_tps != 4)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
			   CDNS_DP_TRAINING_PATTERN_4);

	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

	do {
		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
					   &phy_cfg);
		phy_cfg.dp.lanes = mhdp->link.num_lanes;
		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		ret = phy_configure(mhdp->phy, &phy_cfg);
		if (ret) {
			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
				__func__, ret);
			goto err;
		}

		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
				    training_interval, lanes_data, link_status);

		r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
		if (!r)
			goto err;

		if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
			cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
						  &phy_cfg);
			return true;
		}

		fail_counter_short++;

		cdns_mhdp_adjust_requested_eq(mhdp, link_status);
	} while (fail_counter_short < 5);

err:
	cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);

	return false;
}

static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
					  u8 link_status[DP_LINK_STATUS_SIZE],
					  u8 *req_volt, u8 *req_pre)
{
	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	unsigned int i;

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		u8 val;

		val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
		      max_volt : req_volt[i];
		cdns_mhdp_set_adjust_request_voltage(link_status, i, val);

		val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
		      max_pre : req_pre[i];
		cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
	}
}

static
void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
			   bool *same_before_adjust, bool *max_swing_reached,
			   u8 before_cr[CDNS_DP_MAX_NUM_LANES],
			   u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
			   u8 *req_pre)
{
	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	bool same_pre, same_volt;
	unsigned int i;
	u8 adjust;

	*same_before_adjust = false;
	*max_swing_reached = false;
	*cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
		req_volt[i] = min(adjust, max_volt);

		adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
			 DP_TRAIN_PRE_EMPHASIS_SHIFT;
		req_pre[i] = min(adjust, max_pre);

		same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
			   req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
		same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
			    req_volt[i];
		if (same_pre && same_volt)
			*same_before_adjust = true;

		/* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
		if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
			*max_swing_reached = true;
			return;
		}
	}
}

static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
	   fail_counter_short = 0, fail_counter_cr_long = 0;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool cr_done;
	union phy_configure_opts phy_cfg;
	int ret;

	dev_dbg(mhdp->dev, "Starting CR phase\n");

	ret = cdns_mhdp_link_training_init(mhdp);
	if (ret)
		goto err;

	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

	do {
		u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
		u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
		bool same_before_adjust, max_swing_reached;

		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
					   &phy_cfg);
		phy_cfg.dp.lanes = mhdp->link.num_lanes;
		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		ret = phy_configure(mhdp->phy, &phy_cfg);
		if (ret) {
			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
				__func__, ret);
			goto err;
		}

		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
				    lanes_data, link_status);

		cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
				      &max_swing_reached, lanes_data,
				      link_status,
				      requested_adjust_volt_swing,
				      requested_adjust_pre_emphasis);

		if (max_swing_reached) {
			dev_err(mhdp->dev, "CR: max swing reached\n");
			goto err;
		}

		if (cr_done) {
			cdns_mhdp_print_lt_status("CR phase ok", mhdp,
						  &phy_cfg);
			return true;
		}

		/* Not all CR_DONE bits set */
		fail_counter_cr_long++;

		if (same_before_adjust) {
			fail_counter_short++;
			continue;
		}

		fail_counter_short = 0;
		/*
		 * Voltage swing/pre-emphasis adjust requested
		 * during CR phase
		 */
		cdns_mhdp_adjust_requested_cr(mhdp, link_status,
					      requested_adjust_volt_swing,
					      requested_adjust_pre_emphasis);
	} while (fail_counter_short < 5 && fail_counter_cr_long < 10);

err:
	cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);

	return false;
}

static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
{
	switch (drm_dp_link_rate_to_bw_code(link->rate)) {
	case DP_LINK_BW_2_7:
		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
		break;
	case DP_LINK_BW_5_4:
		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
		break;
	case DP_LINK_BW_8_1:
		link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
		break;
	}
}

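/*
 * Run full link training, dropping to a lower link configuration on
 * failure: clock recovery failures reduce the link rate first and the
 * lane count second, while channel equalization failures reduce the
 * lane count first and the link rate second.
 */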
static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
				   unsigned int training_interval)
{
	u32 reg32;
	const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
	int ret;

	while (1) {
		if (!cdns_mhdp_link_training_cr(mhdp)) {
			if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
			    DP_LINK_BW_1_62) {
				dev_dbg(mhdp->dev,
					"Reducing link rate during CR phase\n");
				cdns_mhdp_lower_link_rate(&mhdp->link);

				continue;
			} else if (mhdp->link.num_lanes > 1) {
				dev_dbg(mhdp->dev,
					"Reducing lanes number during CR phase\n");
				mhdp->link.num_lanes >>= 1;
				mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);

				continue;
			}

			dev_err(mhdp->dev,
				"Link training failed during CR phase\n");
			goto err;
		}

		if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
						       training_interval))
			break;

		if (mhdp->link.num_lanes > 1) {
			dev_dbg(mhdp->dev,
				"Reducing lanes number during EQ phase\n");
			mhdp->link.num_lanes >>= 1;

			continue;
		} else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
			   DP_LINK_BW_1_62) {
			dev_dbg(mhdp->dev,
				"Reducing link rate during EQ phase\n");
			cdns_mhdp_lower_link_rate(&mhdp->link);
			mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);

			continue;
		}

		dev_err(mhdp->dev, "Link training failed during EQ phase\n");
		goto err;
	}

	dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
		mhdp->link.num_lanes, mhdp->link.rate / 100);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   mhdp->host.scrambler ? 0 :
			   DP_LINK_SCRAMBLING_DISABLE);

	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return ret;
	}
	reg32 &= ~GENMASK(1, 0);
	reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
	reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
	reg32 |= CDNS_DP_FRAMER_EN;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);

	/* Reset PHY config */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	return 0;
err:
	/* Reset PHY config */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	return -EIO;
}

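/*
 * Decode DP_TRAINING_AUX_RD_INTERVAL: 0 means 400 us, values 1-4 mean
 * 4 ms << (value - 1); anything larger is invalid and maps to 0 so
 * that the caller can fail link training.
 */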
static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
					      u32 interval)
{
	if (interval == 0)
		return 400;
	if (interval < 5)
		return 4000 << (interval - 1);
	dev_err(mhdp->dev,
		"wrong training interval returned by DPCD: %u\n", interval);
	return 0;
}

static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
{
	unsigned int link_rate;

	/* Get source capabilities based on PHY attributes */

	mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
	if (!mhdp->host.lanes_cnt)
		mhdp->host.lanes_cnt = 4;

	link_rate = mhdp->phy->attrs.max_link_rate;
	if (!link_rate)
		link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
	else
		/* PHY uses Mb/s, DRM uses tens of kb/s. */
		link_rate *= 100;

	mhdp->host.link_rate = link_rate;
	mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
	mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
	mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
				  CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
				  CDNS_SUPPORT_TPS(4);
	mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
	mhdp->host.fast_link = false;
	mhdp->host.enhanced = true;
	mhdp->host.scrambler = true;
	mhdp->host.ssc = false;
}

static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
				     u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	mhdp->sink.link_rate = mhdp->link.rate;
	mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
	mhdp->sink.enhanced = !!(mhdp->link.capabilities &
				 DP_LINK_CAP_ENHANCED_FRAMING);

	/* Set SSC support */
	mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
			    DP_MAX_DOWNSPREAD_0_5);

	/* Set TPS support */
	mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
	if (drm_dp_tps3_supported(dpcd))
		mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
	if (drm_dp_tps4_supported(dpcd))
		mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);

	/* Set fast link support */
	mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
				  DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
}

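/*
 * Bring the link up: read the receiver capabilities (using the
 * extended capability field when the sink exposes one), power up the
 * sink, pick the highest link rate and lane count common to host and
 * sink, and run link training with the DPCD-provided AUX read
 * interval.
 */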
static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
	u32 resp, interval, interval_us;
	u8 ext_cap_chk = 0;
	unsigned int addr;
	int err;

	WARN_ON(!mutex_is_locked(&mhdp->link_mutex));

	drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
			  &ext_cap_chk);

	if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
		addr = DP_DP13_DPCD_REV;
	else
		addr = DP_DPCD_REV;

	err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
	if (err < 0) {
		dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
		return err;
	}

	mhdp->link.revision = dpcd[0];
	mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
	mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;

	if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
		mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;

	dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
	drm_dp_link_power_up(&mhdp->aux, mhdp->link.revision);

	cdns_mhdp_fill_sink_caps(mhdp, dpcd);

	mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
	mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);

	/* Disable framer for link training */
	err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
	if (err < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			err);
		return err;
	}

	resp &= ~CDNS_DP_FRAMER_EN;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);

	/* Spread AMP if required, enable 8b/10b coding */
	amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
	amp[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);

	if (mhdp->host.fast_link && mhdp->sink.fast_link) {
		dev_err(mhdp->dev, "fastlink not supported\n");
		return -EOPNOTSUPP;
	}

	interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
	interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
	if (!interval_us ||
	    cdns_mhdp_link_training(mhdp, interval_us)) {
		dev_err(mhdp->dev, "Link training failed. Exiting.\n");
		return -EIO;
	}

	mhdp->link_up = true;

	return 0;
}

static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
{
	WARN_ON(!mutex_is_locked(&mhdp->link_mutex));

	if (mhdp->plugged)
		drm_dp_link_power_down(&mhdp->aux, mhdp->link.revision);

	mhdp->link_up = false;
}

static const struct drm_edid *cdns_mhdp_edid_read(struct cdns_mhdp_device *mhdp,
						  struct drm_connector *connector)
{
	if (!mhdp->plugged)
		return NULL;

	return drm_edid_read_custom(connector, cdns_mhdp_get_edid_block, mhdp);
}

static int cdns_mhdp_get_modes(struct drm_connector *connector)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
	const struct drm_edid *drm_edid;
	int num_modes;

	if (!mhdp->plugged)
		return 0;

	drm_edid = cdns_mhdp_edid_read(mhdp, connector);

	drm_edid_connector_update(connector, drm_edid);

	if (!drm_edid) {
		dev_err(mhdp->dev, "Failed to read EDID\n");
		return 0;
	}

	num_modes = drm_edid_connector_add_modes(connector);
	drm_edid_free(drm_edid);

	/*
	 * HACK: Warn about unsupported display formats until we deal
	 * with them correctly.
	 */
	if (connector->display_info.color_formats &&
	    !(connector->display_info.color_formats &
	      mhdp->display_fmt.color_format))
		dev_warn(mhdp->dev,
			 "%s: No supported color_format found (0x%08x)\n",
			 __func__, connector->display_info.color_formats);

	if (connector->display_info.bpc &&
	    connector->display_info.bpc < mhdp->display_fmt.bpc)
		dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
			 __func__, connector->display_info.bpc,
			 mhdp->display_fmt.bpc);

	return num_modes;
}

static int cdns_mhdp_connector_detect(struct drm_connector *conn,
				      struct drm_modeset_acquire_ctx *ctx,
				      bool force)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);

	return cdns_mhdp_detect(mhdp);
}

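/*
 * Bits per pixel for the configured display format: bpc per component,
 * three components for RGB and YCbCr 4:4:4, two for 4:2:2, effectively
 * 1.5 for 4:2:0, and a single component for Y-only formats.
 */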
static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
{
	u32 bpp;

	if (fmt->y_only)
		return fmt->bpc;

	switch (fmt->color_format) {
	case DRM_COLOR_FORMAT_RGB444:
	case DRM_COLOR_FORMAT_YCBCR444:
		bpp = fmt->bpc * 3;
		break;
	case DRM_COLOR_FORMAT_YCBCR422:
		bpp = fmt->bpc * 2;
		break;
	case DRM_COLOR_FORMAT_YCBCR420:
		bpp = fmt->bpc * 3 / 2;
		break;
	default:
		bpp = fmt->bpc * 3;
		WARN_ON(1);
	}
	return bpp;
}

static
bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
			    const struct drm_display_mode *mode,
			    unsigned int lanes, unsigned int rate)
{
	u32 max_bw, req_bw, bpp;

	/*
	 * mode->clock is expressed in kHz. Multiplying by bpp and dividing
	 * by 8 gives the bandwidth in kB/s. Since DisplayPort applies an
	 * 8b/10b encoding, that value also equals the bandwidth in 10 kb/s
	 * units, which matches the units of the rate parameter.
	 */

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
	req_bw = mode->clock * bpp / 8;
	max_bw = lanes * rate;
	if (req_bw > max_bw) {
		dev_dbg(mhdp->dev,
			"Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
			mode->name, req_bw, max_bw);

		return false;
	}

	return true;
}


static
enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
					  const struct drm_display_mode *mode)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);

	mutex_lock(&mhdp->link_mutex);

	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
				    mhdp->link.rate)) {
		mutex_unlock(&mhdp->link_mutex);
		return MODE_CLOCK_HIGH;
	}

	mutex_unlock(&mhdp->link_mutex);
	return MODE_OK;
}

static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
					    struct drm_atomic_state *state)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
	struct drm_connector_state *old_state, *new_state;
	struct drm_crtc_state *crtc_state;
	u64 old_cp, new_cp;

	if (!mhdp->hdcp_supported)
		return 0;

	old_state = drm_atomic_get_old_connector_state(state, conn);
	new_state = drm_atomic_get_new_connector_state(state, conn);
	old_cp = old_state->content_protection;
	new_cp = new_state->content_protection;

	if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		goto mode_changed;
	}

	if (!new_state->crtc) {
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return 0;
	}

	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
		return 0;

mode_changed:
	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
	crtc_state->mode_changed = true;

	return 0;
}

static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
	.detect_ctx = cdns_mhdp_connector_detect,
	.get_modes = cdns_mhdp_get_modes,
	.mode_valid = cdns_mhdp_mode_valid,
	.atomic_check = cdns_mhdp_connector_atomic_check,
};


static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.reset = drm_atomic_helper_connector_reset,
	.destroy = drm_connector_cleanup,
};

static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
{
	u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
	struct drm_connector *conn = &mhdp->connector;
	struct drm_bridge *bridge = &mhdp->bridge;
	int ret;

	conn->polled = DRM_CONNECTOR_POLL_HPD;

	ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
		return ret;
	}

	drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);

	ret = drm_display_info_set_bus_formats(&conn->display_info,
					       &bus_format, 1);
	if (ret)
		return ret;

	ret = drm_connector_attach_encoder(conn, bridge->encoder);
	if (ret) {
		dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
		return ret;
	}

	if (mhdp->hdcp_supported)
		ret = drm_connector_attach_content_protection_property(conn, true);

	return ret;
}

static int cdns_mhdp_attach(struct drm_bridge *bridge,
			    struct drm_encoder *encoder,
			    enum drm_bridge_attach_flags flags)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	bool hw_ready;
	int ret;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	mhdp->aux.drm_dev = bridge->dev;
	ret = drm_dp_aux_register(&mhdp->aux);
	if (ret < 0)
		return ret;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		ret = cdns_mhdp_connector_init(mhdp);
		if (ret)
			goto aux_unregister;
	}

	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = true;
	hw_ready = mhdp->hw_state == MHDP_HW_READY;

	spin_unlock(&mhdp->start_lock);

	/* Enable SW event interrupts */
	if (hw_ready)
		cdns_mhdp_bridge_hpd_enable(bridge);

	return 0;
aux_unregister:
	drm_dp_aux_unregister(&mhdp->aux);
	return ret;
}

1695
cdns_mhdp_configure_video(struct cdns_mhdp_device * mhdp,const struct drm_display_mode * mode)1696 static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
1697 const struct drm_display_mode *mode)
1698 {
1699 unsigned int dp_framer_sp = 0, msa_horizontal_1,
1700 msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
1701 misc0 = 0, misc1 = 0, pxl_repr,
1702 front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
1703 dp_vertical_1;
1704 u8 stream_id = mhdp->stream_id;
1705 u32 bpp, bpc, pxlfmt, framer;
1706 int ret;
1707
1708 pxlfmt = mhdp->display_fmt.color_format;
1709 bpc = mhdp->display_fmt.bpc;
1710
1711 /*
1712 * If YCBCR supported and stream not SD, use ITU709
1713 * Need to handle ITU version with YCBCR420 when supported
1714 */
1715 if ((pxlfmt == DRM_COLOR_FORMAT_YCBCR444 ||
1716 pxlfmt == DRM_COLOR_FORMAT_YCBCR422) && mode->crtc_vdisplay >= 720)
1717 misc0 = DP_YCBCR_COEFFICIENTS_ITU709;
1718
1719 bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1720
1721 switch (pxlfmt) {
1722 case DRM_COLOR_FORMAT_RGB444:
1723 pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
1724 misc0 |= DP_COLOR_FORMAT_RGB;
1725 break;
1726 case DRM_COLOR_FORMAT_YCBCR444:
1727 pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
1728 misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
1729 break;
1730 case DRM_COLOR_FORMAT_YCBCR422:
1731 pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
1732 misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
1733 break;
1734 case DRM_COLOR_FORMAT_YCBCR420:
1735 pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
1736 break;
1737 default:
1738 pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
1739 }
1740
1741 switch (bpc) {
1742 case 6:
1743 misc0 |= DP_TEST_BIT_DEPTH_6;
1744 pxl_repr |= CDNS_DP_FRAMER_6_BPC;
1745 break;
1746 case 8:
1747 misc0 |= DP_TEST_BIT_DEPTH_8;
1748 pxl_repr |= CDNS_DP_FRAMER_8_BPC;
1749 break;
1750 case 10:
1751 misc0 |= DP_TEST_BIT_DEPTH_10;
1752 pxl_repr |= CDNS_DP_FRAMER_10_BPC;
1753 break;
1754 case 12:
1755 misc0 |= DP_TEST_BIT_DEPTH_12;
1756 pxl_repr |= CDNS_DP_FRAMER_12_BPC;
1757 break;
1758 case 16:
1759 misc0 |= DP_TEST_BIT_DEPTH_16;
1760 pxl_repr |= CDNS_DP_FRAMER_16_BPC;
1761 break;
1762 }
1763
1764 bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
1765 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1766 bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;
1767
1768 cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
1769 bnd_hsync2vsync);
1770
1771 hsync2vsync_pol_ctrl = 0;
1772 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1773 hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
1774 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1775 hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
1776 cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
1777 hsync2vsync_pol_ctrl);
1778
1779 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);
1780
1781 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1782 dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
1783 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1784 dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
1785 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1786 dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
1787 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);
1788
	front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
	back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
			    CDNS_DP_FRONT_PORCH(front_porch) |
			    CDNS_DP_BACK_PORCH(back_porch));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
			    mode->crtc_hdisplay * bpp / 8);

	msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
			    CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
			    CDNS_DP_MSAH0_HSYNC_START(msa_h0));

	hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
	msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
			   CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
			    msa_horizontal_1);

	msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
			    CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
			    CDNS_DP_MSAV0_VSYNC_START(msa_v0));

	vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
	msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
			 CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
			    msa_vertical_1);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		misc1 = DP_TEST_INTERLACED;
	if (mhdp->display_fmt.y_only)
		misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
	/* Use VSC SDP for Y420 */
	if (pxlfmt == DRM_COLOR_FORMAT_YCBCR420)
		misc1 = CDNS_DP_TEST_VSC_SDP;

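	/*
	 * MISC0 and MISC1 are two consecutive bytes of the DP Main Stream
	 * Attribute data, hence the (misc1 << 8) packing into a single
	 * register write below.
	 */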
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
			    misc0 | (misc1 << 8));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
			    CDNS_DP_H_HSYNC_WIDTH(hsync) |
			    CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
			    CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
			    CDNS_DP_V0_VSTART(msa_v0));

	dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);

	cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
				(mode->flags & DRM_MODE_FLAG_INTERLACE) ?
				CDNS_DP_VB_ID_INTERLACED : 0);

	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return;
	}
	framer |= CDNS_DP_FRAMER_EN;
	framer &= ~CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
}

static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
				 const struct drm_display_mode *mode)
{
	u32 rate, vs, required_bandwidth, available_bandwidth;
	s32 line_thresh1, line_thresh2, line_thresh = 0;
	int pxlclock = mode->crtc_clock;
	u32 tu_size = 64;
	u32 bpp;

	/* Get rate in MSymbols per second per lane */
	rate = mhdp->link.rate / 1000;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	required_bandwidth = pxlclock * bpp / 8;
	available_bandwidth = mhdp->link.num_lanes * rate;

	vs = tu_size * required_bandwidth / available_bandwidth;
	vs /= 1000;

	if (vs == tu_size)
		vs = tu_size - 1;

	line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
	line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
	line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
	line_thresh = (line_thresh >> 5) + 2;
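	/*
	 * Worked example (illustrative values only): 1080p60 with
	 * crtc_clock = 148500 kHz, bpp = 24 and 4 lanes at HBR
	 * (link.rate = 270000, i.e. rate = 270 MSym/s per lane) gives
	 * required_bandwidth = 148500 * 24 / 8 = 445500 and
	 * available_bandwidth = 4 * 270 = 1080, so
	 * vs = 64 * 445500 / 1080 / 1000 = 26 valid symbols per 64-symbol
	 * TU. The integer arithmetic above then yields line_thresh1 = 288,
	 * line_thresh2 = 427 and line_thresh = ((288 - 427/4) >> 5) + 2 = 7.
	 */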

	mhdp->stream_id = 0;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
			    CDNS_DP_FRAMER_TU_VS(vs) |
			    CDNS_DP_FRAMER_TU_SIZE(tu_size) |
			    CDNS_DP_FRAMER_TU_CNT_RST_EN);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
			    line_thresh & GENMASK(5, 0));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
			    CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
						   0 : tu_size - vs));

	cdns_mhdp_configure_video(mhdp, mode);
}

static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
				    struct drm_atomic_state *state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	struct cdns_mhdp_bridge_state *mhdp_state;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_bridge_state *new_state;
	const struct drm_display_mode *mode;
	u32 resp;
	int ret = 0;

	dev_dbg(mhdp->dev, "bridge enable\n");

	mutex_lock(&mhdp->link_mutex);

	if (mhdp->plugged && !mhdp->link_up) {
		ret = cdns_mhdp_link_up(mhdp);
		if (ret < 0)
			goto out;
	}

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
		mhdp->info->ops->enable(mhdp);

	/* Enable VIF clock for stream 0 */
	ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
	if (ret < 0) {
		dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
		goto out;
	}

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
			    resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);

	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	if (WARN_ON(!connector))
		goto out;

	conn_state = drm_atomic_get_new_connector_state(state, connector);
	if (WARN_ON(!conn_state))
		goto out;

	if (mhdp->hdcp_supported &&
	    mhdp->hw_state == MHDP_HW_READY &&
	    conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_unlock(&mhdp->link_mutex);
		cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type);
		mutex_lock(&mhdp->link_mutex);
	}

	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
	if (WARN_ON(!crtc_state))
		goto out;

	mode = &crtc_state->adjusted_mode;

	new_state = drm_atomic_get_new_bridge_state(state, bridge);
	if (WARN_ON(!new_state))
		goto out;

	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
				    mhdp->link.rate)) {
		ret = -EINVAL;
		goto out;
	}

	cdns_mhdp_sst_enable(mhdp, mode);

	mhdp_state = to_cdns_mhdp_bridge_state(new_state);

	mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
	if (!mhdp_state->current_mode) {
		ret = -ENOMEM;
		goto out;
	}

	drm_mode_set_name(mhdp_state->current_mode);

	dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);

	mhdp->bridge_enabled = true;

out:
	mutex_unlock(&mhdp->link_mutex);
	if (ret < 0)
		schedule_work(&mhdp->modeset_retry_work);
}

static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
				     struct drm_atomic_state *state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	u32 resp;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	mutex_lock(&mhdp->link_mutex);

	if (mhdp->hdcp_supported)
		cdns_mhdp_hdcp_disable(mhdp);

	mhdp->bridge_enabled = false;
	cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
	resp &= ~CDNS_DP_FRAMER_EN;
	resp |= CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);

	cdns_mhdp_link_down(mhdp);

	/* Disable VIF clock for stream 0 */
	cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
			    resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
		mhdp->info->ops->disable(mhdp);

	mutex_unlock(&mhdp->link_mutex);
}

static void cdns_mhdp_detach(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	dev_dbg(mhdp->dev, "%s\n", __func__);

	drm_dp_aux_unregister(&mhdp->aux);

	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = false;

	spin_unlock(&mhdp->start_lock);

	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
}

static struct drm_bridge_state *
cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
{
	struct cdns_mhdp_bridge_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
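	/*
	 * Only the common bridge state is duplicated; current_mode starts
	 * out NULL in the new state and is populated again by
	 * cdns_mhdp_atomic_enable().
	 */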

	return &state->base;
}

static void
cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
				      struct drm_bridge_state *state)
{
	struct cdns_mhdp_bridge_state *cdns_mhdp_state;

	cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);

	if (cdns_mhdp_state->current_mode) {
		drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
		cdns_mhdp_state->current_mode = NULL;
	}

	kfree(cdns_mhdp_state);
}

static struct drm_bridge_state *
cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
{
	struct cdns_mhdp_bridge_state *cdns_mhdp_state;

	cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
	if (!cdns_mhdp_state)
		return NULL;

	__drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);

	return &cdns_mhdp_state->base;
}

static u32 *cdns_mhdp_get_input_bus_fmts(struct drm_bridge *bridge,
					 struct drm_bridge_state *bridge_state,
					 struct drm_crtc_state *crtc_state,
					 struct drm_connector_state *conn_state,
					 u32 output_fmt,
					 unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	*num_input_fmts = 0;

	input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
	if (!input_fmts)
		return NULL;

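	/* This bridge currently advertises a single fixed input bus format. */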
	*num_input_fmts = 1;
	input_fmts[0] = MEDIA_BUS_FMT_RGB121212_1X36;

	return input_fmts;
}

static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
				  struct drm_bridge_state *bridge_state,
				  struct drm_crtc_state *crtc_state,
				  struct drm_connector_state *conn_state)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;

	mutex_lock(&mhdp->link_mutex);

	if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
				    mhdp->link.rate)) {
		dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
			__func__, mode->name, mhdp->link.num_lanes,
			mhdp->link.rate / 100);
		mutex_unlock(&mhdp->link_mutex);
		return -EINVAL;
	}

	/*
	 * Negotiation of bus flags might be supported in the future.
	 * For now, set them statically in atomic_check.
	 */
	if (mhdp->info)
		bridge_state->input_bus_cfg.flags = *mhdp->info->input_bus_flags;

	mutex_unlock(&mhdp->link_mutex);
	return 0;
}

static enum drm_connector_status
cdns_mhdp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	return cdns_mhdp_detect(mhdp);
}

static const struct drm_edid *cdns_mhdp_bridge_edid_read(struct drm_bridge *bridge,
							  struct drm_connector *connector)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	return cdns_mhdp_edid_read(mhdp, connector);
}

static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
	.atomic_enable = cdns_mhdp_atomic_enable,
	.atomic_disable = cdns_mhdp_atomic_disable,
	.atomic_check = cdns_mhdp_atomic_check,
	.attach = cdns_mhdp_attach,
	.detach = cdns_mhdp_detach,
	.atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
	.atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
	.atomic_reset = cdns_mhdp_bridge_atomic_reset,
	.atomic_get_input_bus_fmts = cdns_mhdp_get_input_bus_fmts,
	.detect = cdns_mhdp_bridge_detect,
	.edid_read = cdns_mhdp_bridge_edid_read,
	.hpd_enable = cdns_mhdp_bridge_hpd_enable,
	.hpd_disable = cdns_mhdp_bridge_hpd_disable,
};

static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
{
	int hpd_event, hpd_status;

	*hpd_pulse = false;

	hpd_event = cdns_mhdp_read_hpd_event(mhdp);

	/* Getting event bits failed, bail out */
	if (hpd_event < 0) {
		dev_warn(mhdp->dev, "%s: read event failed: %d\n",
			 __func__, hpd_event);
		return false;
	}

	hpd_status = cdns_mhdp_get_hpd_status(mhdp);
	if (hpd_status < 0) {
		dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
			 __func__, hpd_status);
		return false;
	}

	if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
		*hpd_pulse = true;

	return !!hpd_status;
}

static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
{
	struct cdns_mhdp_bridge_state *cdns_bridge_state;
	struct drm_display_mode *current_mode;
	bool old_plugged = mhdp->plugged;
	struct drm_bridge_state *state;
	u8 status[DP_LINK_STATUS_SIZE];
	bool hpd_pulse;
	int ret = 0;

	mutex_lock(&mhdp->link_mutex);

	mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);

	if (!mhdp->plugged) {
		cdns_mhdp_link_down(mhdp);
		mhdp->link.rate = mhdp->host.link_rate;
		mhdp->link.num_lanes = mhdp->host.lanes_cnt;
		goto out;
	}

	/*
	 * If we get an HPD pulse event and we were connected and still are,
	 * check the link status. If the link status is ok, there's nothing
	 * to do, as we don't handle DP interrupts. If the link status is
	 * bad, continue with a full link setup.
	 */
	if (hpd_pulse && old_plugged == mhdp->plugged) {
		ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);

		/*
		 * If everything looks fine, just return, as we don't handle
		 * DP IRQs.
		 */
		if (!ret &&
		    drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
		    drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
			goto out;

		/*
		 * If the link is bad, mark it as down so that we do a new
		 * link training.
		 */
		mhdp->link_up = false;
	}

	if (!mhdp->link_up) {
		ret = cdns_mhdp_link_up(mhdp);
		if (ret < 0)
			goto out;
	}

	if (mhdp->bridge_enabled) {
		state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
		if (!state) {
			ret = -EINVAL;
			goto out;
		}

		cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
		if (!cdns_bridge_state) {
			ret = -EINVAL;
			goto out;
		}

		current_mode = cdns_bridge_state->current_mode;
		if (!current_mode) {
			ret = -EINVAL;
			goto out;
		}

		if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
					    mhdp->link.rate)) {
			ret = -EINVAL;
			goto out;
		}

		dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
			current_mode->name);

		cdns_mhdp_sst_enable(mhdp, current_mode);
	}
out:
	mutex_unlock(&mhdp->link_mutex);
	return ret;
}

static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
{
	struct cdns_mhdp_device *mhdp;
	struct drm_connector *conn;

	mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);

	conn = &mhdp->connector;

	/* Grab the lock before changing the connector property */
	mutex_lock(&conn->dev->mode_config.mutex);

	/*
	 * Set the connector link status to BAD and send a uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&conn->dev->mode_config.mutex);

	/* Send a hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(mhdp->bridge.dev);
}

static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
{
	struct cdns_mhdp_device *mhdp = data;
	u32 apb_stat, sw_ev0;
	bool bridge_attached;

	apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
	if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
		return IRQ_NONE;

	sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);

	/*
	 * Calling drm_kms_helper_hotplug_event() when not attached
	 * to the drm device causes an oops because drm_bridge->dev
	 * is NULL. See the cdns_mhdp_fw_cb() comments for details about
	 * the problems related to the drm_kms_helper_hotplug_event() call.
	 */
	spin_lock(&mhdp->start_lock);
	bridge_attached = mhdp->bridge_attached;
	spin_unlock(&mhdp->start_lock);

	if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD))
		schedule_work(&mhdp->hpd_work);

	if (sw_ev0 & ~CDNS_DPTX_HPD) {
		mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
		wake_up(&mhdp->sw_events_wq);
	}

	return IRQ_HANDLED;
}

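/*
 * Wait up to 500 ms for any of the bits in @event to be latched into
 * mhdp->sw_events by the interrupt handler, then clear the requested bits
 * and return the accumulated event word (0 on timeout). Non-static since
 * callers outside this file (e.g. the HDCP code) use it to synchronize
 * with firmware notifications.
 */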
u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event)
{
	u32 ret;

	ret = wait_event_timeout(mhdp->sw_events_wq,
				 mhdp->sw_events & event,
				 msecs_to_jiffies(500));
	if (!ret) {
		dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event);
		goto sw_event_out;
	}

	ret = mhdp->sw_events;
	mhdp->sw_events &= ~event;

sw_event_out:
	return ret;
}

static void cdns_mhdp_hpd_work(struct work_struct *work)
{
	struct cdns_mhdp_device *mhdp = container_of(work,
						     struct cdns_mhdp_device,
						     hpd_work);
	int ret;

	ret = cdns_mhdp_update_link_status(mhdp);
	if (mhdp->connector.dev) {
		if (ret < 0)
			schedule_work(&mhdp->modeset_retry_work);
		else
			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
	} else {
		drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
	}
}

static int cdns_mhdp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cdns_mhdp_device *mhdp;
	unsigned long rate;
	struct clk *clk;
	int ret;
	int irq;

	mhdp = devm_drm_bridge_alloc(dev, struct cdns_mhdp_device, bridge,
				     &cdns_mhdp_bridge_funcs);
	if (IS_ERR(mhdp))
		return PTR_ERR(mhdp);

	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(dev, "couldn't get and enable clk: %ld\n", PTR_ERR(clk));
		return PTR_ERR(clk);
	}

	mhdp->clk = clk;
	mhdp->dev = dev;
	mutex_init(&mhdp->mbox_mutex);
	mutex_init(&mhdp->link_mutex);
	spin_lock_init(&mhdp->start_lock);

	drm_dp_aux_init(&mhdp->aux);
	mhdp->aux.dev = dev;
	mhdp->aux.transfer = cdns_mhdp_transfer;

	mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mhdp->regs)) {
		dev_err(dev, "Failed to get memory resource\n");
		return PTR_ERR(mhdp->regs);
	}

	mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb");
	if (IS_ERR(mhdp->sapb_regs)) {
		mhdp->hdcp_supported = false;
		dev_warn(dev,
			 "Failed to get SAPB memory resource, HDCP not supported\n");
	} else {
		mhdp->hdcp_supported = true;
	}

	mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
	if (IS_ERR(mhdp->phy)) {
		dev_err(dev, "no PHY configured\n");
		return PTR_ERR(mhdp->phy);
	}

	platform_set_drvdata(pdev, mhdp);

	mhdp->info = of_device_get_match_data(dev);

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_resume_and_get failed\n");
		pm_runtime_disable(dev);
		return ret;
	}

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
		ret = mhdp->info->ops->init(mhdp);
		if (ret != 0) {
			dev_err(dev, "MHDP platform initialization failed: %d\n",
				ret);
			goto runtime_put;
		}
	}

	rate = clk_get_rate(clk);
	writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
	writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
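	/*
	 * The split writes above pass the functional clock rate as whole
	 * MHz (CDNS_SW_CLK_H) plus the sub-MHz remainder in Hz
	 * (CDNS_SW_CLK_L), presumably for the firmware's timekeeping. For
	 * example, rate = 198000000 Hz is written as H = 198, L = 0, while
	 * 148500000 Hz would be H = 148, L = 500000.
	 */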

	dev_dbg(dev, "func clk rate %lu Hz\n", rate);

	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto plat_fini;
	}

	ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
					cdns_mhdp_irq_handler, IRQF_ONESHOT,
					"mhdp8546", mhdp);
	if (ret) {
		dev_err(dev, "cannot install IRQ %d\n", irq);
		ret = -EIO;
		goto plat_fini;
	}

	cdns_mhdp_fill_host_caps(mhdp);

	/* Initialize link rate and num of lanes to host values */
	mhdp->link.rate = mhdp->host.link_rate;
	mhdp->link.num_lanes = mhdp->host.lanes_cnt;

	/* The only currently supported format */
	mhdp->display_fmt.y_only = false;
	mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
	mhdp->display_fmt.bpc = 8;

	mhdp->bridge.of_node = pdev->dev.of_node;
	mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
			   DRM_BRIDGE_OP_HPD;
	mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;

	ret = phy_init(mhdp->phy);
	if (ret) {
		dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
		goto plat_fini;
	}

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
	INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work);

	init_waitqueue_head(&mhdp->fw_load_wq);
	init_waitqueue_head(&mhdp->sw_events_wq);

	ret = cdns_mhdp_load_firmware(mhdp);
	if (ret)
		goto phy_exit;

	if (mhdp->hdcp_supported)
		cdns_mhdp_hdcp_init(mhdp);

	drm_bridge_add(&mhdp->bridge);

	return 0;

phy_exit:
	phy_exit(mhdp->phy);
plat_fini:
	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
		mhdp->info->ops->exit(mhdp);
runtime_put:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return ret;
}

static void cdns_mhdp_remove(struct platform_device *pdev)
{
	struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
	unsigned long timeout = msecs_to_jiffies(100);
	int ret;

	drm_bridge_remove(&mhdp->bridge);

	ret = wait_event_timeout(mhdp->fw_load_wq,
				 mhdp->hw_state == MHDP_HW_READY,
				 timeout);
	spin_lock(&mhdp->start_lock);
	mhdp->hw_state = MHDP_HW_STOPPED;
	spin_unlock(&mhdp->start_lock);

	if (ret == 0) {
		dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
			__func__);
	} else {
		ret = cdns_mhdp_set_firmware_active(mhdp, false);
		if (ret)
			dev_err(mhdp->dev, "Failed to stop firmware (%pe)\n",
				ERR_PTR(ret));
	}

	phy_exit(mhdp->phy);

	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
		mhdp->info->ops->exit(mhdp);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	cancel_work_sync(&mhdp->modeset_retry_work);
	flush_work(&mhdp->hpd_work);
	/* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */
}

static const struct of_device_id mhdp_ids[] = {
	{ .compatible = "cdns,mhdp8546", },
#ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
	{ .compatible = "ti,j721e-mhdp8546",
	  .data = &(const struct cdns_mhdp_platform_info) {
		  .input_bus_flags = &mhdp_ti_j721e_bridge_input_bus_flags,
		  .ops = &mhdp_ti_j721e_ops,
	  },
	},
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mhdp_ids);

static struct platform_driver mhdp_driver = {
	.driver = {
		.name = "cdns-mhdp8546",
		.of_match_table = mhdp_ids,
	},
	.probe = cdns_mhdp_probe,
	.remove = cdns_mhdp_remove,
};
module_platform_driver(mhdp_driver);

MODULE_FIRMWARE(FW_NAME);

MODULE_AUTHOR("Quentin Schulz <quentin.schulz@free-electrons.com>");
MODULE_AUTHOR("Swapnil Jakhade <sjakhade@cadence.com>");
MODULE_AUTHOR("Yuti Amonkar <yamonkar@cadence.com>");
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-mhdp8546");